from django.contrib.auth.models import User
from django.db.models.signals import post_save


def set_api_permissions(sender, instance=None, created=False, **kwargs):
    from utils.user_auth import set_api_permissions_for_user

    if created:
        set_api_permissions_for_user(instance)


post_save.connect(set_api_permissions, sender=User)
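
# Illustration only: the handler above fires on every User save. Creating a user
# emits post_save with created=True, so API permissions are assigned exactly once;
# later saves emit created=False and are ignored. (Assumes a configured Django
# project; `set_api_permissions_for_user` comes from the project's own
# utils.user_auth module and is not defined here.)
#
#   user = User.objects.create_user(username="alice", password="s3cret")  # triggers the handler
#   user.first_name = "Alice"
#   user.save()                                                           # created=False, no-op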
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from typing import Callable, List, Optional, Union

import torch
from torch.nn.modules.loss import _Loss

from monai.networks import one_hot
from monai.utils import LossReduction


class TverskyLoss(_Loss):
    """
    Compute the Tversky loss defined in:

        Sadegh et al. (2017) Tversky loss function for image segmentation
        using 3D fully convolutional deep networks. (https://arxiv.org/abs/1706.05721)

    Adapted from:
        https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L631
    """

    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Optional[Callable] = None,
        alpha: float = 0.5,
        beta: float = 0.5,
        reduction: Union[LossReduction, str] = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
    ) -> None:
        """
        Args:
            include_background: if False, channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction.
            softmax: if True, apply a softmax function to the prediction.
            other_act: callable to apply another activation layer when neither `sigmoid` nor `softmax`
                is wanted, for example ``other_act = torch.tanh``. Defaults to ``None``.
            alpha: weight of false positives.
            beta: weight of false negatives.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before dividing.
                Defaults to False: a Tversky loss value is computed independently for each item in the batch
                before any `reduction`.

        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.

        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act
        self.alpha = alpha
        self.beta = beta
        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)
        self.batch = batch

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].

        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].

        """
        if self.sigmoid:
            input = torch.sigmoid(input)

        n_pred_ch = input.shape[1]
        if self.softmax:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `softmax=True` ignored.")
            else:
                input = torch.softmax(input, 1)

        if self.other_act is not None:
            input = self.other_act(input)

        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)

        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # if skipping background, remove the first channel
                target = target[:, 1:]
                input = input[:, 1:]

        if target.shape != input.shape:
            raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")

        p0 = input
        p1 = 1 - p0
        g0 = target
        g1 = 1 - g0

        # reduce only the spatial dimensions (not batch nor channels)
        reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist()
        if self.batch:
            # also reduce the batch dimension together with the spatial dimensions
            reduce_axis = [0] + reduce_axis

        tp = torch.sum(p0 * g0, reduce_axis)
        fp = self.alpha * torch.sum(p0 * g1, reduce_axis)
        fn = self.beta * torch.sum(p1 * g0, reduce_axis)
        numerator = tp + self.smooth_nr
        denominator = tp + fp + fn + self.smooth_dr

        score: torch.Tensor = 1.0 - numerator / denominator

        if self.reduction == LossReduction.SUM.value:
            return torch.sum(score)  # sum over the batch and channel dims
        if self.reduction == LossReduction.NONE.value:
            return score  # returns [N, num_classes] losses
        if self.reduction == LossReduction.MEAN.value:
            return torch.mean(score)
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
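
# Minimal usage sketch of the loss defined above, shown for illustration only
# (shapes follow the BNH[WD] convention documented in `forward`; the values are arbitrary).
if __name__ == "__main__":
    pred = torch.randn(4, 3, 32, 32)                      # raw logits: batch 4, 3 classes, 32x32
    target = torch.randint(0, 3, (4, 1, 32, 32)).float()  # single-channel integer labels
    # softmax over the class channel, convert labels to one-hot, and weight
    # false negatives (beta) more heavily than false positives (alpha)
    loss_fn = TverskyLoss(softmax=True, to_onehot_y=True, alpha=0.3, beta=0.7)
    print(loss_fn(pred, target))  # scalar tensor, since reduction defaults to "mean"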
# Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Test the scipy based interpolator.
"""<import_stmt>warnings<import_stmt>numpy<as>np<import_stmt>numpy.testing<as>npt<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>..coordinates grid_coordinates<import_from_stmt>..scipygridder ScipyGridder<import_from_stmt>..synthetic CheckerBoard<def_stmt>test_scipy_gridder_same_points <block_start>"See if the gridder recovers known points."<line_sep>region=(1000 5000 -8000 -7000)<line_sep>synth=CheckerBoard(region=region)<line_sep>data=synth.scatter(size=1000 random_state=0)<line_sep>coords=(data.easting data.northing)<line_sep># The interpolation should be perfect on top of the data points
<for_stmt>method ["nearest" "linear" "cubic"]<block_start>grd=ScipyGridder(method=method)<line_sep>grd.fit(coords data.scalars)<line_sep>predicted=grd.predict(coords)<line_sep>npt.assert_allclose(predicted data.scalars)<line_sep>npt.assert_allclose(grd.score(coords data.scalars) 1)<block_end><block_end><def_stmt>test_scipy_gridder <block_start>"See if the gridder recovers known points."<line_sep>synth=CheckerBoard(region=(1000 5000 -8000 -6000))<line_sep>data=synth.scatter(size=20000 random_state=0)<line_sep>coords=(data.easting data.northing)<line_sep>pt_coords=(3000 -7000)<line_sep>true_data=synth.predict(pt_coords)<line_sep># nearest will never be too close to the truth
grd=ScipyGridder("cubic").fit(coords data.scalars)<line_sep>npt.assert_almost_equal(grd.predict(pt_coords) true_data decimal=2)<line_sep>grd=ScipyGridder("linear").fit(coords data.scalars)<line_sep>npt.assert_almost_equal(grd.predict(pt_coords) true_data decimal=1)<block_end><def_stmt>test_scipy_gridder_region <block_start>"See if the region is gotten from the data is correct."<line_sep>region=(1000 5000 -8000 -6000)<line_sep>synth=CheckerBoard(region=region)<line_sep># Test using xarray objects
grid=synth.grid(shape=(101 101))<line_sep>coords=grid_coordinates(region grid.scalars.shape)<line_sep>grd=ScipyGridder().fit(coords grid.scalars)<line_sep>npt.assert_allclose(grd.region_ region)<line_sep># Test using pandas objects
data=pd.DataFrame({"easting":coords[0].ravel() "northing":coords[1].ravel() "scalars":grid.scalars.values.ravel() })<line_sep>grd=ScipyGridder().fit((data.easting data.northing) data.scalars)<line_sep>npt.assert_allclose(grd.region_ region)<block_end><def_stmt>test_scipy_gridder_extra_args <block_start>"Passing in extra arguments to scipy"<line_sep>data=CheckerBoard().scatter(random_state=100)<line_sep>coords=(data.easting data.northing)<line_sep>grd=ScipyGridder(method="linear" extra_args=dict(rescale=<true>))<line_sep>grd.fit(coords data.scalars)<line_sep>predicted=grd.predict(coords)<line_sep>npt.assert_allclose(predicted data.scalars)<block_end><def_stmt>test_scipy_gridder_fails <block_start>"fit should fail for invalid method name"<line_sep>data=CheckerBoard().scatter(random_state=0)<line_sep>grd=ScipyGridder(method="some invalid method name")<with_stmt>pytest.raises(ValueError)<block_start>grd.fit((data.easting data.northing) data.scalars)<block_end><block_end><def_stmt>test_scipy_gridder_warns <block_start>"Check that a warning is issued when using weights."<line_sep>data=CheckerBoard().scatter(random_state=100)<line_sep>weights=np.ones_like(data.scalars)<line_sep>grd=ScipyGridder()<line_sep>msg="ScipyGridder does not support weights and they will be ignored."<with_stmt>warnings.catch_warnings(record=<true>)<as>warn<block_start>grd.fit((data.easting data.northing) data.scalars weights=weights)<assert_stmt>len(warn)<eq>1<assert_stmt>issubclass(warn[-1].category UserWarning)<assert_stmt>str(warn[-1].message)<eq>msg<block_end><block_end> |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_stmt>riscv.EnvRISCV EnvRISCV<import_from_stmt>riscv.GenThreadRISCV GenThreadRISCV<import_from_stmt>base.Sequence Sequence<class_stmt>MainSequence(Sequence)<block_start>"""Exercise different combinations of values for the parameters for
the genPA instruction. Focus in this test is to try values of the Size,
Align and CanAlias parameters. Type is always 'D'; Bank is always '0'.
"""<def_stmt>generate self **kargs<block_start>ldstr_byte_ops=["LB##RISCV" "SB##RISCV"]<line_sep>ldstr_half_ops=["LH##RISCV" "SH##RISCV"]<line_sep>ldstr_word_ops=["LW##RISCV" "SW##RISCV"]<line_sep>ldstr_double_ops=["LD##RISCV" "SD##RISCV"]<line_sep>theType="D"<line_sep>theBank=0<line_sep>theCanAlias=0<line_sep>loopCount=2<line_sep># Iterate through Size and Align values. Force requires Align to
# be a power of 2. This 1st block tests smaller values of size -
# 1 byte to 32 bytes.
<for_stmt>theSize [2<power>x<for>x range(0 5)]<block_start><for_stmt>theAlign [2<power>x<for>x range(0 6)]<block_start><if_stmt>theAlign<l>theSize<block_start><continue><block_end><for_stmt>_ range(loopCount)<block_start>rand_PA=self.genPA(Size=theSize Align=theAlign Type=theType Bank=theBank CanAlias=theCanAlias )<line_sep>rand_VA=self.genVAforPA(PA=rand_PA Bank=theBank FlatMap=0 Type=theType Size=theSize )<line_sep>self.notice(">>>>>> Requested Alignment: {:6d} Requested "<concat>"Size: {:6d} PA target= {:16X} VA target= "<concat>"{:16X}".format(theAlign theSize rand_PA rand_VA))<line_sep># Bank argument must be 0 now as the 3rd argument.
# May not be required at some point.
page_info=self.getPageInfo(rand_VA "VA" 0)<line_sep># This section displays the keys and values for the
# second and third level dictionaries.
<if_stmt>"Page"<in>page_info.keys()<block_start>self.notice(">>>>>>>>>> VA Page info <<<<<<<<<<<<<<<<<<<<"<concat>"<<<<<<<<<<<<<")<for_stmt>k page_info["Page"]<block_start><if_stmt>k<ne>"DescriptorDetails"<block_start><if_stmt>k<eq>"MemoryType"<or>k<eq>"MemoryAttr"<block_start>self.notice(">>>>>>>>>> Key: {:15} Value: "<concat>"{}".format(k page_info["Page"][k]))<block_end><else_stmt><block_start>self.notice(">>>>>>>>>> Key: {:15} Value: "<concat>"0x{:x}".format(k page_info["Page"][k]))<block_end><block_end><else_stmt><block_start><for_stmt>j page_info["Page"][k]# Descriptor details are in 3rd level
# dict in page_info object
<block_start>self.notice(">>>>>>>>>> DescriptorDetails: "<concat>"Key: {:22} Value: {}".format(j page_info["Page"][k][j]))<block_end><block_end><block_end><block_end><else_stmt><block_start>self.error(">>>>>>>>>> VA Page info: Nothing returned "<concat>'from getPageInfo "VA"')<block_end><if_stmt>"Table"<in>page_info.keys()<block_start>self.notice(">>>>>>>>>> VA Table info <<<<<<<<<<<<<<<<<"<concat>"<<<<<<<<<<<<<<<<")<for_stmt>k page_info["Table"]<block_start>self.notice(">>>>>>>>>> Key: {:12} Value: {}".format(k page_info["Table"][k]))<block_end><block_end><else_stmt><block_start>self.notice(">>>>>>>>>> VA Table info: No Table info "<concat>'returned from getPageInfo "VA"')<block_end># Just making sure we can actually generate an
# instruction with the rand_VA determined above.
instr_id=self.genInstruction(self.choice(ldstr_byte_ops) {"LSTarget":rand_VA})<block_end><block_end><block_end># Iterate through Size and Align values. Force requires Align to be
# a power of 2. This 2nd block tests larger values of size - 32K to 8M.
<for_stmt>theSize [2<power>x<for>x range(15 17)]<block_start><for_stmt>theAlign [2<power>x<for>x range(15 18)]<block_start><if_stmt>theAlign<l>theSize<block_start><continue><block_end><for_stmt>_ range(loopCount)<block_start>rand_PA=self.genPA(Size=theSize Align=theAlign Type=theType Bank=theBank CanAlias=theCanAlias )<line_sep>rand_VA=self.genVAforPA(PA=rand_PA Bank=theBank FlatMap=0 CanAlias=0 ForceNewAddress=1 Type=theType Size=theSize )<line_sep>self.notice(">>>>>> Requested Alignment: {:6d} Requested "<concat>"Size: {:6d} PA target= {:16X} VA target= "<concat>"{:16X}".format(theAlign theSize rand_PA rand_VA))<line_sep>instr_id=self.genInstruction(self.choice(ldstr_byte_ops) {"LSTarget":rand_VA})<block_end><block_end><block_end><block_end><block_end>MainSequenceClass=MainSequence<line_sep>GenThreadClass=GenThreadRISCV<line_sep>EnvClass=EnvRISCV<line_sep> |
<import_stmt>pyro<import_from_stmt>typing Mapping<import_from_stmt>pyro.infer SVI TraceGraph_ELBO<import_from_stmt>pyro.nn pyro_method<import_from_stmt>pyro.optim Adam<import_from_stmt>torch.distributions Independent<import_stmt>torch<import_from_stmt>pyro.distributions.torch_transform ComposeTransformModule<import_from_stmt>pyro.distributions.transforms ComposeTransform AffineTransform ExpTransform Spline <import_from_stmt>pyro.distributions LowRankMultivariateNormal MultivariateNormal Normal TransformedDistribution<import_from_stmt>deepscm.arch.medical Decoder Encoder<import_from_stmt>deepscm.distributions.transforms.reshape ReshapeTransform<import_from_stmt>deepscm.distributions.transforms.affine LowerCholeskyAffine<import_from_stmt>deepscm.distributions.deep DeepMultivariateNormal DeepIndepNormal Conv2dIndepNormal DeepLowRankMultivariateNormal<import_stmt>numpy<as>np<import_from_stmt>deepscm.experiments.medical.base_experiment BaseCovariateExperiment BaseSEM EXPERIMENT_REGISTRY MODEL_REGISTRY# noqa: F401
<class_stmt>CustomELBO(TraceGraph_ELBO)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.trace_storage={'model':<none> 'guide':<none>}<block_end><def_stmt>_get_trace self model guide args kwargs<block_start>model_trace,guide_trace=super()._get_trace(model guide args kwargs)<line_sep>self.trace_storage['model']=model_trace<line_sep>self.trace_storage['guide']=guide_trace<line_sep><return>model_trace guide_trace<block_end><block_end><class_stmt>Lambda(torch.nn.Module)<block_start><def_stmt>__init__ self func<block_start>super().__init__()<line_sep>self.func=func<block_end><def_stmt>forward self x<block_start><return>self.func(x)<block_end><block_end><class_stmt>BaseVISEM(BaseSEM)<block_start>context_dim=0<def_stmt>__init__ self latent_dim:int logstd_init:float=-5 enc_filters:str='16,32,64,128' dec_filters:str='128,64,32,16' num_convolutions:int=2 use_upconv:bool=<false> decoder_type:str='fixed_var' decoder_cov_rank:int=10 **kwargs<block_start>super().__init__(**kwargs)<line_sep>self.img_shape=(1 192<floordiv>self.downsample 192<floordiv>self.downsample)<if>self.downsample<g>0<else>(1 192 192)<line_sep>self.latent_dim=latent_dim<line_sep>self.logstd_init=logstd_init<line_sep>self.enc_filters=tuple(int(f.strip())<for>f enc_filters.split(','))<line_sep>self.dec_filters=tuple(int(f.strip())<for>f dec_filters.split(','))<line_sep>self.num_convolutions=num_convolutions<line_sep>self.use_upconv=use_upconv<line_sep>self.decoder_type=decoder_type<line_sep>self.decoder_cov_rank=decoder_cov_rank<line_sep># decoder parts
decoder=Decoder(num_convolutions=self.num_convolutions filters=self.dec_filters latent_dim=self.latent_dim+self.context_dim upconv=self.use_upconv output_size=self.img_shape)<if_stmt>self.decoder_type<eq>'fixed_var'<block_start>self.decoder=Conv2dIndepNormal(decoder 1 1)<line_sep>torch.nn.init.zeros_(self.decoder.logvar_head.weight)<line_sep>self.decoder.logvar_head.weight.requires_grad=<false><line_sep>torch.nn.init.constant_(self.decoder.logvar_head.bias self.logstd_init)<line_sep>self.decoder.logvar_head.bias.requires_grad=<false><block_end><elif_stmt>self.decoder_type<eq>'learned_var'<block_start>self.decoder=Conv2dIndepNormal(decoder 1 1)<line_sep>torch.nn.init.zeros_(self.decoder.logvar_head.weight)<line_sep>self.decoder.logvar_head.weight.requires_grad=<false><line_sep>torch.nn.init.constant_(self.decoder.logvar_head.bias self.logstd_init)<line_sep>self.decoder.logvar_head.bias.requires_grad=<true><block_end><elif_stmt>self.decoder_type<eq>'independent_gaussian'<block_start>self.decoder=Conv2dIndepNormal(decoder 1 1)<line_sep>torch.nn.init.zeros_(self.decoder.logvar_head.weight)<line_sep>self.decoder.logvar_head.weight.requires_grad=<true><line_sep>torch.nn.init.normal_(self.decoder.logvar_head.bias self.logstd_init 1e-1)<line_sep>self.decoder.logvar_head.bias.requires_grad=<true><block_end><elif_stmt>self.decoder_type<eq>'multivariate_gaussian'<block_start>seq=torch.nn.Sequential(decoder Lambda(<lambda>x:x.view(x.shape[0] -1)))<line_sep>self.decoder=DeepMultivariateNormal(seq np.prod(self.img_shape) np.prod(self.img_shape))<block_end><elif_stmt>self.decoder_type<eq>'sharedvar_multivariate_gaussian'<block_start>seq=torch.nn.Sequential(decoder Lambda(<lambda>x:x.view(x.shape[0] -1)))<line_sep>self.decoder=DeepMultivariateNormal(seq np.prod(self.img_shape) np.prod(self.img_shape))<line_sep>torch.nn.init.zeros_(self.decoder.logdiag_head.weight)<line_sep>self.decoder.logdiag_head.weight.requires_grad=<false><line_sep>torch.nn.init.zeros_(self.decoder.lower_head.weight)<line_sep>self.decoder.lower_head.weight.requires_grad=<false><line_sep>torch.nn.init.normal_(self.decoder.logdiag_head.bias self.logstd_init 1e-1)<line_sep>self.decoder.logdiag_head.bias.requires_grad=<true><block_end><elif_stmt>self.decoder_type<eq>'lowrank_multivariate_gaussian'<block_start>seq=torch.nn.Sequential(decoder Lambda(<lambda>x:x.view(x.shape[0] -1)))<line_sep>self.decoder=DeepLowRankMultivariateNormal(seq np.prod(self.img_shape) np.prod(self.img_shape) decoder_cov_rank)<block_end><elif_stmt>self.decoder_type<eq>'sharedvar_lowrank_multivariate_gaussian'<block_start>seq=torch.nn.Sequential(decoder Lambda(<lambda>x:x.view(x.shape[0] -1)))<line_sep>self.decoder=DeepLowRankMultivariateNormal(seq np.prod(self.img_shape) np.prod(self.img_shape) decoder_cov_rank)<line_sep>torch.nn.init.zeros_(self.decoder.logdiag_head.weight)<line_sep>self.decoder.logdiag_head.weight.requires_grad=<false><line_sep>torch.nn.init.zeros_(self.decoder.factor_head.weight)<line_sep>self.decoder.factor_head.weight.requires_grad=<false><line_sep>torch.nn.init.normal_(self.decoder.logdiag_head.bias self.logstd_init 1e-1)<line_sep>self.decoder.logdiag_head.bias.requires_grad=<true><block_end><else_stmt><block_start><raise>ValueError('unknown ')<block_end># encoder parts
self.encoder=Encoder(num_convolutions=self.num_convolutions filters=self.enc_filters latent_dim=self.latent_dim input_size=self.img_shape)<line_sep>latent_layers=torch.nn.Sequential(torch.nn.Linear(self.latent_dim+self.context_dim self.latent_dim) torch.nn.ReLU())<line_sep>self.latent_encoder=DeepIndepNormal(latent_layers self.latent_dim self.latent_dim)<line_sep># priors
self.register_buffer('age_base_loc' torch.zeros([1 ] requires_grad=<false>))<line_sep>self.register_buffer('age_base_scale' torch.ones([1 ] requires_grad=<false>))<line_sep>self.sex_logits=torch.nn.Parameter(torch.zeros([1 ]))<line_sep>self.register_buffer('ventricle_volume_base_loc' torch.zeros([1 ] requires_grad=<false>))<line_sep>self.register_buffer('ventricle_volume_base_scale' torch.ones([1 ] requires_grad=<false>))<line_sep>self.register_buffer('brain_volume_base_loc' torch.zeros([1 ] requires_grad=<false>))<line_sep>self.register_buffer('brain_volume_base_scale' torch.ones([1 ] requires_grad=<false>))<line_sep>self.register_buffer('z_loc' torch.zeros([latent_dim ] requires_grad=<false>))<line_sep>self.register_buffer('z_scale' torch.ones([latent_dim ] requires_grad=<false>))<line_sep>self.register_buffer('x_base_loc' torch.zeros(self.img_shape requires_grad=<false>))<line_sep>self.register_buffer('x_base_scale' torch.ones(self.img_shape requires_grad=<false>))<line_sep>self.register_buffer('age_flow_lognorm_loc' torch.zeros([] requires_grad=<false>))<line_sep>self.register_buffer('age_flow_lognorm_scale' torch.ones([] requires_grad=<false>))<line_sep>self.register_buffer('ventricle_volume_flow_lognorm_loc' torch.zeros([] requires_grad=<false>))<line_sep>self.register_buffer('ventricle_volume_flow_lognorm_scale' torch.ones([] requires_grad=<false>))<line_sep>self.register_buffer('brain_volume_flow_lognorm_loc' torch.zeros([] requires_grad=<false>))<line_sep>self.register_buffer('brain_volume_flow_lognorm_scale' torch.ones([] requires_grad=<false>))<line_sep># age flow
self.age_flow_components=ComposeTransformModule([Spline(1)])<line_sep>self.age_flow_lognorm=AffineTransform(loc=self.age_flow_lognorm_loc.item() scale=self.age_flow_lognorm_scale.item())<line_sep>self.age_flow_constraint_transforms=ComposeTransform([self.age_flow_lognorm ExpTransform()])<line_sep>self.age_flow_transforms=ComposeTransform([self.age_flow_components self.age_flow_constraint_transforms])<line_sep># other flows shared components
self.ventricle_volume_flow_lognorm=AffineTransform(loc=self.ventricle_volume_flow_lognorm_loc.item() scale=self.ventricle_volume_flow_lognorm_scale.item())# noqa: E501
self.ventricle_volume_flow_constraint_transforms=ComposeTransform([self.ventricle_volume_flow_lognorm ExpTransform()])<line_sep>self.brain_volume_flow_lognorm=AffineTransform(loc=self.brain_volume_flow_lognorm_loc.item() scale=self.brain_volume_flow_lognorm_scale.item())<line_sep>self.brain_volume_flow_constraint_transforms=ComposeTransform([self.brain_volume_flow_lognorm ExpTransform()])<block_end><def_stmt>__setattr__ self name value<block_start>super().__setattr__(name value)<if_stmt>name<eq>'age_flow_lognorm_loc'<block_start>self.age_flow_lognorm.loc=self.age_flow_lognorm_loc.item()<block_end><elif_stmt>name<eq>'age_flow_lognorm_scale'<block_start>self.age_flow_lognorm.scale=self.age_flow_lognorm_scale.item()<block_end><elif_stmt>name<eq>'ventricle_volume_flow_lognorm_loc'<block_start>self.ventricle_volume_flow_lognorm.loc=self.ventricle_volume_flow_lognorm_loc.item()<block_end><elif_stmt>name<eq>'ventricle_volume_flow_lognorm_scale'<block_start>self.ventricle_volume_flow_lognorm.scale=self.ventricle_volume_flow_lognorm_scale.item()<block_end><elif_stmt>name<eq>'brain_volume_flow_lognorm_loc'<block_start>self.brain_volume_flow_lognorm.loc=self.brain_volume_flow_lognorm_loc.item()<block_end><elif_stmt>name<eq>'brain_volume_flow_lognorm_scale'<block_start>self.brain_volume_flow_lognorm.scale=self.brain_volume_flow_lognorm_scale.item()<block_end><block_end><def_stmt>_get_preprocess_transforms self<block_start><return>super()._get_preprocess_transforms().inv<block_end><def_stmt>_get_transformed_x_dist self latent<block_start>x_pred_dist=self.decoder.predict(latent)<line_sep>x_base_dist=Normal(self.x_base_loc self.x_base_scale).to_event(3)<line_sep>preprocess_transform=self._get_preprocess_transforms()<if_stmt>isinstance(x_pred_dist MultivariateNormal)<or>isinstance(x_pred_dist LowRankMultivariateNormal)<block_start>chol_transform=LowerCholeskyAffine(x_pred_dist.loc x_pred_dist.scale_tril)<line_sep>reshape_transform=ReshapeTransform(self.img_shape (np.prod(self.img_shape) ))<line_sep>x_reparam_transform=ComposeTransform([reshape_transform chol_transform reshape_transform.inv])<block_end><elif_stmt>isinstance(x_pred_dist Independent)<block_start>x_pred_dist=x_pred_dist.base_dist<line_sep>x_reparam_transform=AffineTransform(x_pred_dist.loc x_pred_dist.scale 3)<block_end><return>TransformedDistribution(x_base_dist ComposeTransform([x_reparam_transform preprocess_transform]))<block_end>@pyro_method<def_stmt>guide self x age sex ventricle_volume brain_volume<block_start><raise>NotImplementedError()<block_end>@pyro_method<def_stmt>svi_guide self x age sex ventricle_volume brain_volume<block_start>self.guide(x age sex ventricle_volume brain_volume)<block_end>@pyro_method<def_stmt>svi_model self x age sex ventricle_volume brain_volume<block_start><with_stmt>pyro.plate('observations' x.shape[0])<block_start>pyro.condition(self.model data={'x':x 'sex':sex 'age':age 'ventricle_volume':ventricle_volume 'brain_volume':brain_volume})()<block_end><block_end>@pyro_method<def_stmt>infer_z self *args **kwargs<block_start><return>self.guide(*args **kwargs)<block_end>@pyro_method<def_stmt>infer self **obs<block_start>_required_data=('x' 'sex' 'age' 'ventricle_volume' 'brain_volume')<assert_stmt>set(obs.keys())<eq>set(_required_data) 'got: {}'.format(tuple(obs.keys()))<line_sep>z=self.infer_z(**obs)<line_sep>exogeneous=self.infer_exogeneous(z=z **obs)<line_sep>exogeneous['z']=z<line_sep><return>exogeneous<block_end>@pyro_method<def_stmt>reconstruct self x age sex ventricle_volume brain_volume 
num_particles:int=1<block_start>obs={'x':x 'sex':sex 'age':age 'ventricle_volume':ventricle_volume 'brain_volume':brain_volume}<line_sep>z_dist=pyro.poutine.trace(self.guide).get_trace(**obs).nodes['z']['fn']<line_sep>recons=[]<for_stmt>_ range(num_particles)<block_start>z=pyro.sample('z' z_dist)<line_sep>recon,*_=pyro.poutine.condition(self.sample data={'sex':sex 'age':age 'ventricle_volume':ventricle_volume 'brain_volume':brain_volume 'z':z})(x.shape[0])<line_sep>recons<augadd>[recon]<block_end><return>torch.stack(recons).mean(0)<block_end>@pyro_method<def_stmt>counterfactual self obs:Mapping condition:Mapping=<none> num_particles:int=1<block_start>_required_data=('x' 'sex' 'age' 'ventricle_volume' 'brain_volume')<assert_stmt>set(obs.keys())<eq>set(_required_data) 'got: {}'.format(tuple(obs.keys()))<line_sep>z_dist=pyro.poutine.trace(self.guide).get_trace(**obs).nodes['z']['fn']<line_sep>counterfactuals=[]<for_stmt>_ range(num_particles)<block_start>z=pyro.sample('z' z_dist)<line_sep>exogeneous=self.infer_exogeneous(z=z **obs)<line_sep>exogeneous['z']=z<line_sep># condition on sex if sex isn't included in 'do' as it's a root node and we don't have the exogeneous noise for it yet...
<if_stmt>'sex'<not><in>condition.keys()<block_start>exogeneous['sex']=obs['sex']<block_end>counter=pyro.poutine.do(pyro.poutine.condition(self.sample_scm data=exogeneous) data=condition)(obs['x'].shape[0])<line_sep>counterfactuals<augadd>[counter]<block_end><return>{k:v<for>k,v zip(('x' 'z' 'sex' 'age' 'ventricle_volume' 'brain_volume') (torch.stack(c).mean(0)<for>c zip(*counterfactuals)))}<block_end>@classmethod<def_stmt>add_arguments cls parser<block_start>parser=super().add_arguments(parser)<line_sep>parser.add_argument('--latent_dim' default=100 type=int help="latent dimension of model (default: %(default)s)")<line_sep>parser.add_argument('--logstd_init' default=-5 type=float help="init of logstd (default: %(default)s)")<line_sep>parser.add_argument('--enc_filters' default='16,24,32,64,128' type=str help="number of filters to use (default: %(default)s)")<line_sep>parser.add_argument('--dec_filters' default='128,64,32,24,16' type=str help="number of filters to use (default: %(default)s)")<line_sep>parser.add_argument('--num_convolutions' default=3 type=int help="number of convolutions to build model (default: %(default)s)")<line_sep>parser.add_argument('--use_upconv' default=<false> action='store_true' help="toogle upconv (default: %(default)s)")<line_sep>parser.add_argument('--decoder_type' default='fixed_var' help="var type (default: %(default)s)" choices=['fixed_var' 'learned_var' 'independent_gaussian' 'sharedvar_multivariate_gaussian' 'multivariate_gaussian' 'sharedvar_lowrank_multivariate_gaussian' 'lowrank_multivariate_gaussian'])<line_sep>parser.add_argument('--decoder_cov_rank' default=10 type=int help="rank for lowrank cov approximation (requires lowrank decoder) (default: %(default)s)")# noqa: E501
<return>parser<block_end><block_end><class_stmt>SVIExperiment(BaseCovariateExperiment)<block_start><def_stmt>__init__ self hparams pyro_model:BaseSEM<block_start>super().__init__(hparams pyro_model)<line_sep>self.svi_loss=CustomELBO(num_particles=hparams.num_svi_particles)<line_sep>self._build_svi()<block_end><def_stmt>_build_svi self loss=<none><block_start><def_stmt>per_param_callable module_name param_name<block_start>params={'eps':1e-5 'amsgrad':self.hparams.use_amsgrad 'weight_decay':self.hparams.l2}<if_stmt>'flow_components'<in>module_name<or>'sex_logits'<in>param_name<block_start>params['lr']=self.hparams.pgm_lr<block_end><else_stmt><block_start>params['lr']=self.hparams.lr<block_end>print(f'building opt for {module_name} - {param_name} with p: {params}')<line_sep><return>params<block_end><if_stmt>loss<is><none><block_start>loss=self.svi_loss<block_end><if_stmt>self.hparams.use_cf_guide<block_start><def_stmt>guide *args **kwargs<block_start><return>self.pyro_model.counterfactual_guide(*args **kwargs counterfactual_type=self.hparams.cf_elbo_type)<block_end>self.svi=SVI(self.pyro_model.svi_model guide Adam(per_param_callable) loss)<block_end><else_stmt><block_start>self.svi=SVI(self.pyro_model.svi_model self.pyro_model.svi_guide Adam(per_param_callable) loss)<block_end>self.svi.loss_class=loss<block_end><def_stmt>backward self *args **kwargs<block_start><pass><block_end># No loss to backpropagate since we're using Pyro's optimisation machinery
<def_stmt>print_trace_updates self batch<block_start><with_stmt>torch.no_grad()<block_start>print('Traces:\n'+('#'<times>10))<line_sep>guide_trace=pyro.poutine.trace(self.pyro_model.svi_guide).get_trace(**batch)<line_sep>model_trace=pyro.poutine.trace(pyro.poutine.replay(self.pyro_model.svi_model trace=guide_trace)).get_trace(**batch)<line_sep>guide_trace=pyro.poutine.util.prune_subsample_sites(guide_trace)<line_sep>model_trace=pyro.poutine.util.prune_subsample_sites(model_trace)<line_sep>model_trace.compute_log_prob()<line_sep>guide_trace.compute_score_parts()<line_sep>print(f'model: {model_trace.nodes.keys()}')<for_stmt>name,site model_trace.nodes.items()<block_start><if_stmt>site["type"]<eq>"sample"<block_start>fn=site['fn']<if_stmt>isinstance(fn Independent)<block_start>fn=fn.base_dist<block_end>print(f'{name}: {fn} - {fn.support}')<line_sep>log_prob_sum=site["log_prob_sum"]<line_sep>is_obs=site["is_observed"]<line_sep>print(f'model - log p({name}) = {log_prob_sum} | obs={is_obs}')<if_stmt>torch.isnan(log_prob_sum)<block_start>value=site['value'][0]<line_sep>conc0=fn.concentration0<line_sep>conc1=fn.concentration1<line_sep>print(f'got:\n{value}\n{conc0}\n{conc1}')<line_sep><raise>Exception()<block_end><block_end><block_end>print(f'guide: {guide_trace.nodes.keys()}')<for_stmt>name,site guide_trace.nodes.items()<block_start><if_stmt>site["type"]<eq>"sample"<block_start>fn=site['fn']<if_stmt>isinstance(fn Independent)<block_start>fn=fn.base_dist<block_end>print(f'{name}: {fn} - {fn.support}')<line_sep>entropy=site["score_parts"].entropy_term.sum()<line_sep>is_obs=site["is_observed"]<line_sep>print(f'guide - log q({name}) = {entropy} | obs={is_obs}')<block_end><block_end><block_end><block_end><def_stmt>get_trace_metrics self batch<block_start>metrics={}<line_sep>model=self.svi.loss_class.trace_storage['model']<line_sep>guide=self.svi.loss_class.trace_storage['guide']<line_sep>metrics['log p(x)']=model.nodes['x']['log_prob'].mean()<line_sep>metrics['log p(age)']=model.nodes['age']['log_prob'].mean()<line_sep>metrics['log p(sex)']=model.nodes['sex']['log_prob'].mean()<line_sep>metrics['log p(ventricle_volume)']=model.nodes['ventricle_volume']['log_prob'].mean()<line_sep>metrics['log p(brain_volume)']=model.nodes['brain_volume']['log_prob'].mean()<line_sep>metrics['p(z)']=model.nodes['z']['log_prob'].mean()<line_sep>metrics['q(z)']=guide.nodes['z']['log_prob'].mean()<line_sep>metrics['log p(z) - log q(z)']=metrics['p(z)']-metrics['q(z)']<line_sep><return>metrics<block_end><def_stmt>prep_batch self batch<block_start>x=batch['image']<times>255.<line_sep>age=batch['age'].unsqueeze(1).float()<line_sep>sex=batch['sex'].unsqueeze(1).float()<line_sep>ventricle_volume=batch['ventricle_volume'].unsqueeze(1).float()<line_sep>brain_volume=batch['brain_volume'].unsqueeze(1).float()<line_sep>x=x.float()<if_stmt>self.training<block_start>x<augadd>torch.rand_like(x)<block_end><return>{'x':x 'age':age 'sex':sex 'ventricle_volume':ventricle_volume 'brain_volume':brain_volume}<block_end><def_stmt>training_step self batch batch_idx<block_start>batch=self.prep_batch(batch)<if_stmt>self.hparams.validate<block_start>print('Validation:')<line_sep>self.print_trace_updates(batch)<block_end>loss=self.svi.step(**batch)<line_sep>metrics=self.get_trace_metrics(batch)<if_stmt>np.isnan(loss)<block_start>self.logger.experiment.add_text('nan' f'nand at {self.current_epoch}:\n{metrics}')<line_sep><raise>ValueError('loss went to nan with metrics:\n{}'.format(metrics))<block_end>tensorboard_logs={('train/'+k):v<for>k,v 
metrics.items()}<line_sep>tensorboard_logs['train/loss']=loss<line_sep>self.log_dict(tensorboard_logs)<line_sep><return>torch.Tensor([loss])<block_end><def_stmt>validation_step self batch batch_idx<block_start>batch=self.prep_batch(batch)<line_sep>loss=self.svi.evaluate_loss(**batch)<line_sep>metrics=self.get_trace_metrics(batch)<line_sep><return>{'loss':loss **metrics}<block_end><def_stmt>test_step self batch batch_idx<block_start>batch=self.prep_batch(batch)<line_sep>loss=self.svi.evaluate_loss(**batch)<line_sep>metrics=self.get_trace_metrics(batch)<line_sep>samples=self.build_test_samples(batch)<line_sep><return>{'loss':loss **metrics 'samples':samples}<block_end>@classmethod<def_stmt>add_arguments cls parser<block_start>parser=super().add_arguments(parser)<line_sep>parser.add_argument('--num_svi_particles' default=4 type=int help="number of particles to use for ELBO (default: %(default)s)")<line_sep>parser.add_argument('--num_sample_particles' default=32 type=int help="number of particles to use for MC sampling (default: %(default)s)")<line_sep>parser.add_argument('--use_cf_guide' default=<false> action='store_true' help="whether to use counterfactual guide (default: %(default)s)")<line_sep>parser.add_argument('--cf_elbo_type' default=-1 choices=[-1 0 1 2] help="-1: randomly select per batch, 0: shuffle thickness, 1: shuffle intensity, 2: shuffle both (default: %(default)s)")<line_sep><return>parser<block_end><block_end>EXPERIMENT_REGISTRY[SVIExperiment.__name__]=SVIExperiment<line_sep> |
import sys

import click

from prefect import config
from prefect.backend import kv_store
from prefect.backend.kv_store import NON_CLOUD_BACKEND_ERROR_MESSAGE
from prefect.cli.build_register import (
    handle_terminal_error,
    TerminalError,
    log_exception,
)


@click.group()
def kv():
    """
    Interact with Prefect Cloud KV Store

    \b
    Usage:
        $ prefect kv [COMMAND]
    """
    if config.backend != "cloud":
        click.secho(NON_CLOUD_BACKEND_ERROR_MESSAGE, fg="red")
        sys.exit(1)


@kv.command(name="set")
@click.argument("key")
@click.argument("value")
@handle_terminal_error
def set_command(key, value):
    """
    Set a key value pair, overriding existing values if key exists

    \b
    Arguments:
        key     TEXT    Key to set
        value   TEXT    Value associated with key to set
    """
    try:
        kv_store.set_key_value(key=key, value=value)
        click.secho("Key value pair set successfully", fg="green")
    except Exception as exc:
        log_exception(exc)
        raise TerminalError("An error occurred setting the key value pair")


@kv.command(name="get")
@click.argument("key")
@handle_terminal_error
def get_command(key):
    """
    Get the value of a key

    \b
    Arguments:
        key     TEXT    Key to get
    """
    try:
        result = kv_store.get_key_value(key=key)
        click.secho(f"Key {key!r} has value {result!r}", fg="green")
    except Exception as exc:
        log_exception(exc)
        raise TerminalError(f"Error retrieving value for key {key!r}")


@kv.command(name="delete")
@click.argument("key")
@handle_terminal_error
def delete_command(key):
    """
    Delete a key value pair

    \b
    Arguments:
        key     TEXT    Key to delete
    """
    try:
        kv_store.delete_key(key=key)
        click.secho(f"Key {key!r} has been deleted", fg="green")
    except Exception as exc:
        log_exception(exc)
        raise TerminalError("An error occurred deleting the key")


@kv.command(name="list")
@handle_terminal_error
def list_command():
    """
    List all key value pairs
    """
    try:
        result = kv_store.list_keys()
        if result:
            click.secho("\n".join(result), fg="green")
        else:
            click.secho("No keys found", fg="yellow")
    except Exception as exc:
        log_exception(exc)
        raise TerminalError("An error occurred when listing keys")
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from .mot import MotTrainer


train_factory = {
    'mot': MotTrainer,
}
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>argparse<import_stmt>time<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<import_stmt>json<import_stmt>torch<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>cv2<import_stmt>_init_paths<import_from_stmt>_init_paths get_path<import_from_stmt>utils.utilitys plot_keypoint PreProcess write load_json<import_from_stmt>config cfg update_config<import_from_stmt>utils.transforms *<import_from_stmt>utils.inference get_final_preds<import_stmt>models<line_sep>sys.path.pop(0)<line_sep>pre_dir,cur_dir,chk_root,data_root,lib_root,output_root=get_path(__file__)<line_sep>cfg_dir=pre_dir+'/experiments/coco/hrnet/'<line_sep>model_dir=chk_root+'hrnet/pose_coco/'<line_sep># Loading human detector model
sys.path.insert(0 lib_root)<import_from_stmt>detector load_model<as>yolo_model<import_from_stmt>detector yolo_human_det<as>yolo_det<import_from_stmt>track.sort Sort<line_sep>sys.path.pop(0)<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Train keypoints network')<line_sep># general
parser.add_argument('--cfg' type=str default=cfg_dir+'w48_384x288_adam_lr1e-3.yaml' help='experiment configure file name')<line_sep>parser.add_argument('opts' nargs=argparse.REMAINDER default=<none> help="Modify config options using the command-line")<line_sep>parser.add_argument('--modelDir' type=str default=model_dir+'pose_hrnet_w48_384x288.pth' help='The model directory')<line_sep>parser.add_argument('--det-dim' type=int default=416 help='The input dimension of the detected image')<line_sep>parser.add_argument('--thred-score' type=float default=0.70 help='The threshold of object Confidence')<line_sep>parser.add_argument('-a' '--animation' action='store_true' help='output animation')<line_sep>parser.add_argument('-np' '--num-person' type=int default=1 help='The maximum number of estimated poses')<line_sep>parser.add_argument("-v" "--video" type=str default='camera' help="input video file name")<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>reset_config args<block_start>update_config(cfg args)<line_sep># cudnn related setting
cudnn.benchmark=cfg.CUDNN.BENCHMARK<line_sep>torch.backends.cudnn.deterministic=cfg.CUDNN.DETERMINISTIC<line_sep>torch.backends.cudnn.enabled=cfg.CUDNN.ENABLED<block_end># load model
<def_stmt>model_load config<block_start>print('Loading HRNet model ...')<line_sep># lib/models/pose_hrnet.py:get_pose_net
model=eval('models.'+config.MODEL.NAME+'.get_pose_net')(config is_train=<false>)<if_stmt>torch.cuda.is_available()<block_start>model=model.cuda()<block_end>state_dict=torch.load(config.OUTPUT_DIR)<import_from_stmt>collections OrderedDict<line_sep>new_state_dict=OrderedDict()<for_stmt>k,v state_dict.items()<block_start>name=k# remove module.
# print(name,'\t')
new_state_dict[name]=v<block_end>model.load_state_dict(new_state_dict)<line_sep>model.eval()<line_sep>print('HRNet network successfully loaded')<line_sep><return>model<block_end><def_stmt>load_default_model <block_start>args=parse_args()<line_sep>reset_config(args)<line_sep>print('Loading HRNet model ...')<line_sep># lib/models/pose_hrnet.py:get_pose_net
model=eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(cfg is_train=<false>)<if_stmt>torch.cuda.is_available()<block_start>model=model.cuda()<block_end>state_dict=torch.load(cfg.OUTPUT_DIR)<import_from_stmt>collections OrderedDict<line_sep>new_state_dict=OrderedDict()<for_stmt>k,v state_dict.items()<block_start>name=k# remove module.
# print(name,'\t')
new_state_dict[name]=v<block_end>model.load_state_dict(new_state_dict)<line_sep>model.eval()<line_sep>print('HRNet network successfully loaded')<line_sep><return>model<block_end><def_stmt>gen_img_kpts image human_model pose_model human_sort det_dim=416 num_peroson=2<block_start>"""
:param image: Input image matrix instead of image path
:param human_model: The YOLOv3 model
:param pose_model: The HRNet model
:param human_sort: Input initialized sort tracker
:param det_dim: The input dimension of YOLOv3. [160, 320, 416]
:param num_peroson: The number of tracked people
:return:
kpts: (M, N, 2)
scores: (M, N, 1)
bboxs_track: (x1, y1, x2, y2, ID)
human_sort: Updated human_sort
"""<line_sep>args=parse_args()<line_sep>reset_config(args)<line_sep>thred_score=args.thred_score<line_sep>bboxs,bbox_scores=yolo_det(image human_model reso=det_dim confidence=thred_score)<if_stmt>bboxs<is><none><or><not>bboxs.any()<block_start><return><none> <none> <none><block_end># Using Sort to track people
# people_track: Num_bbox × [x1, y1, x2, y2, ID]
people_track=human_sort.update(bboxs)<line_sep># Track the first two people in the video and remove the ID
<if_stmt>people_track.shape[0]<eq>1<block_start>bboxs_track=people_track[-1].reshape(1 5)<block_end><else_stmt><block_start>people_track_=people_track[-num_peroson:].reshape(num_peroson 5)<line_sep>bboxs_track=people_track_[::-1]<block_end><with_stmt>torch.no_grad()# bbox is coordinate location
<block_start>inputs,origin_img,center,scale=PreProcess(image bboxs_track cfg num_peroson)<line_sep>inputs=inputs[: [2 1 0]]<if_stmt>torch.cuda.is_available()<block_start>inputs=inputs.cuda()<block_end>output=pose_model(inputs)<line_sep># compute coordinate
preds,maxvals=get_final_preds(cfg output.clone().cpu().numpy() np.asarray(center) np.asarray(scale))<line_sep>kpts=np.zeros((num_peroson 17 2) dtype=np.float32)<line_sep>scores=np.zeros((num_peroson 17 1) dtype=np.float32)<for_stmt>i,kpt enumerate(preds)<block_start>kpts[i]=kpt<block_end><for_stmt>i,score enumerate(maxvals)<block_start>scores[i]=score<block_end><block_end>human_indexes=[]<for_stmt>i range(len(bboxs_track))<block_start>human_indexes.append(bboxs_track[i -1])<block_end><return>kpts scores human_indexes<block_end><def_stmt>gen_video_kpts video det_dim=416 num_peroson=1 gen_output=<false># Updating configuration
<block_start>args=parse_args()<line_sep>reset_config(args)<line_sep>cap=cv2.VideoCapture(video)<assert_stmt>cap.isOpened() 'Cannot capture source'<line_sep># Loading detector and pose model, initialize sort for track
human_model=yolo_model(inp_dim=det_dim)<line_sep>pose_model=model_load(cfg)<line_sep>people_sort=Sort()<line_sep>video_length=int(cap.get(cv2.CAP_PROP_FRAME_COUNT))<line_sep># video_length = 1000
# collect keypoints coordinate
print('Generating 2D pose ...')<line_sep>kpts_result=[]<line_sep>scores_result=[]<for_stmt>i tqdm(range(video_length))<block_start>ret,frame=cap.read()<if_stmt><not>ret<block_start><continue><block_end># start = time.time()
<try_stmt><block_start>bboxs,scores=yolo_det(frame human_model reso=det_dim confidence=args.thred_score)<if_stmt>bboxs<is><none><or><not>bboxs.any()<block_start>print('No person detected!')<line_sep># print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
<continue><block_end># Using Sort to track people
people_track=people_sort.update(bboxs)<line_sep># Track the first two people in the video and remove the ID
<if_stmt>people_track.shape[0]<eq>1<block_start>people_track_=people_track[-1 :-1].reshape(1 4)<block_end><elif_stmt>people_track.shape[0]<ge>2<block_start>people_track_=people_track[-num_peroson: :-1].reshape(num_peroson 4)<line_sep>people_track_=people_track_[::-1]<block_end><else_stmt><block_start><continue><block_end>track_bboxs=[]<for_stmt>bbox people_track_<block_start>bbox=[round(i 2)<for>i list(bbox)]<line_sep>track_bboxs.append(bbox)<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>exit(0)<line_sep><continue><block_end><with_stmt>torch.no_grad()# bbox is coordinate location
<block_start>inputs,origin_img,center,scale=PreProcess(frame track_bboxs cfg num_peroson)<line_sep>inputs=inputs[: [2 1 0]]<if_stmt>torch.cuda.is_available()<block_start>inputs=inputs.cuda()<block_end>output=pose_model(inputs)<line_sep># compute coordinate
preds,maxvals=get_final_preds(cfg output.clone().cpu().numpy() np.asarray(center) np.asarray(scale))<block_end><if_stmt>gen_output<block_start>kpts=np.zeros((num_peroson 17 2) dtype=np.float32)<line_sep>scores=np.zeros((num_peroson 17) dtype=np.float32)<for_stmt>i,kpt enumerate(preds)<block_start>kpts[i]=kpt<block_end><for_stmt>i,score enumerate(maxvals)<block_start>scores[i]=score.squeeze()<block_end>kpts_result.append(kpts)<line_sep>scores_result.append(scores)<block_end><else_stmt><block_start>index_bboxs=[bbox+[i]<for>i,bbox enumerate(track_bboxs)]<line_sep>list(map(<lambda>x:write(x frame) index_bboxs))<line_sep>plot_keypoint(frame preds maxvals 0.3)<line_sep># print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))
cv2.imshow('frame' frame)<line_sep>key=cv2.waitKey(1)<if_stmt>key&0xFF<eq>ord('q')<block_start><break><block_end><block_end><block_end><if_stmt>gen_output<block_start>keypoints=np.array(kpts_result)<line_sep>scores=np.array(scores_result)<line_sep>keypoints=keypoints.transpose(1 0 2 3)# (T, M, N, 2) --> (M, T, N, 2)
scores=scores.transpose(1 0 2)# (T, M, N) --> (M, T, N)
<return>keypoints scores<block_end><block_end><def_stmt>generate_ntu_kpts_json video_path kpts_file<block_start>args=parse_args()<line_sep>reset_config(args)<line_sep># Loading detector and pose model, initialize sort for track
human_model=yolo_model()<line_sep>pose_model=model_load(cfg)<line_sep>people_sort=Sort()<with_stmt>torch.no_grad()<block_start>cap=cv2.VideoCapture(video_path)<line_sep>video_length=int(cap.get(cv2.CAP_PROP_FRAME_COUNT))<line_sep># collect keypoints information
kpts_info=dict()<line_sep>data=[]<for_stmt>i tqdm(range(video_length))<block_start>frame_info={'frame_index':i+1}<line_sep>ret,frame=cap.read()<try_stmt><block_start>bboxs,scores=yolo_det(frame human_model confidence=args.thred_score)<if_stmt>bboxs<is><none><or><not>bboxs.any()<block_start>print('No person detected!')<line_sep><continue><block_end># Using Sort to track people
people_track=people_sort.update(bboxs)<line_sep># Track the first two people in the video and remove the ID
<if_stmt>people_track.shape[0]<eq>1<block_start>people_track_=people_track[-1 :-1].reshape(1 4)<block_end><elif_stmt>people_track.shape[0]<ge>2<block_start>people_track_=people_track[-2: :-1].reshape(2 4)<line_sep>people_track_=people_track_[::-1]<block_end><else_stmt><block_start>skeleton={'skeleton':[{'pose':[] 'score':[] 'bbox':[]}]}<line_sep>frame_info.update(skeleton)<line_sep>data.append(frame_info)<line_sep><continue><block_end>track_bboxs=[]<for_stmt>bbox people_track_<block_start>bbox=[round(i 3)<for>i list(bbox)]<line_sep>track_bboxs.append(bbox)<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep><continue><block_end># bbox is coordinate location
inputs,origin_img,center,scale=PreProcess(frame bboxs cfg args.num_person)<line_sep>inputs=inputs[: [2 1 0]]<if_stmt>torch.cuda.is_available()<block_start>inputs=inputs.cuda()<block_end>output=pose_model(inputs.cuda())<line_sep># compute coordinate
preds,maxvals=get_final_preds(cfg output.clone().cpu().numpy() np.asarray(center) np.asarray(scale))<line_sep>skeleton=[]<for_stmt>num,bbox enumerate(track_bboxs)<block_start>pose=preds[num].tolist()<line_sep>score=maxvals[num].tolist()<line_sep>pose=round_list(pose)<line_sep>score=round_list(score)<line_sep>one_skeleton={'pose':pose 'score':score 'bbox':bbox}<line_sep>skeleton.append(one_skeleton)<block_end>frame_info.update({'skeleton':skeleton})<line_sep>data.append(frame_info)<block_end>kpts_info.update({'data':data})<with_stmt>open(kpts_file 'w')<as>fw<block_start>json.dump(kpts_info fw)<block_end><block_end>print('Finishing!')<block_end><def_stmt>round_list input_list decimals=3<block_start>dim=len(input_list)<for_stmt>i range(dim)<block_start><for_stmt>j range(len(input_list[i]))<block_start>input_list[i][j]=round(input_list[i][j] decimals)<block_end><block_end><return>input_list<block_end> |
from octopus.platforms.BTC.explorer import BitcoinExplorerRPC
from octopus.platforms.BTC.explorer import RPC_USER, RPC_PASSWORD, RPC_HOST

import unittest


class BitcoinExplorerTestCase(unittest.TestCase):

    explorer = BitcoinExplorerRPC(host=('%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST)))

    blockhash = '00000000000000000024fb37364cbf81fd49cc2d51c09c75c35433c3a1945d04'
    txid = '1b5bfc2681d40c872126919ccb1752de4cca42dcfc594899f2ef11db4b05bb39'
    tx_raw = '0200000001686b654b40737f0daa1532f64e525dc925e60d075403d38cfb12ac9097764015040000006a473044022009ec3f26984906a813faae05d968ec06bf1c68883e09a00b6333126ea87d96b302201cf1d2b9165442aa178fdf772a3909c3d2ba69e454eb8660fa35df8645e3bcb60121022f2caec3ad2f3b174d048a0d46f4f6e8ba4e9d02f6bdbba64ac6817f7ac6c131ffffffff02060d0700000000001976a91407c5acae3abc91735a1471e275e33abbffada89088ac00581300000000001976a91432f2e30111e1dc45f415430ef082cb64225c538a88ac00000000'
    wallet_address = '15wDxrRCn7YiCXdvqjcih6G8svrmq5AQSS'
    script_hex = "76a82096b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf88ac"
    script_asm = 'OP_DUP OP_SHA256 96b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf OP_EQUALVERIFY OP_CHECKSIG'

    def testRPCCommand(self):
        ######################
        # HIGH-LEVEL METHODS #
        ######################
        self.assertEqual(self.explorer.get_transaction(self.txid, 0), self.tx_raw)
        self.assertEqual(len(self.explorer.get_block_by_hash(self.blockhash)), 18)
        self.assertEqual(len(self.explorer.get_block_by_number(500000)), 18)

        ####################
        # JSON-RPC METHODS #
        ####################
        self.assertEqual(self.explorer.decoderawtransaction(self.tx_raw)['txid'], self.txid)
        self.assertEqual(self.explorer.decodescript(self.script_hex)['asm'], self.script_asm)
        self.assertEqual(len(self.explorer.getbestblockhash()), len(self.blockhash))
        self.assertEqual(len(self.explorer.getblock(self.blockhash)), 18)
        self.assertEqual(len(self.explorer.getblockchaininfo()), 11)
        self.assertEqual(type(self.explorer.getblockcount()), int)
        self.assertEqual(self.explorer.getblockhash(500000), self.blockhash)
        # self.assertEqual(len(self.explorer.getchaintips()), 2)
        self.assertEqual(type(self.explorer.getconnectioncount()), int)
        self.assertEqual(type(self.explorer.getdifficulty()), float)
        self.assertEqual(len(self.explorer.getinfo()), 16)
        self.assertEqual(len(self.explorer.getmempoolinfo()), 5)
        self.assertEqual(len(self.explorer.getmininginfo()), 8)
        self.assertEqual(len(self.explorer.getnettotals()), 4)
        self.assertEqual(type(self.explorer.getnetworkhashps()), float)
        self.assertEqual(len(self.explorer.getnetworkinfo()), 13)
        self.assertEqual(len(self.explorer.getpeerinfo()), 8)
        self.assertEqual(type(self.explorer.getrawmempool()), list)
        self.assertEqual(self.explorer.getrawtransaction(self.txid), self.tx_raw)
        self.assertEqual(type(self.explorer.getreceivedbyaccount('')), float)
        self.assertEqual(type(self.explorer.getreceivedbyaddress(self.wallet_address)), float)
        self.assertEqual(len(self.explorer.gettxout(self.txid, 0)), 5)
        self.assertEqual(len(self.explorer.gettxoutproof([self.txid])), 818)
        self.assertEqual(type(self.explorer.getunconfirmedbalance()), float)
        self.assertEqual(len(self.explorer.getwalletinfo()), 9)
        self.assertEqual(type(self.explorer.help()), str)
        self.assertEqual(len(self.explorer.validateaddress(self.wallet_address)), 6)
        self.assertEqual(self.explorer.verifytxoutproof(self.explorer.gettxoutproof([self.txid])), [self.txid])

        # Not tested
'''
self.explorer.abandontransaction()
self.explorer.addmultisigaddress()
self.explorer.addnode()
self.explorer.createmultisig()
self.explorer.createrawtransaction()
self.explorer.dumpprivkey()
self.explorer.encryptwallet()
self.explorer.estimatefee()
self.explorer.estimatepriority()
self.explorer.getaccountaddress()
self.explorer.getaccount()
self.explorer.getaddednodeinfo()
self.explorer.getaddressesbyaccount()
self.explorer.getbalance()
self.explorer.gettransaction()
self.explorer.keypoolrefill()
self.explorer.listaccounts()
self.explorer.listaddressgroupings()
self.explorer.listlockunspent()
self.explorer.listreceivedbyaccount()
self.explorer.listreceivedbyaddress()
self.explorer.listtransactions()
self.explorer.listunspent()
self.explorer.lockunspent()
self.explorer.prioritisetransaction()
self.explorer.sendfrom()
self.explorer.sendmany()
self.explorer.sendrawtransaction()
self.explorer.sendtoaddress()
self.explorer.settxfee()
self.explorer.signmessage()
self.explorer.signrawtransaction()
self.explorer.submitblock()
self.explorer.verifymessage()
self.explorer.walletlock()
self.explorer.walletpassphrase()
self.explorer.walletpassphrasechange()
'''


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(BitcoinExplorerTestCase)
    unittest.TextTestRunner(verbosity=2).run(suite)
import unittest

from troposphere import Parameter, Ref


class TestInitArguments(unittest.TestCase):
    def test_title_max_length(self):
        title = "i" * 256
        with self.assertRaises(ValueError):
            Parameter(title, Type="String")

    def test_ref_can_be_requested(self):
        param = Parameter("title", Type="String")
        reference = param.ref()

        self.assertIsInstance(reference, Ref)
        self.assertDictEqual(reference.data, {"Ref": "title"})


if __name__ == "__main__":
    unittest.main()
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>.abstract_graph AbstractGraph<import_from_stmt>.graph Graph Edgeless DoubledGraph disjoint_union<import_from_stmt>.lattice Lattice<import_from_stmt>.common_lattices Grid Hypercube Cube Square Chain BCC FCC Diamond Pyrochlore Triangular Honeycomb Kagome <import_from_stmt>netket.utils _hide_submodules<line_sep>_hide_submodules(__name__)<line_sep> |
'''
provide a simple python3 interface to the gsl_fft_real_transform function
'''<import_stmt>sys<import_stmt>itertools<import_from_stmt>gsl_setup *<def_stmt>grouper n iterable fillvalue=<none># http://docs.python.org/dev/3.0/library/itertools.html#module-itertools
<block_start>"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"<line_sep>args=[iter(iterable)]<times>n<line_sep><return>itertools.zip_longest(fillvalue=fillvalue *args)<block_end>real_workspace_alloc=setup(gsl.gsl_fft_real_workspace_alloc [c_ulong ] c_void_p)<line_sep>real_wavetable_alloc=setup(gsl.gsl_fft_real_wavetable_alloc [c_ulong ] c_void_p)<line_sep>real_workspace_free=setup(gsl.gsl_fft_real_workspace_free [c_void_p ])<line_sep>real_wavetable_free=setup(gsl.gsl_fft_real_wavetable_free [c_void_p ])<line_sep>real_transform=setup(gsl.gsl_fft_real_transform [c_void_p c_ulong c_ulong c_void_p c_void_p] )<class_stmt>Real_FFT<block_start>'''
Return the complex coefficients of the real FFT of the real input data.
return value[0] is the constant (offset) term,
[1] is the amplitude of the term with wavelength = data length,
and so on, down to
[-1], the amplitude of the term with wavelength = twice the sample distance.
'''<def_stmt>__init__ self<block_start>self.n=0<block_end><def_stmt>__call__ self data<block_start><if_stmt>len(data)<l>2<block_start><if_stmt>1<eq>len(data)<block_start><return>data[:]<block_end><return>[]<block_end><if_stmt>len(data)<ne>self.n<block_start>self.__del__()<line_sep>self.n=len(data)<line_sep>size=c_ulong(self.n)<line_sep>self.workspace=real_workspace_alloc(size)<line_sep>self.wavetable=real_wavetable_alloc(size)<block_end>a=array('d' data)# need a copy of the data
real_transform(ADDRESS(a) 1 self.n self.wavetable self.workspace)<line_sep>rv=[complex(a[0]) ]<line_sep>rv.extend(itertools.starmap(complex grouper(2 a[1:] fillvalue=0)))<line_sep><return>rv<block_end><def_stmt>__del__ self<block_start><if_stmt>self.n<block_start><try_stmt><block_start>real_workspace_free(self.workspace)<line_sep>real_wavetable_free(self.wavetable)<block_end><except_stmt>AttributeError<block_start>print('Attribute error while freeing FFT auxiliary storage' file=sys.stderr)<block_end><except_stmt><block_start>print('error freeing FFT auxiliary storage' file=sys.stderr)<block_end><block_end><block_end><def_stmt>produce_frequency self * samples=<none> sample_interval=<none> sample_rate=<none> total_length=<none><block_start>'''
return the frequency grid based on actual sizes (default sample_interval=1).
'''<line_sep>n=samples<or>self.n<if_stmt><not>n<block_start><return>array('d')<block_end>args_specified=3-((<not>sample_interval)+(<not>sample_rate)+(<not>total_length))<if_stmt>1<l>args_specified<block_start><raise>TypeError('specify at most one of [sample_rate, total_length, sample_interval]')<block_end><if_stmt>0<eq>args_specified<block_start>L=n<block_end><elif_stmt>sample_interval<block_start>L=n<times>sample_interval<block_end><elif_stmt>sample_rate<block_start>L=n/sample_rate<block_end><else_stmt><block_start>L=total_length<block_end><return>as_array(waves/L<for>waves range(1+n<floordiv>2))<block_end><def_stmt>produce_period self *args **kwargs<block_start>'''
return the period grid based on actual sizes.
frequency of zero --> period 0. what else to do?
'''<line_sep>f2T=self.produce_frequency(*args **kwargs)<for_stmt>i range(1 len(f2T))<block_start>f2T[i]=1/f2T[i]<block_end><return>f2T<block_end><block_end>real_fft=Real_FFT()<def_stmt>magnitude a<block_start><return>[abs(b)<for>b a]<block_end><def_stmt>phase a<block_start><return>[phase(b)<for>b a]<block_end> |
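# Hedged usage sketch (not part of the original module): the sample values below are
# hypothetical and assume gsl_setup exposed the ctypes bindings used above.
#
#   spectrum = real_fft([0.0, 1.0, 0.0, -1.0] * 8)            # complex coefficients, [0] = offset term
#   freqs = real_fft.produce_frequency(sample_interval=0.5)   # one frequency per coefficient
#   print(list(zip(freqs, magnitude(spectrum))))               # (frequency, amplitude) pairs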
# import inspect
# import ctypes
<import_from_future_stmt> absolute_import<import_stmt>threading<line_sep># import time
# def _async_raise(tid, exctype):
# '''Raises an exception in the threads with id tid'''
# if not inspect.isclass(exctype):
# raise TypeError("Only types can be raised (not instances)")
# try:
# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
# except AttributeError:
# # To catch: undefined symbol: PyThreadState_SetAsyncExc
# return
# if res == 0:
# raise ValueError("invalid thread id")
# elif res != 1:
# # "if it returns a number greater than one, you're in trouble,
# # and you should call it again with exc=NULL to revert the effect"
# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), 0)
# raise SystemError("PyThreadState_SetAsyncExc failed")
# class KillThreadException(Exception):
# pass
<class_stmt>KillableThread(threading.Thread)<block_start><pass><line_sep>'''A thread class that supports raising an exception in the thread from
another thread.
'''<line_sep># def _get_my_tid(self):
# """determines this (self's) thread id
# CAREFUL : this function is executed in the context of the caller
# thread, to get the identity of the thread represented by this
# instance.
# """
# if not self.isAlive():
# raise threading.ThreadError("the thread is not active")
# return self.ident
# def _raiseExc(self, exctype):
# """Raises the given exception type in the context of this thread.
# If the thread is busy in a system call (time.sleep(),
# socket.accept(), ...), the exception is simply ignored.
# If you are sure that your exception should terminate the thread,
# one way to ensure that it works is:
# t = ThreadWithExc( ... )
# ...
# t.raiseExc( SomeException )
# while t.isAlive():
# time.sleep( 0.1 )
# t.raiseExc( SomeException )
# If the exception is to be caught by the thread, you need a way to
# check that your thread has caught it.
# CAREFUL : this function is executed in the context of the
# caller thread, to raise an exception in the context of the
# thread represented by this instance.
# """
# _async_raise(self._get_my_tid(), exctype)
<def_stmt>kill self force_and_wait=<false><block_start><pass><block_end># try:
# self._raiseExc(KillThreadException)
# if force_and_wait:
# time.sleep(0.1)
# while self.isAlive():
# self._raiseExc(KillThreadException)
# time.sleep(0.1)
# except threading.ThreadError:
# pass
# def onKilled(self):
# pass
# def run(self):
# try:
# self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
# except KillThreadException:
# self.onKilled()
<block_end> |
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
<import_stmt>torch<import_stmt>torchvision<import_stmt>torch_mlir<line_sep>resnet18=torchvision.models.resnet18(pretrained=<true>)<line_sep>resnet18.eval()<line_sep>module=torch_mlir.compile(resnet18 torch.ones(1 3 224 224) output_type=torch_mlir.OutputType.TORCH)<line_sep>print("TORCH OutputType\n" module.operation.get_asm(large_elements_limit=10))<line_sep>module=torch_mlir.compile(resnet18 torch.ones(1 3 224 224) output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)<line_sep>print("LINALG_ON_TENSORS OutputType\n" module.operation.get_asm(large_elements_limit=10))<line_sep># TODO: Debug why this is so slow.
module=torch_mlir.compile(resnet18 torch.ones(1 3 224 224) output_type=torch_mlir.OutputType.TOSA)<line_sep>print("TOSA OutputType\n" module.operation.get_asm(large_elements_limit=10))<line_sep> |
"""
==================================
Wolfcamp Example - Single las file
==================================
This example shows the full petrophysical workflow available in PetroPy
for a single wolfcamp las file courtesy of University Lands Texas.
The workflow progresses in these 11 steps
1. Read las file and create a :class:`petropy.Log` object
2. Load tops from a csv file using :meth:`petropy.Log.tops_from_csv`
3. Create a :class:`petropy.LogViewer` show in edit_mode to fix data
4. Define formations for calculations.
5. Calculate fluid properties by
1. Loading parameters via :meth:`petropy.Log.fluid_properties_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_fluid_properties`
6. Calculate multimineral properties by
1. Loading parameters via :meth:`petropy.Log.multimineral_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_multimineral_model`
7. Curve summations via :meth:`petropy.Log.summations`
8. Adding pay flags via :meth:`petropy.Log.add_pay_flag`
9. Clustering intervals into Electrofacies via :meth:`petropy.electrofacies`
10. Exporting log statistics via :meth:`petropy.Log.statistics`
11. Saving LogViewer to png and Log to las
To bulk process a folder of las files at once, use the `bulk example`_ .
Downloading the script at the bottom of this webpage will not download the required las
file or PetroPy logo. To download all files, view the `examples folder`_ on GitHub.
.. _bulk example: wolfcamp_bulk.html
.. _examples folder: https://github.com/toddheitmann/PetroPy/tree/master/examples
"""<import_stmt>petropy<as>ptr<line_sep># import pyplot to add logo to figure
<import_stmt>matplotlib.pyplot<as>plt<line_sep>### 1. Read las file
# create a Log object by reading a file path #
las_file_path='42303347740000.las'<line_sep>log=ptr.Log(las_file_path)<line_sep>### 2. load tops ###
tops_file_path='tops.csv'<line_sep>log.tops_from_csv(tops_file_path)<line_sep>### 3. graphically edit log ###
# use manual mode for fixing borehole washout #
# and other changes requiring redrawing data #
# use bulk shift mode to linearly adjust all #
# curve data #
# close both windows to continue program #
viewer=ptr.LogViewer(log top=6950 height=100)<line_sep>viewer.show(edit_mode=<true>)<line_sep># overwrite log variable with updated log #
# from LogViewer edits #
log=viewer.log<line_sep>### 4. define formations ###
f=['WFMPA' 'WFMPB' 'WFMPC']<line_sep>### 5. fluid properties ###
# load fluid properties from a csv file #
# since path is not specified, load default #
# csv file included with petropy #
log.fluid_properties_parameters_from_csv()<line_sep># calculate fluid properties over defined #
# formations with parameter WFMP from #
# previously loaded csv #
log.formation_fluid_properties(f parameter='WFMP')<line_sep>### 6. multimineral model ###
# load multimineral parameters from csv file #
# since path is not specified, load default #
# csv file included with petropy #
log.multimineral_parameters_from_csv()<line_sep># calculate multimineral model over defined #
# formations with parameter WFMP from #
# previously loaded csv #
log.formation_multimineral_model(f parameter='WFMP')<line_sep>### 7. summations ###
# define curves to calculate cumulative values #
c=['OIP' 'BVH' 'PHIE']<line_sep># calculate cumulative values over formations #
log.summations(f curves=c)<line_sep>### 8. pay flags ###
# define pay flags as list of tuples for #
# (curve, value) #
flag_1_gtoe=[('PHIE' 0.03)]<line_sep>flag_2_gtoe=[('PAY_FLAG_1' 1) ('BVH' 0.02)]<line_sep>flag_3_gtoe=[('PAY_FLAG_2' 1)]<line_sep>flag_3_ltoe=[('SW' 0.2)]<line_sep># add pay flags over defined formations #
log.add_pay_flag(f greater_than_or_equal=flag_1_gtoe)<line_sep>log.add_pay_flag(f greater_than_or_equal=flag_2_gtoe)<line_sep>log.add_pay_flag(f greater_than_or_equal=flag_3_gtoe less_than_or_equal=flag_3_ltoe)<line_sep>### 9. electrofacies ###
# define curves to use in electrofacies module #
electro_logs=['GR_N' 'RESDEEP_N' 'NPHI_N' 'RHOB_N' 'PE_N']<line_sep># make a list of Log objects as input #
logs=[log]<line_sep># calculate electrofacies for the defined logs#
# over the specified formations #
# finding 6 clusters of electrofacies #
# with RESDEEP_N logarithmically scaled #
logs=ptr.electrofacies(logs f electro_logs 6 log_scale=['RESDEEP_N'])<line_sep># unpack log object from returned list #
log=logs[0]<line_sep>### 10. statistics ###
# define list of curves to find statistics #
stats_curves=['OIP' 'BVH' 'PHIE' 'SW' 'VCLAY' 'TOC']<line_sep># calculate stats over specified formation and#
# save to csv file wfmp_statistics.csv #
# update the line if the well, formation is #
# already included in the csv file #
# pay flag and facies curve names are assumed #
# to match the curves created in steps 8-9    #
pay_flags=['PAY_FLAG_1' 'PAY_FLAG_2' 'PAY_FLAG_3']<line_sep>facies_curves=['FACIES']<line_sep>log.statistics_to_csv('wfmp_statistics.csv' replace=<true> formations=f curves=stats_curves pay_flags=pay_flags facies=facies_curves)<line_sep>### 11. export data ###
# find way to name well, looking for well name#
# or UWI or API #
<if_stmt>len(log.well['WELL'].value)<g>0<block_start>well_name=log.well['WELL'].value<block_end><elif_stmt>len(str(log.well['UWI'].value))<g>0<block_start>well_name=str(log.well['UWI'].value)<block_end><elif_stmt>len(log.well['API'].value)<g>0<block_start>well_name=str(log.well['API'].value)<block_end><else_stmt><block_start>well_name='UNKNOWN'<block_end>well_name=well_name.replace('.' '')<line_sep># scale height of viewer to top and bottom #
# of calculated values #
wfmpa_top=log.tops['WFMPA']<line_sep>wfmpc_base=log.next_formation_depth('WFMPC')<line_sep>top=wfmpa_top<line_sep>height=wfmpc_base-wfmpa_top<line_sep># create LogViewer with the default full_oil #
# template included in petropy #
viewer=ptr.LogViewer(log top=top height=height template_defaults='full_oil')<line_sep># set viewer to 17x11 inches size for use in #
# PowerPoint or printing to larger paper #
viewer.fig.set_size_inches(17 11)<line_sep># add well_name to title of LogViewer #
viewer.fig.suptitle(well_name fontweight='bold' fontsize=30)<line_sep># add logo to top left corner #
logo_im=plt.imread('company_logo.png')<line_sep>logo_ax=viewer.fig.add_axes([0 0.85 0.2 0.2])<line_sep>logo_ax.imshow(logo_im)<line_sep>logo_ax.axis('off')<line_sep># add text to top right corner #
<if_stmt>len(str(log.well['UWI'].value))<g>0<block_start>label='UWI: '+str(log.well['UWI'].value)+'\n'<block_end><elif_stmt>len(log.well['API'].value)<g>0<block_start>label='API: '+str(log.well['API'].value)+'\n'<block_end><else_stmt><block_start>label=''<block_end>label<augadd>'County: Reagan\nCreated By: <NAME>\n'<line_sep>label<augadd>'Creation Date: October 23, 2017'<line_sep>viewer.axes[0].annotate(label xy=(0.99 0.99) xycoords='figure fraction' horizontalalignment='right' verticalalignment='top' fontsize=14)<line_sep># save figure and log #
viewer_file_name=r'%s_processed.png'%well_name<line_sep>las_file_name=r'%s_processed.las'%well_name<line_sep>viewer.fig.savefig(viewer_file_name)<line_sep>viewer.log.write(las_file_name)<line_sep> |
"""Constants for Raspberry Pi Power Supply Checker."""<line_sep>DOMAIN="rpi_power"<line_sep> |
<import_stmt>unittest<import_from_stmt>podman PodmanClient tests<import_from_stmt>podman.domain.manifests ManifestsManager Manifest<class_stmt>ManifestTestCase(unittest.TestCase)<block_start><def_stmt>setUp self<arrow><none><block_start>super().setUp()<line_sep>self.client=PodmanClient(base_url=tests.BASE_SOCK)<block_end><def_stmt>tearDown self<arrow><none><block_start>super().tearDown()<line_sep>self.client.close()<block_end><def_stmt>test_podmanclient self<block_start>manager=self.client.manifests<line_sep>self.assertIsInstance(manager ManifestsManager)<block_end><def_stmt>test_list self<block_start><with_stmt>self.assertRaises(NotImplementedError)<block_start>self.client.manifests.list()<block_end><block_end><def_stmt>test_name self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>manifest=Manifest(attrs={"names":""})<line_sep>_=manifest.name<block_end><with_stmt>self.assertRaises(ValueError)<block_start>manifest=Manifest()<line_sep>_=manifest.name<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>hashlib<import_stmt>string<import_stmt>tempfile<import_stmt>hypothesis<import_stmt>hypothesis.strategies<as>st<import_stmt>six<import_from_stmt>verta._protos.public.common CommonService_pb2<import_from_stmt>verta._internal_utils _artifact_utils<import_from_stmt>verta.tracking.entities._deployable_entity _DeployableEntity<class_stmt>TestBuildArtifactStorePath<block_start>@hypothesis.example(artifact_bytes=b"foo" key="my_artifact" ext="pkl" )@hypothesis.given(artifact_bytes=st.binary(min_size=1) key=st.text(st.characters(blacklist_characters=".") min_size=1) ext=st.text(st.characters(blacklist_characters=".") min_size=1) )<def_stmt>test_with_ext self artifact_bytes key ext<block_start>checksum=hashlib.sha256(artifact_bytes).hexdigest()<line_sep>filename=key+"."+ext<line_sep>expected_path=checksum+"/"+filename<line_sep>artifact_path=_DeployableEntity._build_artifact_store_path(artifact_stream=six.BytesIO(artifact_bytes) key=key ext=ext )<assert_stmt>artifact_path<eq>expected_path<block_end>@hypothesis.example(artifact_bytes=b"foo" key="model")@hypothesis.example(artifact_bytes=b"foo" key="model_api.json")@hypothesis.given(artifact_bytes=st.binary(min_size=1) key=st.text(min_size=1) )<def_stmt>test_no_ext self artifact_bytes key<block_start>checksum=hashlib.sha256(artifact_bytes).hexdigest()<line_sep>filename=key<line_sep>expected_path=checksum+"/"+filename<line_sep>artifact_path=_DeployableEntity._build_artifact_store_path(artifact_stream=six.BytesIO(artifact_bytes) key=key )<assert_stmt>artifact_path<eq>expected_path<block_end><block_end><class_stmt>TestCreateArtifactMsg<block_start>@hypothesis.given(artifact_bytes=st.binary(min_size=1) key=st.text(st.characters(blacklist_categories=("Cs" ) # invalid UTF-8
blacklist_characters="." ) min_size=1 ) ext=st.text(st.characters(whitelist_categories=("Lu" "Ll" "Nd") # alphanumeric
) min_size=1 ) artifact_type=st.sampled_from(CommonService_pb2.ArtifactTypeEnum.ArtifactType.values() ) method=st.text(min_size=1) framework=st.text(min_size=1) )<def_stmt>test_with_ext self artifact_bytes key ext artifact_type method framework <block_start><with_stmt>tempfile.NamedTemporaryFile(suffix="."+ext)<as>tempf<block_start>tempf.write(artifact_bytes)<line_sep>tempf.seek(0)<line_sep>artifact_msg=_DeployableEntity._create_artifact_msg(key tempf artifact_type method framework # no explicit extension
)<block_end>checksum=hashlib.sha256(artifact_bytes).hexdigest()<line_sep>artifact_path=checksum+"/"+key+"."+ext<assert_stmt>artifact_msg<eq>CommonService_pb2.Artifact(key=key path=artifact_path path_only=<false> artifact_type=artifact_type filename_extension=ext serialization=method artifact_subtype=framework )<block_end><block_end> |
# Copyright (c) Open-MMLab. All rights reserved.
<import_stmt>torch<import_from_stmt>torch.nn.parallel._functions _get_stream<def_stmt>scatter input devices streams=<none><block_start>"""Scatters tensor across multiple GPUs.
"""<if_stmt>streams<is><none><block_start>streams=[<none>]<times>len(devices)<block_end><if_stmt>isinstance(input list)<block_start>chunk_size=(len(input)-1)<floordiv>len(devices)+1<line_sep>outputs=[scatter(input[i] [devices[i<floordiv>chunk_size]] [streams[i<floordiv>chunk_size]])<for>i range(len(input))]<line_sep><return>outputs<block_end><elif_stmt>isinstance(input torch.Tensor)<block_start>output=input.contiguous()<line_sep># TODO: copy to a pinned buffer first (if copying from CPU)
stream=streams[0]<if>output.numel()<g>0<else><none><with_stmt>torch.cuda.device(devices[0]) torch.cuda.stream(stream)<block_start>output=output.cuda(devices[0] non_blocking=<true>)<block_end><return>output<block_end><else_stmt><block_start><raise>Exception(f'Unknown type {type(input)}.')<block_end><block_end><def_stmt>synchronize_stream output devices streams<block_start><if_stmt>isinstance(output list)<block_start>chunk_size=len(output)<floordiv>len(devices)<for_stmt>i range(len(devices))<block_start><for_stmt>j range(chunk_size)<block_start>synchronize_stream(output[i<times>chunk_size+j] [devices[i]] [streams[i]])<block_end><block_end><block_end><elif_stmt>isinstance(output torch.Tensor)<block_start><if_stmt>output.numel()<ne>0<block_start><with_stmt>torch.cuda.device(devices[0])<block_start>main_stream=torch.cuda.current_stream()<line_sep>main_stream.wait_stream(streams[0])<line_sep>output.record_stream(main_stream)<block_end><block_end><block_end><else_stmt><block_start><raise>Exception(f'Unknown type {type(output)}.')<block_end><block_end><def_stmt>get_input_device input<block_start><if_stmt>isinstance(input list)<block_start><for_stmt>item input<block_start>input_device=get_input_device(item)<if_stmt>input_device<ne>-1<block_start><return>input_device<block_end><block_end><return>-1<block_end><elif_stmt>isinstance(input torch.Tensor)<block_start><return>input.get_device()<if>input.is_cuda<else>-1<block_end><else_stmt><block_start><raise>Exception(f'Unknown type {type(input)}.')<block_end><block_end><class_stmt>Scatter<block_start>@staticmethod<def_stmt>forward target_gpus input<block_start>input_device=get_input_device(input)<line_sep>streams=<none><if_stmt>input_device<eq>-1# Perform CPU to GPU copies in a background stream
<block_start>streams=[_get_stream(device)<for>device target_gpus]<block_end>outputs=scatter(input target_gpus streams)<line_sep># Synchronize with the copy stream
<if_stmt>streams<is><not><none><block_start>synchronize_stream(outputs target_gpus streams)<block_end><return>tuple(outputs)<block_end><block_end> |
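# Hedged usage sketch (illustrative only; assumes two visible CUDA devices):
#
#   import torch
#   out = Scatter.forward([0, 1], [torch.randn(4, 3), torch.randn(4, 3)])
#   # 'out' is a tuple with each chunk copied onto its target GPU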
<import_from_stmt>anchorecli.cli archives<line_sep> |
<import_stmt>argparse<import_stmt>os<import_stmt>pdb<import_stmt>pickle<import_stmt>tornado.web<import_stmt>tornado.ioloop<import_stmt>tornado.autoreload<import_stmt>logging<import_stmt>json<import_from_stmt>src.biosyn DictionaryDataset BioSyn TextPreprocess <line_sep>logging.basicConfig(filename='.server.log' format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" datefmt="%m/%d/%Y %H:%M:%S" level=logging.INFO)<line_sep>parser=argparse.ArgumentParser(description='BioSyn Demo')<line_sep># Required
parser.add_argument('--model_name_or_path' required=<true> help='Directory for model')<line_sep># Settings
parser.add_argument('--port' type=int default=8888 help='port number')<line_sep>parser.add_argument('--show_predictions' action="store_true")<line_sep>parser.add_argument('--dictionary_path' type=str default=<none> help='dictionary path')<line_sep>parser.add_argument('--use_cuda' action="store_true")<line_sep>args=parser.parse_args()<def_stmt>cache_or_load_dictionary <block_start>dictionary_name=os.path.splitext(os.path.basename(args.dictionary_path))[0]<line_sep>cached_dictionary_path=os.path.join('./tmp' "cached_{}.pk".format(dictionary_name))<line_sep># If exist, load the cached dictionary
<if_stmt>os.path.exists(cached_dictionary_path)<block_start><with_stmt>open(cached_dictionary_path 'rb')<as>fin<block_start>cached_dictionary=pickle.load(fin)<block_end>print("Loaded dictionary from cached file {}".format(cached_dictionary_path))<line_sep>dictionary,dict_sparse_embeds,dict_dense_embeds=(cached_dictionary['dictionary'] cached_dictionary['dict_sparse_embeds'] cached_dictionary['dict_dense_embeds'] )<block_end><else_stmt><block_start>dictionary=DictionaryDataset(dictionary_path=args.dictionary_path).data<line_sep>dictionary_names=dictionary[: 0]<line_sep>dict_sparse_embeds=biosyn.embed_sparse(names=dictionary_names show_progress=<true>)<line_sep>dict_dense_embeds=biosyn.embed_dense(names=dictionary_names show_progress=<true>)<line_sep>cached_dictionary={'dictionary':dictionary 'dict_sparse_embeds':dict_sparse_embeds 'dict_dense_embeds':dict_dense_embeds}<if_stmt><not>os.path.exists('./tmp')<block_start>os.mkdir('./tmp')<block_end><with_stmt>open(cached_dictionary_path 'wb')<as>fin<block_start>pickle.dump(cached_dictionary fin)<block_end>print("Saving dictionary into cached file {}".format(cached_dictionary_path))<block_end><return>dictionary dict_sparse_embeds dict_dense_embeds<block_end><def_stmt>normalize mention# preprocess mention
<block_start>mention=TextPreprocess().run(mention)<line_sep># embed mention
mention_sparse_embeds=biosyn.embed_sparse(names=[mention])<line_sep>mention_dense_embeds=biosyn.embed_dense(names=[mention])<line_sep># calculate score matrix and get top 1
sparse_score_matrix=biosyn.get_score_matrix(query_embeds=mention_sparse_embeds dict_embeds=dict_sparse_embeds)<line_sep>dense_score_matrix=biosyn.get_score_matrix(query_embeds=mention_dense_embeds dict_embeds=dict_dense_embeds)<line_sep>sparse_weight=biosyn.get_sparse_weight().item()<line_sep>hybrid_score_matrix=sparse_weight<times>sparse_score_matrix+dense_score_matrix<line_sep>hybrid_candidate_idxs=biosyn.retrieve_candidate(score_matrix=hybrid_score_matrix topk=10)<line_sep># get predictions from dictionary
predictions=dictionary[hybrid_candidate_idxs].squeeze(0)<line_sep>output={'predictions':[]}<for_stmt>prediction predictions<block_start>predicted_name=prediction[0]<line_sep>predicted_id=prediction[1]<line_sep>output['predictions'].append({'name':predicted_name 'id':predicted_id})<block_end><return>output<block_end># load biosyn model
biosyn=BioSyn(use_cuda=args.use_cuda max_length=25)<line_sep>biosyn.load_model(model_name_or_path=args.model_name_or_path)<line_sep># cache or load dictionary
dictionary,dict_sparse_embeds,dict_dense_embeds=cache_or_load_dictionary()<class_stmt>MainHandler(tornado.web.RequestHandler)<block_start><def_stmt>get self<block_start>self.render("./template/index.html")<block_end><block_end><class_stmt>NormalizeHandler(tornado.web.RequestHandler)<block_start><def_stmt>get self<block_start>string=self.get_argument('string' '')<line_sep>logging.info('get!{}'.format({'string':string }))<line_sep>self.set_header("Content-Type" "application/json")<line_sep>output=normalize(mention=string)<line_sep>self.write(json.dumps(output))<block_end><block_end><def_stmt>make_app <block_start>settings={'debug':<true>}<line_sep><return>tornado.web.Application([(r"/" MainHandler) (r"/normalize/" NormalizeHandler) (r'/semantic/(.*)' tornado.web.StaticFileHandler {'path':'./semantic'}) (r'/images/(.*)' tornado.web.StaticFileHandler {'path':'./images'}) ] **settings)<block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.info('Starting biosyn server at http://localhost:{}'.format(args.port))<line_sep>app=make_app()<line_sep>app.listen(args.port)<line_sep>tornado.ioloop.IOLoop.current().start()<block_end> |
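# Hedged usage sketch: the script name and model/dictionary paths are placeholders;
# only the flags and the /normalize/ endpoint are taken from the code above.
#
#   python server.py --model_name_or_path ./biosyn-model --dictionary_path ./dict.txt --use_cuda
#   curl "http://localhost:8888/normalize/?string=type%202%20diabetes"
#   # -> {"predictions": [{"name": ..., "id": ...}, ...]}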
<import_from_stmt>google.appengine.ext db<class_stmt>PetModel(db.Model)<block_start>"""
"""<line_sep># 'borrowed' from http://code.google.com/appengine/docs/datastore/entitiesandmodels.html
name=db.StringProperty(required=<true>)<line_sep>type=db.StringProperty(required=<true> choices=set(["cat" "dog" "bird"]))<line_sep>birthdate=db.DateProperty()<line_sep>weight_in_pounds=db.IntegerProperty()<line_sep>spayed_or_neutered=db.BooleanProperty()<block_end><class_stmt>PetExpando(db.Expando)<block_start>"""
"""<line_sep>name=db.StringProperty(required=<true>)<line_sep>type=db.StringProperty(required=<true> choices=set(["cat" "dog" "bird"]))<line_sep>birthdate=db.DateProperty()<line_sep>weight_in_pounds=db.IntegerProperty()<line_sep>spayed_or_neutered=db.BooleanProperty()<block_end><class_stmt>ListModel(db.Model)<block_start>"""
"""<line_sep>numbers=db.ListProperty(long)<block_end><class_stmt>GettableModelStub(db.Model)<block_start>"""
"""<line_sep>gets=[]<line_sep>@staticmethod<def_stmt>get *args **kwargs<block_start>GettableModelStub.gets.append([args kwargs])<block_end><block_end><class_stmt>Author(db.Model)<block_start>name=db.StringProperty()<block_end><class_stmt>Novel(db.Model)<block_start>title=db.StringProperty()<line_sep>author=db.ReferenceProperty(Author)<block_end><class_stmt>EmptyModel(db.Model)<block_start>"""
A model that has no properties but also has no entities in the datastore.
"""<block_end> |
<import_from_stmt>tqdm tqdm<import_stmt>requests<import_stmt>cgi<import_stmt>sys<line_sep># the url of file you want to download, passed from command line arguments
url=sys.argv[1]<line_sep># read 1024 bytes every time
buffer_size=1024<line_sep># download the body of response by chunk, not immediately
response=requests.get(url stream=<true>)<line_sep># get the total file size
file_size=int(response.headers.get("Content-Length" 0))<line_sep># get the default filename
default_filename=url.split("/")[-1]<line_sep># get the content disposition header
content_disposition=response.headers.get("Content-Disposition")<if_stmt>content_disposition# parse the header using cgi
<block_start>value,params=cgi.parse_header(content_disposition)<line_sep># extract filename from content disposition
filename=params.get("filename" default_filename)<block_end><else_stmt># if content dispotion is not available, just use default from URL
<block_start>filename=default_filename<block_end># progress bar, changing the unit to bytes instead of iteration (default by tqdm)
progress=tqdm(response.iter_content(buffer_size) f"Downloading {filename}" total=file_size unit="B" unit_scale=<true> unit_divisor=1024)<with_stmt>open(filename "wb")<as>f<block_start><for_stmt>data progress.iterable# write data read to the file
<block_start>f.write(data)<line_sep># update the progress bar manually
progress.update(len(data))<block_end><block_end> |
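# Hedged usage sketch (script name and URL are placeholders):
#
#   python download.py https://example.com/archive.zip
#   # downloads archive.zip (or the Content-Disposition filename) with a progress bar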
<import_from_stmt>collections defaultdict<import_from_stmt>math log sqrt<import_stmt>random<class_stmt>TreeNode(object)<block_start><def_stmt>__init__ self state<block_start>self.state=state<line_sep>self.visit_count=0<line_sep>self.result_sum=0<line_sep>self.result_max=float('-inf')<line_sep>self.action_visit_counts=defaultdict(int)<line_sep>self.action_result_sums=defaultdict(float)<line_sep>self.action_result_maxes=defaultdict(<lambda>:float('-inf'))<line_sep>self.amaf_action_visit_counts=defaultdict(int)<line_sep>self.amaf_action_result_sums=defaultdict(float)<line_sep>self.amaf_action_result_maxes=defaultdict(<lambda>:float('-inf'))<line_sep>self.blocked=<false><block_end><block_end><class_stmt>TreeSearch(object)<block_start><def_stmt>__init__ self env max_tries default_policy=<none><block_start>self.env=env<line_sep>self.max_tries=max_tries<line_sep>self.nodes=dict()# Mapping from state keys to nodes
self.nodes[env.get_key(env.initial_state)]=TreeNode(env.initial_state)<line_sep>self.default_policy=default_policy<block_end><def_stmt>uct_score self node action amaf_threshold=10<block_start>action_visit_count=node.action_visit_counts[action]<line_sep>action_result_max=node.action_result_maxes[action]<line_sep>amaf_action_visit_count=node.amaf_action_visit_counts[action]<line_sep>amaf_action_result_max=node.amaf_action_result_maxes[action]<line_sep># AMAF and Monte Carlo values are weighted equally when the visit count is
# amaf_threshold
amaf_weight=sqrt(amaf_threshold/(3<times>node.visit_count+amaf_threshold))<if_stmt>action_visit_count<g>0<block_start><return>((1.0-amaf_weight)<times>action_result_max+amaf_weight<times>amaf_action_result_max+sqrt(2.0<times>log(node.visit_count)/action_visit_count))<block_end><else_stmt><block_start><return>float('inf')<block_end><block_end><def_stmt>select_action self state<block_start>available_actions=list()<line_sep># Filter out actions leading to blocked nodes
<for_stmt>action self.env.get_available_actions(state)<block_start>next_state=self.env.get_next_state(state action)<line_sep>next_state_key=self.env.get_key(next_state)<if_stmt>(next_state_key<not><in>self.nodes<or><not>self.nodes[next_state_key].blocked)<block_start>available_actions.append(action)<block_end><block_end><if_stmt>available_actions<block_start><try_stmt># Follow tree policy
<block_start>node=self.nodes[self.env.get_key(state)]<line_sep><return>max(available_actions key=<lambda>action:self.uct_score(node action))<block_end><except_stmt>KeyError# State was not visited yet, follow default policy
<block_start><if_stmt>self.default_policy<is><not><none><block_start><return>self.default_policy(state available_actions)<block_end><else_stmt><block_start><return>random.choice(available_actions)<block_end><block_end><block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>update_node self node actions_after result<block_start>node.visit_count<augadd>1<line_sep>node.result_sum<augadd>result<line_sep>node.result_max=max(node.result_max result)<line_sep>node.action_visit_counts[actions_after[0]]<augadd>1<line_sep>node.action_result_sums[actions_after[0]]<augadd>result<line_sep>node.action_result_maxes[actions_after[0]]=max(node.action_result_maxes[actions_after[0]] result)<line_sep># Update AMAF values (once for each unique action)
<for_stmt>action set(actions_after)<block_start>node.amaf_action_visit_counts[action]<augadd>1<line_sep>node.amaf_action_result_sums[action]<augadd>result<line_sep>node.amaf_action_result_maxes[action]=max(node.amaf_action_result_maxes[action] result)<block_end><block_end><def_stmt>run_iteration self<block_start>result=<none><while_stmt>result<is><none># Selection phase
<block_start>states=[self.env.initial_state]<line_sep>actions=[]<line_sep>action=self.select_action(states[-1])<while_stmt>action<is><not><none><and>self.env.get_key(states[-1])<in>self.nodes<block_start>states.append(self.env.get_next_state(states[-1] action))<line_sep>actions.append(action)<line_sep>action=self.select_action(states[-1])<block_end># Expansion phase
last_state_key=self.env.get_key(states[-1])<if_stmt>last_state_key<in>self.nodes<block_start>last_node=self.nodes[last_state_key]<block_end><else_stmt><block_start>last_node=TreeNode(states[-1])<line_sep>self.nodes[last_state_key]=last_node<block_end># Simulation phase
<for_stmt>try_count range(self.max_tries)<block_start>sim_states=states.copy()<line_sep>sim_actions=actions.copy()<line_sep>action=self.select_action(sim_states[-1])<while_stmt>action<is><not><none><block_start>sim_states.append(self.env.get_next_state(sim_states[-1] action))<line_sep>sim_actions.append(action)<line_sep>action=self.select_action(sim_states[-1])<block_end>result=self.env.get_result(sim_states[-1])<if_stmt>result<is><not><none># Result is valid
<block_start><break><block_end><block_end><if_stmt>result<is><none># No valid simulation after max_tries tries, block the last node
# Next loop iteration will select a different node
<block_start>last_node.blocked=<true><line_sep>print("Blocked node:" [self.env.rules.index(rule)<for>rule last_node.state[1]])<block_end><block_end># Backpropagation phase
<for_stmt>i,state enumerate(sim_states[:-1])<block_start>actions_after=sim_actions[i:]<try_stmt><block_start>node=self.nodes[self.env.get_key(state)]<line_sep>self.update_node(node actions_after result)<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><return>sim_states sim_actions result<block_end><block_end> |
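# Hedged usage sketch: 'MyEnv' is a hypothetical environment; the class above only
# requires it to provide initial_state, rules, get_key(state),
# get_available_actions(state), get_next_state(state, action) and get_result(state).
#
#   search = TreeSearch(MyEnv(), max_tries=10)
#   for _ in range(1000):
#       states, actions, result = search.run_iteration()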
<import_stmt>json<def_stmt>add_user_to_blacklist user_id:int<block_start><with_stmt>open("blacklist.json" "r+")<as>file<block_start>file_data=json.load(file)<line_sep>file_data["ids"].append(user_id)<block_end><with_stmt>open("blacklist.json" "w")<as>file<block_start>file.seek(0)<line_sep>json.dump(file_data file indent=4)<block_end><block_end><def_stmt>remove_user_from_blacklist user_id:int<block_start><with_stmt>open("blacklist.json" "r")<as>file<block_start>file_data=json.load(file)<line_sep>file_data["ids"].remove(user_id)<block_end><with_stmt>open("blacklist.json" "w")<as>file<block_start>file.seek(0)<line_sep>json.dump(file_data file indent=4)<block_end><block_end> |
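# Hedged usage sketch: assumes a blacklist.json file already exists in the working
# directory with the shape {"ids": []}.
#
#   add_user_to_blacklist(123456789)
#   remove_user_from_blacklist(123456789)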
"""Demo oblivious sorting in MPyC, with full secrecy.
Randomly generated secret-shared lists of numbers (integers or fixed-point numbers)
are sorted using MPyC's built-in functions mpc.sorted() and seclist.sort(),
which are the secure counterparts of Python's built-in function sorted() and
list.sort(), respectively.
"""<import_stmt>sys<import_from_stmt>mpyc.runtime mpc<import_from_stmt>mpyc.seclists seclist<async_keyword><def_stmt>main <block_start><if_stmt>sys.argv[1:]<block_start>n=int(sys.argv[1])<block_end><else_stmt><block_start>n=5<line_sep>print('Setting input to default =' n)<block_end>s=[(-1)<power>i<times>(i+n<floordiv>2)<power>2<for>i range(n)]<line_sep>secnum=mpc.SecInt()<line_sep>print('Using secure integers:' secnum)<line_sep>x=list(map(secnum s))<async_keyword><with_stmt>mpc<block_start>mpc.random.shuffle(secnum x)# secret in-place random shuffle
print('Randomly shuffled input:' <await>mpc.output(x))<line_sep>x=mpc.sorted(x key=<lambda>a:a<power>2)# sort on absolute value
print('Sorted by absolute value:' <await>mpc.output(x))<block_end>secnum=mpc.SecFxp()<line_sep>print('Using secure fixed-point numbers:' secnum)<line_sep>x=list(map(secnum s))<async_keyword><with_stmt>mpc<block_start>mpc.random.shuffle(secnum x)# secret in-place random shuffle
print('Randomly shuffled input:' <await>mpc.output(x))<line_sep>x=seclist(x)<line_sep>x.sort(reverse=<true>)# in-place sort in descending order
print('Sorted by descending value:' <await>mpc.output(list(x)))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>mpc.run(main())<block_end> |
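# Hedged usage sketch: the script name is a placeholder and the multi-party flag is
# assumed to be MPyC's standard -M option, shown only for illustration.
#
#   python sort.py 8          # single party, sorts 8 generated numbers
#   python sort.py 8 -M3      # assumed: three local parties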
<import_stmt>argparse<import_stmt>sys<import_stmt>asyncio<import_stmt>sc2<import_from_stmt>sc2 Race<import_from_stmt>sc2.player Bot<import_from_stmt>zerg.zerg_rush ZergRushBot<def_stmt>main <block_start>portconfig=sc2.portconfig.Portconfig()<line_sep>print(portconfig.as_json)<line_sep>player_config=[Bot(Race.Zerg ZergRushBot()) Bot(Race.Zerg <none>)]<for_stmt>g sc2.main._host_game_iter(sc2.maps.get("Abyssal Reef LE") player_config realtime=<false> portconfig=portconfig)<block_start>print(g)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for a stream of logs output by a locally running emulator."""<import_stmt>subprocess<import_from_stmt>typing List<import_from_stmt>android_env.components log_stream<line_sep>_LOGCAT_COMMAND=['logcat' '-v' 'epoch']<class_stmt>AdbLogStream(log_stream.LogStream)<block_start>"""Manages adb logcat process for a locally running emulator."""<def_stmt>__init__ self adb_command_prefix:List[str] *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self._adb_command_prefix=adb_command_prefix<block_end><def_stmt>_get_stream_output self<block_start>cmd=self._adb_command_prefix+_LOGCAT_COMMAND+self._filters<line_sep>self._adb_subprocess=subprocess.Popen(cmd stdout=subprocess.PIPE stderr=subprocess.STDOUT bufsize=1 universal_newlines=<true>)<line_sep><return>self._adb_subprocess.stdout<block_end><def_stmt>stop_stream self<block_start>self._adb_subprocess.kill()<block_end><block_end> |
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
<import_from_stmt>django.dispatch receiver<import_from_stmt>django.urls resolve reverse<import_from_stmt>django.utils.translation gettext_lazy<as>_<import_from_stmt>pretix.base.signals order_paid order_placed<import_from_stmt>pretix.control.signals nav_event<line_sep>@receiver(nav_event dispatch_uid="statistics_nav")<def_stmt>control_nav_import sender request=<none> **kwargs<block_start>url=resolve(request.path_info)<if_stmt><not>request.user.has_event_permission(request.organizer request.event 'can_view_orders' request=request)<block_start><return>[]<block_end><return>[{'label':_('Statistics') 'url':reverse('plugins:statistics:index' kwargs={'event':request.event.slug 'organizer':request.event.organizer.slug }) 'parent':reverse('control:event.orders' kwargs={'event':request.event.slug 'organizer':request.event.organizer.slug }) 'active':(url.namespace<eq>'plugins:statistics') 'icon':'bar-chart' }]<block_end><def_stmt>clear_cache sender *args **kwargs<block_start>cache=sender.cache<line_sep>cache.delete('statistics_obd_data')<line_sep>cache.delete('statistics_obp_data')<line_sep>cache.delete('statistics_rev_data')<block_end>order_placed.connect(clear_cache)<line_sep>order_paid.connect(clear_cache)<line_sep> |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
<import_from_future_stmt> absolute_import division print_function unicode_literals <import_stmt>six<import_from_stmt>fnmatch fnmatchcase<import_stmt>sys<import_from_stmt>. Command<import_from_stmt>.. console<import_from_stmt>..console log<import_from_stmt>..results iter_results<import_from_stmt>. util<class_stmt>Rm(Command)<block_start>@classmethod<def_stmt>setup_arguments cls subparsers<block_start>parser=subparsers.add_parser("rm" help="Remove results from the database" description="""
Removes entries from the results database.
""")<line_sep>parser.add_argument('patterns' nargs='+' help="""Pattern(s) to match, each of the form X=Y. X may
be one of "benchmark", "commit_hash", "python" or any of
the machine or environment params. Y is a case-sensitive
glob pattern.""")<line_sep>parser.add_argument("-y" action="store_true" help="""Don't prompt for confirmation.""")<line_sep>parser.set_defaults(func=cls.run_from_args)<line_sep><return>parser<block_end>@classmethod<def_stmt>run_from_conf_args cls conf args<block_start><return>cls.run(conf args.patterns args.y)<block_end>@classmethod<def_stmt>run cls conf patterns y=<true><block_start>global_patterns={}<line_sep>single_benchmark=<none><line_sep>files_to_remove=set()<line_sep>count=0<for_stmt>pattern patterns<block_start>parts=pattern.split('=' 1)<if_stmt>len(parts)<ne>2<block_start><raise>util.UserError("Invalid pattern '{0}'".format(pattern))<block_end><if_stmt>parts[0]<eq>'benchmark'<block_start><if_stmt>single_benchmark<is><not><none><block_start><raise>util.UserError("'benchmark' appears more than once")<block_end>single_benchmark=parts[1]<block_end><else_stmt><block_start><if_stmt>parts[0]<in>global_patterns<block_start><raise>util.UserError("'{0}' appears more than once".format(parts[0]))<block_end>global_patterns[parts[0]]=parts[1]<block_end><block_end><for_stmt>result iter_results(conf.results_dir)<block_start>found=<true><for_stmt>key,val six.iteritems(global_patterns)<block_start><if_stmt>key<eq>'commit_hash'<block_start><if_stmt><not>util.hash_equal(result.commit_hash val)<block_start>found=<false><line_sep><break><block_end><block_end><elif_stmt>key<eq>'python'<block_start><if_stmt><not>fnmatchcase(result.env.python val)<block_start>found=<false><line_sep><break><block_end><block_end><else_stmt><block_start><if_stmt><not>fnmatchcase(result.params.get(key) val)<block_start>found=<false><line_sep><break><block_end><block_end><block_end><if_stmt><not>found<block_start><continue><block_end><if_stmt>single_benchmark<is><not><none><block_start>found=<false><for_stmt>benchmark list(result.get_all_result_keys())<block_start><if_stmt>fnmatchcase(benchmark single_benchmark)<block_start>count<augadd>1<line_sep>files_to_remove.add(result)<line_sep>result.remove_result(benchmark)<block_end><block_end><block_end><else_stmt><block_start>files_to_remove.add(result)<block_end><block_end><if_stmt>single_benchmark<is><not><none><block_start>log.info("Removing {0} benchmarks in {1} files".format(count len(files_to_remove)))<block_end><else_stmt><block_start>log.info("Removing {0} files".format(len(files_to_remove)))<block_end><if_stmt><not>y<block_start>do=console.get_answer_default("Perform operations" "n")<if_stmt>len(do)<and>do.lower()[0]<ne>'y'<block_start>sys.exit(0)<block_end><block_end><if_stmt>single_benchmark<is><not><none><block_start><for_stmt>result files_to_remove<block_start>result.save(conf.results_dir)<block_end><block_end><else_stmt><block_start><for_stmt>result files_to_remove<block_start>result.rm(conf.results_dir)<block_end><block_end><block_end><block_end> |
<import_stmt>ast<import_stmt>json<import_stmt>logging<import_stmt>subprocess<import_from_stmt>collections defaultdict<import_from_stmt>ipaddress ip_address ip_network<import_stmt>ptf<import_stmt>ptf.packet<as>scapy<import_stmt>ptf.dataplane<as>dataplane<import_from_stmt>ptf config<import_from_stmt>ptf.base_tests BaseTest<import_from_stmt>ptf.testutils *<import_from_stmt>ptf.mask Mask<class_stmt>VlanTest(BaseTest)<block_start><def_stmt>__init__ self<block_start>BaseTest.__init__(self)<line_sep>self.test_params=test_params_get()<block_end>#--------------------------------------------------------------------------
<def_stmt>log self message<block_start>logging.info(message)<block_end>#--------------------------------------------------------------------------
<def_stmt>shell self cmds<block_start>sp=subprocess.Popen(cmds shell=<false> stdout=subprocess.PIPE stderr=subprocess.PIPE)<line_sep>stdout,stderr=sp.communicate()<line_sep>rc=sp.returncode<line_sep><return>stdout stderr rc<block_end>#--------------------------------------------------------------------------
<def_stmt>setUp self<block_start>self.vlan_ports_list=ast.literal_eval(self.test_params["vlan_ports_list"])<line_sep>self.vlan_intf_list=ast.literal_eval(self.test_params["vlan_intf_list"])<line_sep>self.router_mac=self.test_params["router_mac"]<for_stmt>vlan_port self.vlan_ports_list<block_start>vlan_port["pvid"]=int(vlan_port["pvid"])<line_sep>vlan_port["port_index"]=int(vlan_port["port_index"])<block_end>self.dataplane=ptf.dataplane_instance<line_sep>self.test_params=test_params_get()<line_sep>self.log("Create VLAN intf")<for_stmt>vlan_port self.vlan_ports_list<block_start><for_stmt>permit_vlanid vlan_port["permit_vlanid"].keys()<block_start><if_stmt>int(permit_vlanid)<ne>vlan_port["pvid"]<block_start>self.shell(["ip" "link" "add" "link" "eth%d"%vlan_port["port_index"] "name" "eth%d.%s"%(vlan_port["port_index"] permit_vlanid) "type" "vlan" "id" str(permit_vlanid)])<line_sep>self.shell(["ip" "link" "set" "eth%d.%s"%(vlan_port["port_index"] permit_vlanid) "up"])<block_end><block_end><block_end>self.setUpArpResponder()<line_sep>self.log("Start arp_responder")<line_sep>self.shell(["supervisorctl" "start" "arp_responder"])<line_sep>logging.info("VLAN test starting ...")<line_sep><pass><block_end>#--------------------------------------------------------------------------
<def_stmt>setUpArpResponder self<block_start>vlan_ports_list=self.vlan_ports_list<line_sep>d=defaultdict(list)<for_stmt>vlan_port vlan_ports_list<block_start><for_stmt>permit_vlanid vlan_port["permit_vlanid"].keys()<block_start><if_stmt>int(permit_vlanid)<eq>vlan_port["pvid"]<block_start>iface="eth%d"%vlan_port["port_index"]<block_end><else_stmt><block_start>iface="eth%d.%s"%(vlan_port["port_index"] permit_vlanid)<block_end>d[iface].append(vlan_port["permit_vlanid"][str(permit_vlanid)]["peer_ip"])<block_end><block_end><with_stmt>open('/tmp/from_t1.json' 'w')<as>file<block_start>json.dump(d file)<block_end><block_end>#--------------------------------------------------------------------------
<def_stmt>tearDown self<block_start>logging.info("VLAN test ending ...")<line_sep>self.log("Stop arp_responder")<line_sep>self.shell(["supervisorctl" "stop" "arp_responder"])<line_sep>self.log("Delete VLAN intf")<for_stmt>vlan_port self.vlan_ports_list<block_start><for_stmt>permit_vlanid vlan_port["permit_vlanid"].keys()<block_start><if_stmt>int(permit_vlanid)<ne>vlan_port["pvid"]<block_start>self.shell(["ip" "link" "delete" "eth%d.%d"%(vlan_port["port_index"] int(permit_vlanid))])<block_end><block_end><block_end><pass><block_end>#--------------------------------------------------------------------------
<def_stmt>build_icmp_packet self vlan_id src_mac="00:22:00:00:00:02" dst_mac="ff:ff:ff:ff:ff:ff" src_ip="192.168.0.1" dst_ip="192.168.0.2" ttl=64<block_start>pkt=simple_icmp_packet(pktlen=100<if>vlan_id<eq>0<else>104 eth_dst=dst_mac eth_src=src_mac dl_vlan_enable=<false><if>vlan_id<eq>0<else><true> vlan_vid=vlan_id vlan_pcp=0 ip_src=src_ip ip_dst=dst_ip ip_ttl=ttl)<line_sep><return>pkt<block_end>#--------------------------------------------------------------------------
<def_stmt>verify_icmp_packets self vlan_port vlan_id<block_start>untagged_dst_ports=[]<line_sep>tagged_dst_ports=[]<line_sep>untagged_pkts=[]<line_sep>tagged_pkts=[]<line_sep>untagged_pkt=self.build_icmp_packet(0)<line_sep>tagged_pkt=self.build_icmp_packet(vlan_id)<for_stmt>port self.vlan_ports_list<block_start><if_stmt>vlan_port["port_index"]<eq>port["port_index"]# Skip src port
<block_start><continue><block_end><if_stmt>port["pvid"]<eq>vlan_id<block_start>untagged_dst_ports.append(port["port_index"])<line_sep>untagged_pkts.append(untagged_pkt)<block_end><elif_stmt>vlan_id<in>map(int port["permit_vlanid"].keys())<block_start>tagged_dst_ports.append(port["port_index"])<line_sep>tagged_pkts.append(tagged_pkt)<block_end><block_end>self.log("Verify untagged packets from ports "+str(untagged_dst_ports)+" tagged packets from ports "+str(tagged_dst_ports))<line_sep>verify_each_packet_on_each_port(self untagged_pkts+tagged_pkts untagged_dst_ports+tagged_dst_ports)<block_end>#--------------------------------------------------------------------------
<def_stmt>verify_icmp_packets_from_specified_port self port_id vlan_id src_mac dst_mac src_ip dst_ip ttl<block_start>self.log("Verify packet from port "+str(port_id))<line_sep>pkt=self.build_icmp_packet(vlan_id src_mac dst_mac src_ip dst_ip ttl)<line_sep>verify_packet(self pkt port_id)<block_end>#--------------------------------------------------------------------------
<def_stmt>runTest self<block_start>vlan_ports_list=self.vlan_ports_list<line_sep>vlan_intf_list=self.vlan_intf_list<line_sep># Test case #1
self.log("Test case #1 starting ...")<line_sep># Send untagged packets from each port.
# Verify packets egress without tag from ports whose PVID same with ingress port
# Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port.
<for_stmt>vlan_port vlan_ports_list<block_start>pkt=self.build_icmp_packet(0)<line_sep>self.log("Send untagged packet from {} ...".format(str(vlan_port["port_index"])))<line_sep>self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))<line_sep>send(self vlan_port["port_index"] pkt)<line_sep>self.verify_icmp_packets(vlan_port vlan_port["pvid"])<block_end># Test case #2
self.log("Test case #2 starting ...")<line_sep># Send tagged packets from each port.
# Verify packets egress without tag from ports whose PVID same with ingress port
# Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port.
<for_stmt>vlan_port vlan_ports_list<block_start><for_stmt>permit_vlanid map(int vlan_port["permit_vlanid"].keys())<block_start>pkt=self.build_icmp_packet(permit_vlanid)<line_sep>self.log("Send tagged({}) packet from {} ...".format(permit_vlanid str(vlan_port["port_index"])))<line_sep>self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))<line_sep>send(self vlan_port["port_index"] pkt)<line_sep>self.verify_icmp_packets(vlan_port permit_vlanid)<block_end><block_end># Test case #3
# Send packets with invalid VLAN ID
# Verify no port can receive these packets
self.log("Test case #3 starting ...")<line_sep>invalid_tagged_pkt=self.build_icmp_packet(4095)<line_sep>masked_invalid_tagged_pkt=Mask(invalid_tagged_pkt)<line_sep>masked_invalid_tagged_pkt.set_do_not_care_scapy(scapy.Dot1Q "vlan")<for_stmt>vlan_port vlan_ports_list<block_start>src_port=vlan_port["port_index"]<line_sep>dst_ports=[port["port_index"]<for>port vlan_ports_list<if>port<ne>vlan_port]<line_sep>self.log("Send invalid tagged packet "+" from "+str(src_port)+"...")<line_sep>self.log(invalid_tagged_pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))<line_sep>send(self src_port invalid_tagged_pkt)<line_sep>self.log("Check on "+str(dst_ports)+"...")<line_sep>verify_no_packet_any(self masked_invalid_tagged_pkt dst_ports)<block_end># Test case #4
# Send packets over VLAN interfaces.
# Verify packets can be receive on the egress port.
self.log("Test case #4 starting ...")<line_sep>target_list=[]<for_stmt>vlan_port vlan_ports_list<block_start><for_stmt>vlan_id vlan_port["permit_vlanid"].keys()<block_start>item={"vlan_id":int(vlan_id) "port_index":vlan_port["port_index"] "peer_ip":vlan_port["permit_vlanid"][vlan_id]["peer_ip"] "remote_ip":vlan_port["permit_vlanid"][vlan_id]["remote_ip"] "pvid":vlan_port["pvid"]}<line_sep>target_list.append(item)<block_end><block_end><for_stmt>vlan_port vlan_ports_list<block_start>src_port=vlan_port["port_index"]<line_sep>src_mac=self.dataplane.get_mac(0 src_port)<line_sep>dst_mac=self.router_mac<for_stmt>vlan_id map(int vlan_port["permit_vlanid"].keys())# Test for for directly-connected routing
<block_start>src_ip=vlan_port["permit_vlanid"][str(vlan_id)]["peer_ip"]<for_stmt>target target_list<block_start><if_stmt>vlan_id<eq>target["vlan_id"]# Skip same VLAN forwarding
<block_start><continue><block_end>pkt=self.build_icmp_packet(vlan_id<if>vlan_id<ne>vlan_port["pvid"]<else>0 src_mac dst_mac src_ip target["peer_ip"])<line_sep>send(self src_port pkt)<line_sep>self.log("Send {} packet from {} ...".format("untagged"<if>vlan_id<eq>0<else>"tagged(%d)"%vlan_id src_port))<line_sep>self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))<line_sep>self.verify_icmp_packets_from_specified_port(target["port_index"] target["vlan_id"]<if>target["vlan_id"]<ne>target["pvid"]<else>0 dst_mac self.dataplane.get_mac(0 target["port_index"]) src_ip target["peer_ip"] 63)<block_end># Test for for indirectly-connected routing
src_ip=vlan_port["permit_vlanid"][str(vlan_id)]["remote_ip"]<for_stmt>target target_list<block_start><if_stmt>vlan_id<eq>target["vlan_id"]# Skip same VLAN forwarding
<block_start><continue><block_end>pkt=self.build_icmp_packet(vlan_id<if>vlan_id<ne>vlan_port["pvid"]<else>0 src_mac dst_mac src_ip target["remote_ip"])<line_sep>self.log("Send {} packet from {} ...".format("untagged"<if>vlan_id<eq>0<else>"tagged(%d)"%vlan_id src_port))<line_sep>self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))<line_sep>send(self src_port pkt)<line_sep>self.verify_icmp_packets_from_specified_port(target["port_index"] target["vlan_id"]<if>target["vlan_id"]<ne>target["pvid"]<else>0 dst_mac self.dataplane.get_mac(0 target["port_index"]) src_ip target["remote_ip"] 63)<block_end><block_end><block_end># Test case #5
# Send ICMP packets to VLAN interfaces.
# Verify ICMP reply packets can be received from ingress port.
self.log("Test case #5 starting ...")<for_stmt>vlan_port vlan_ports_list<block_start>src_port=vlan_port["port_index"]<line_sep>src_mac=self.dataplane.get_mac(0 src_port)<line_sep>dst_mac=self.router_mac<for_stmt>vlan_id map(int vlan_port["permit_vlanid"].keys())<block_start>src_ip=vlan_port["permit_vlanid"][str(vlan_id)]["peer_ip"]<for_stmt>vlan_intf vlan_intf_list<block_start><if_stmt>int(vlan_intf["vlan_id"])<ne>vlan_id<block_start><continue><block_end>dst_ip=vlan_intf["ip"].split("/")[0]<line_sep>pkt=self.build_icmp_packet(vlan_id<if>vlan_id<ne>vlan_port["pvid"]<else>0 src_mac dst_mac src_ip dst_ip)<line_sep>self.log("Send {} packet from {} ...".format("untagged"<if>vlan_id<eq>0<else>"tagged(%d)"%vlan_id src_port))<line_sep>self.log(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))<line_sep>send(self src_port pkt)<line_sep>exp_pkt=simple_icmp_packet(eth_src=self.router_mac eth_dst=src_mac dl_vlan_enable=<true><if>vlan_id<ne>vlan_port["pvid"]<else><false> vlan_vid=vlan_id<if>vlan_id<ne>vlan_port["pvid"]<else>0 vlan_pcp=0 ip_dst=src_ip ip_src=dst_ip icmp_type=0 icmp_code=0)<line_sep>masked_exp_pkt=Mask(exp_pkt)<line_sep>masked_exp_pkt.set_do_not_care_scapy(scapy.IP "id")<line_sep>verify_packets(self masked_exp_pkt list(str(src_port)))<line_sep>self.log("Verify packet from port "+str(src_port))<block_end><block_end><block_end><block_end>#--------------------------------------------------------------------------
<block_end> |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
<import_stmt>os<import_stmt>csv<import_stmt>pickle<import_stmt>random<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>tqdm<import_from_stmt>tqdm._tqdm trange<import_stmt>time<try_stmt><block_start><import_from_stmt>..utils.check *<block_end><except_stmt><block_start><import_from_stmt>pyhealth.utils.check *<block_end><class_stmt>imagedata<block_start><def_stmt>__init__ self expdata_id root_dir='.'<block_start>"""
experiment data generator class for CMS datasets
Parameters
----------
expdata_id : str
name of the current experiment
root_dir : str, optional (default='.')
root directory used to store the generated experiment data
"""<line_sep>self.expdata_id=expdata_id<line_sep>check_expdata_dir(expdata_id=expdata_id)<line_sep>self.root_dir=root_dir<line_sep>self.expdata_dir=os.path.join(self.root_dir 'experiments_data' self.expdata_id)<line_sep>print('Current ExpData_ID: {0} --- Target for CMS'.format(self.expdata_id))<block_end><def_stmt>get_exp_data self sel_task='diagnose' shuffle=<true> split_ratio=[0.64 0.16 0.2] data_root='' n_limit=-1<block_start>"""
Parameters
----------
sel_task : str, optional (default='diagnose')
name of the current healthcare task
shuffle : bool, optional (default=True)
determine whether to shuffle the data or not
split_ratio : list, optional (default=[0.64,0.16,0.2])
ratios used to split the whole data into train/valid/test sets
data_root : str (default='')
directory of the data to load; an exception is raised if it is left empty
n_limit : int, optional (default=-1)
number of samples to use; if n_limit==-1, use all data
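Examples
--------
A minimal usage sketch; the expdata_id and data_root values below are
placeholders and depend on your local setup.
>>> expdata = imagedata('test.1.image')
>>> expdata.get_exp_data(sel_task='diagnose', data_root='./datasets/image')
>>> expdata.load_exp_data()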
"""<line_sep>self.sel_task=sel_task<if_stmt>data_root<eq>''<block_start><raise>Exception('fill in correct data_root')<block_end>all_list=[]<line_sep>l_list=[]<line_sep>episode_dir=os.path.join(data_root 'x_data')<line_sep>feat_n,label_n=0 0<line_sep>label_seq=pd.read_csv(os.path.join(data_root 'y_data' self.sel_task+'.csv')).values<for_stmt>row_id trange(len(label_seq))<block_start><if_stmt>n_limit<g>0<and>row_id<g>n_limit<block_start><break><block_end>time.sleep(0.01)<line_sep>row=label_seq[row_id :]<line_sep>concrete_path=os.path.join(episode_dir row[0])<if_stmt>os.path.exists(concrete_path)<is><false><block_start><continue><block_end>all_list.append([concrete_path]+row[1:].astype(float).tolist())<line_sep>label_n=len(row[1:])<block_end># shuffle the list
<if_stmt>shuffle<block_start>random.shuffle(all_list)<block_end>N=len(all_list)<line_sep>x_list=[]<line_sep>y_list=[]<for_stmt>item all_list<block_start>x_list.append(item[0])<line_sep>y_list.append(np.array(item[1:]).astype(float))<block_end>train_ratio=split_ratio[0]<line_sep>valid_ratio=split_ratio[1]<line_sep>training_x=x_list[:int(train_ratio<times>N)]<line_sep>validing_x=x_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_x=x_list[int((train_ratio+valid_ratio)<times>N):]<line_sep>training_y=y_list[:int(train_ratio<times>N)]<line_sep>validing_y=y_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_y=y_list[int((train_ratio+valid_ratio)<times>N):]<if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start>os.makedirs(self.expdata_dir)<block_end>pickle.dump(training_x open(os.path.join(self.expdata_dir 'train_x.pkl') 'wb'))<line_sep>pickle.dump(validing_x open(os.path.join(self.expdata_dir 'valid_x.pkl') 'wb'))<line_sep>pickle.dump(testing_x open(os.path.join(self.expdata_dir 'test_x.pkl') 'wb'))<line_sep>print('finished X generate')<line_sep>pickle.dump(training_y open(os.path.join(self.expdata_dir 'train_y.pkl') 'wb'))<line_sep>pickle.dump(validing_y open(os.path.join(self.expdata_dir 'valid_y.pkl') 'wb'))<line_sep>pickle.dump(testing_y open(os.path.join(self.expdata_dir 'test_y.pkl') 'wb'))<line_sep>print('finished Y generate')<line_sep>expdata_statistic={'task':self.sel_task 'raio':split_ratio 'label_n':label_n 'len_train':len(training_x) 'len_valid':len(validing_x) 'len_test':len(testing_x)}<line_sep>pickle.dump(expdata_statistic open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'wb'))<line_sep>self.train={'x':training_x 'y':training_y 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'label_n':label_n}<line_sep>print('generate finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>load_exp_data self<block_start><if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start><raise>Exception('cannot find exp data dir {0}'.format(self.expdata_dir))<block_end>training_x=pickle.load(open(os.path.join(self.expdata_dir 'train_x.pkl') 'rb'))<line_sep>validing_x=pickle.load(open(os.path.join(self.expdata_dir 'valid_x.pkl') 'rb'))<line_sep>testing_x=pickle.load(open(os.path.join(self.expdata_dir 'test_x.pkl') 'rb'))<line_sep>training_y=pickle.load(open(os.path.join(self.expdata_dir 'train_y.pkl') 'rb'))<line_sep>validing_y=pickle.load(open(os.path.join(self.expdata_dir 'valid_y.pkl') 'rb'))<line_sep>testing_y=pickle.load(open(os.path.join(self.expdata_dir 'test_y.pkl') 'rb'))<line_sep>expdata_statistic=pickle.load(open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'rb'))<line_sep>label_n=expdata_statistic['label_n']<line_sep>self.train={'x':training_x 'y':training_y 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'label_n':label_n}<line_sep>print('load finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' 
expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>show_data self k=3<block_start>"""
Parameters
----------
k : int, optional (default=3)
number of samples from each split (train/valid/test) to show
"""<line_sep>print('------------Train--------------')<line_sep>print('x_data' self.train['x'][:k])<line_sep>print('y_data' self.train['y'][:k])<line_sep>print('------------Valid--------------')<line_sep>print('x_data' self.valid['x'][:k])<line_sep>print('y_data' self.valid['y'][:k])<line_sep>print('------------Test--------------')<line_sep>print('x_data' self.test['x'][:k])<line_sep>print('y_data' self.test['y'][:k])<block_end><block_end><class_stmt>sequencedata<block_start><def_stmt>__init__ self expdata_id root_dir='.'<block_start>"""
experiment data generator class for MIMIC datasets
Parameters
----------
expdata_id : str
name of the current experiment
root_dir : str, optional (default='.')
root directory used to store the generated experiment data
"""<line_sep>self.expdata_id=expdata_id<line_sep>check_expdata_dir(expdata_id=expdata_id)<line_sep>self.root_dir=root_dir<line_sep>self.expdata_dir=os.path.join(self.root_dir 'experiments_data' self.expdata_id)<line_sep>print('Current ExpData_ID: {0} --- Target for MIMIC'.format(self.expdata_id))<block_end><def_stmt>get_exp_data self sel_task='phenotyping' shuffle=<true> split_ratio=[0.64 0.16 0.2] data_root='' n_limit=-1<block_start>"""
Parameters
----------
sel_task : str, optional (default='phenotyping')
name of the current healthcare task
shuffle : bool, optional (default=True)
determine whether to shuffle the data or not
split_ratio : list, optional (default=[0.64,0.16,0.2])
ratios used to split the whole data into train/valid/test sets
data_root : str (default='')
directory of the data to load; an exception is raised if it is left empty
n_limit : int, optional (default=-1)
number of samples to use; if n_limit==-1, use all data
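Examples
--------
A minimal usage sketch; the expdata_id and data_root values below are
placeholders. Unlike imagedata, the generated splits also carry a
sequence-length list under the 'l' key.
>>> expdata = sequencedata('test.1.sequence')
>>> expdata.get_exp_data(sel_task='phenotyping', data_root='./datasets/mimic')
>>> len(expdata.train['x']) == len(expdata.train['l'])
True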
"""<line_sep>self.sel_task=sel_task<if_stmt>data_root<eq>''<block_start><raise>Exception('fill in correct data_root')<block_end>all_list=[]<line_sep>l_list=[]<line_sep>episode_dir=os.path.join(data_root 'x_data')<line_sep>feat_n,label_n=0 0<line_sep>label_seq=pd.read_csv(os.path.join(data_root 'y_data' self.sel_task+'.csv')).values<for_stmt>row_id trange(len(label_seq))<block_start><if_stmt>n_limit<g>0<and>row_id<g>n_limit<block_start><break><block_end>time.sleep(0.01)<line_sep>row=label_seq[row_id :]<line_sep>concrete_path=os.path.join(episode_dir row[0])<if_stmt>os.path.exists(concrete_path)<is><false><block_start><continue><block_end>seq_l,feat_n_all=pd.read_csv(concrete_path).shape<if_stmt>seq_l<l>2<block_start><continue><block_end>all_list.append([concrete_path]+[seq_l]+row[1:].astype(float).tolist())<line_sep>label_n=len(row[1:])<block_end>feat_n=feat_n_all-1<line_sep># shuffle the list
<if_stmt>shuffle<block_start>random.shuffle(all_list)<block_end>N=len(all_list)<line_sep>x_list=[]<line_sep>y_list=[]<line_sep>l_list=[]<for_stmt>item all_list<block_start>x_list.append(item[0])<line_sep>l_list.append(item[1])<line_sep>y_list.append(np.array(item[2:]).astype(float))<block_end>train_ratio=split_ratio[0]<line_sep>valid_ratio=split_ratio[1]<line_sep>training_x=x_list[:int(train_ratio<times>N)]<line_sep>validing_x=x_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_x=x_list[int((train_ratio+valid_ratio)<times>N):]<line_sep>training_y=y_list[:int(train_ratio<times>N)]<line_sep>validing_y=y_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_y=y_list[int((train_ratio+valid_ratio)<times>N):]<line_sep>training_l=l_list[:int(train_ratio<times>N)]<line_sep>validing_l=l_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_l=l_list[int((train_ratio+valid_ratio)<times>N):]<if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start>os.makedirs(self.expdata_dir)<block_end>pickle.dump(training_x open(os.path.join(self.expdata_dir 'train_x.pkl') 'wb'))<line_sep>pickle.dump(validing_x open(os.path.join(self.expdata_dir 'valid_x.pkl') 'wb'))<line_sep>pickle.dump(testing_x open(os.path.join(self.expdata_dir 'test_x.pkl') 'wb'))<line_sep>print('finished X generate')<line_sep>pickle.dump(training_y open(os.path.join(self.expdata_dir 'train_y.pkl') 'wb'))<line_sep>pickle.dump(validing_y open(os.path.join(self.expdata_dir 'valid_y.pkl') 'wb'))<line_sep>pickle.dump(testing_y open(os.path.join(self.expdata_dir 'test_y.pkl') 'wb'))<line_sep>print('finished Y generate')<line_sep>pickle.dump(training_l open(os.path.join(self.expdata_dir 'train_l.pkl') 'wb'))<line_sep>pickle.dump(validing_l open(os.path.join(self.expdata_dir 'valid_l.pkl') 'wb'))<line_sep>pickle.dump(testing_l open(os.path.join(self.expdata_dir 'test_l.pkl') 'wb'))<line_sep>print('finished L generate')<line_sep>expdata_statistic={'task':self.sel_task 'raio':split_ratio 'feat_n':feat_n 'label_n':label_n 'len_train':len(training_x) 'len_valid':len(validing_x) 'len_test':len(testing_x)}<line_sep>pickle.dump(expdata_statistic open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'wb'))<line_sep>self.train={'x':training_x 'y':training_y 'l':training_l 'feat_n':feat_n 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'l':validing_l 'feat_n':feat_n 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'l':testing_l 'feat_n':feat_n 'label_n':label_n}<line_sep>print('generate finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of features:' expdata_statistic['feat_n'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>load_exp_data self<block_start><if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start><raise>Exception('cannot find exp data dir {0}'.format(self.expdata_dir))<block_end>training_x=pickle.load(open(os.path.join(self.expdata_dir 'train_x.pkl') 'rb'))<line_sep>validing_x=pickle.load(open(os.path.join(self.expdata_dir 'valid_x.pkl') 'rb'))<line_sep>testing_x=pickle.load(open(os.path.join(self.expdata_dir 'test_x.pkl') 'rb'))<line_sep>training_y=pickle.load(open(os.path.join(self.expdata_dir 'train_y.pkl') 
'rb'))<line_sep>validing_y=pickle.load(open(os.path.join(self.expdata_dir 'valid_y.pkl') 'rb'))<line_sep>testing_y=pickle.load(open(os.path.join(self.expdata_dir 'test_y.pkl') 'rb'))<line_sep>training_l=pickle.load(open(os.path.join(self.expdata_dir 'train_l.pkl') 'rb'))<line_sep>validing_l=pickle.load(open(os.path.join(self.expdata_dir 'valid_l.pkl') 'rb'))<line_sep>testing_l=pickle.load(open(os.path.join(self.expdata_dir 'test_l.pkl') 'rb'))<line_sep>expdata_statistic=pickle.load(open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'rb'))<line_sep>feat_n=expdata_statistic['feat_n']<line_sep>label_n=expdata_statistic['label_n']<line_sep>self.train={'x':training_x 'y':training_y 'l':training_l 'feat_n':feat_n 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'l':validing_l 'feat_n':feat_n 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'l':testing_l 'feat_n':feat_n 'label_n':label_n}<line_sep>print('load finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of features:' expdata_statistic['feat_n'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>show_data self k=3<block_start>"""
Parameters
----------
k : int, optional (default=3)
number of samples from each split (train/valid/test) to show
"""<line_sep>print('------------Train--------------')<line_sep>print('x_data' self.train['x'][:k])<line_sep>print('y_data' self.train['y'][:k])<line_sep>print('l_data' self.train['l'][:k])<line_sep>print('------------Valid--------------')<line_sep>print('x_data' self.valid['x'][:k])<line_sep>print('y_data' self.valid['y'][:k])<line_sep>print('l_data' self.valid['l'][:k])<line_sep>print('------------Test--------------')<line_sep>print('x_data' self.test['x'][:k])<line_sep>print('y_data' self.test['y'][:k])<line_sep>print('l_data' self.test['l'][:k])<block_end><block_end><class_stmt>ecgdata<block_start><def_stmt>__init__ self expdata_id root_dir='.'<block_start>"""
experiment data generator class for ECG datasets
Parameters
----------
expdata_id : str
name of the current experiment
root_dir : str, optional (default='.')
root directory used to store the generated experiment data
"""<line_sep>self.expdata_id=expdata_id<line_sep>check_expdata_dir(expdata_id=expdata_id)<line_sep>self.root_dir=root_dir<line_sep>self.expdata_dir=os.path.join(self.root_dir 'experiments_data' self.expdata_id)<line_sep>print('Current ExpData_ID: {0} --- Target for ECG'.format(self.expdata_id))<block_end><def_stmt>get_exp_data self sel_task='diagnose' shuffle=<true> split_ratio=[0.64 0.16 0.2] data_root='' n_limit=-1<block_start>"""
Parameters
----------
sel_task : str, optional (default='diagnose')
name of the current healthcare task
shuffle : bool, optional (default=True)
determine whether to shuffle the data or not
split_ratio : list, optional (default=[0.64,0.16,0.2])
ratios used to split the whole data into train/valid/test sets
data_root : str (default='')
directory of the data to load; an exception is raised if it is left empty
n_limit : int, optional (default=-1)
number of samples to use; if n_limit==-1, use all data
"""<line_sep>self.sel_task=sel_task<if_stmt>data_root<eq>''<block_start><raise>Exception('fill in correct data_root')<block_end>all_list=[]<line_sep>l_list=[]<line_sep>episode_dir=os.path.join(data_root 'x_data')<line_sep>feat_n,label_n=0 0<line_sep>feat_seq=pickle.load(open(os.path.join(data_root 'x_data' 'feat.pkl') 'rb'))<line_sep>label_seq=pickle.load(open(os.path.join(data_root 'y_data' self.sel_task+'.pkl') 'rb'))<line_sep>label_n=np.shape(label_seq)[1]<line_sep>feat_n=np.shape(feat_seq)[1]<for_stmt>cur_i,each_label enumerate(label_seq)<block_start>all_list.append(each_label.tolist()+feat_seq[cur_i].tolist())<block_end># shuffle the list
<if_stmt>shuffle<block_start>random.shuffle(all_list)<block_end>N=len(all_list)<line_sep>x_list=[]<line_sep>y_list=[]<for_stmt>item all_list<block_start>x_list.append(np.array(item[label_n:]).astype(float))<line_sep>y_list.append(np.array(item[:label_n]).astype(float))<block_end>train_ratio=split_ratio[0]<line_sep>valid_ratio=split_ratio[1]<line_sep>training_x=x_list[:int(train_ratio<times>N)]<line_sep>validing_x=x_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_x=x_list[int((train_ratio+valid_ratio)<times>N):]<line_sep>training_y=y_list[:int(train_ratio<times>N)]<line_sep>validing_y=y_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_y=y_list[int((train_ratio+valid_ratio)<times>N):]<if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start>os.makedirs(self.expdata_dir)<block_end>pickle.dump(training_x open(os.path.join(self.expdata_dir 'train_x.pkl') 'wb'))<line_sep>pickle.dump(validing_x open(os.path.join(self.expdata_dir 'valid_x.pkl') 'wb'))<line_sep>pickle.dump(testing_x open(os.path.join(self.expdata_dir 'test_x.pkl') 'wb'))<line_sep>print('finished X generate')<line_sep>pickle.dump(training_y open(os.path.join(self.expdata_dir 'train_y.pkl') 'wb'))<line_sep>pickle.dump(validing_y open(os.path.join(self.expdata_dir 'valid_y.pkl') 'wb'))<line_sep>pickle.dump(testing_y open(os.path.join(self.expdata_dir 'test_y.pkl') 'wb'))<line_sep>print('finished Y generate')<line_sep>expdata_statistic={'task':self.sel_task 'raio':split_ratio 'feat_n':feat_n 'label_n':label_n 'len_train':len(training_x) 'len_valid':len(validing_x) 'len_test':len(testing_x)}<line_sep>pickle.dump(expdata_statistic open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'wb'))<line_sep>self.train={'x':training_x 'y':training_y 'feat_n':feat_n 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'feat_n':feat_n 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'feat_n':feat_n 'label_n':label_n}<line_sep>print('generate finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of features:' expdata_statistic['feat_n'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>load_exp_data self<block_start><if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start><raise>Exception('cannot find exp data dir {0}'.format(self.expdata_dir))<block_end>training_x=pickle.load(open(os.path.join(self.expdata_dir 'train_x.pkl') 'rb'))<line_sep>validing_x=pickle.load(open(os.path.join(self.expdata_dir 'valid_x.pkl') 'rb'))<line_sep>testing_x=pickle.load(open(os.path.join(self.expdata_dir 'test_x.pkl') 'rb'))<line_sep>training_y=pickle.load(open(os.path.join(self.expdata_dir 'train_y.pkl') 'rb'))<line_sep>validing_y=pickle.load(open(os.path.join(self.expdata_dir 'valid_y.pkl') 'rb'))<line_sep>testing_y=pickle.load(open(os.path.join(self.expdata_dir 'test_y.pkl') 'rb'))<line_sep>expdata_statistic=pickle.load(open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'rb'))<line_sep>feat_n=expdata_statistic['feat_n']<line_sep>label_n=expdata_statistic['label_n']<line_sep>self.train={'x':training_x 'y':training_y 'feat_n':feat_n 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'feat_n':feat_n 'label_n':label_n}<line_sep>self.test={'x':testing_x 
'y':testing_y 'feat_n':feat_n 'label_n':label_n}<line_sep>print('load finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of features:' expdata_statistic['feat_n'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>show_data self k=3<block_start>"""
Parameters
----------
k : int, optional (default=3)
number of samples from each split (train/valid/test) to show
"""<line_sep>print('------------Train--------------')<line_sep>print('x_data' self.train['x'][:k])<line_sep>print('y_data' self.train['y'][:k])<line_sep>print('------------Valid--------------')<line_sep>print('x_data' self.valid['x'][:k])<line_sep>print('y_data' self.valid['y'][:k])<line_sep>print('------------Test--------------')<line_sep>print('x_data' self.test['x'][:k])<line_sep>print('y_data' self.test['y'][:k])<block_end><block_end><class_stmt>textdata<block_start><def_stmt>__init__ self expdata_id root_dir='.'<block_start>"""
experiment data generator class for clinical-note datasets
Parameters
----------
expdata_id : str
name of the current experiment
root_dir : str, optional (default='.')
root directory used to store the generated experiment data
"""<line_sep>self.expdata_id=expdata_id<line_sep>check_expdata_dir(expdata_id=expdata_id)<line_sep>self.root_dir=root_dir<line_sep>self.expdata_dir=os.path.join(self.root_dir 'experiments_data' self.expdata_id)<line_sep>print('Current ExpData_ID: {0} --- Target for Clinical Notes'.format(self.expdata_id))<block_end><def_stmt>get_exp_data self sel_task='diagnose' shuffle=<true> split_ratio=[0.64 0.16 0.2] data_root='' n_limit=-1<block_start>"""
Parameters
----------
sel_task : str, optional (default='diagnose')
name of the current healthcare task
shuffle : bool, optional (default=True)
determine whether to shuffle the data or not
split_ratio : list, optional (default=[0.64,0.16,0.2])
ratios used to split the whole data into train/valid/test sets
data_root : str (default='')
directory of the data to load; an exception is raised if it is left empty
n_limit : int, optional (default=-1)
number of samples to use; if n_limit==-1, use all data
"""<line_sep>self.sel_task=sel_task<if_stmt>data_root<eq>''<block_start><raise>Exception('fill in correct data_root')<block_end>all_list=[]<line_sep>l_list=[]<line_sep>episode_dir=os.path.join(data_root 'x_data')<line_sep>feat_n,label_n=0 0<line_sep>label_seq=pd.read_csv(os.path.join(data_root 'y_data' self.sel_task+'.csv')).values<for_stmt>row_id trange(len(label_seq))<block_start><if_stmt>n_limit<g>0<and>row_id<g>n_limit<block_start><break><block_end>time.sleep(0.01)<line_sep>row=label_seq[row_id :]<line_sep>concrete_path=os.path.join(episode_dir row[0])<if_stmt>os.path.exists(concrete_path)<is><false><block_start><continue><block_end>all_list.append([concrete_path]+row[1:].astype(float).tolist())<line_sep>label_n=len(row[1:])<block_end># shuffle the list
<if_stmt>shuffle<block_start>random.shuffle(all_list)<block_end>N=len(all_list)<line_sep>x_list=[]<line_sep>y_list=[]<for_stmt>item all_list<block_start>x_list.append(item[0])<line_sep>y_list.append(np.array(item[1:]).astype(float))<block_end>train_ratio=split_ratio[0]<line_sep>valid_ratio=split_ratio[1]<line_sep>training_x=x_list[:int(train_ratio<times>N)]<line_sep>validing_x=x_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_x=x_list[int((train_ratio+valid_ratio)<times>N):]<line_sep>training_y=y_list[:int(train_ratio<times>N)]<line_sep>validing_y=y_list[int(train_ratio<times>N):int((train_ratio+valid_ratio)<times>N)]<line_sep>testing_y=y_list[int((train_ratio+valid_ratio)<times>N):]<if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start>os.makedirs(self.expdata_dir)<block_end>pickle.dump(training_x open(os.path.join(self.expdata_dir 'train_x.pkl') 'wb'))<line_sep>pickle.dump(validing_x open(os.path.join(self.expdata_dir 'valid_x.pkl') 'wb'))<line_sep>pickle.dump(testing_x open(os.path.join(self.expdata_dir 'test_x.pkl') 'wb'))<line_sep>print('finished X generate')<line_sep>pickle.dump(training_y open(os.path.join(self.expdata_dir 'train_y.pkl') 'wb'))<line_sep>pickle.dump(validing_y open(os.path.join(self.expdata_dir 'valid_y.pkl') 'wb'))<line_sep>pickle.dump(testing_y open(os.path.join(self.expdata_dir 'test_y.pkl') 'wb'))<line_sep>print('finished Y generate')<line_sep>expdata_statistic={'task':self.sel_task 'raio':split_ratio 'label_n':label_n 'len_train':len(training_x) 'len_valid':len(validing_x) 'len_test':len(testing_x)}<line_sep>pickle.dump(expdata_statistic open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'wb'))<line_sep>self.train={'x':training_x 'y':training_y 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'label_n':label_n}<line_sep>print('generate finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>load_exp_data self<block_start><if_stmt>os.path.exists(self.expdata_dir)<is><false><block_start><raise>Exception('cannot find exp data dir {0}'.format(self.expdata_dir))<block_end>training_x=pickle.load(open(os.path.join(self.expdata_dir 'train_x.pkl') 'rb'))<line_sep>validing_x=pickle.load(open(os.path.join(self.expdata_dir 'valid_x.pkl') 'rb'))<line_sep>testing_x=pickle.load(open(os.path.join(self.expdata_dir 'test_x.pkl') 'rb'))<line_sep>training_y=pickle.load(open(os.path.join(self.expdata_dir 'train_y.pkl') 'rb'))<line_sep>validing_y=pickle.load(open(os.path.join(self.expdata_dir 'valid_y.pkl') 'rb'))<line_sep>testing_y=pickle.load(open(os.path.join(self.expdata_dir 'test_y.pkl') 'rb'))<line_sep>expdata_statistic=pickle.load(open(os.path.join(self.expdata_dir 'expdata_statistic.pkl') 'rb'))<line_sep>label_n=expdata_statistic['label_n']<line_sep>self.train={'x':training_x 'y':training_y 'label_n':label_n}<line_sep>self.valid={'x':validing_x 'y':validing_y 'label_n':label_n}<line_sep>self.test={'x':testing_x 'y':testing_y 'label_n':label_n}<line_sep>print('load finished')<line_sep>print('target Task:' expdata_statistic['task'])<line_sep>print('N of labels:' expdata_statistic['label_n'])<line_sep>print('N of TrainData:' 
expdata_statistic['len_train'])<line_sep>print('N of ValidData:' expdata_statistic['len_valid'])<line_sep>print('N of TestData:' expdata_statistic['len_test'])<block_end><def_stmt>show_data self k=3<block_start>"""
Parameters
----------
k : int, optional (default=3)
number of samples from each split (train/valid/test) to show
"""<line_sep>print('------------Train--------------')<line_sep>print('x_data' self.train['x'][:k])<line_sep>print('y_data' self.train['y'][:k])<line_sep>print('------------Valid--------------')<line_sep>print('x_data' self.valid['x'][:k])<line_sep>print('y_data' self.valid['y'][:k])<line_sep>print('------------Test--------------')<line_sep>print('x_data' self.test['x'][:k])<line_sep>print('y_data' self.test['y'][:k])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>print('hello world')<line_sep>test_txt=textdata('test.1.text')<line_sep>test_txt.get_exp_data(sel_task='diagnose' data_root='./datasets/text')<line_sep>test_txt.load_exp_data()<block_end> |
# 3p
<import_stmt>pymemcache<import_from_stmt>pymemcache.exceptions MemcacheClientError<import_from_stmt>pymemcache.exceptions MemcacheIllegalInputError<import_from_stmt>pymemcache.exceptions MemcacheServerError<import_from_stmt>pymemcache.exceptions MemcacheUnknownCommandError<import_from_stmt>pymemcache.exceptions MemcacheUnknownError<import_stmt>pytest<line_sep># project
<import_from_stmt>ddtrace Pin<import_from_stmt>ddtrace.contrib.pymemcache.client WrappedClient<import_from_stmt>ddtrace.contrib.pymemcache.patch patch<import_from_stmt>ddtrace.contrib.pymemcache.patch unpatch<import_from_stmt>ddtrace.vendor wrapt<import_from_stmt>tests.utils DummyTracer<import_from_stmt>tests.utils TracerTestCase<import_from_stmt>.test_client_mixin PYMEMCACHE_VERSION<import_from_stmt>.test_client_mixin PymemcacheClientTestCaseMixin<import_from_stmt>.test_client_mixin TEST_HOST<import_from_stmt>.test_client_mixin TEST_PORT<import_from_stmt>.utils MockSocket<import_from_stmt>.utils _str<line_sep>_Client=pymemcache.client.base.Client<class_stmt>PymemcacheClientTestCase(PymemcacheClientTestCaseMixin)<block_start>"""Tests for a patched pymemcache.client.base.Client."""<def_stmt>test_patch self<block_start><assert_stmt>issubclass(pymemcache.client.base.Client wrapt.ObjectProxy)<line_sep>client=self.make_client([])<line_sep>self.assertIsInstance(client wrapt.ObjectProxy)<block_end><def_stmt>test_unpatch self<block_start>unpatch()<import_from_stmt>pymemcache.client.base Client<line_sep>self.assertEqual(Client _Client)<block_end><def_stmt>test_set_get self<block_start>client=self.make_client([b"STORED\r\n" b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])<line_sep>client.set(b"key" b"value" noreply=<false>)<line_sep>result=client.get(b"key")<assert_stmt>_str(result)<eq>"value"<line_sep>self.check_spans(2 ["set" "get"] ["set key" "get key"])<block_end><def_stmt>test_append_stored self<block_start>client=self.make_client([b"STORED\r\n"])<line_sep>result=client.append(b"key" b"value" noreply=<false>)<assert_stmt>result<is><true><line_sep>self.check_spans(1 ["append"] ["append key"])<block_end><def_stmt>test_prepend_stored self<block_start>client=self.make_client([b"STORED\r\n"])<line_sep>result=client.prepend(b"key" b"value" noreply=<false>)<assert_stmt>result<is><true><line_sep>self.check_spans(1 ["prepend"] ["prepend key"])<block_end><def_stmt>test_cas_stored self<block_start>client=self.make_client([b"STORED\r\n"])<line_sep>result=client.cas(b"key" b"value" b"0" noreply=<false>)<assert_stmt>result<is><true><line_sep>self.check_spans(1 ["cas"] ["cas key"])<block_end><def_stmt>test_cas_exists self<block_start>client=self.make_client([b"EXISTS\r\n"])<line_sep>result=client.cas(b"key" b"value" b"0" noreply=<false>)<assert_stmt>result<is><false><line_sep>self.check_spans(1 ["cas"] ["cas key"])<block_end><def_stmt>test_cas_not_found self<block_start>client=self.make_client([b"NOT_FOUND\r\n"])<line_sep>result=client.cas(b"key" b"value" b"0" noreply=<false>)<assert_stmt>result<is><none><line_sep>self.check_spans(1 ["cas"] ["cas key"])<block_end><def_stmt>test_delete_exception self<block_start>client=self.make_client([Exception("fail")])<def_stmt>_delete <block_start>client.delete(b"key" noreply=<false>)<block_end>pytest.raises(Exception _delete)<line_sep>spans=self.check_spans(1 ["delete"] ["delete key"])<line_sep>self.assertEqual(spans[0].error 1)<block_end><def_stmt>test_flush_all self<block_start>client=self.make_client([b"OK\r\n"])<line_sep>result=client.flush_all(noreply=<false>)<assert_stmt>result<is><true><line_sep>self.check_spans(1 ["flush_all"] ["flush_all"])<block_end><def_stmt>test_incr_exception self<block_start>client=self.make_client([Exception("fail")])<def_stmt>_incr <block_start>client.incr(b"key" 1)<block_end>pytest.raises(Exception _incr)<line_sep>spans=self.check_spans(1 ["incr"] ["incr key"])<line_sep>self.assertEqual(spans[0].error 1)<block_end><def_stmt>test_get_error 
self<block_start>client=self.make_client([b"ERROR\r\n"])<def_stmt>_get <block_start>client.get(b"key")<block_end>pytest.raises(MemcacheUnknownCommandError _get)<line_sep>spans=self.check_spans(1 ["get"] ["get key"])<line_sep>self.assertEqual(spans[0].error 1)<block_end><def_stmt>test_get_unknown_error self<block_start>client=self.make_client([b"foobarbaz\r\n"])<def_stmt>_get <block_start>client.get(b"key")<block_end>pytest.raises(MemcacheUnknownError _get)<line_sep>self.check_spans(1 ["get"] ["get key"])<block_end><def_stmt>test_gets_found self<block_start>client=self.make_client([b"VALUE key 0 5 10\r\nvalue\r\nEND\r\n"])<line_sep>result=client.gets(b"key")<assert_stmt>result<eq>(b"value" b"10")<line_sep>self.check_spans(1 ["gets"] ["gets key"])<block_end><def_stmt>test_touch_not_found self<block_start>client=self.make_client([b"NOT_FOUND\r\n"])<line_sep>result=client.touch(b"key" noreply=<false>)<assert_stmt>result<is><false><line_sep>self.check_spans(1 ["touch"] ["touch key"])<block_end><def_stmt>test_set_client_error self<block_start>client=self.make_client([b"CLIENT_ERROR some message\r\n"])<def_stmt>_set <block_start>client.set("key" "value" noreply=<false>)<block_end>pytest.raises(MemcacheClientError _set)<line_sep>spans=self.check_spans(1 ["set"] ["set key"])<line_sep>self.assertEqual(spans[0].error 1)<block_end><def_stmt>test_set_server_error self<block_start>client=self.make_client([b"SERVER_ERROR some message\r\n"])<def_stmt>_set <block_start>client.set(b"key" b"value" noreply=<false>)<block_end>pytest.raises(MemcacheServerError _set)<line_sep>spans=self.check_spans(1 ["set"] ["set key"])<line_sep>self.assertEqual(spans[0].error 1)<block_end><def_stmt>test_set_key_with_space self<block_start>client=self.make_client([b""])<def_stmt>_set <block_start>client.set(b"key has space" b"value" noreply=<false>)<block_end>pytest.raises(MemcacheIllegalInputError _set)<line_sep>spans=self.check_spans(1 ["set"] ["set key has space"])<line_sep>self.assertEqual(spans[0].error 1)<block_end><def_stmt>test_quit self<block_start>client=self.make_client([])<line_sep>result=client.quit()<assert_stmt>result<is><none><line_sep>self.check_spans(1 ["quit"] ["quit"])<block_end><def_stmt>test_replace_not_stored self<block_start>client=self.make_client([b"NOT_STORED\r\n"])<line_sep>result=client.replace(b"key" b"value" noreply=<false>)<assert_stmt>result<is><false><line_sep>self.check_spans(1 ["replace"] ["replace key"])<block_end><def_stmt>test_version_success self<block_start>client=self.make_client([b"VERSION 1.2.3\r\n"] default_noreply=<false>)<line_sep>result=client.version()<assert_stmt>result<eq>b"1.2.3"<line_sep>self.check_spans(1 ["version"] ["version"])<block_end><def_stmt>test_stats self<block_start>client=self.make_client([b"STAT fake_stats 1\r\n" b"END\r\n"])<line_sep>result=client.stats()<if_stmt>PYMEMCACHE_VERSION<ge>(3 4 0)<block_start><assert_stmt>client.sock.send_bufs<eq>[b"stats\r\n"]<block_end><else_stmt><block_start><assert_stmt>client.sock.send_bufs<eq>[b"stats \r\n"]<block_end><assert_stmt>result<eq>{b"fake_stats":1}<line_sep>self.check_spans(1 ["stats"] ["stats"])<block_end><def_stmt>test_service_name_override self<block_start>client=self.make_client([b"STORED\r\n" b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])<line_sep>Pin.override(client service="testsvcname")<line_sep>client.set(b"key" b"value" noreply=<false>)<line_sep>result=client.get(b"key")<assert_stmt>_str(result)<eq>"value"<line_sep>spans=self.get_spans()<line_sep>self.assertEqual(spans[0].service 
"testsvcname")<line_sep>self.assertEqual(spans[1].service "testsvcname")<block_end><block_end><class_stmt>PymemcacheHashClientTestCase(PymemcacheClientTestCaseMixin)<block_start>"""Tests for a patched pymemcache.client.hash.HashClient."""<def_stmt>make_client_pool self hostname mock_socket_values serializer=<none> **kwargs<block_start>mock_client=pymemcache.client.base.Client(hostname serializer=serializer **kwargs)<line_sep>tracer=DummyTracer()<line_sep>Pin.override(mock_client tracer=tracer)<line_sep>mock_client.sock=MockSocket(mock_socket_values)<line_sep>client=pymemcache.client.base.PooledClient(hostname serializer=serializer)<line_sep>client.client_pool=pymemcache.pool.ObjectPool(<lambda>:mock_client)<line_sep><return>mock_client<block_end><def_stmt>make_client self mock_socket_values **kwargs<block_start><import_from_stmt>pymemcache.client.hash HashClient<line_sep>tracer=DummyTracer()<line_sep>Pin.override(pymemcache tracer=tracer)<line_sep>self.client=HashClient([(TEST_HOST TEST_PORT)] **kwargs)<for_stmt>_c self.client.clients.values()<block_start>_c.sock=MockSocket(list(mock_socket_values))<block_end><return>self.client<block_end><def_stmt>test_patched_hash_client self<block_start>client=self.make_client([b"STORED\r\n"])<if_stmt>PYMEMCACHE_VERSION<ge>(3 2 0)<block_start><assert_stmt>client.client_class<eq>WrappedClient<block_end><assert_stmt>len(client.clients)<for_stmt>_c client.clients.values()<block_start><assert_stmt>isinstance(_c wrapt.ObjectProxy)<block_end><block_end><def_stmt>test_delete_many_found self<block_start>"""
delete_many internally calls client.delete, so we should expect to get
delete as our span resource.
For base.Client, self.delete() is called, which bypasses our tracing
on delete().
"""<line_sep>client=self.make_client([b"STORED\r" b"\n" b"DELETED\r\n"])<line_sep>result=client.add(b"key" b"value" noreply=<false>)<line_sep>result=client.delete_many([b"key"] noreply=<false>)<assert_stmt>result<is><true><line_sep>self.check_spans(2 ["add" "delete"] ["add key" "delete key"])<block_end><block_end><class_stmt>PymemcacheClientConfiguration(TracerTestCase)<block_start>"""Ensure that pymemache can be configured properly."""<def_stmt>setUp self<block_start>patch()<block_end><def_stmt>tearDown self<block_start>unpatch()<block_end><def_stmt>make_client self mock_socket_values **kwargs<block_start>tracer=DummyTracer()<line_sep>Pin.override(pymemcache tracer=tracer)<line_sep>self.client=pymemcache.client.base.Client((TEST_HOST TEST_PORT) **kwargs)<line_sep>self.client.sock=MockSocket(list(mock_socket_values))<line_sep><return>self.client<block_end><def_stmt>test_same_tracer self<block_start>"""Ensure same tracer reference is used by the pin on pymemache and
Clients.
"""<line_sep>client=pymemcache.client.base.Client((TEST_HOST TEST_PORT))<line_sep>self.assertEqual(Pin.get_from(client).tracer Pin.get_from(pymemcache).tracer)<block_end><def_stmt>test_override_parent_pin self<block_start>"""Test that the service set on `pymemcache` is used for Clients."""<line_sep>Pin.override(pymemcache service="mysvc")<line_sep>client=self.make_client([b"STORED\r\n" b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])<line_sep>client.set(b"key" b"value" noreply=<false>)<line_sep>pin=Pin.get_from(pymemcache)<line_sep>tracer=pin.tracer<line_sep>spans=tracer.pop()<line_sep>self.assertEqual(spans[0].service "mysvc")<block_end><def_stmt>test_override_client_pin self<block_start>"""Test that the service set on `pymemcache` is used for Clients."""<line_sep>client=self.make_client([b"STORED\r\n" b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])<line_sep>Pin.override(client service="mysvc2")<line_sep>client.set(b"key" b"value" noreply=<false>)<line_sep>pin=Pin.get_from(pymemcache)<line_sep>tracer=pin.tracer<line_sep>spans=tracer.pop()<line_sep>self.assertEqual(spans[0].service "mysvc2")<block_end>@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))<def_stmt>test_user_specified_service self<block_start>"""
When a user specifies a service for the app
The pymemcache integration should not use it.
"""<line_sep># Ensure that the service name was configured
<import_from_stmt>ddtrace config<assert_stmt>config.service<eq>"mysvc"<line_sep>client=self.make_client([b"STORED\r\n" b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])<line_sep>client.set(b"key" b"value" noreply=<false>)<line_sep>pin=Pin.get_from(pymemcache)<line_sep>tracer=pin.tracer<line_sep>spans=tracer.pop()<assert_stmt>spans[0].service<ne>"mysvc"<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>convlab2.util.multiwoz.paraphrase_span_detection phrase_idx_utt<def_stmt>paraphrase_span_detection new_text span_info<block_start>new_words=new_text.split()<line_sep>new_span_info=[]<for_stmt>span span_info<block_start>span_words=span[2].split()<line_sep>result=phrase_idx_utt(span_words new_words)<if_stmt>result<is><not><none><block_start>max_start,max_end=result<line_sep>new_span_info.append([span[0] span[1] ' '.join(new_words[max_start:max_end+1]) max_start max_end])<block_end><block_end><return>new_span_info<block_end><def_stmt>span2tuple span_info<block_start>t=[]<for_stmt>span span_info<block_start>t.append((span[0].split('-')[1] span[0].split('-')[0] span[1] span[2]))<block_end><return>t<block_end> |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs to train an on-device recommendation model."""<import_stmt>collections<import_stmt>tempfile<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_examples.lite.model_maker.core.api mm_export<import_from_stmt>tensorflow_examples.lite.model_maker.core.data_util data_util<import_from_stmt>tensorflow_examples.lite.model_maker.core.data_util recommendation_config<import_from_stmt>tensorflow_examples.lite.model_maker.core.export_format ExportFormat<import_from_stmt>tensorflow_examples.lite.model_maker.core.task custom_model<import_from_stmt>tensorflow_examples.lite.model_maker.core.task model_util<import_from_stmt>tensorflow_examples.lite.model_maker.core.task.model_spec recommendation_spec<import_from_stmt>tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model input_pipeline<import_from_stmt>tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model metrics<as>_metrics<import_from_stmt>tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model recommendation_model_launcher<as>_launcher<line_sep>@mm_export('recommendation.Recommendation')<class_stmt>Recommendation(custom_model.CustomModel)<block_start>"""Recommendation task class."""<line_sep>DEFAULT_EXPORT_FORMAT=(ExportFormat.TFLITE )<line_sep>ALLOWED_EXPORT_FORMAT=(ExportFormat.LABEL ExportFormat.TFLITE ExportFormat.SAVED_MODEL)<line_sep># ID = 0 means a placeholder to OOV. Used for padding.
OOV_ID=0<def_stmt>__init__ self model_spec model_dir shuffle=<true> learning_rate=0.1 gradient_clip_norm=1.0<block_start>"""Init recommendation model.
Args:
model_spec: recommendation model spec.
model_dir: str, path to export model checkpoints and summaries.
shuffle: boolean, whether the training data should be shuffled.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
"""<if_stmt><not>isinstance(model_spec recommendation_spec.RecommendationSpec)<block_start><raise>ValueError('Expect RecommendationSpec but got model_spec: {}'.format(model_spec))<block_end>self._model_dir=model_dir<line_sep>self._learning_rate=learning_rate<line_sep>self._gradient_clip_norm=gradient_clip_norm<line_sep>super(Recommendation self).__init__(model_spec shuffle=shuffle)<block_end>@property<def_stmt>input_spec self<arrow>recommendation_config.InputSpec<block_start><return>self.model_spec.input_spec<block_end>@property<def_stmt>model_hparams self<arrow>recommendation_config.ModelHParams<block_start><return>self.model_spec.model_hparams<block_end><def_stmt>create_model self do_train=<true><block_start>"""Creates a model.
Args:
do_train: boolean. Whether to train the model.
Returns:
Keras model.
"""<line_sep>self.model=self.model_spec.create_model()<if_stmt>do_train<block_start>_launcher.compile_model(self.model self.model_hparams.eval_top_k self._learning_rate self._gradient_clip_norm)<block_end><block_end><def_stmt>train self train_data validation_data=<none> batch_size=16 steps_per_epoch=100 epochs=1<block_start>"""Feeds the training data for training.
Args:
train_data: Training dataset.
validation_data: Validation data. If None, skips validation process.
batch_size: int, the batch size.
steps_per_epoch: int, the number of steps per epoch.
epochs: int, number of epochs.
Returns:
History from model.fit().
"""<line_sep>batch_size=batch_size<if>batch_size<else>self.model_spec.batch_size<line_sep>train_ds=train_data.gen_dataset(batch_size is_training=<true> shuffle=self.shuffle)<if_stmt>validation_data<block_start>validation_ds=validation_data.gen_dataset(batch_size is_training=<false>)<block_end><else_stmt><block_start>validation_ds=<none><block_end>self.create_model(do_train=<true>)<line_sep>history=self.model.fit(x=train_ds validation_data=validation_ds steps_per_epoch=steps_per_epoch epochs=epochs callbacks=self._keras_callbacks(self._model_dir))<line_sep>tf.get_logger().info(history)<line_sep><return>history<block_end><def_stmt>evaluate self data batch_size=10<block_start>"""Evaluate the model.
Args:
data: Evaluation data.
batch_size: int, batch size for evaluation.
Returns:
History from model.evaluate().
"""<line_sep>batch_size=batch_size<if>batch_size<else>self.model_spec.batch_size<line_sep>eval_ds=data.gen_dataset(batch_size is_training=<false>)<line_sep>history=self.model.evaluate(eval_ds)<line_sep>tf.get_logger().info(history)<line_sep><return>history<block_end><def_stmt>_keras_callbacks self model_dir<block_start>"""Returns a list of default keras callbacks for `model.fit`."""<line_sep><return>_launcher.get_callbacks(self.model model_dir)<block_end><def_stmt>_get_serve_fn self keras_model<block_start>"""Gets serve fn for exporting model."""<line_sep>input_specs=input_pipeline.get_serving_input_specs(self.input_spec)<line_sep><return>keras_model.serve.get_concrete_function(**input_specs)<block_end><def_stmt>_export_tflite self tflite_filepath<block_start>"""Exports tflite model."""<line_sep>serve_fn=self._get_serve_fn(self.model)<line_sep># Providing trackable objects is now recommended since it will make the
# concrete function conversion API be based on the new SavedModel importer,
# which will enable new TensorFlow Lite features including variable support,
# resources and variant tensor, and signature concept.
<if_stmt>float('.'.join(tf.__version__.split('.')[:2]))<ge>2.7<block_start>converter=tf.lite.TFLiteConverter.from_concrete_functions([serve_fn] self.model)<block_end><else_stmt><block_start>converter=tf.lite.TFLiteConverter.from_concrete_functions([serve_fn])<block_end>tflite_model=converter.convert()<with_stmt>tf.io.gfile.GFile(tflite_filepath 'wb')<as>f<block_start>f.write(tflite_model)<block_end><block_end><def_stmt>_export_saved_model self filepath<block_start>serve_fn=self._get_serve_fn(self.model)<line_sep>signatures={tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:serve_fn}<line_sep>tf.saved_model.save(self.model export_dir=filepath signatures=signatures)<block_end><def_stmt>evaluate_tflite self tflite_filepath data<block_start>"""Evaluates the tflite model.
The data is padded to the required length, and multiple metrics are evaluated.
Args:
tflite_filepath: File path to the TFLite model.
data: Data to be evaluated.
Returns:
Dict of (metric, value), evaluation result of TFLite model.
"""<line_sep>label_name=self.input_spec.label_feature.feature_name<line_sep>lite_runner=model_util.get_lite_runner(tflite_filepath self.model_spec)<line_sep>ds=data.gen_dataset(batch_size=1 is_training=<false>)<line_sep>max_output_size=data.max_vocab_id+1# +1 because 0 is reserved for OOV.
eval_top_k=self.model_hparams.eval_top_k<line_sep>metrics=[_metrics.GlobalRecall(top_k=k name=f'Global_Recall/Recall_{k}')<for>k eval_top_k]<for_stmt>feature,y_true data_util.generate_elements(ds)<block_start>feature.pop(label_name)<line_sep>x=feature<line_sep>ids,scores=lite_runner.run(x)<line_sep># y_true: shape [1, 1]
# y_pred: shape [1, max_output_size]; fill only scores with top-k ids.
y_pred=np.zeros([1 max_output_size])<for_stmt>i,score zip(ids scores)<block_start><if_stmt>i<in>data.vocab# Only set if id is in vocab.
<block_start>y_pred[0 i]=score<block_end><block_end># Update metrics.
<for_stmt>m metrics<block_start>m.update_state(y_true y_pred)<block_end><block_end>result=collections.OrderedDict([(m.name m.result())<for>m metrics])<line_sep><return>result<block_end>@classmethod<def_stmt>create cls train_data model_spec:recommendation_spec.RecommendationSpec model_dir:str=<none> validation_data=<none> batch_size:int=16 steps_per_epoch:int=10000 epochs:int=1 learning_rate:float=0.1 gradient_clip_norm:float=1.0 shuffle:bool=<true> do_train:bool=<true><block_start>"""Loads data and train the model for recommendation.
Args:
train_data: Training data.
model_spec: ModelSpec, Specification for the model.
model_dir: str, path to export model checkpoints and summaries.
validation_data: Validation data.
batch_size: Batch size for training.
steps_per_epoch: int, number of steps per epoch.
epochs: int, number of epochs for training.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
shuffle: boolean, whether the training data should be shuffled.
do_train: boolean, whether to run training.
Returns:
An instance based on Recommendation.
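Example:
  A minimal usage sketch; `train_data`, `test_data` and `spec` are assumed
  to be prepared elsewhere (e.g. with the recommendation data loader and
  `recommendation_spec` helpers).
    model = Recommendation.create(train_data, spec,
                                  model_dir='/tmp/recommendation',
                                  batch_size=16, epochs=5)
    metrics = model.evaluate(test_data)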
"""<line_sep># Use model_dir or a temp folder to store intermediate checkpoints, etc.
<if_stmt>model_dir<is><none><block_start>model_dir=tempfile.mkdtemp()<block_end>recommendation=cls(model_spec model_dir=model_dir shuffle=shuffle learning_rate=learning_rate gradient_clip_norm=gradient_clip_norm)<if_stmt>do_train<block_start>tf.compat.v1.logging.info('Training recommendation model...')<line_sep>recommendation.train(train_data validation_data batch_size=batch_size steps_per_epoch=steps_per_epoch epochs=epochs)<block_end><else_stmt><block_start>recommendation.create_model(do_train=<false>)<block_end><return>recommendation<block_end><block_end># Shortcut function.
create=Recommendation.create<line_sep>mm_export('recommendation.create').export_constant(__name__ 'create')<line_sep> |
"""user login types
Revision ID: 9d370f33f1a0
Revises: <KEY>
Create Date: 2020-11-30 12:58:31.046646
"""<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.dialects mysql<line_sep># revision identifiers, used by Alembic.
revision="9d370f33f1a0"<line_sep>down_revision="<KEY>"<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start>op.add_column("user" sa.Column("login_type" sa.Enum("LOCAL" "GOOGLE" "GITHUB" name="logintype") nullable=<true> ) )<line_sep># ### end Alembic commands ###
<block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start>op.drop_column("user" "login_type")<line_sep># ### end Alembic commands ###
<block_end> |
<import_stmt>decimal<import_stmt>inspect<import_from_stmt>datetime date datetime time<import_from_stmt>functools wraps<import_stmt>typing<import_stmt>typesystem<line_sep>FIELD_ALIASES:typing.Dict[typing.Type typesystem.Field]={int:typesystem.Integer float:typesystem.Float bool:typesystem.Boolean decimal.Decimal:typesystem.Decimal date:typesystem.Date time:typesystem.Time datetime:typesystem.DateTime }<class_stmt>PathConversionError(typesystem.ValidationError)<block_start><pass><block_end><class_stmt>Converter<block_start>__slots__=("func" "signature" "annotations" "required_params")<def_stmt>__init__ self func:typing.Callable<block_start>self.func=func<line_sep>self.signature=inspect.signature(self.func)<line_sep>self.annotations:typing.Dict[str typing.Type]={param.name:param.annotation<for>param self.signature.parameters.values()<if>param.annotation<is><not>inspect.Parameter.empty}<line_sep>self.required_params=set(param.name<for>param self.signature.parameters.values()<if>param.default<is>inspect.Parameter.empty)<block_end><def_stmt>convert self args:tuple kwargs:dict<arrow>typing.Tuple[tuple dict]<block_start>bound:inspect.BoundArguments=self.signature.bind(*args **kwargs)<line_sep>errors:typing.List[typesystem.ValidationError]=[]<for_stmt>param_name,value bound.arguments.items()<block_start><try_stmt><block_start>annotation=self.annotations[param_name]<block_end><except_stmt>KeyError<block_start><continue><block_end># Find the TypeSystem field for the parameter's annotation.
<if_stmt>isinstance(annotation typesystem.Field)<block_start>field=annotation<block_end><else_stmt><block_start><try_stmt><block_start>field=FIELD_ALIASES[annotation]()<block_end><except_stmt>KeyError<block_start><continue><block_end><block_end># Perform validation.
<try_stmt><block_start>value=field.validate(value)<block_end><except_stmt>typesystem.ValidationError<as>exc# NOTE: `add_prefix` sets the key of the error in the final
# error's dict representation.
<block_start>errors.extend(exc.messages(add_prefix=param_name))<block_end><else_stmt><block_start>bound.arguments[param_name]=value<block_end><block_end><if_stmt>errors<block_start><raise>PathConversionError(messages=errors)<block_end># NOTE: apply defaults last to prevent validating the default values.
# It's faster and less bug-prone.
bound.apply_defaults()<line_sep><return>bound.args bound.kwargs<block_end><block_end><class_stmt>ViewConverter(Converter)<block_start>__slots__=("query_parameters" )<def_stmt>__init__ self func:typing.Callable<block_start>super().__init__(func)<line_sep>self.query_parameters=set(param.name<for>param self.signature.parameters.values()<if>param.default<is><not>inspect.Parameter.empty)<block_end><def_stmt>get_query_params self args:tuple kwargs:dict<arrow>dict<block_start><raise>NotImplementedError<block_end><def_stmt>convert self args:tuple kwargs:dict<arrow>typing.Tuple[tuple dict]<block_start>query_params=self.get_query_params(args kwargs)<for_stmt>param_name self.query_parameters<block_start><if_stmt>param_name<in>query_params<block_start>kwargs[param_name]=query_params[param_name]<block_end><block_end><return>super().convert(args kwargs)<block_end><block_end><def_stmt>convert_arguments func:typing.Callable converter_class:typing.Type[Converter]<arrow>typing.Callable<block_start>converter=converter_class(func)<line_sep>@wraps(func)<async_keyword><def_stmt>converted *args **kwargs<block_start>args,kwargs=converter.convert(args kwargs)<line_sep><return><await>func(*args **kwargs)<block_end><return>converted<block_end> |
<import_stmt>numpy<as>np<def_stmt>compute_centroids X idx K# Useful values
<block_start>(m n)=X.shape<line_sep># You need to return the following variable correctly.
centroids=np.zeros((K n))<line_sep># ===================== Your Code Here =====================
# Instructions: Go over every centroid and compute the mean of all points that
# belong to it. Concretely, the row vector centroids[i]
# should contain the mean of the data points assigned to
# centroid i.
#
# ==========================================================
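# One possible reference sketch (kept minimal; other vectorized solutions work
# too). It assumes idx holds an integer cluster index for each example in X and
# leaves centroids with no assigned points at zero.
<for_stmt>k range(K)<block_start>assigned=X[np.where(idx<eq>k)[0]]<if_stmt>assigned.shape[0]<g>0<block_start>centroids[k]=np.mean(assigned axis=0)<block_end><block_end>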
<return>centroids<block_end> |
<def_stmt>_flattened_config_walk <block_start><import_stmt>os<import_stmt>os.path<as>path<for_stmt>dir_name,_,files os.walk('cypress/fixtures/atlas_scheduler/.foundations')<block_start><for_stmt>file_name files<block_start><if_stmt>file_name.endswith('.envsubst.yaml')<block_start><yield>path.join(dir_name file_name)<block_end><block_end><block_end><block_end><def_stmt>_config <block_start><import_stmt>os<import_stmt>sys<import_stmt>subprocess<for_stmt>required_env ['CYPRESS_LOCAL_FOUNDATIONS_HOME' 'CYPRESS_SCHEDULER_IP' 'CYPRESS_SCHEDULER_FOUNDATIONS_HOME' 'CYPRESS_SCHEDULER_REDIS_PORT' 'CYPRESS_GUI_HOST' 'CYPRESS_GUI_PORT']<block_start><if_stmt><not>os.environ.get(required_env <none>)<block_start>print(f'Environment variable {required_env} is not set.')<line_sep>sys.exit(1)<block_end><block_end><for_stmt>template_file_name _flattened_config_walk()<block_start>output_file_name=template_file_name[:-len('.envsubst.yaml')]+'.yaml'<line_sep>subprocess.run(f'envsubst < {template_file_name} > {output_file_name}' shell=<true>)<block_end><block_end>_config()<line_sep> |
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>numpy.random<as>rng<import_stmt>pandas.io.data<as>web<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<def_stmt>get_prices symbol<block_start>start,end='2007-05-02' '2016-04-11'<line_sep>data=web.DataReader(symbol 'yahoo' start end)<line_sep>data=pd.DataFrame(data)<line_sep>prices=data['Adj Close']<line_sep>prices=prices.astype(float)<line_sep><return>prices<block_end><def_stmt>get_returns prices<block_start><return>((prices-prices.shift(-1))/prices)[:-1]<block_end><def_stmt>get_data list<block_start>l=[]<for_stmt>symbol list<block_start>rets=get_returns(get_prices(symbol))<line_sep>l.append(rets)<block_end><return>np.array(l).T<block_end>symbol_list=['C' 'GS']<line_sep>rets=get_data(symbol_list)<def_stmt>lstm_iterator raw_data num_steps data_size<block_start>batch_size=1<line_sep>raw_data=np.array(raw_data dtype=np.float32)<line_sep>data_len=len(raw_data)<line_sep>batch_len=data_len<floordiv>batch_size<line_sep>data=np.zeros([batch_size batch_len data_size] dtype=np.float32)<for_stmt>i range(batch_size)<block_start>data[i]=raw_data[batch_len<times>i:batch_len<times>(i+1) :]<block_end>epoch_size=(batch_len-1)<floordiv>num_steps<if_stmt>epoch_size<eq>0<block_start><raise>ValueError("epoch_size == 0, decrease batch_size or num_steps")<block_end><for_stmt>i range(epoch_size)<block_start>x=data[: i<times>num_steps:(i+1)<times>num_steps]<line_sep>y=data[: i<times>num_steps+1:(i+1)<times>num_steps+1]<line_sep><yield>(x[0] y[0])<block_end><block_end><class_stmt>LSTMModel(object)<block_start><def_stmt>__init__ self num_steps num_samples#, config):
<block_start>symbol_list=['C' 'GS']<line_sep>positions=tf.constant([-1 0 1])#long, neutral or short
num_positions=3<line_sep>num_symbols=len(symbol_list)<line_sep>self.num_samples=num_samples<line_sep>self.num_steps=num_steps<line_sep>#n_input = num_symbols * 100
hidden_size=21<line_sep>n_classes=num_positions<times>num_symbols<line_sep>#self.num_steps = tf.placeholder(tf.int64)
# define placeholders
self.inputs_=tf.placeholder(tf.float32 [<none> num_symbols])<line_sep>self.targets_=tf.placeholder(tf.float32 [<none> num_symbols])<line_sep>cell=tf.nn.rnn_cell.BasicLSTMCell(hidden_size forget_bias=0.0 state_is_tuple=<true>)<line_sep>#cell = tf.nn.rnn_cell.MultiRNNCell([cell] * config.num_layers)
cell=tf.nn.rnn_cell.InputProjectionWrapper(cell num_symbols)<line_sep>cell=tf.nn.rnn_cell.OutputProjectionWrapper(cell n_classes)<line_sep>outputs=[]<line_sep>self.initial_state=cell.zero_state(1 tf.float32)<line_sep>state=self.initial_state<line_sep>time_step=0<line_sep>'''with tf.variable_scope("RNN"):
def body(x):
inp = self.inputs_[time_step,:]
inp = tf.reshape(inp, [1,-1])
(cell_output, state) = cell(inp, state)
outputs.append(cell_output)
return
def condition(x):
return tf.reduce_sum(x) < 100
tf.while_loop(condition, body, [x])'''<with_stmt>tf.variable_scope("RNN")<block_start><for_stmt>time_step range(self.num_steps)#####num_steps???
<block_start><if_stmt>time_step<g>0<block_start>tf.get_variable_scope().reuse_variables()<block_end>inp=self.inputs_[time_step :]<line_sep>inp=tf.reshape(inp [1 -1])<line_sep>(cell_output state)=cell(inp state)<line_sep>outputs.append(cell_output)<block_end><block_end>#[6,]
self.final_state=state<line_sep>y=tf.reshape(tf.concat(1 outputs) [-1 n_classes])<line_sep># loop through symbol, taking the columns for each symbol's bucket together
pos={}<line_sep>sample_n={}<line_sep>sample_mask={}<line_sep>symbol_returns={}<line_sep>relevant_target_column={}<for_stmt>i range(num_symbols)# isolate the buckets relevant to the symbol and get a softmax as well
<block_start>symbol_probs=y[: i<times>num_positions:(i+1)<times>num_positions]<line_sep>symbol_probs_softmax=tf.nn.softmax(symbol_probs)# softmax[i, j] = exp(logits[i, j]) / sum(exp(logits[i]))
# sample probability to chose our policy's action
sample=tf.multinomial(tf.log(symbol_probs_softmax) num_samples)<for_stmt>sample_iter range(num_samples)<block_start>sample_n[i<times>num_samples+sample_iter]=sample[: sample_iter]<line_sep>pos[i<times>num_samples+sample_iter]=tf.reshape(sample_n[i<times>num_samples+sample_iter] [-1])-1<line_sep>symbol_returns[i<times>num_samples+sample_iter]=tf.mul(tf.cast(pos[i<times>num_samples+sample_iter] tf.float32) self.targets_[: i])<line_sep>sample_mask[i<times>num_samples+sample_iter]=tf.cast(tf.reshape(tf.one_hot(sample_n[i<times>num_samples+sample_iter] 3) [-1 3]) tf.float32)<line_sep>relevant_target_column[i<times>num_samples+sample_iter]=tf.reduce_sum(symbol_probs<times>sample_mask[i<times>num_samples+sample_iter] 1)<block_end><block_end>daily_returns_by_symbol_=tf.concat(1 [tf.reshape(t [-1 1])<for>t symbol_returns.values()])<line_sep>daily_returns_by_symbol=tf.transpose(tf.reshape(daily_returns_by_symbol_ [-1 2 num_samples]) [0 2 1])#[?,5,2]
daily_returns=tf.reduce_mean(daily_returns_by_symbol 2)# [?,5]
total_return=tf.reduce_prod(daily_returns+1 0)<line_sep>z=tf.ones_like(total_return)<times>-1<line_sep>self.total_return=total_return=tf.add(total_return z)<line_sep>ann_vol=tf.mul(tf.sqrt(tf.reduce_mean(tf.pow((daily_returns-tf.reduce_mean(daily_returns 0)) 2) 0)) np.sqrt(252))<line_sep>self.sharpe=tf.div(total_return ann_vol)<line_sep>#Maybe metric slicing later
#segment_ids = tf.ones_like(daily_returns[:,0])
#partial_prod = tf.segment_prod(daily_returns+1, segment_ids)
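# The block below forms a REINFORCE-style surrogate loss: the sigmoid
# cross-entropy against a target of ones acts as a proxy for the negative
# log-probability of each sampled position, and weighting it by the sampled
# path's total return makes gradient descent favor position sequences that
# earned positive returns and suppress those that lost money.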
training_target_cols=tf.concat(1 [tf.reshape(t [-1 1])<for>t relevant_target_column.values()])<line_sep>ones=tf.ones_like(training_target_cols)<line_sep>gradient_=tf.nn.sigmoid_cross_entropy_with_logits(training_target_cols ones)<line_sep>gradient=tf.transpose(tf.reshape(gradient_ [-1 2 num_samples]) [0 2 1])#[?,5,2]
#cost = tf.mul(gradient , daily_returns_by_symbol_reshaped)
#cost = tf.mul(gradient , tf.expand_dims(daily_returns, -1))
cost=tf.mul(gradient tf.expand_dims(total_return -1))<line_sep>#cost = tf.mul(gradient , tf.expand_dims(sharpe, -1))
self.optimizer=tf.train.GradientDescentOptimizer(0.0001).minimize(cost)<line_sep>self.costfn=tf.reduce_mean(cost)<block_end><block_end><def_stmt>run_train_results m epoch<block_start>state=m.initial_state.eval()<line_sep>rs=rets[:-200]<line_sep>full_feed={m.inputs_:rs[:-1] m.targets_:rs[1:] m.initial_state:state}<line_sep>t,s,c=session.run([m.total_return m.sharpe m.costfn] feed_dict=full_feed)<line_sep>t=np.mean(t)<line_sep>s=np.mean(s)<line_sep>print("Epoch:" '%04d'%(epoch+1) "cost=" c "total return=" "{:.9f}".format(t) "sharpe=" "{:.9f}".format(s))<line_sep><return>t<block_end><def_stmt>run_test_results m epoch<block_start>state=m.initial_state.eval()<line_sep>rs=rets[-200:]<line_sep>full_feed={m.inputs_:rs[:-1] m.targets_:rs[1:] m.initial_state:state}<line_sep>t,s,c=session.run([m.total_return m.sharpe m.costfn] feed_dict=full_feed)<line_sep>t=np.mean(t)<line_sep>s=np.mean(s)<line_sep>print("Epoch:" '%04d'%(epoch+1) "cost=" c "total return=" "{:.9f}".format(t) "sharpe=" "{:.9f}".format(s))<line_sep><return>t<block_end><def_stmt>run_epoch m epoch<block_start>full_feed={m.inputs_:rets[:-1] m.targets_:rets[1:]}<line_sep>state=m.initial_state.eval()<for_stmt>step,(x y) enumerate(lstm_iterator(rets 20 2))#m.num_steps = len(x)
<block_start>feed_dict={m.inputs_:x m.targets_:y m.initial_state:state}<line_sep>_,state=session.run([m.optimizer m.final_state] feed_dict=feed_dict)<block_end><return><block_end>results=[]<line_sep>pos_results=[]<with_stmt>tf.Graph().as_default() tf.Session()<as>session<block_start><with_stmt>tf.variable_scope("model" reuse=<none>)#, initializer=init):
<block_start>m=LSTMModel(num_steps=20 num_samples=5)#, config=config)
<block_end><with_stmt>tf.variable_scope("model" reuse=<true>)#, initializer=init):
<block_start>mvalid=LSTMModel(num_steps=len(rets[:-200])-1 num_samples=1)#, config=config)
<block_end><with_stmt>tf.variable_scope("model" reuse=<true>)#, initializer=init):
<block_start>mtest=LSTMModel(num_steps=len(rets[-200:])-1 num_samples=1)<block_end>tf.initialize_all_variables().run()<for_stmt>epoch range(10)<block_start>run_epoch(m epoch)<line_sep>print('getting results...')<line_sep>trt=run_train_results(mvalid epoch)<block_end>print('train: ' trt)<line_sep>ttt=run_test_results(mtest epoch)<line_sep>print('test: ' ttt)<line_sep>results.append(ttt)<if_stmt>trt<g>0<block_start>pos_results.append(ttt)<block_end><block_end>print(np.mean(results))<line_sep>print(np.mean(pos_results))<line_sep>
<import_from_stmt>indy_common.authorize.auth_constraints AuthConstraint IDENTITY_OWNER AuthConstraintForbidden<import_from_stmt>indy_common.constants ENDORSER<import_from_stmt>indy_common.test.auth.metadata.helper validate PLUGIN_FIELD Action<import_from_stmt>plenum.common.constants TRUSTEE<line_sep>MAX_SIG_COUNT=3<def_stmt>test_plugin_simple_rule_1_sig_owner_no_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=IDENTITY_OWNER sig_count=1 need_to_be_owner=<true> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=<none> sigs={IDENTITY_OWNER:s} is_owner=<true> amount=2 extra_sigs=<false>)<for>s range(1 MAX_SIG_COUNT+1)] author=IDENTITY_OWNER endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_owner_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=IDENTITY_OWNER sig_count=1 need_to_be_owner=<true> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=ENDORSER sigs={IDENTITY_OWNER:s1 ENDORSER:s2} is_owner=<true> amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>s2 range(1 MAX_SIG_COUNT+1)] author=IDENTITY_OWNER endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_endorser_no_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=ENDORSER sig_count=1 need_to_be_owner=<true> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=ENDORSER endorser=<none> sigs={ENDORSER:s} is_owner=<true> amount=2 extra_sigs=<true>)<for>s range(1 MAX_SIG_COUNT+1)] author=ENDORSER endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_endorser_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=ENDORSER sig_count=1 need_to_be_owner=<true> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=ENDORSER endorser=ENDORSER sigs={ENDORSER:s} is_owner=<true> amount=2 extra_sigs=<true>)<for>s range(1 MAX_SIG_COUNT+1)] author=ENDORSER endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_trustee_no_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=ENDORSER sig_count=1 need_to_be_owner=<true> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=<none> sigs={ENDORSER:s1 TRUSTEE:s2} is_owner=<true> amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>s2 range(1 MAX_SIG_COUNT+1)] author=TRUSTEE endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator 
write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_trustee_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=ENDORSER sig_count=1 need_to_be_owner=<true> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=ENDORSER sigs={TRUSTEE:s2 ENDORSER:s3} is_owner=<true> amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>s2 range(1 MAX_SIG_COUNT+1)<for>s3 range(1 MAX_SIG_COUNT+1)] author=TRUSTEE endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_all_roles_owner_no_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=1 need_to_be_owner=<true> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=<none> sigs={IDENTITY_OWNER:s} is_owner=<true> amount=2 extra_sigs=<false>)<for>s range(1 MAX_SIG_COUNT+1)] author=IDENTITY_OWNER endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_all_roles_owner_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=1 need_to_be_owner=<true> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=ENDORSER sigs={IDENTITY_OWNER:s1 ENDORSER:s2} is_owner=<true> amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>s2 range(1 MAX_SIG_COUNT+1)] author=IDENTITY_OWNER endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_all_roles_trustee_no_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=1 need_to_be_owner=<true> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=<none> sigs={TRUSTEE:s1} is_owner=<true> amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)] author=TRUSTEE endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_1_sig_all_roles_trustee_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=1 need_to_be_owner=<true> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=ENDORSER sigs={TRUSTEE:s1 ENDORSER:s2} is_owner=<true> amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>s2 range(1 MAX_SIG_COUNT+1)] author=TRUSTEE endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator 
write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_3_sig_trustee_no_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=TRUSTEE sig_count=3 need_to_be_owner=<false> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=<none> sigs={TRUSTEE:3} is_owner=owner amount=2 extra_sigs=<true>)<for>owner [<true> <false>]] author=TRUSTEE endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_3_sig_trustee_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=TRUSTEE sig_count=3 need_to_be_owner=<false> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=ENDORSER sigs={TRUSTEE:3 ENDORSER:s1} is_owner=owner amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>owner [<true> <false>]] author=TRUSTEE endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_3_sig_owner_no_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=TRUSTEE sig_count=3 need_to_be_owner=<false> metadata={PLUGIN_FIELD:2}) valid_actions=[] author=IDENTITY_OWNER endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_3_sig_owner_endorser write_auth_req_validator write_request_validation signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraint(role=TRUSTEE sig_count=3 need_to_be_owner=<false> metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=ENDORSER sigs={TRUSTEE:3 IDENTITY_OWNER:s1 ENDORSER:s2} is_owner=owner amount=2 extra_sigs=<true>)<for>s1 range(1 MAX_SIG_COUNT+1)<for>s2 range(1 MAX_SIG_COUNT+1)<for>owner [<true> <false>]] author=IDENTITY_OWNER endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_0_sig_owner_no_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=0 need_to_be_owner=<false> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=<none> sigs={} is_owner=owner amount=2 extra_sigs=<false>)<for>owner [<true> <false>]]+[Action(author=IDENTITY_OWNER endorser=<none> sigs={IDENTITY_OWNER:s} is_owner=owner amount=2 extra_sigs=<false>)<for>owner [<true> <false>]<for>s range(1 MAX_SIG_COUNT+1)] author=IDENTITY_OWNER endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_0_sig_owner_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature 
amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=0 need_to_be_owner=<false> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=IDENTITY_OWNER endorser=ENDORSER sigs={ENDORSER:s} is_owner=owner amount=2 extra_sigs=<true>)<for>s range(1 MAX_SIG_COUNT+1)<for>owner [<true> <false>]] author=IDENTITY_OWNER endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_0_sig_trustee_no_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=0 need_to_be_owner=<false> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=<none> sigs=signature is_owner=owner amount=2 extra_sigs=<true>)<for>signature signatures<for>owner [<true> <false>]] author=TRUSTEE endorser=<none> all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_0_sig_trustee_endorser write_auth_req_validator write_request_validation signatures is_owner off_ledger_signature amount<block_start>validate(auth_constraint=AuthConstraint(role='*' sig_count=0 need_to_be_owner=<false> off_ledger_signature=off_ledger_signature metadata={PLUGIN_FIELD:2}) valid_actions=[Action(author=TRUSTEE endorser=ENDORSER sigs={ENDORSER:s} is_owner=owner amount=2 extra_sigs=<true>)<for>s range(1 MAX_SIG_COUNT+1)<for>owner [<true> <false>]] author=TRUSTEE endorser=ENDORSER all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end><def_stmt>test_plugin_simple_rule_not_allowed write_auth_req_validator write_request_validation author endorser signatures is_owner amount<block_start>validate(auth_constraint=AuthConstraintForbidden() valid_actions=[] author=author endorser=endorser all_signatures=signatures is_owner=is_owner amount=amount write_auth_req_validator=write_auth_req_validator write_request_validation=write_request_validation)<block_end> |
<import_from_stmt>django.conf.urls.defaults *<line_sep>urlpatterns=patterns('' # Example:
# (r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Uncomment this for admin:
# (r'^admin/', include('django.contrib.admin.urls')),
)<line_sep> |
# coding=utf-8
# Copyright 2021 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for random feature Gaussian process layer.
## References:
[1]: Liu et al. Simple and principled uncertainty estimation with deterministic
deep learning via distance awareness. In _Neural Information Processing
Systems_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Xu et al. Understanding and Improving Layer Normalization. In _Neural
Information Processing Systems_, 2019.
https://papers.nips.cc/paper/2019/file/2f4fe03d77724a7217006e5d16728874-Paper.pdf
[3]: <NAME> and <NAME>. Random Features for Large-Scale Kernel
Machines. In _Neural Information Processing Systems_, 2007.
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
[4]: <NAME>, <NAME>, <NAME>. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
"""<import_stmt>dataclasses<import_stmt>functools<import_from_stmt>typing Any Callable Iterable Mapping Optional Tuple Union<import_stmt>flax.linen<as>nn<import_stmt>jax<import_from_stmt>jax lax<import_from_stmt>jax random<import_stmt>jax.numpy<as>jnp<line_sep># Jax-related data types.
PRNGKey=Any<line_sep>Shape=Iterable[int]<line_sep>Dtype=type(jnp.float32)<line_sep>Array=jnp.ndarray<line_sep>Initializer=Callable[[PRNGKey Shape Dtype] Array]<line_sep># Default config for random features.
default_rbf_activation=jnp.cos<line_sep>default_rbf_bias_init=nn.initializers.uniform(scale=2.<times>jnp.pi)<line_sep># Using "he_normal" style random feature distribution. Effectively, this is
# equivalent to approximating a RBF kernel but with the input standardized by
# its dimensionality (i.e., input_scaled = input * sqrt(2. / dim_input)) and
# empirically leads to better performance for neural network inputs.
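# (Equivalently: drawing kernel entries from N(0, 2/fan_in) and computing
# x @ kernel matches drawing entries from N(0, 1) and computing
# (x * sqrt(2 / fan_in)) @ kernel, which is the standardization described above.)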
default_rbf_kernel_init=nn.initializers.variance_scaling(scale=2.0 mode='fan_in' distribution='normal')<line_sep># Default field value for kwargs, to be used for data class declaration.
default_kwarg_dict=<lambda>:dataclasses.field(default_factory=dict)<line_sep>SUPPORTED_LIKELIHOOD=('binary_logistic' 'poisson' 'gaussian')<line_sep>MIN_SCALE_MONTE_CARLO=1e-3<class_stmt>RandomFeatureGaussianProcess(nn.Module)<block_start>"""A Gaussian process layer using random Fourier features [1].
Attributes:
features: the number of output units.
hidden_features: the number of hidden random fourier features.
normalize_input: whether to normalize the input using nn.LayerNorm.
norm_kwargs: Optional keyword arguments to the input nn.LayerNorm layer.
hidden_kwargs: Optional keyword arguments to the random feature layer.
output_kwargs: Optional keyword arguments to the predictive logit layer.
covmat_kwargs: Optional keyword arguments to the predictive covmat layer.
"""<line_sep>features:int<line_sep>hidden_features:int=1024<line_sep>normalize_input:bool=<true><line_sep># Optional keyword arguments.
norm_kwargs:Mapping[str Any]=default_kwarg_dict()<line_sep>hidden_kwargs:Mapping[str Any]=default_kwarg_dict()<line_sep>output_kwargs:Mapping[str Any]=default_kwarg_dict()<line_sep>covmat_kwargs:Mapping[str Any]=default_kwarg_dict()<def_stmt>setup self<block_start>"""Defines model layers."""<line_sep># pylint:disable=invalid-name,not-a-mapping
<if_stmt>self.normalize_input# Prefer a parameter-free version of LayerNorm by default [2]. Can be
# overwritten by passing norm_kwargs=dict(use_bias=..., use_scales=...).
<block_start>LayerNorm=functools.partial(nn.LayerNorm use_bias=<false> use_scale=<false>)<line_sep>self.norm_layer=LayerNorm(**self.norm_kwargs)<block_end>self.hidden_layer=RandomFourierFeatures(features=self.hidden_features **self.hidden_kwargs)<line_sep>self.output_layer=nn.Dense(features=self.features **self.output_kwargs)<line_sep>self.covmat_layer=LaplaceRandomFeatureCovariance(hidden_features=self.hidden_features **self.covmat_kwargs)<line_sep># pylint:enable=invalid-name,not-a-mapping
<block_end><def_stmt>__call__ self inputs:Array return_full_covmat:bool=<false> return_random_features:bool=<false><arrow>Array<block_start>"""Computes Gaussian process outputs.
Args:
inputs: the nd-array of shape (batch_size, ..., input_dim).
return_full_covmat: whether to return the full covariance matrix, shape
(batch_size, batch_size), or only return the predictive variances with
shape (batch_size, ).
return_random_features: whether to return the random fourier features for
the inputs.
Returns:
A tuple of predictive logits, predictive covmat and (optionally)
random Fourier features.
"""<line_sep>gp_inputs=self.norm_layer(inputs)<if>self.normalize_input<else>inputs<line_sep>gp_features=self.hidden_layer(gp_inputs)<line_sep>gp_logits=self.output_layer(gp_features)<line_sep>gp_covmat=self.covmat_layer(gp_features gp_logits diagonal_only=<not>return_full_covmat)<line_sep># Returns predictive logits, covmat and (optionally) random features.
<if_stmt>return_random_features<block_start><return>gp_logits gp_covmat gp_features<block_end><return>gp_logits gp_covmat<block_end><block_end><class_stmt>RandomFourierFeatures(nn.Module)<block_start>"""A random fourier feature (RFF) layer that approximates a kernel model.
The random feature transformation is a one-hidden-layer network with
non-trainable weights (see, e.g., Algorithm 1 of [3]). Specifically:
f(x) = activation(x @ kernel + bias) * output_scale.
The forward pass logic closely follows that of the nn.Dense.
Attributes:
features: the number of output units.
feature_scale: scale to apply to the output; if None, defaults to
sqrt(2. / features) (see Algorithm 1 of [3]).
activation: activation function to apply to the output.
kernel_init: initializer function for the weight matrix.
bias_init: initializer function for the bias.
seed: random seed for generating random features (default: 0). This will
override the external RNGs.
dtype: the dtype of the computation (default: float32).
"""<line_sep>features:int<line_sep>feature_scale:Optional[jnp.float32]=1.<line_sep>activation:Callable[[Array] Array]=default_rbf_activation<line_sep>kernel_init:Initializer=default_rbf_kernel_init<line_sep>bias_init:Initializer=default_rbf_bias_init<line_sep>seed:int=0<line_sep>dtype:Dtype=jnp.float32<line_sep>collection_name:str='random_features'<def_stmt>setup self# Defines the random number generator.
<block_start>self.rng=random.PRNGKey(self.seed)<line_sep># Processes random feature scale.
self._feature_scale=self.feature_scale<if_stmt>self._feature_scale<is><none><block_start>self._feature_scale=jnp.sqrt(2./self.features)<block_end>self._feature_scale=jnp.asarray(self._feature_scale dtype=self.dtype)<block_end>@nn.compact<def_stmt>__call__ self inputs:Array<arrow>Array<block_start>"""Applies random feature transformation along the last dimension of inputs.
Args:
inputs: The nd-array to be transformed.
Returns:
The transformed input.
"""<line_sep># Initializes variables.
input_dim=inputs.shape[-1]<line_sep>kernel_rng,bias_rng=random.split(self.rng num=2)<line_sep>kernel_shape=(input_dim self.features)<line_sep>kernel=self.variable(self.collection_name 'kernel' self.kernel_init kernel_rng kernel_shape self.dtype)<line_sep>bias=self.variable(self.collection_name 'bias' self.bias_init bias_rng (self.features ) self.dtype)<line_sep># Specifies multiplication dimension.
contracting_dims=((inputs.ndim-1 ) (0 ))<line_sep>batch_dims=(() ())<line_sep># Performs forward pass.
inputs=jnp.asarray(inputs self.dtype)<line_sep>outputs=lax.dot_general(inputs kernel.value (contracting_dims batch_dims))<line_sep>outputs=outputs+jnp.broadcast_to(bias.value outputs.shape)<line_sep><return>self._feature_scale<times>self.activation(outputs)<block_end><block_end><class_stmt>LaplaceRandomFeatureCovariance(nn.Module)<block_start>"""Computes the Gaussian Process covariance using Laplace method.
Attributes:
hidden_features: the number of random fourier features.
ridge_penalty: Initial Ridge penalty to weight covariance matrix. This value
is used to stabilize the eigenvalues of the weight covariance estimate so that
the matrix inverse can be computed for Cov = inv(t(X) @ X + s * I). The
ridge factor s cannot be too large since otherwise it will dominate the
t(X) @ X term and make the covariance estimate meaningless.
momentum: A discount factor used to compute the moving average for posterior
precision matrix. Analogous to the momentum factor in batch normalization.
If `None` then update covariance matrix using a naive sum without
momentum, which is desirable if the goal is to compute the exact
covariance matrix by passing through data once (say in the final epoch).
In this case, make sure to reset the precision matrix variable between
epochs by replacing it with self.initial_precision_matrix().
likelihood: The likelihood to use for computing Laplace approximation for
the covariance matrix. Can be one of ('binary_logistic', 'poisson',
'gaussian').
"""<line_sep>hidden_features:int<line_sep>ridge_penalty:float=1.<line_sep>momentum:Optional[float]=<none><line_sep>likelihood:str='gaussian'<line_sep>collection_name:str='laplace_covariance'<line_sep>dtype:Dtype=jnp.float32<def_stmt>setup self<block_start><if_stmt>self.momentum<is><not><none><block_start><if_stmt>self.momentum<l>0.<or>self.momentum<g>1.<block_start><raise>ValueError(f'`momentum` must be between (0, 1). '<concat>f'Got {self.momentum}.')<block_end><block_end><if_stmt>self.likelihood<not><in>SUPPORTED_LIKELIHOOD<block_start><raise>ValueError(f'"likelihood" must be one of {SUPPORTED_LIKELIHOOD}, '<concat>f'got {self.likelihood}.')<block_end><block_end>@nn.compact<def_stmt>__call__ self gp_features:Array gp_logits:Optional[Array]=<none> diagonal_only:bool=<true><arrow>Optional[Array]<block_start>"""Updates the precision matrix and computes the predictive covariance.
NOTE:
The precision matrix will be updated only during training (i.e., when
`self.collection_name` are in the list of mutable variables). The covariance
matrix will be computed only during inference to avoid repeated calls to the
(expensive) `linalg.inv` op.
Args:
gp_features: The nd-array of random fourier features, shape (batch_size,
..., hidden_features).
gp_logits: The nd-array of predictive logits, shape (batch_size, ...,
logit_dim). Cannot be None if self.likelihood is not `gaussian`.
diagonal_only: Whether to return only the diagonal elements of the
predictive covariance matrix (i.e., the predictive variance).
Returns:
The predictive variances of shape (batch_size, ) if diagonal_only=True,
otherwise the predictive covariance matrix of shape
(batch_size, batch_size).
"""<line_sep>gp_features=jnp.asarray(gp_features self.dtype)<line_sep># Flatten GP features and logits to 2-d, by doing so we treat all the
# non-final dimensions as the batch dimensions.
gp_features=jnp.reshape(gp_features [-1 self.hidden_features])<if_stmt>gp_logits<is><not><none><block_start>gp_logits=jnp.asarray(gp_logits self.dtype)<line_sep>gp_logits=jnp.reshape(gp_logits [gp_features.shape[0] -1])<block_end>precision_matrix=self.variable(self.collection_name 'precision_matrix' <lambda>:self.initial_precision_matrix())<line_sep># pylint: disable=unnecessary-lambda
# Updates the precision matrix during training.
initializing=self.is_mutable_collection('params')<line_sep>training=self.is_mutable_collection(self.collection_name)<if_stmt>training<and><not>initializing<block_start>precision_matrix.value=self.update_precision_matrix(gp_features gp_logits precision_matrix.value)<block_end># Computes covariance matrix during inference.
<if_stmt><not>training<block_start><return>self.compute_predictive_covariance(gp_features precision_matrix diagonal_only)<block_end><block_end><def_stmt>initial_precision_matrix self<block_start>"""Returns the initial diagonal precision matrix."""<line_sep><return>jnp.eye(self.hidden_features dtype=self.dtype)<times>self.ridge_penalty<block_end><def_stmt>update_precision_matrix self gp_features:Array gp_logits:Optional[Array] precision_matrix:Array<arrow>Array<block_start>"""Updates precision matrix given a new batch.
Args:
gp_features: random features from the new batch, shape (batch_size,
hidden_features)
gp_logits: predictive logits from the new batch, shape (batch_size,
logit_dim). Currently only logit_dim=1 is supported.
precision_matrix: the current precision matrix, shape (hidden_features,
hidden_features).
Returns:
Updated precision matrix, shape (hidden_features, hidden_features).
Raises:
(ValueError) If the logit is None or not univariate when likelihood is
not Gaussian.
"""<if_stmt>self.likelihood<ne>'gaussian'<block_start><if_stmt>gp_logits<is><none><block_start><raise>ValueError(f'`gp_logits` cannot be None when likelihood=`{self.likelihood}`')<block_end><if_stmt>gp_logits.ndim<g>1<and>gp_logits.shape[-1]<ne>1<block_start><raise>ValueError(f'likelihood `{self.likelihood}` only support univariate logits. '<concat>f'Got logits dimension: {gp_logits.shape[-1]}')<block_end><block_end># Computes precision matrix within new batch.
<if_stmt>self.likelihood<eq>'binary_logistic'<block_start>prob=nn.sigmoid(gp_logits)<line_sep>prob_multiplier=prob<times>(1.-prob)<block_end><elif_stmt>self.likelihood<eq>'poisson'<block_start>prob_multiplier=jnp.exp(gp_logits)<block_end><else_stmt><block_start>prob_multiplier=1.<block_end>gp_features_adj=jnp.sqrt(prob_multiplier)<times>gp_features<line_sep>batch_prec_mat=jnp.matmul(jnp.transpose(gp_features_adj) gp_features_adj)<line_sep># Updates precision matrix.
<if_stmt>self.momentum<is><none># Performs exact update without momentum.
<block_start>precision_matrix_updated=precision_matrix+batch_prec_mat<block_end><else_stmt><block_start>batch_size=gp_features.shape[0]<line_sep>precision_matrix_updated=(self.momentum<times>precision_matrix+(1-self.momentum)<times>batch_prec_mat/batch_size)<block_end><return>precision_matrix_updated<block_end><def_stmt>compute_predictive_covariance self gp_features:Array precision_matrix:nn.Variable diagonal_only:bool<arrow>Array<block_start>"""Computes the predictive covariance.
Approximates the Gaussian process posterior using random features.
Given training random features Phi_tr (num_train, num_hidden) and testing
random features Phi_ts (batch_size, num_hidden), the predictive covariance
matrix is computed as (assuming a Gaussian likelihood):
s * Phi_ts @ inv(t(Phi_tr) @ Phi_tr + s * I) @ t(Phi_ts),
where s is the ridge factor used to stabilize the inverse, and I is
the identity matrix with shape (num_hidden, num_hidden).
Args:
gp_features: the random feature of testing data to be used for computing
the covariance matrix. Shape (batch_size, gp_hidden_size).
precision_matrix: the model's precision matrix.
diagonal_only: whether to return only the diagonal elements of the
predictive covariance matrix (i.e., the predictive variances).
Returns:
The predictive variances of shape (batch_size, ) if diagonal_only=True,
otherwise the predictive covariance matrix of shape
(batch_size, batch_size).
"""<line_sep>precision_matrix_inv=jnp.linalg.inv(precision_matrix.value)<line_sep>cov_feature_product=jnp.matmul(precision_matrix_inv jnp.transpose(gp_features))<if_stmt>diagonal_only# Compute diagonal element only, shape (batch_size, ).
# Using the identity diag(A @ B) = col_sum(A * tr(B)).
<block_start>gp_covar=jnp.sum(gp_features<times>jnp.transpose(cov_feature_product) axis=-1)<block_end><else_stmt># Compute full covariance matrix, shape (batch_size, batch_size).
<block_start>gp_covar=jnp.matmul(gp_features cov_feature_product)<block_end><return>self.ridge_penalty<times>gp_covar<block_end><block_end><class_stmt>MCSigmoidDenseFASNGP(nn.Module)<block_start>"""Heteroscedastic SNGP for data with sigmoid output activation.
Output layer which combines the benefits of the heteroscedastic
(https://arxiv.org/abs/2105.10305) and SNGP (https://arxiv.org/abs/2006.10108)
methods. Assumes spectral normalization is applied to network producing
`inputs` to the __call__ method.
Attributes:
num_outputs: Number of outputs for classification task.
num_factors: Number of factors to use in approximation to full rank
covariance matrix.
temperature: The softmax temperature.
parameter_efficient: Whether to use the parameter efficient
version of the method. If True then samples from the latent distribution
are generated as: mu(x) + v(x) * matmul(V, eps_R) + diag(d(x), eps_K),
where eps_R ~ N(0, I_R), eps_K ~ N(0, I_K). If False then latent samples
are generated as: mu(x) + matmul(V(x), eps_R) + diag(d(x), eps_K).
Computing V(x) as a function of x increases the number of parameters
introduced by the method.
train_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during training.
test_mc_samples: The number of Monte-Carlo samples used to estimate the
predictive distribution during testing/inference.
share_samples_across_batch: If True, the latent noise samples
are shared across batch elements. If you encounter XLA compilation errors
due to dynamic shape inference, setting this to True may resolve them.
logits_only: If True, only return the logits from the __call__ method.
return_locs: If True, return the location parameter of the Gaussian
latent variable in place of the `logits`.
eps: Clip probabilities into [eps, 1.0] before applying log.
het_var_weight: Weighting on the heteroscedastic variance when computing
samples from the Gaussian latent variable.
sngp_var_weight: Weighting on the GP variance when computing samples from
the Gaussian latent variable.
hidden_features: Number of features for Random Fourier Feature GP
approximation.
normalize_input: Whether to normalize the input for the GP layer.
norm_kwargs: Normalization keywords for the GP layer.
hidden_kwargs: Hidden layer keywords for the GP layer.
output_kwargs: Output keywords for the GP layer.
covmat_kwargs: Covariance matrix keywords for the GP layer.
"""<line_sep>num_outputs:int<line_sep>num_factors:int# set num_factors = 0 for diagonal method
temperature:float=1.0<line_sep>parameter_efficient:bool=<false><line_sep>train_mc_samples:int=1000<line_sep>test_mc_samples:int=1000<line_sep>share_samples_across_batch:bool=<false><line_sep>logits_only:bool=<false><line_sep>return_locs:bool=<false><line_sep>eps:float=1e-7<line_sep>het_var_weight:float=1.0<line_sep>sngp_var_weight:float=0.0<line_sep>hidden_features:int=1024<line_sep>normalize_input:bool=<true><line_sep># Optional keyword arguments.
norm_kwargs:Mapping[str Any]=default_kwarg_dict()<line_sep>hidden_kwargs:Mapping[str Any]=default_kwarg_dict()<line_sep>output_kwargs:Mapping[str Any]=default_kwarg_dict()<line_sep>covmat_kwargs:Mapping[str Any]=default_kwarg_dict()<def_stmt>setup self<block_start><if_stmt>self.parameter_efficient<block_start>self._scale_layer_homoscedastic=nn.Dense(self.num_outputs name='scale_layer_homoscedastic')<line_sep>self._scale_layer_heteroscedastic=nn.Dense(self.num_outputs name='scale_layer_heteroscedastic')<block_end><elif_stmt>self.num_factors<g>0<block_start>self._scale_layer=nn.Dense(self.num_outputs<times>self.num_factors name='scale_layer')<block_end>self._loc_layer=RandomFeatureGaussianProcess(features=self.num_outputs hidden_features=self.hidden_features normalize_input=self.normalize_input norm_kwargs=self.norm_kwargs hidden_kwargs=self.hidden_kwargs output_kwargs=self.output_kwargs covmat_kwargs=self.covmat_kwargs name='loc_layer')<line_sep>self._diag_layer=nn.Dense(self.num_outputs name='diag_layer')<block_end><def_stmt>_compute_loc_param self inputs:Array<arrow>Array<block_start>"""Computes location parameter of the "logits distribution".
Args:
inputs: The input to the heteroscedastic output layer.
Returns:
Array of shape [batch_size, num_classes].
"""<line_sep><return>self._loc_layer(inputs)<block_end><def_stmt>_compute_scale_param self inputs:Array covmat_sngp:Array training:int<arrow>Tuple[Array Array]<block_start>"""Computes scale parameter of the "logits distribution".
Args:
inputs: The input to the heteroscedastic output layer.
covmat_sngp: GP output layer covariance matrix.
training: in training mode or not.
Returns:
2-Tuple of Array of shape
([batch_size, num_classes * max(num_factors, 1)],
[batch_size, num_classes]).
"""<if_stmt>self.parameter_efficient<or>self.num_factors<le>0<block_start>low_rank=inputs<line_sep>diag=jax.nn.softplus(self._diag_layer(inputs))+MIN_SCALE_MONTE_CARLO<block_end><else_stmt><block_start>low_rank=self._scale_layer(inputs)<line_sep>diag=jax.nn.softplus(self._diag_layer(inputs))+MIN_SCALE_MONTE_CARLO<block_end>initializing=self.is_mutable_collection('params')<if_stmt>training<or>initializing<block_start>diag_comp=diag<block_end><else_stmt># assume diagonal_only=True
<block_start>sngp_marginal_vars=jnp.expand_dims(covmat_sngp -1)<line_sep>diag_comp=jnp.sqrt(self.het_var_weight<times>jnp.square(diag)+self.sngp_var_weight<times>sngp_marginal_vars)<block_end><return>low_rank diag_comp<block_end><def_stmt>_compute_diagonal_noise_samples self diag_scale:Array num_samples:int<arrow>Array<block_start>"""Computes samples of the diagonal elements logit noise.
Args:
diag_scale: Array of shape [batch_size, num_classes]. Diagonal
elements of scale parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Logit noise samples of shape:
[batch_size, num_samples, num_outputs].
"""<if_stmt>self.share_samples_across_batch<block_start>samples_per_batch=1<block_end><else_stmt><block_start>samples_per_batch=diag_scale.shape[0]<block_end>key=self.make_rng('diag_noise_samples')<line_sep><return>jnp.expand_dims(diag_scale 1)<times>jax.random.normal(key shape=(samples_per_batch num_samples 1))<block_end><def_stmt>_compute_standard_normal_samples self factor_loadings:Array num_samples:int<arrow>Array<block_start>"""Utility that computes samples from a standard normal distribution.
Args:
factor_loadings: Array of shape
[batch_size, num_classes * num_factors]. Factor loadings for scale
parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Samples of shape: [batch_size, num_samples, num_factors].
"""<if_stmt>self.share_samples_across_batch<block_start>samples_per_batch=1<block_end><else_stmt><block_start>samples_per_batch=factor_loadings.shape[0]<block_end>key=self.make_rng('standard_norm_noise_samples')<line_sep>standard_normal_samples=jax.random.normal(key shape=(samples_per_batch num_samples self.num_factors))<if_stmt>self.share_samples_across_batch<block_start>standard_normal_samples=jnp.tile(standard_normal_samples [factor_loadings.shape[0] 1 1])<block_end><return>standard_normal_samples<block_end><def_stmt>_compute_noise_samples self scale:Tuple[Array Array] num_samples:int<arrow>Array<block_start>"""Utility function that computes additive noise samples.
Args:
scale: Tuple of Array of shape (
[batch_size, num_classes * num_factors],
[batch_size, num_classes]). Factor loadings and diagonal elements
for scale parameters of the distribution to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array. Logit noise samples of shape:
[batch_size, num_samples, num_outputs].
"""<line_sep>factor_loadings,diag_scale=scale<line_sep># Compute the diagonal noise
diag_noise_samples=self._compute_diagonal_noise_samples(diag_scale num_samples)<if_stmt>self.num_factors<g>0# Now compute the factors
<block_start>standard_normal_samples=self._compute_standard_normal_samples(factor_loadings num_samples)<if_stmt>self.parameter_efficient<block_start>res=self._scale_layer_homoscedastic(standard_normal_samples)<line_sep>res<augmul>jnp.expand_dims(self._scale_layer_heteroscedastic(factor_loadings) 1)<block_end><else_stmt># reshape scale vector into factor loadings matrix
<block_start>factor_loadings=jnp.reshape(factor_loadings [-1 self.num_outputs self.num_factors])<line_sep># transform standard normal into ~ full rank covariance Gaussian samples
res=jnp.einsum('ijk,iak->iaj' factor_loadings standard_normal_samples)<block_end><return>res+diag_noise_samples<block_end><return>diag_noise_samples<block_end><def_stmt>_compute_mc_samples self locs:Array scale:Array num_samples:int<arrow>Array<block_start>"""Utility function that computes Monte-Carlo samples (using sigmoid).
Args:
locs: Array of shape [batch_size, total_mc_samples, num_outputs].
Location parameters of the distributions to be sampled.
scale: Array of shape [batch_size, total_mc_samples, num_outputs].
Scale parameters of the distributions to be sampled.
num_samples: Number of Monte-Carlo samples to take.
Returns:
Array of shape [batch_size, num_samples, num_outputs]. Average over the
MC samples.
"""<line_sep>locs=jnp.expand_dims(locs axis=1)<line_sep>noise_samples=self._compute_noise_samples(scale num_samples)<line_sep>latents=locs+noise_samples<line_sep>samples=jax.nn.sigmoid(latents/self.temperature)<line_sep><return>jnp.mean(samples axis=1)<block_end>@nn.compact<def_stmt>__call__ self inputs:Array training:int=<true><arrow>Union[Tuple[Array Array] Tuple[Array Array Array Array]]<block_start>"""Computes predictive and log predictive distributions.
Uses Monte Carlo estimate of sigmoid approximation to HetSNGP model to
compute predictive distribution.
Args:
inputs: The input to the heteroscedastic output layer.
training: Whether we are training or not.
Returns:
Tuple of Array: (logits, covmat_sngp) if logits_only = True. Otherwise,
tuple of (logits, covmat_sngp, log_probs, probs). Logits
represents the argument to a sigmoid function that would yield probs
(logits = inverse_sigmoid(probs)), so logits can be used with the
sigmoid cross-entropy loss function.
"""<line_sep># return_random_features set to False, so guaranteed to return 2-tuple
locs,covmat_sngp=self._compute_loc_param(inputs)# pylint: disable=assignment-from-none,unbalanced-tuple-unpacking
# guaranteed to return 2-tuple due to scale_layer construction
scale=self._compute_scale_param(inputs covmat_sngp training)# pylint: disable=assignment-from-none
<if_stmt>training<block_start>total_mc_samples=self.train_mc_samples<block_end><else_stmt><block_start>total_mc_samples=self.test_mc_samples<block_end>probs_mean=self._compute_mc_samples(locs scale total_mc_samples)<line_sep>probs_mean=jnp.clip(probs_mean a_min=self.eps)<line_sep>log_probs=jnp.log(probs_mean)<line_sep># inverse sigmoid
probs_mean=jnp.clip(probs_mean a_min=self.eps a_max=1.0-self.eps)<line_sep>logits=log_probs-jnp.log(1.0-probs_mean)<if_stmt>self.return_locs<block_start>logits=locs<block_end><if_stmt>self.logits_only<block_start><return>logits covmat_sngp<block_end><return>logits covmat_sngp log_probs probs_mean<block_end><block_end> |
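# Hypothetical usage sketch (not part of the module above; reuses the jax /
# jnp imports at the top of the file): one way the RandomFeatureGaussianProcess
# head might be driven with Flax. The shapes, sizes and PRNG seed are
# assumptions for illustration only.
gp_head=RandomFeatureGaussianProcess(features=10 hidden_features=256)<line_sep>x=jnp.ones((8 32))<line_sep>variables=gp_head.init(jax.random.PRNGKey(0) x)<line_sep># Training-style call: keep the Laplace precision matrix mutable so it keeps
# accumulating; in this mode the covariance output is None.
(logits _),updated_state=gp_head.apply(variables x mutable=['laplace_covariance'])<line_sep># Inference-style call: nothing is mutable, so the predictive variances are
# computed from the accumulated precision matrix. (In a real loop the updated
# 'laplace_covariance' entry from updated_state would be merged back into
# `variables` first.)
logits,variances=gp_head.apply(variables x)<line_sep>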
<import_from_stmt>xmlrpc client<line_sep># books data with search_read method
server_url='http://localhost:8069'<line_sep>db_name='book-db-14'<line_sep>username='admin'<line_sep>password='<PASSWORD>'<line_sep>common=client.ServerProxy('%s/xmlrpc/2/common'%server_url)<line_sep>user_id=common.authenticate(db_name username password {})<line_sep>models=client.ServerProxy('%s/xmlrpc/2/object'%server_url)<if_stmt>user_id<block_start>search_domain=['|' ['name' 'ilike' 'odoo'] ['name' 'ilike' 'sql']]<line_sep>books_ids=models.execute_kw(db_name user_id password 'library.book' 'search_read' [search_domain ['name' 'date_release']] {'limit':5})<line_sep>print('Books data:' books_ids)<block_end><else_stmt><block_start>print('Wrong credentials')<block_end> |
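# Hedged follow-up sketch (not in the original script): the same domain can be
# reused with search_count to fetch only the number of matching records.
<if_stmt>user_id<block_start>book_count=models.execute_kw(db_name user_id password 'library.book' 'search_count' [search_domain])<line_sep>print('Matching books:' book_count)<block_end>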
# -*- coding: utf-8 -*-
"""
Autograd
========
Autograd is now a core torch package for automatic differentiation.
It uses a tape based system for automatic differentiation.
In the forward phase, the autograd tape will remember all the operations
it executed, and in the backward phase, it will replay the operations.
Tensors that track history
--------------------------
In autograd, if any input ``Tensor`` of an operation has ``requires_grad=True``,
the computation will be tracked. After computing the backward pass, a gradient
w.r.t. this tensor is accumulated into ``.grad`` attribute.
There’s one more class which is very important for autograd
implementation - a ``Function``. ``Tensor`` and ``Function`` are
interconnected and build up an acyclic graph that encodes a complete
history of computation. Each tensor has a ``.grad_fn`` attribute that
references the ``Function`` that has created the ``Tensor`` (except for Tensors
created by the user - these have ``None`` as ``.grad_fn``).
If you want to compute the derivatives, you can call ``.backward()`` on
a ``Tensor``. If ``Tensor`` is a scalar (i.e. it holds a one element
tensor), you don’t need to specify any arguments to ``backward()``,
however if it has more elements, you need to specify a ``grad_output``
argument that is a tensor of matching shape.
"""<import_stmt>torch<line_sep>###############################################################
# Create a tensor and set requires_grad=True to track computation with it
x=torch.ones(2 2 requires_grad=<true>)<line_sep>print(x)<line_sep>###############################################################
#
print(x.data)<line_sep>###############################################################
#
print(x.grad)<line_sep>###############################################################
#
print(x.grad_fn)# we've created x ourselves
###############################################################
# Do an operation of x:
y=x+2<line_sep>print(y)<line_sep>###############################################################
# y was created as a result of an operation,
# so it has a grad_fn
print(y.grad_fn)<line_sep>###############################################################
# More operations on y:
z=y<times>y<times>3<line_sep>out=z.mean()<line_sep>print(z out)<line_sep>################################################################
# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
# flag in-place. The input flag defaults to ``True`` if not given.
a=torch.randn(2 2)<line_sep>a=((a<times>3)/(a-1))<line_sep>print(a.requires_grad)<line_sep>a.requires_grad_(<true>)<line_sep>print(a.requires_grad)<line_sep>b=(a<times>a).sum()<line_sep>print(b.grad_fn)<line_sep>###############################################################
# Gradients
# ---------
#
# let's backprop now and print gradients d(out)/dx
out.backward()<line_sep>print(x.grad)<line_sep>###############################################################
# By default, gradient computation flushes all the internal buffers
# contained in the graph, so if you want to do the backward on some
# part of the graph twice, you need to pass in ``retain_graph=True``
# during the first pass.
x=torch.ones(2 2 requires_grad=<true>)<line_sep>y=x+2<line_sep>y.backward(torch.ones(2 2) retain_graph=<true>)<line_sep># the retain_graph flag will prevent the internal buffers from being freed
print(x.grad)<line_sep>###############################################################
#
z=y<times>y<line_sep>print(z)<line_sep>###############################################################
#
# just backprop random gradients
gradient=torch.randn(2 2)<line_sep># this would fail if we didn't specify
# that we want to retain the graph
y.backward(gradient)<line_sep>print(x.grad)<line_sep>###############################################################
# You can also stop autograd from tracking history on Tensors
# with requires_grad=True by wrapping the code block in
# ``with torch.no_grad():``
print(x.requires_grad)<line_sep>print((x<power>2).requires_grad)<with_stmt>torch.no_grad()<block_start>print((x<power>2).requires_grad)<block_end> |
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
<import_from_stmt>thrift.Thrift TType TMessageType TException TApplicationException<import_from_stmt>ttypes *<import_from_stmt>thrift.Thrift TProcessor<import_from_stmt>thrift.transport TTransport<import_from_stmt>thrift.protocol TBinaryProtocol TProtocol<try_stmt><block_start><import_from_stmt>thrift.protocol fastbinary<block_end><except_stmt><block_start>fastbinary=<none><block_end><class_stmt>Iface<block_start><def_stmt>exists self table get<block_start>"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- get: the TGet to check for
"""<line_sep><pass><block_end><def_stmt>get self table get<block_start>"""
Method for getting data from a row.
If the row cannot be found an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- get: the TGet to fetch
"""<line_sep><pass><block_end><def_stmt>getMultiple self table gets<block_start>"""
Method for getting multiple rows.
If a row cannot be found there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- gets: a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error
"""<line_sep><pass><block_end><def_stmt>put self table put<block_start>"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- put: the TPut to put
"""<line_sep><pass><block_end><def_stmt>checkAndPut self table row family qualifier value put<block_start>"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- put: the TPut to put if the check succeeds
"""<line_sep><pass><block_end><def_stmt>putMultiple self table puts<block_start>"""
Commit a List of Puts to the table.
Parameters:
- table: the table to put data in
- puts: a list of TPuts to commit
"""<line_sep><pass><block_end><def_stmt>deleteSingle self table deleteSingle<block_start>"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword and cannot be used in Thrift
thus the inconsistent naming scheme from the other functions.
Parameters:
- table: the table to delete from
- deleteSingle: the TDelete to delete
"""<line_sep><pass><block_end><def_stmt>deleteMultiple self table deletes<block_start>"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- deletes: list of TDeletes to delete
"""<line_sep><pass><block_end><def_stmt>checkAndDelete self table row family qualifier value deleteSingle<block_start>"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it adds the delete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- deleteSingle: the TDelete to execute if the check succeeds
"""<line_sep><pass><block_end><def_stmt>increment self table increment<block_start>"""
Parameters:
- table: the table to increment the value on
- increment: the TIncrement to increment
"""<line_sep><pass><block_end><def_stmt>openScanner self table scan<block_start>"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- scan: the scan object to get a Scanner for
"""<line_sep><pass><block_end><def_stmt>getScannerRows self scannerId numRows<block_start>"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""<line_sep><pass><block_end><def_stmt>closeScanner self scannerId<block_start>"""
Closes the scanner. Should be called if you need to close
the Scanner before all results are read.
Exhausted scanners are closed automatically.
Parameters:
- scannerId: the Id of the Scanner to close *
"""<line_sep><pass><block_end><block_end><class_stmt>Client(Iface)<block_start><def_stmt>__init__ self iprot oprot=<none><block_start>self._iprot=self._oprot=iprot<if_stmt>oprot<is><not><none><block_start>self._oprot=oprot<block_end>self._seqid=0<block_end><def_stmt>exists self table get<block_start>"""
Test for the existence of columns in the table, as specified in the TGet.
@return true if the specified TGet matches one or more keys, false if not
Parameters:
- table: the table to check on
- get: the TGet to check for
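Example (illustrative sketch only; it assumes the companion ttypes module
defining TGet and friends, the Apache Thrift Python package, and placeholder
host, port, table and row names to adjust for your deployment):
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    found = client.exists('example_table', TGet(row='row1'))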
"""<line_sep>self.send_exists(table get)<line_sep><return>self.recv_exists()<block_end><def_stmt>send_exists self table get<block_start>self._oprot.writeMessageBegin('exists' TMessageType.CALL self._seqid)<line_sep>args=exists_args()<line_sep>args.table=table<line_sep>args.get=get<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_exists self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=exists_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "exists failed: unknown result")<line_sep><block_end><def_stmt>get self table get<block_start>"""
Method for getting data from a row.
If the row cannot be found, an empty Result is returned.
This can be checked by the empty field of the TResult
@return the result
Parameters:
- table: the table to get from
- get: the TGet to fetch
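Example (sketch; same connection assumptions as the exists() example above,
with the "empty" case shown as the row field coming back as None):
    result = client.get('example_table', TGet(row='row1'))
    if result.row is None:
        pass  # row was not found
    else:
        values = {(cv.family, cv.qualifier): cv.value for cv in result.columnValues}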
"""<line_sep>self.send_get(table get)<line_sep><return>self.recv_get()<block_end><def_stmt>send_get self table get<block_start>self._oprot.writeMessageBegin('get' TMessageType.CALL self._seqid)<line_sep>args=get_args()<line_sep>args.table=table<line_sep>args.get=get<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_get self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=get_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "get failed: unknown result")<line_sep><block_end><def_stmt>getMultiple self table gets<block_start>"""
Method for getting multiple rows.
If a row cannot be found, there will be a null
value in the result list for that TGet at the
same position.
So the Results are in the same order as the TGets.
Parameters:
- table: the table to get from
- gets: a list of TGets to fetch; the Result list
will have the Results at corresponding positions
or null if there was an error
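Example (sketch; same assumptions as the exists() example above):
    gets = [TGet(row='row1'), TGet(row='row2')]
    results = client.getMultiple('example_table', gets)
    # results[i] lines up with gets[i]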
"""<line_sep>self.send_getMultiple(table gets)<line_sep><return>self.recv_getMultiple()<block_end><def_stmt>send_getMultiple self table gets<block_start>self._oprot.writeMessageBegin('getMultiple' TMessageType.CALL self._seqid)<line_sep>args=getMultiple_args()<line_sep>args.table=table<line_sep>args.gets=gets<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_getMultiple self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=getMultiple_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "getMultiple failed: unknown result")<line_sep><block_end><def_stmt>put self table put<block_start>"""
Commit a TPut to a table.
Parameters:
- table: the table to put data in
- put: the TPut to put
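Example (sketch; TPut and TColumnValue are assumed to come from the
generated ttypes module for this service):
    put = TPut(row='row1',
               columnValues=[TColumnValue(family='cf', qualifier='q1', value='v1')])
    client.put('example_table', put)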
"""<line_sep>self.send_put(table put)<line_sep>self.recv_put()<block_end><def_stmt>send_put self table put<block_start>self._oprot.writeMessageBegin('put' TMessageType.CALL self._seqid)<line_sep>args=put_args()<line_sep>args.table=table<line_sep>args.put=put<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_put self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=put_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><return><block_end><def_stmt>checkAndPut self table row family qualifier value put<block_start>"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it executes the TPut.
@return true if the new put was executed, false otherwise
Parameters:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- put: the TPut to put if the check succeeds
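Example (sketch; passing value=None instead of 'v1' turns the check into a
non-existence check, per the parameter description above):
    new_put = TPut(row='row1',
                   columnValues=[TColumnValue(family='cf', qualifier='q1', value='v2')])
    applied = client.checkAndPut('example_table', 'row1', 'cf', 'q1', 'v1', new_put)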
"""<line_sep>self.send_checkAndPut(table row family qualifier value put)<line_sep><return>self.recv_checkAndPut()<block_end><def_stmt>send_checkAndPut self table row family qualifier value put<block_start>self._oprot.writeMessageBegin('checkAndPut' TMessageType.CALL self._seqid)<line_sep>args=checkAndPut_args()<line_sep>args.table=table<line_sep>args.row=row<line_sep>args.family=family<line_sep>args.qualifier=qualifier<line_sep>args.value=value<line_sep>args.put=put<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_checkAndPut self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=checkAndPut_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "checkAndPut failed: unknown result")<line_sep><block_end><def_stmt>putMultiple self table puts<block_start>"""
Commit a List of TPuts to the table.
Parameters:
- table: the table to put data in
- puts: a list of TPuts to commit
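Example (sketch; same assumptions as the put() example above):
    client.putMultiple('example_table', [
        TPut(row='row1', columnValues=[TColumnValue(family='cf', qualifier='q1', value='v1')]),
        TPut(row='row2', columnValues=[TColumnValue(family='cf', qualifier='q1', value='v2')]),
    ])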
"""<line_sep>self.send_putMultiple(table puts)<line_sep>self.recv_putMultiple()<block_end><def_stmt>send_putMultiple self table puts<block_start>self._oprot.writeMessageBegin('putMultiple' TMessageType.CALL self._seqid)<line_sep>args=putMultiple_args()<line_sep>args.table=table<line_sep>args.puts=puts<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_putMultiple self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=putMultiple_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><return><block_end><def_stmt>deleteSingle self table deleteSingle<block_start>"""
Deletes as specified by the TDelete.
Note: "delete" is a reserved keyword in Thrift and cannot be used,
hence the naming here is inconsistent with the other functions.
Parameters:
- table: the table to delete from
- deleteSingle: the TDelete to delete
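Example (sketch; a TDelete carrying only a row is understood to delete the
whole row, while adding TColumn entries narrows it to specific cells):
    client.deleteSingle('example_table', TDelete(row='row1'))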
"""<line_sep>self.send_deleteSingle(table deleteSingle)<line_sep>self.recv_deleteSingle()<block_end><def_stmt>send_deleteSingle self table deleteSingle<block_start>self._oprot.writeMessageBegin('deleteSingle' TMessageType.CALL self._seqid)<line_sep>args=deleteSingle_args()<line_sep>args.table=table<line_sep>args.deleteSingle=deleteSingle<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_deleteSingle self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=deleteSingle_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><return><block_end><def_stmt>deleteMultiple self table deletes<block_start>"""
Bulk commit a List of TDeletes to the table.
Throws a TIOError if any of the deletes fail.
Always returns an empty list for backwards compatibility.
Parameters:
- table: the table to delete from
- deletes: list of TDeletes to delete
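Example (sketch; same assumptions as the deleteSingle() example above):
    client.deleteMultiple('example_table', [TDelete(row='row1'), TDelete(row='row2')])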
"""<line_sep>self.send_deleteMultiple(table deletes)<line_sep><return>self.recv_deleteMultiple()<block_end><def_stmt>send_deleteMultiple self table deletes<block_start>self._oprot.writeMessageBegin('deleteMultiple' TMessageType.CALL self._seqid)<line_sep>args=deleteMultiple_args()<line_sep>args.table=table<line_sep>args.deletes=deletes<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_deleteMultiple self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=deleteMultiple_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "deleteMultiple failed: unknown result")<line_sep><block_end><def_stmt>checkAndDelete self table row family qualifier value deleteSingle<block_start>"""
Atomically checks if a row/family/qualifier value matches the expected
value. If it does, it executes the TDelete.
@return true if the new delete was executed, false otherwise
Parameters:
- table: to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- deleteSingle: the TDelete to execute if the check succeeds
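Example (sketch; mirrors checkAndPut(), with value=None meaning "delete only
if the column does not exist"):
    applied = client.checkAndDelete('example_table', 'row1', 'cf', 'q1', 'v1',
                                    TDelete(row='row1'))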
"""<line_sep>self.send_checkAndDelete(table row family qualifier value deleteSingle)<line_sep><return>self.recv_checkAndDelete()<block_end><def_stmt>send_checkAndDelete self table row family qualifier value deleteSingle<block_start>self._oprot.writeMessageBegin('checkAndDelete' TMessageType.CALL self._seqid)<line_sep>args=checkAndDelete_args()<line_sep>args.table=table<line_sep>args.row=row<line_sep>args.family=family<line_sep>args.qualifier=qualifier<line_sep>args.value=value<line_sep>args.deleteSingle=deleteSingle<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_checkAndDelete self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=checkAndDelete_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "checkAndDelete failed: unknown result")<line_sep><block_end><def_stmt>increment self table increment<block_start>"""
Parameters:
- table: the table to increment the value on
- increment: the TIncrement to increment
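Example (sketch; TIncrement and TColumnIncrement are assumed to come from the
generated ttypes module, and the call should hand back the updated cells as a
TResult):
    result = client.increment('example_table',
        TIncrement(row='row1',
                   columns=[TColumnIncrement(family='cf', qualifier='hits', amount=1)]))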
"""<line_sep>self.send_increment(table increment)<line_sep><return>self.recv_increment()<block_end><def_stmt>send_increment self table increment<block_start>self._oprot.writeMessageBegin('increment' TMessageType.CALL self._seqid)<line_sep>args=increment_args()<line_sep>args.table=table<line_sep>args.increment=increment<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_increment self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=increment_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "increment failed: unknown result")<line_sep><block_end><def_stmt>openScanner self table scan<block_start>"""
Get a Scanner for the provided TScan object.
@return Scanner Id to be used with other scanner procedures
Parameters:
- table: the table to get the Scanner for
- scan: the scan object to get a Scanner for
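Example (sketch covering the whole scanner life cycle, i.e. openScanner,
getScannerRows and closeScanner; same connection assumptions as above):
    scanner_id = client.openScanner('example_table', TScan(startRow='a', stopRow='z'))
    try:
        while True:
            rows = client.getScannerRows(scanner_id, 10)
            if not rows:
                break
            for r in rows:
                pass  # each r is a TResult
    finally:
        client.closeScanner(scanner_id)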
"""<line_sep>self.send_openScanner(table scan)<line_sep><return>self.recv_openScanner()<block_end><def_stmt>send_openScanner self table scan<block_start>self._oprot.writeMessageBegin('openScanner' TMessageType.CALL self._seqid)<line_sep>args=openScanner_args()<line_sep>args.table=table<line_sep>args.scan=scan<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_openScanner self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=openScanner_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "openScanner failed: unknown result")<line_sep><block_end><def_stmt>getScannerRows self scannerId numRows<block_start>"""
Grabs multiple rows from a Scanner.
@return Between zero and numRows TResults
Parameters:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""<line_sep>self.send_getScannerRows(scannerId numRows)<line_sep><return>self.recv_getScannerRows()<block_end><def_stmt>send_getScannerRows self scannerId numRows<block_start>self._oprot.writeMessageBegin('getScannerRows' TMessageType.CALL self._seqid)<line_sep>args=getScannerRows_args()<line_sep>args.scannerId=scannerId<line_sep>args.numRows=numRows<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_getScannerRows self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=getScannerRows_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.success<is><not><none><block_start><return>result.success<block_end><if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><if_stmt>result.ia<is><not><none><block_start><raise>result.ia<block_end><raise>TApplicationException(TApplicationException.MISSING_RESULT "getScannerRows failed: unknown result")<line_sep><block_end><def_stmt>closeScanner self scannerId<block_start>"""
Closes the scanner. Should be called if you need to close
the Scanner before all results are read.
Exhausted scanners are closed automatically.
Parameters:
- scannerId: the Id of the Scanner to close
"""<line_sep>self.send_closeScanner(scannerId)<line_sep>self.recv_closeScanner()<block_end><def_stmt>send_closeScanner self scannerId<block_start>self._oprot.writeMessageBegin('closeScanner' TMessageType.CALL self._seqid)<line_sep>args=closeScanner_args()<line_sep>args.scannerId=scannerId<line_sep>args.write(self._oprot)<line_sep>self._oprot.writeMessageEnd()<line_sep>self._oprot.trans.flush()<block_end><def_stmt>recv_closeScanner self <block_start>(fname mtype rseqid)=self._iprot.readMessageBegin()<if_stmt>mtype<eq>TMessageType.EXCEPTION<block_start>x=TApplicationException()<line_sep>x.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<line_sep><raise>x<block_end>result=closeScanner_result()<line_sep>result.read(self._iprot)<line_sep>self._iprot.readMessageEnd()<if_stmt>result.io<is><not><none><block_start><raise>result.io<block_end><if_stmt>result.ia<is><not><none><block_start><raise>result.ia<block_end><return><block_end><block_end><class_stmt>Processor(Iface TProcessor)<block_start><def_stmt>__init__ self handler<block_start>self._handler=handler<line_sep>self._processMap={}<line_sep>self._processMap["exists"]=Processor.process_exists<line_sep>self._processMap["get"]=Processor.process_get<line_sep>self._processMap["getMultiple"]=Processor.process_getMultiple<line_sep>self._processMap["put"]=Processor.process_put<line_sep>self._processMap["checkAndPut"]=Processor.process_checkAndPut<line_sep>self._processMap["putMultiple"]=Processor.process_putMultiple<line_sep>self._processMap["deleteSingle"]=Processor.process_deleteSingle<line_sep>self._processMap["deleteMultiple"]=Processor.process_deleteMultiple<line_sep>self._processMap["checkAndDelete"]=Processor.process_checkAndDelete<line_sep>self._processMap["increment"]=Processor.process_increment<line_sep>self._processMap["openScanner"]=Processor.process_openScanner<line_sep>self._processMap["getScannerRows"]=Processor.process_getScannerRows<line_sep>self._processMap["closeScanner"]=Processor.process_closeScanner<block_end><def_stmt>process self iprot oprot<block_start>(name type seqid)=iprot.readMessageBegin()<if_stmt>name<not><in>self._processMap<block_start>iprot.skip(TType.STRUCT)<line_sep>iprot.readMessageEnd()<line_sep>x=TApplicationException(TApplicationException.UNKNOWN_METHOD 'Unknown function %s'%(name))<line_sep>oprot.writeMessageBegin(name TMessageType.EXCEPTION seqid)<line_sep>x.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<line_sep><return><block_end><else_stmt><block_start>self._processMap[name](self seqid iprot oprot)<block_end><return><true><block_end><def_stmt>process_exists self seqid iprot oprot<block_start>args=exists_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=exists_result()<try_stmt><block_start>result.success=self._handler.exists(args.table args.get)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("exists" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_get self seqid iprot oprot<block_start>args=get_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=get_result()<try_stmt><block_start>result.success=self._handler.get(args.table args.get)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("get" TMessageType.REPLY 
seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_getMultiple self seqid iprot oprot<block_start>args=getMultiple_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=getMultiple_result()<try_stmt><block_start>result.success=self._handler.getMultiple(args.table args.gets)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("getMultiple" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_put self seqid iprot oprot<block_start>args=put_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=put_result()<try_stmt><block_start>self._handler.put(args.table args.put)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("put" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_checkAndPut self seqid iprot oprot<block_start>args=checkAndPut_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=checkAndPut_result()<try_stmt><block_start>result.success=self._handler.checkAndPut(args.table args.row args.family args.qualifier args.value args.put)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("checkAndPut" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_putMultiple self seqid iprot oprot<block_start>args=putMultiple_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=putMultiple_result()<try_stmt><block_start>self._handler.putMultiple(args.table args.puts)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("putMultiple" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_deleteSingle self seqid iprot oprot<block_start>args=deleteSingle_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=deleteSingle_result()<try_stmt><block_start>self._handler.deleteSingle(args.table args.deleteSingle)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("deleteSingle" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_deleteMultiple self seqid iprot oprot<block_start>args=deleteMultiple_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=deleteMultiple_result()<try_stmt><block_start>result.success=self._handler.deleteMultiple(args.table args.deletes)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("deleteMultiple" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_checkAndDelete self seqid iprot oprot<block_start>args=checkAndDelete_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=checkAndDelete_result()<try_stmt><block_start>result.success=self._handler.checkAndDelete(args.table args.row args.family args.qualifier args.value 
args.deleteSingle)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("checkAndDelete" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_increment self seqid iprot oprot<block_start>args=increment_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=increment_result()<try_stmt><block_start>result.success=self._handler.increment(args.table args.increment)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("increment" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_openScanner self seqid iprot oprot<block_start>args=openScanner_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=openScanner_result()<try_stmt><block_start>result.success=self._handler.openScanner(args.table args.scan)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end>oprot.writeMessageBegin("openScanner" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_getScannerRows self seqid iprot oprot<block_start>args=getScannerRows_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=getScannerRows_result()<try_stmt><block_start>result.success=self._handler.getScannerRows(args.scannerId args.numRows)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end><except_stmt>TIllegalArgument<as>ia<block_start>result.ia=ia<block_end>oprot.writeMessageBegin("getScannerRows" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><def_stmt>process_closeScanner self seqid iprot oprot<block_start>args=closeScanner_args()<line_sep>args.read(iprot)<line_sep>iprot.readMessageEnd()<line_sep>result=closeScanner_result()<try_stmt><block_start>self._handler.closeScanner(args.scannerId)<block_end><except_stmt>TIOError<as>io<block_start>result.io=io<block_end><except_stmt>TIllegalArgument<as>ia<block_start>result.ia=ia<block_end>oprot.writeMessageBegin("closeScanner" TMessageType.REPLY seqid)<line_sep>result.write(oprot)<line_sep>oprot.writeMessageEnd()<line_sep>oprot.trans.flush()<block_end><block_end># HELPER FUNCTIONS AND STRUCTURES
<class_stmt>exists_args<block_start>"""
Attributes:
- table: the table to check on
- get: the TGet to check for
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRUCT 'get' (TGet TGet.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self table=<none> get=<none> <block_start>self.table=table<line_sep>self.get=get<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.get=TGet()<line_sep>self.get.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('exists_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.get<is><not><none><block_start>oprot.writeFieldBegin('get' TType.STRUCT 2)<line_sep>self.get.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.get<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field get is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>exists_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.BOOL 'success' <none> <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.BOOL<block_start>self.success=iprot.readBool()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('exists_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.BOOL 0)<line_sep>oprot.writeBool(self.success)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>get_args<block_start>"""
Attributes:
- table: the table to get from
- get: the TGet to fetch
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRUCT 'get' (TGet TGet.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self table=<none> get=<none> <block_start>self.table=table<line_sep>self.get=get<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.get=TGet()<line_sep>self.get.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('get_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.get<is><not><none><block_start>oprot.writeFieldBegin('get' TType.STRUCT 2)<line_sep>self.get.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.get<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field get is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>get_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.STRUCT 'success' (TResult TResult.thrift_spec) <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.success=TResult()<line_sep>self.success.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('get_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.STRUCT 0)<line_sep>self.success.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>getMultiple_args<block_start>"""
Attributes:
- table: the table to get from
- gets: a list of TGets to fetch; the Result list
will have the Results at corresponding positions
or null if there was an error
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.LIST 'gets' (TType.STRUCT (TGet TGet.thrift_spec)) <none> ) # 2
)<def_stmt>__init__ self table=<none> gets=<none> <block_start>self.table=table<line_sep>self.gets=gets<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.gets=[]<line_sep>(_etype45 _size42)=iprot.readListBegin()<for_stmt>_i46 xrange(_size42)<block_start>_elem47=TGet()<line_sep>_elem47.read(iprot)<line_sep>self.gets.append(_elem47)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('getMultiple_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.gets<is><not><none><block_start>oprot.writeFieldBegin('gets' TType.LIST 2)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.gets))<for_stmt>iter48 self.gets<block_start>iter48.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.gets<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field gets is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>getMultiple_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.LIST 'success' (TType.STRUCT (TResult TResult.thrift_spec)) <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.success=[]<line_sep>(_etype52 _size49)=iprot.readListBegin()<for_stmt>_i53 xrange(_size49)<block_start>_elem54=TResult()<line_sep>_elem54.read(iprot)<line_sep>self.success.append(_elem54)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('getMultiple_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.LIST 0)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.success))<for_stmt>iter55 self.success<block_start>iter55.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>put_args<block_start>"""
Attributes:
- table: the table to put data in
- put: the TPut to put
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRUCT 'put' (TPut TPut.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self table=<none> put=<none> <block_start>self.table=table<line_sep>self.put=put<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.put=TPut()<line_sep>self.put.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('put_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.put<is><not><none><block_start>oprot.writeFieldBegin('put' TType.STRUCT 2)<line_sep>self.put.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.put<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field put is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>put_result<block_start>"""
Attributes:
- io
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self io=<none> <block_start>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('put_result')<if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>checkAndPut_args<block_start>"""
Attributes:
- table: to check in and put to
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value; if not provided, the
check is for the non-existence of the
column in question
- put: the TPut to put if the check succeeds
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRING 'row' <none> <none> ) # 2
(3 TType.STRING 'family' <none> <none> ) # 3
(4 TType.STRING 'qualifier' <none> <none> ) # 4
(5 TType.STRING 'value' <none> <none> ) # 5
(6 TType.STRUCT 'put' (TPut TPut.thrift_spec) <none> ) # 6
)<def_stmt>__init__ self table=<none> row=<none> family=<none> qualifier=<none> value=<none> put=<none> <block_start>self.table=table<line_sep>self.row=row<line_sep>self.family=family<line_sep>self.qualifier=qualifier<line_sep>self.value=value<line_sep>self.put=put<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.row=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>3<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.family=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>4<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.qualifier=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>5<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.value=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>6<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.put=TPut()<line_sep>self.put.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('checkAndPut_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.row<is><not><none><block_start>oprot.writeFieldBegin('row' TType.STRING 2)<line_sep>oprot.writeString(self.row)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.family<is><not><none><block_start>oprot.writeFieldBegin('family' TType.STRING 3)<line_sep>oprot.writeString(self.family)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.qualifier<is><not><none><block_start>oprot.writeFieldBegin('qualifier' TType.STRING 4)<line_sep>oprot.writeString(self.qualifier)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.value<is><not><none><block_start>oprot.writeFieldBegin('value' TType.STRING 5)<line_sep>oprot.writeString(self.value)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.put<is><not><none><block_start>oprot.writeFieldBegin('put' TType.STRUCT 6)<line_sep>self.put.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required 
field table is unset!')<block_end><if_stmt>self.row<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field row is unset!')<block_end><if_stmt>self.family<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field family is unset!')<block_end><if_stmt>self.qualifier<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field qualifier is unset!')<block_end><if_stmt>self.put<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field put is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>checkAndPut_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.BOOL 'success' <none> <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.BOOL<block_start>self.success=iprot.readBool()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('checkAndPut_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.BOOL 0)<line_sep>oprot.writeBool(self.success)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>putMultiple_args<block_start>"""
Attributes:
- table: the table to put data in
- puts: a list of TPuts to commit
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.LIST 'puts' (TType.STRUCT (TPut TPut.thrift_spec)) <none> ) # 2
)<def_stmt>__init__ self table=<none> puts=<none> <block_start>self.table=table<line_sep>self.puts=puts<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.puts=[]<line_sep>(_etype59 _size56)=iprot.readListBegin()<for_stmt>_i60 xrange(_size56)<block_start>_elem61=TPut()<line_sep>_elem61.read(iprot)<line_sep>self.puts.append(_elem61)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('putMultiple_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.puts<is><not><none><block_start>oprot.writeFieldBegin('puts' TType.LIST 2)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.puts))<for_stmt>iter62 self.puts<block_start>iter62.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.puts<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field puts is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>putMultiple_result<block_start>"""
Attributes:
- io
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self io=<none> <block_start>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('putMultiple_result')<if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>deleteSingle_args<block_start>"""
Attributes:
- table: the table to delete from
- deleteSingle: the TDelete to delete
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRUCT 'deleteSingle' (TDelete TDelete.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self table=<none> deleteSingle=<none> <block_start>self.table=table<line_sep>self.deleteSingle=deleteSingle<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.deleteSingle=TDelete()<line_sep>self.deleteSingle.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('deleteSingle_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.deleteSingle<is><not><none><block_start>oprot.writeFieldBegin('deleteSingle' TType.STRUCT 2)<line_sep>self.deleteSingle.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.deleteSingle<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field deleteSingle is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>deleteSingle_result<block_start>"""
Attributes:
- io
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self io=<none> <block_start>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('deleteSingle_result')<if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>deleteMultiple_args<block_start>"""
Attributes:
- table: the table to delete from
- deletes: list of TDeletes to delete
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.LIST 'deletes' (TType.STRUCT (TDelete TDelete.thrift_spec)) <none> ) # 2
)<def_stmt>__init__ self table=<none> deletes=<none> <block_start>self.table=table<line_sep>self.deletes=deletes<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.deletes=[]<line_sep>(_etype66 _size63)=iprot.readListBegin()<for_stmt>_i67 xrange(_size63)<block_start>_elem68=TDelete()<line_sep>_elem68.read(iprot)<line_sep>self.deletes.append(_elem68)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('deleteMultiple_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.deletes<is><not><none><block_start>oprot.writeFieldBegin('deletes' TType.LIST 2)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.deletes))<for_stmt>iter69 self.deletes<block_start>iter69.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.deletes<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field deletes is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>deleteMultiple_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.LIST 'success' (TType.STRUCT (TDelete TDelete.thrift_spec)) <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.success=[]<line_sep>(_etype73 _size70)=iprot.readListBegin()<for_stmt>_i74 xrange(_size70)<block_start>_elem75=TDelete()<line_sep>_elem75.read(iprot)<line_sep>self.success.append(_elem75)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('deleteMultiple_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.LIST 0)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.success))<for_stmt>iter76 self.success<block_start>iter76.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>checkAndDelete_args<block_start>"""
Attributes:
- table: the table to check in and delete from
- row: row to check
- family: column family to check
- qualifier: column qualifier to check
- value: the expected value, if not provided the
check is for the non-existence of the
column in question
- deleteSingle: the TDelete to execute if the check succeeds
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRING 'row' <none> <none> ) # 2
(3 TType.STRING 'family' <none> <none> ) # 3
(4 TType.STRING 'qualifier' <none> <none> ) # 4
(5 TType.STRING 'value' <none> <none> ) # 5
(6 TType.STRUCT 'deleteSingle' (TDelete TDelete.thrift_spec) <none> ) # 6
)<def_stmt>__init__ self table=<none> row=<none> family=<none> qualifier=<none> value=<none> deleteSingle=<none> <block_start>self.table=table<line_sep>self.row=row<line_sep>self.family=family<line_sep>self.qualifier=qualifier<line_sep>self.value=value<line_sep>self.deleteSingle=deleteSingle<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.row=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>3<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.family=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>4<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.qualifier=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>5<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.value=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>6<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.deleteSingle=TDelete()<line_sep>self.deleteSingle.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('checkAndDelete_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.row<is><not><none><block_start>oprot.writeFieldBegin('row' TType.STRING 2)<line_sep>oprot.writeString(self.row)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.family<is><not><none><block_start>oprot.writeFieldBegin('family' TType.STRING 3)<line_sep>oprot.writeString(self.family)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.qualifier<is><not><none><block_start>oprot.writeFieldBegin('qualifier' TType.STRING 4)<line_sep>oprot.writeString(self.qualifier)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.value<is><not><none><block_start>oprot.writeFieldBegin('value' TType.STRING 5)<line_sep>oprot.writeString(self.value)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.deleteSingle<is><not><none><block_start>oprot.writeFieldBegin('deleteSingle' TType.STRUCT 6)<line_sep>self.deleteSingle.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate 
self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.row<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field row is unset!')<block_end><if_stmt>self.family<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field family is unset!')<block_end><if_stmt>self.qualifier<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field qualifier is unset!')<block_end><if_stmt>self.deleteSingle<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field deleteSingle is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>checkAndDelete_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.BOOL 'success' <none> <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.BOOL<block_start>self.success=iprot.readBool()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('checkAndDelete_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.BOOL 0)<line_sep>oprot.writeBool(self.success)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>increment_args<block_start>"""
Attributes:
- table: the table to increment the value on
- increment: the TIncrement to increment
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRUCT 'increment' (TIncrement TIncrement.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self table=<none> increment=<none> <block_start>self.table=table<line_sep>self.increment=increment<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.increment=TIncrement()<line_sep>self.increment.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('increment_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.increment<is><not><none><block_start>oprot.writeFieldBegin('increment' TType.STRUCT 2)<line_sep>self.increment.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.increment<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field increment is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>increment_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.STRUCT 'success' (TResult TResult.thrift_spec) <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.success=TResult()<line_sep>self.success.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('increment_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.STRUCT 0)<line_sep>self.success.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>openScanner_args<block_start>"""
Attributes:
- table: the table to get the Scanner for
- scan: the scan object to get a Scanner for
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRING 'table' <none> <none> ) # 1
(2 TType.STRUCT 'scan' (TScan TScan.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self table=<none> scan=<none> <block_start>self.table=table<line_sep>self.scan=scan<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRING<block_start>self.table=iprot.readString()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.scan=TScan()<line_sep>self.scan.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('openScanner_args')<if_stmt>self.table<is><not><none><block_start>oprot.writeFieldBegin('table' TType.STRING 1)<line_sep>oprot.writeString(self.table)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.scan<is><not><none><block_start>oprot.writeFieldBegin('scan' TType.STRUCT 2)<line_sep>self.scan.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.table<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field table is unset!')<block_end><if_stmt>self.scan<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field scan is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>openScanner_result<block_start>"""
Attributes:
- success
- io
"""<line_sep>thrift_spec=((0 TType.I32 'success' <none> <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
)<def_stmt>__init__ self success=<none> io=<none> <block_start>self.success=success<line_sep>self.io=io<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.I32<block_start>self.success=iprot.readI32()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('openScanner_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.I32 0)<line_sep>oprot.writeI32(self.success)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>getScannerRows_args<block_start>"""
Attributes:
- scannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function.
- numRows: number of rows to return
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.I32 'scannerId' <none> <none> ) # 1
(2 TType.I32 'numRows' <none> 1 ) # 2
)<def_stmt>__init__ self scannerId=<none> numRows=thrift_spec[2][4] <block_start>self.scannerId=scannerId<line_sep>self.numRows=numRows<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.I32<block_start>self.scannerId=iprot.readI32()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.I32<block_start>self.numRows=iprot.readI32()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('getScannerRows_args')<if_stmt>self.scannerId<is><not><none><block_start>oprot.writeFieldBegin('scannerId' TType.I32 1)<line_sep>oprot.writeI32(self.scannerId)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.numRows<is><not><none><block_start>oprot.writeFieldBegin('numRows' TType.I32 2)<line_sep>oprot.writeI32(self.numRows)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.scannerId<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field scannerId is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>getScannerRows_result<block_start>"""
Attributes:
- success
- io
- ia: if the scannerId is invalid
"""<line_sep>thrift_spec=((0 TType.LIST 'success' (TType.STRUCT (TResult TResult.thrift_spec)) <none> ) # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
(2 TType.STRUCT 'ia' (TIllegalArgument TIllegalArgument.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self success=<none> io=<none> ia=<none> <block_start>self.success=success<line_sep>self.io=io<line_sep>self.ia=ia<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>0<block_start><if_stmt>ftype<eq>TType.LIST<block_start>self.success=[]<line_sep>(_etype80 _size77)=iprot.readListBegin()<for_stmt>_i81 xrange(_size77)<block_start>_elem82=TResult()<line_sep>_elem82.read(iprot)<line_sep>self.success.append(_elem82)<block_end>iprot.readListEnd()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.ia=TIllegalArgument()<line_sep>self.ia.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('getScannerRows_result')<if_stmt>self.success<is><not><none><block_start>oprot.writeFieldBegin('success' TType.LIST 0)<line_sep>oprot.writeListBegin(TType.STRUCT len(self.success))<for_stmt>iter83 self.success<block_start>iter83.write(oprot)<block_end>oprot.writeListEnd()<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.ia<is><not><none><block_start>oprot.writeFieldBegin('ia' TType.STRUCT 2)<line_sep>self.ia.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>closeScanner_args<block_start>"""
Attributes:
- scannerId: the Id of the Scanner to close
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.I32 'scannerId' <none> <none> ) # 1
)<def_stmt>__init__ self scannerId=<none> <block_start>self.scannerId=scannerId<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.I32<block_start>self.scannerId=iprot.readI32()<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('closeScanner_args')<if_stmt>self.scannerId<is><not><none><block_start>oprot.writeFieldBegin('scannerId' TType.I32 1)<line_sep>oprot.writeI32(self.scannerId)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><if_stmt>self.scannerId<is><none><block_start><raise>TProtocol.TProtocolException(message='Required field scannerId is unset!')<block_end><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end><class_stmt>closeScanner_result<block_start>"""
Attributes:
- io
- ia: if the scannerId is invalid
"""<line_sep>thrift_spec=(<none> # 0
(1 TType.STRUCT 'io' (TIOError TIOError.thrift_spec) <none> ) # 1
(2 TType.STRUCT 'ia' (TIllegalArgument TIllegalArgument.thrift_spec) <none> ) # 2
)<def_stmt>__init__ self io=<none> ia=<none> <block_start>self.io=io<line_sep>self.ia=ia<block_end><def_stmt>read self iprot<block_start><if_stmt>iprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>isinstance(iprot.trans TTransport.CReadableTransport)<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>fastbinary.decode_binary(self iprot.trans (self.__class__ self.thrift_spec))<line_sep><return><block_end>iprot.readStructBegin()<while_stmt><true><block_start>(fname ftype fid)=iprot.readFieldBegin()<if_stmt>ftype<eq>TType.STOP<block_start><break><block_end><if_stmt>fid<eq>1<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.io=TIOError()<line_sep>self.io.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><elif_stmt>fid<eq>2<block_start><if_stmt>ftype<eq>TType.STRUCT<block_start>self.ia=TIllegalArgument()<line_sep>self.ia.read(iprot)<block_end><else_stmt><block_start>iprot.skip(ftype)<block_end><block_end><else_stmt><block_start>iprot.skip(ftype)<block_end>iprot.readFieldEnd()<block_end>iprot.readStructEnd()<block_end><def_stmt>write self oprot<block_start><if_stmt>oprot.__class__<eq>TBinaryProtocol.TBinaryProtocolAccelerated<and>self.thrift_spec<is><not><none><and>fastbinary<is><not><none><block_start>oprot.trans.write(fastbinary.encode_binary(self (self.__class__ self.thrift_spec)))<line_sep><return><block_end>oprot.writeStructBegin('closeScanner_result')<if_stmt>self.io<is><not><none><block_start>oprot.writeFieldBegin('io' TType.STRUCT 1)<line_sep>self.io.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end><if_stmt>self.ia<is><not><none><block_start>oprot.writeFieldBegin('ia' TType.STRUCT 2)<line_sep>self.ia.write(oprot)<line_sep>oprot.writeFieldEnd()<block_end>oprot.writeFieldStop()<line_sep>oprot.writeStructEnd()<block_end><def_stmt>validate self<block_start><return><block_end><def_stmt>__repr__ self<block_start>L=['%s=%r'%(key value)<for>key,value self.__dict__.iteritems()]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(L))<block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other self.__class__)<and>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>(self<eq>other)<block_end><block_end> |
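# Illustrative usage sketch (added comment, not part of the generated code): the scanner
# argument/result structs above are normally driven through the generated service client
# in the order openScanner -> getScannerRows -> closeScanner. The client/transport wiring
# shown here is an assumption about the surrounding module, not something defined in it:
#
# client = THBaseService.Client(protocol)   # hypothetical client instance
# scanner_id = client.openScanner(table, TScan())
# try:
#     rows = client.getScannerRows(scanner_id, numRows=25)   # list of TResult
# finally:
#     client.closeScanner(scanner_id)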
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>fractions gcd<line_sep>"""Code generation for ForUtil.java"""<line_sep>MAX_SPECIALIZED_BITS_PER_VALUE=24<line_sep>OUTPUT_FILE="ForUtil.java"<line_sep>PRIMITIVE_SIZE=[8 16 32]<line_sep>HEADER="""// This file has been automatically generated, DO NOT EDIT
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.backward_codecs.lucene84;
import java.io.IOException;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
// Inspired from https://fulmicoton.com/posts/bitpacking/
// Encodes multiple integers in a long to get SIMD-like speedups.
// If bitsPerValue <= 8 then we pack 8 ints per long
// else if bitsPerValue <= 16 we pack 4 ints per long
// else we pack 2 ints per long
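// Worked example (illustrative): at bitsPerValue = 3, the 128 values of a block take
// 128 * 3 = 384 bits, i.e. 2 * bitsPerValue = 6 longs once fully bit-packed.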
final class ForUtil {
static final int BLOCK_SIZE = 128;
private static final int BLOCK_SIZE_LOG2 = 7;
private static long expandMask32(long mask32) {
return mask32 | (mask32 << 32);
}
private static long expandMask16(long mask16) {
return expandMask32(mask16 | (mask16 << 16));
}
private static long expandMask8(long mask8) {
return expandMask16(mask8 | (mask8 << 8));
}
private static long mask32(int bitsPerValue) {
return expandMask32((1L << bitsPerValue) - 1);
}
private static long mask16(int bitsPerValue) {
return expandMask16((1L << bitsPerValue) - 1);
}
private static long mask8(int bitsPerValue) {
return expandMask8((1L << bitsPerValue) - 1);
}
private static void expand8(long[] arr) {
for (int i = 0; i < 16; ++i) {
long l = arr[i];
arr[i] = (l >>> 56) & 0xFFL;
arr[16 + i] = (l >>> 48) & 0xFFL;
arr[32 + i] = (l >>> 40) & 0xFFL;
arr[48 + i] = (l >>> 32) & 0xFFL;
arr[64 + i] = (l >>> 24) & 0xFFL;
arr[80 + i] = (l >>> 16) & 0xFFL;
arr[96 + i] = (l >>> 8) & 0xFFL;
arr[112 + i] = l & 0xFFL;
}
}
private static void expand8To32(long[] arr) {
for (int i = 0; i < 16; ++i) {
long l = arr[i];
arr[i] = (l >>> 24) & 0x000000FF000000FFL;
arr[16 + i] = (l >>> 16) & 0x000000FF000000FFL;
arr[32 + i] = (l >>> 8) & 0x000000FF000000FFL;
arr[48 + i] = l & 0x000000FF000000FFL;
}
}
private static void collapse8(long[] arr) {
for (int i = 0; i < 16; ++i) {
arr[i] =
(arr[i] << 56)
| (arr[16 + i] << 48)
| (arr[32 + i] << 40)
| (arr[48 + i] << 32)
| (arr[64 + i] << 24)
| (arr[80 + i] << 16)
| (arr[96 + i] << 8)
| arr[112 + i];
}
}
private static void expand16(long[] arr) {
for (int i = 0; i < 32; ++i) {
long l = arr[i];
arr[i] = (l >>> 48) & 0xFFFFL;
arr[32 + i] = (l >>> 32) & 0xFFFFL;
arr[64 + i] = (l >>> 16) & 0xFFFFL;
arr[96 + i] = l & 0xFFFFL;
}
}
private static void expand16To32(long[] arr) {
for (int i = 0; i < 32; ++i) {
long l = arr[i];
arr[i] = (l >>> 16) & 0x0000FFFF0000FFFFL;
arr[32 + i] = l & 0x0000FFFF0000FFFFL;
}
}
private static void collapse16(long[] arr) {
for (int i = 0; i < 32; ++i) {
arr[i] = (arr[i] << 48) | (arr[32 + i] << 32) | (arr[64 + i] << 16) | arr[96 + i];
}
}
private static void expand32(long[] arr) {
for (int i = 0; i < 64; ++i) {
long l = arr[i];
arr[i] = l >>> 32;
arr[64 + i] = l & 0xFFFFFFFFL;
}
}
private static void collapse32(long[] arr) {
for (int i = 0; i < 64; ++i) {
arr[i] = (arr[i] << 32) | arr[64 + i];
}
}
private static void prefixSum8(long[] arr, long base) {
expand8To32(arr);
prefixSum32(arr, base);
}
private static void prefixSum16(long[] arr, long base) {
// We need to move to the next primitive size to avoid overflows
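// (illustrative: two 16-bit deltas of 0xFFFF already sum to 0x1FFFE, which no longer fits in 16 bits)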
expand16To32(arr);
prefixSum32(arr, base);
}
private static void prefixSum32(long[] arr, long base) {
arr[0] += base << 32;
innerPrefixSum32(arr);
expand32(arr);
final long l = arr[BLOCK_SIZE / 2 - 1];
for (int i = BLOCK_SIZE / 2; i < BLOCK_SIZE; ++i) {
arr[i] += l;
}
}
// For some reason unrolling seems to help
private static void innerPrefixSum32(long[] arr) {
arr[1] += arr[0];
arr[2] += arr[1];
arr[3] += arr[2];
arr[4] += arr[3];
arr[5] += arr[4];
arr[6] += arr[5];
arr[7] += arr[6];
arr[8] += arr[7];
arr[9] += arr[8];
arr[10] += arr[9];
arr[11] += arr[10];
arr[12] += arr[11];
arr[13] += arr[12];
arr[14] += arr[13];
arr[15] += arr[14];
arr[16] += arr[15];
arr[17] += arr[16];
arr[18] += arr[17];
arr[19] += arr[18];
arr[20] += arr[19];
arr[21] += arr[20];
arr[22] += arr[21];
arr[23] += arr[22];
arr[24] += arr[23];
arr[25] += arr[24];
arr[26] += arr[25];
arr[27] += arr[26];
arr[28] += arr[27];
arr[29] += arr[28];
arr[30] += arr[29];
arr[31] += arr[30];
arr[32] += arr[31];
arr[33] += arr[32];
arr[34] += arr[33];
arr[35] += arr[34];
arr[36] += arr[35];
arr[37] += arr[36];
arr[38] += arr[37];
arr[39] += arr[38];
arr[40] += arr[39];
arr[41] += arr[40];
arr[42] += arr[41];
arr[43] += arr[42];
arr[44] += arr[43];
arr[45] += arr[44];
arr[46] += arr[45];
arr[47] += arr[46];
arr[48] += arr[47];
arr[49] += arr[48];
arr[50] += arr[49];
arr[51] += arr[50];
arr[52] += arr[51];
arr[53] += arr[52];
arr[54] += arr[53];
arr[55] += arr[54];
arr[56] += arr[55];
arr[57] += arr[56];
arr[58] += arr[57];
arr[59] += arr[58];
arr[60] += arr[59];
arr[61] += arr[60];
arr[62] += arr[61];
arr[63] += arr[62];
}
private static void readLELongs(DataInput in, long[] dst, int offset, int length)
throws IOException {
in.readLongs(dst, offset, length);
for (int i = 0; i < length; ++i) {
dst[offset + i] = Long.reverseBytes(dst[offset + i]);
}
}
private final long[] tmp = new long[BLOCK_SIZE / 2];
/** Encode 128 integers from {@code longs} into {@code out}. */
void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException {
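// Collapse the block so each long holds 8, 4 or 2 values depending on bitsPerValue,
// then bit-pack those lanes into 2 * bitsPerValue output longs.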
final int nextPrimitive;
final int numLongs;
if (bitsPerValue <= 8) {
nextPrimitive = 8;
numLongs = BLOCK_SIZE / 8;
collapse8(longs);
} else if (bitsPerValue <= 16) {
nextPrimitive = 16;
numLongs = BLOCK_SIZE / 4;
collapse16(longs);
} else {
nextPrimitive = 32;
numLongs = BLOCK_SIZE / 2;
collapse32(longs);
}
final int numLongsPerShift = bitsPerValue * 2;
int idx = 0;
int shift = nextPrimitive - bitsPerValue;
for (int i = 0; i < numLongsPerShift; ++i) {
tmp[i] = longs[idx++] << shift;
}
for (shift = shift - bitsPerValue; shift >= 0; shift -= bitsPerValue) {
for (int i = 0; i < numLongsPerShift; ++i) {
tmp[i] |= longs[idx++] << shift;
}
}
final int remainingBitsPerLong = shift + bitsPerValue;
final long maskRemainingBitsPerLong;
if (nextPrimitive == 8) {
maskRemainingBitsPerLong = MASKS8[remainingBitsPerLong];
} else if (nextPrimitive == 16) {
maskRemainingBitsPerLong = MASKS16[remainingBitsPerLong];
} else {
maskRemainingBitsPerLong = MASKS32[remainingBitsPerLong];
}
int tmpIdx = 0;
int remainingBitsPerValue = bitsPerValue;
while (idx < numLongs) {
if (remainingBitsPerValue >= remainingBitsPerLong) {
remainingBitsPerValue -= remainingBitsPerLong;
tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & maskRemainingBitsPerLong;
if (remainingBitsPerValue == 0) {
idx++;
remainingBitsPerValue = bitsPerValue;
}
} else {
final long mask1, mask2;
if (nextPrimitive == 8) {
mask1 = MASKS8[remainingBitsPerValue];
mask2 = MASKS8[remainingBitsPerLong - remainingBitsPerValue];
} else if (nextPrimitive == 16) {
mask1 = MASKS16[remainingBitsPerValue];
mask2 = MASKS16[remainingBitsPerLong - remainingBitsPerValue];
} else {
mask1 = MASKS32[remainingBitsPerValue];
mask2 = MASKS32[remainingBitsPerLong - remainingBitsPerValue];
}
tmp[tmpIdx] |= (longs[idx++] & mask1) << (remainingBitsPerLong - remainingBitsPerValue);
remainingBitsPerValue = bitsPerValue - remainingBitsPerLong + remainingBitsPerValue;
tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & mask2;
}
}
for (int i = 0; i < numLongsPerShift; ++i) {
// Java longs are big endian and we want to read little endian longs, so we need to reverse
// bytes
long l = Long.reverseBytes(tmp[i]);
out.writeLong(l);
}
}
/** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. */
int numBytes(int bitsPerValue) throws IOException {
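// 128 values * bitsPerValue bits / 8 bits per byte, expressed as a shift by BLOCK_SIZE_LOG2 - 3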
return bitsPerValue << (BLOCK_SIZE_LOG2 - 3);
}
private static void decodeSlow(int bitsPerValue, DataInput in, long[] tmp, long[] longs)
throws IOException {
final int numLongs = bitsPerValue << 1;
readLELongs(in, tmp, 0, numLongs);
final long mask = MASKS32[bitsPerValue];
int longsIdx = 0;
int shift = 32 - bitsPerValue;
for (; shift >= 0; shift -= bitsPerValue) {
shiftLongs(tmp, numLongs, longs, longsIdx, shift, mask);
longsIdx += numLongs;
}
final int remainingBitsPerLong = shift + bitsPerValue;
final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong];
int tmpIdx = 0;
int remainingBits = remainingBitsPerLong;
for (; longsIdx < BLOCK_SIZE / 2; ++longsIdx) {
int b = bitsPerValue - remainingBits;
long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b;
while (b >= remainingBitsPerLong) {
b -= remainingBitsPerLong;
l |= (tmp[tmpIdx++] & mask32RemainingBitsPerLong) << b;
}
if (b > 0) {
l |= (tmp[tmpIdx] >>> (remainingBitsPerLong - b)) & MASKS32[b];
remainingBits = remainingBitsPerLong - b;
} else {
remainingBits = remainingBitsPerLong;
}
longs[longsIdx] = l;
}
}
/**
* The pattern that this shiftLongs method applies is recognized by the C2 compiler, which
* generates SIMD instructions for it in order to shift multiple longs at once.
*/
private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, long mask) {
for (int i = 0; i < count; ++i) {
b[bi + i] = (a[i] >>> shift) & mask;
}
}
"""<def_stmt>writeRemainderWithSIMDOptimize bpv next_primitive remaining_bits_per_long o num_values f<block_start>iteration=1<line_sep>num_longs=bpv<times>num_values/remaining_bits_per_long<while_stmt>num_longs%2<eq>0<and>num_values%2<eq>0<block_start>num_longs<augdiv>2<line_sep>num_values<augdiv>2<line_sep>iteration<augmul>2<block_end>f.write(' shiftLongs(tmp, %d, tmp, 0, 0, MASK%d_%d);\n'%(iteration<times>num_longs next_primitive remaining_bits_per_long))<line_sep>f.write(' for (int iter = 0, tmpIdx = 0, longsIdx = %d; iter < %d; ++iter, tmpIdx += %d, longsIdx += %d) {\n'%(o iteration num_longs num_values))<line_sep>tmp_idx=0<line_sep>b=bpv<line_sep>b<augsub>remaining_bits_per_long<line_sep>f.write(' long l0 = tmp[tmpIdx + %d] << %d;\n'%(tmp_idx b))<line_sep>tmp_idx<augadd>1<while_stmt>b<ge>remaining_bits_per_long<block_start>b<augsub>remaining_bits_per_long<line_sep>f.write(' l0 |= tmp[tmpIdx + %d] << %d;\n'%(tmp_idx b))<line_sep>tmp_idx<augadd>1<block_end>f.write(' longs[longsIdx + 0] = l0;\n')<line_sep>f.write(' }\n')<block_end><def_stmt>writeRemainder bpv next_primitive remaining_bits_per_long o num_values f<block_start>iteration=1<line_sep>num_longs=bpv<times>num_values/remaining_bits_per_long<while_stmt>num_longs%2<eq>0<and>num_values%2<eq>0<block_start>num_longs<augdiv>2<line_sep>num_values<augdiv>2<line_sep>iteration<augmul>2<block_end>f.write(' for (int iter = 0, tmpIdx = 0, longsIdx = %d; iter < %d; ++iter, tmpIdx += %d, longsIdx += %d) {\n'%(o iteration num_longs num_values))<line_sep>i=0<line_sep>remaining_bits=0<line_sep>tmp_idx=0<for_stmt>i range(num_values)<block_start>b=bpv<if_stmt>remaining_bits<eq>0<block_start>b<augsub>remaining_bits_per_long<line_sep>f.write(' long l%d = (tmp[tmpIdx + %d] & MASK%d_%d) << %d;\n'%(i tmp_idx next_primitive remaining_bits_per_long b))<block_end><else_stmt><block_start>b<augsub>remaining_bits<line_sep>f.write(' long l%d = (tmp[tmpIdx + %d] & MASK%d_%d) << %d;\n'%(i tmp_idx next_primitive remaining_bits b))<block_end>tmp_idx<augadd>1<while_stmt>b<ge>remaining_bits_per_long<block_start>b<augsub>remaining_bits_per_long<line_sep>f.write(' l%d |= (tmp[tmpIdx + %d] & MASK%d_%d) << %d;\n'%(i tmp_idx next_primitive remaining_bits_per_long b))<line_sep>tmp_idx<augadd>1<block_end><if_stmt>b<g>0<block_start>f.write(' l%d |= (tmp[tmpIdx + %d] >>> %d) & MASK%d_%d;\n'%(i tmp_idx remaining_bits_per_long-b next_primitive b))<line_sep>remaining_bits=remaining_bits_per_long-b<block_end>f.write(' longs[longsIdx + %d] = l%d;\n'%(i i))<block_end>f.write(' }\n')<block_end><def_stmt>writeDecode bpv f<block_start>next_primitive=32<if_stmt>bpv<le>8<block_start>next_primitive=8<block_end><elif_stmt>bpv<le>16<block_start>next_primitive=16<block_end>f.write(' private static void decode%d(DataInput in, long[] tmp, long[] longs) throws IOException {\n'%bpv)<line_sep>num_values_per_long=64/next_primitive<if_stmt>bpv<eq>next_primitive<block_start>f.write(' readLELongs(in, longs, 0, %d);\n'%(bpv<times>2))<block_end><else_stmt><block_start>f.write(' readLELongs(in, tmp, 0, %d);\n'%(bpv<times>2))<line_sep>shift=next_primitive-bpv<line_sep>o=0<while_stmt>shift<ge>0<block_start>f.write(' shiftLongs(tmp, %d, longs, %d, %d, MASK%d_%d);\n'%(bpv<times>2 o shift next_primitive bpv))<line_sep>o<augadd>bpv<times>2<line_sep>shift<augsub>bpv<block_end><if_stmt>shift+bpv<g>0<block_start><if_stmt>bpv%(next_primitive%bpv)<eq>0<block_start>writeRemainderWithSIMDOptimize(bpv next_primitive shift+bpv o 128/num_values_per_long-o f)<block_end><else_stmt><block_start>writeRemainder(bpv 
next_primitive shift+bpv o 128/num_values_per_long-o f)<block_end><block_end><block_end>f.write(' }\n')<line_sep>f.write('\n')<block_end><if_stmt>__name__<eq>'__main__'<block_start>f=open(OUTPUT_FILE 'w')<line_sep>f.write(HEADER)<for_stmt>primitive_size PRIMITIVE_SIZE<block_start>f.write(' private static final long[] MASKS%d = new long[%d];\n'%(primitive_size primitive_size))<block_end>f.write('\n')<line_sep>f.write(' static {\n')<for_stmt>primitive_size PRIMITIVE_SIZE<block_start>f.write(' for (int i = 0; i < %d; ++i) {\n'%primitive_size)<line_sep>f.write(' MASKS%d[i] = mask%d(i);\n'%(primitive_size primitive_size))<line_sep>f.write(' }\n')<block_end>f.write(' }\n')<line_sep>f.write(' // mark values in array as final longs to avoid the cost of reading array, arrays should only be\n')<line_sep>f.write(' // used when the idx is a variable\n')<for_stmt>primitive_size PRIMITIVE_SIZE<block_start><for_stmt>bpv range(1 min(MAX_SPECIALIZED_BITS_PER_VALUE+1 primitive_size))<block_start><if_stmt>bpv<times>2<ne>primitive_size<or>primitive_size<eq>8<block_start>f.write(' private static final long MASK%d_%d = MASKS%d[%d];\n'%(primitive_size bpv primitive_size bpv))<block_end><block_end><block_end>f.write("""
/** Decode 128 integers into {@code longs}. */
void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException {
switch (bitsPerValue) {
""")<for_stmt>bpv range(1 MAX_SPECIALIZED_BITS_PER_VALUE+1)<block_start>next_primitive=32<if_stmt>bpv<le>8<block_start>next_primitive=8<block_end><elif_stmt>bpv<le>16<block_start>next_primitive=16<block_end>f.write(' case %d:\n'%bpv)<line_sep>f.write(' decode%d(in, tmp, longs);\n'%bpv)<line_sep>f.write(' expand%d(longs);\n'%next_primitive)<line_sep>f.write(' break;\n')<block_end>f.write(' default:\n')<line_sep>f.write(' decodeSlow(bitsPerValue, in, tmp, longs);\n')<line_sep>f.write(' expand32(longs);\n')<line_sep>f.write(' break;\n')<line_sep>f.write(' }\n')<line_sep>f.write(' }\n')<line_sep>f.write("""
/** Delta-decode 128 integers into {@code longs}. */
void decodeAndPrefixSum(int bitsPerValue, DataInput in, long base, long[] longs)
throws IOException {
switch (bitsPerValue) {
""")<for_stmt>bpv range(1 MAX_SPECIALIZED_BITS_PER_VALUE+1)<block_start>next_primitive=32<if_stmt>bpv<le>8<block_start>next_primitive=8<block_end><elif_stmt>bpv<le>16<block_start>next_primitive=16<block_end>f.write(' case %d:\n'%bpv)<line_sep>f.write(' decode%d(in, tmp, longs);\n'%bpv)<line_sep>f.write(' prefixSum%d(longs, base);\n'%next_primitive)<line_sep>f.write(' break;\n')<block_end>f.write(' default:\n')<line_sep>f.write(' decodeSlow(bitsPerValue, in, tmp, longs);\n')<line_sep>f.write(' prefixSum32(longs, base);\n')<line_sep>f.write(' break;\n')<line_sep>f.write(' }\n')<line_sep>f.write(' }\n')<line_sep>f.write('\n')<for_stmt>i range(1 MAX_SPECIALIZED_BITS_PER_VALUE+1)<block_start>writeDecode(i f)<block_end>f.write('}\n')<block_end> |
<import_stmt>numpy<as>np<import_stmt>pathlib<import_stmt>nanopq<import_stmt>pickle<import_stmt>time<import_stmt>more_itertools<import_stmt>texmex_python<import_stmt>util<line_sep>### If you'd like to debug, please uninstall rii and uncomment the following lines
#import sys
#sys.path.append('../../')
<import_stmt>rii<def_stmt>run engine L Xq gt r<block_start>"""
Given a searcher, run the search. Return the runtime and the accuracy
Args:
engine (rii.Rii): Rii search engine
L (int): The number of candidates for search
Xq (np.array): Query vectors. shape=(Nq, D). dtype=np.float32
gt (np.array): Groundtruth. shape=(Nq, ANY). dtype=np.int32
r (int): Top R
Returns:
(float, float): Duration [sec/query] and recall@r over the queries
"""<assert_stmt>Xq.ndim<eq>2<assert_stmt>Xq.dtype<eq>np.float32<line_sep>Nq=Xq.shape[0]<line_sep>I=np.zeros((Nq r) dtype=int)<line_sep>t0=time.time()<for_stmt>i,q enumerate(Xq)<block_start>I[i],_=engine.query(q=q topk=r L=L)<block_end>t1=time.time()<line_sep>duration=(t1-t0)/Nq# sec/query
recall=util.recall_at_r(I gt r)<line_sep><return>duration recall<block_end># Setup paths
p=pathlib.Path('.')<line_sep>path_train=p/"data/bigann_learn.bvecs"<line_sep>path_base=p/"data/bigann_base.bvecs"<line_sep>path_query=p/"data/bigann_query.bvecs"<line_sep>path_gt=p/"data/gnd/idx_1000M.ivecs"<line_sep># Read queries and groundtruth
Xq=texmex_python.reader.read_bvec(path_query.open("rb")).astype(np.float32)<line_sep>gt=util.ivecs_read(str(path_gt))<line_sep># Read top Nt vectors for training
print("Start to read training vectors")<line_sep>Xt=[]<line_sep>Nt=10000000# Use top 10M vectors for training
<with_stmt>path_train.open("rb")<as>f<block_start><for_stmt>vec texmex_python.reader.read_bvec_iter(f)<block_start>Xt.append(vec)<if_stmt>len(Xt)<eq>Nt<block_start><break><block_end><block_end><block_end>Xt=np.array(Xt dtype=np.float32)<line_sep>print("Xt.shape: {}, Xt.dtype: {}".format(Xt.shape Xt.dtype))<line_sep># Train a PQ codec and save it
M=8# The number of subspaces.
path_codec=p/'cache/codec_m{}.pkl'.format(M)<if_stmt><not>path_codec.exists()<block_start>print("Start to train a codec")<line_sep>codec=nanopq.PQ(M=M verbose=<true>).fit(vecs=Xt)<line_sep>pickle.dump(codec path_codec.open("wb"))<line_sep>print("Dump the codec in {}".format(path_codec))<block_end><else_stmt><block_start>print("Read a codec from cache: {}".format(path_codec))<line_sep>codec=pickle.load(path_codec.open("rb"))<block_end># Construct a search engine
path_engine=p/'cache/engine_m{}.pkl'.format(M)<if_stmt><not>path_engine.exists()<block_start>print("Start to construct a Rii engine")<line_sep>e=rii.Rii(fine_quantizer=codec)<line_sep>batch_size=10000000<with_stmt>path_base.open("rb")<as>f<block_start><for_stmt>n,batch enumerate(more_itertools.chunked(texmex_python.reader.read_bvec_iter(f) batch_size))<block_start>print("batch: {} / {}".format(n int(1000000000/batch_size)))<line_sep>e.add(vecs=np.array(batch dtype=np.float32))<block_end>e.reconfigure()<block_end>pickle.dump(e path_engine.open("wb"))<line_sep>print("Dump the engine in {}".format(path_engine))<block_end><else_stmt><block_start>print("Read an engine from cache: {}".format(path_engine))<line_sep>e=pickle.load(path_engine.open("rb"))<block_end>e.print_params()<line_sep># Run search
r=1# Recall@r
w=1# The parameter for search candidates. L = L0 * w = N / nlist * w. The default (fastest) setting is w=1
duration,recall=run(engine=e L=e.L0<times>w Xq=Xq gt=gt r=r)<line_sep>print("{} msec/query. Recall@{} = {}".format(duration<times>1000 r recall))<line_sep> |
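# util.recall_at_r is an external helper; the sketch below shows the usual Recall@r
# definition for ANN benchmarks, assuming gt[i][0] is the true nearest-neighbor id of
# query i and I[i] holds the returned top-r ids. It is illustrative only and is not
# the implementation used above.
def recall_at_r_sketch(I, gt, r):
    hits = sum(1 for i in range(I.shape[0]) if gt[i][0] in I[i][:r])
    return hits / I.shape[0]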
<import_from_stmt>typing List<import_from_stmt>trescope.config Config AnchorType AnchorCM<import_from_stmt>trescope.core.Utils toListIfNumpyOrTensorArray<class_stmt>VectorField3DConfig(Config)<block_start>"""Config for :py:meth:`trescope.Output.plotVectorField3D`"""<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.__sizeFactor:float=.5<line_sep>self.__autoScaleByLocation=<false><line_sep>self.__colorScale=[[0 0x88000000] [1 0x88000000]]<line_sep>self.__x:List[float]=[]<line_sep>self.__y:List[float]=[]<line_sep>self.__z:List[float]=[]<line_sep>self.__anchor:str=str(AnchorCM)<block_end><def_stmt>sizeFactor self sizeFactor:float<block_start>"""
Specify the size factor.
:param sizeFactor: size factor, default .5
:return: self, for chain call
"""<line_sep>self.__sizeFactor=sizeFactor<line_sep><return>self<block_end><def_stmt>anchor self anchor:AnchorType<block_start>"""
Specify the anchor type.
:param anchor: anchor
:return: self, for chain call
"""<line_sep>self.__anchor=str(anchor)<line_sep><return>self<block_end><def_stmt>autoScaleByLocation self autoScale:bool<block_start>"""
Specify whether to auto scale.
:param autoScale: auto scale, default `False`
:return: self, for chain call
"""<line_sep>self.__autoScaleByLocation=autoScale<line_sep><return>self<block_end><def_stmt>locations self x:List[float] y:List[float] z:List[float]<block_start>"""
Specify the locations.
:param x: x
:param y: y
:param z: z
:return: self, for chain call
"""<line_sep>self.__x,self.__y,self.__z=x y z<line_sep><return>self<block_end><def_stmt>color self color:int<block_start>"""
Specify the color.
:param color: color
:return: self, for chain call
"""<line_sep>self.__colorScale=[[0 color] [1 color]]<line_sep><return>self<block_end><def_stmt>toDict self<block_start><return>{**super().toDict() 'sizeFactor':self.__sizeFactor 'autoScaleByLocation':self.__autoScaleByLocation 'colorScale':self.__colorScale 'locationX':toListIfNumpyOrTensorArray(self.__x) 'locationY':toListIfNumpyOrTensorArray(self.__y) 'locationZ':toListIfNumpyOrTensorArray(self.__z) 'anchor':self.__anchor}<block_end><block_end> |
<import_stmt>os os.path discord<import_from_stmt>discord.ext commands<import_from_stmt>colorama Fore<line_sep># Colour shortcuts used in the prompts below (the exact palette is an assumption)
y,w,b=Fore.YELLOW Fore.WHITE Fore.BLUE<line_sep>os.system('cls'<if>os.name<eq>'nt'<else>'clear')<line_sep>print(f"""{y}[{w}+{y}]{w} Enter your token""")<line_sep>token=input(f"""{y}[{b}#
{y}]{w} Token: """)<line_sep>print(f"""\n{y}[{b}#
{y}]{w} Write "!clear" in one of your DMs to delete your messages""")<line_sep><global>bot<line_sep>bot=commands.Bot(command_prefix="!" self_bot=<true>)<line_sep>bot.remove_command("help")<line_sep>@bot.command()<async_keyword><def_stmt>clear ctx limit:int=<none><block_start>passed=0<line_sep>failed=0<async_keyword><for_stmt>msg ctx.message.channel.history(limit=limit)<block_start><if_stmt>msg.author.id<eq>bot.user.id<block_start><try_stmt><block_start><await>msg.delete()<line_sep>passed<augadd>1<block_end><except_stmt><block_start>failed<augadd>1<block_end><block_end><block_end>print(f"\n{y}[{w}+{y}]{w} Removed {passed} messages with {failed} fails")<line_sep>input(f"""\n{y}[{b}#
{y}]{w} Press ENTER to exit""")<block_end>bot.run(token bot=<false>)<line_sep>
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_from_stmt>..layers collective<import_from_stmt>..framework Parameter<line_sep>__parallel_ctx__clz__=<none><def_stmt>_is_data_parallel_mode <block_start><global>__parallel_ctx__clz__<line_sep><return>__parallel_ctx__clz__<is><not><none><and>int(os.getenv("PADDLE_TRAINERS_NUM" "1"))<g>1<block_end><def_stmt>_is_parallel_ctx_initialized <block_start><global>__parallel_ctx__clz__<line_sep><return>__parallel_ctx__clz__<is><not><none><block_end><def_stmt>_set_parallel_ctx nccl_parallel_context<block_start><global>__parallel_ctx__clz__<assert_stmt>__parallel_ctx__clz__<is><none> "ParallelContext can only be initialized once."<line_sep>__parallel_ctx__clz__=nccl_parallel_context<block_end><def_stmt>_init_parallel_ctx <block_start><global>__parallel_ctx__clz__<assert_stmt>__parallel_ctx__clz__<is><not><none> "ParallelContext should be initialized."<line_sep>__parallel_ctx__clz__.init()<block_end><def_stmt>_broadcast_parameters parameters<block_start><for_stmt>param parameters# In model parallel, some parameters are split into multiple devices,
# so we could not broadcast these parameters.
<block_start><if_stmt>param.is_distributed<block_start><continue><block_end><if_stmt>isinstance(param Parameter)<and>param.trainable<block_start>collective._broadcast(param 0 sync_mode=<true>)<block_end><block_end><block_end> |
'''
DS1307 RTC drive
Author: shaoziyang
Date: 2018.3
http://www.micropython.org.cn
'''<import_from_stmt>micropython const<line_sep>DS1307_I2C_ADDRESS=const(104)<line_sep>DS1307_REG_SECOND=const(0)<line_sep>DS1307_REG_MINUTE=const(1)<line_sep>DS1307_REG_HOUR=const(2)<line_sep>DS1307_REG_WEEKDAY=const(3)<line_sep>DS1307_REG_DAY=const(4)<line_sep>DS1307_REG_MONTH=const(5)<line_sep>DS1307_REG_YEAR=const(6)<line_sep>DS1307_REG_CTRL=const(7)<line_sep>DS1307_REG_RAM=const(8)<class_stmt>DS1307()<block_start><def_stmt>__init__ self i2c<block_start>self.i2c=i2c<line_sep>self.DT=[0]<times>8<line_sep>self.buf=bytearray(8)<line_sep>self.tb=bytearray(1)<line_sep>self.rb=bytearray(1)<line_sep>self.start()<block_end># set reg
<def_stmt>setReg self reg dat<block_start>self.tb[0]=dat<line_sep>self.i2c.writeto_mem(DS1307_I2C_ADDRESS reg self.tb)<block_end># get reg
<def_stmt>getReg self reg<block_start>self.i2c.readfrom_mem_into(DS1307_I2C_ADDRESS reg self.rb)<line_sep><return>self.rb[0]<block_end><def_stmt>start self<block_start>t=self.getReg(DS1307_REG_SECOND)<line_sep>self.setReg(DS1307_REG_SECOND t&0x7F)<block_end><def_stmt>stop self<block_start>t=self.getReg(DS1307_REG_SECOND)<line_sep>self.setReg(DS1307_REG_SECOND t|0x80)<block_end><def_stmt>DecToHex self dat<block_start><return>(dat<floordiv>10)<times>16+(dat%10)<block_end><def_stmt>HexToDec self dat<block_start><return>(dat<floordiv>16)<times>10+(dat%16)<block_end><def_stmt>datetime self DT=<none><block_start><if_stmt>DT<eq><none><block_start>self.i2c.readfrom_mem_into(DS1307_I2C_ADDRESS DS1307_REG_SECOND self.buf)<line_sep>self.DT[0]=self.HexToDec(self.buf[6])+2000<line_sep>self.DT[1]=self.HexToDec(self.buf[5])<line_sep>self.DT[2]=self.HexToDec(self.buf[4])<line_sep>self.DT[3]=self.HexToDec(self.buf[3])<line_sep>self.DT[4]=self.HexToDec(self.buf[2])<line_sep>self.DT[5]=self.HexToDec(self.buf[1])<line_sep>self.DT[6]=self.HexToDec(self.buf[0])<line_sep>self.DT[7]=0<line_sep><return>self.DT<block_end><else_stmt><block_start>self.buf[0]=0<line_sep>self.buf[1]=self.DecToHex(DT[6]%60)# second
self.buf[2]=self.DecToHex(DT[5]%60)# minute
self.buf[3]=self.DecToHex(DT[4]%24)# hour
self.buf[4]=self.DecToHex(DT[3]%8)# week day
self.buf[5]=self.DecToHex(DT[2]%32)# date
self.buf[6]=self.DecToHex(DT[1]%13)# month
self.buf[7]=self.DecToHex(DT[0]%100)# year
self.i2c.writeto(DS1307_I2C_ADDRESS self.buf)<block_end><block_end><def_stmt>year self year=<none><block_start><if_stmt>year<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_YEAR))+2000<block_end><else_stmt><block_start>self.setReg(DS1307_REG_YEAR self.DecToHex(year%100))<block_end><block_end><def_stmt>month self month=<none><block_start><if_stmt>month<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_MONTH))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_MONTH self.DecToHex(month%13))<block_end><block_end><def_stmt>day self day=<none><block_start><if_stmt>day<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_DAY))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_DAY self.DecToHex(day%32))<block_end><block_end><def_stmt>weekday self weekday=<none><block_start><if_stmt>weekday<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_WEEKDAY))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_WEEKDAY self.DecToHex(weekday%8))<block_end><block_end><def_stmt>hour self hour=<none><block_start><if_stmt>hour<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_HOUR))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_HOUR self.DecToHex(hour%24))<block_end><block_end><def_stmt>minute self minute=<none><block_start><if_stmt>minute<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_MINUTE))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_MINUTE self.DecToHex(minute%60))<block_end><block_end><def_stmt>second self second=<none><block_start><if_stmt>second<eq><none><block_start><return>self.HexToDec(self.getReg(DS1307_REG_SECOND))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_SECOND self.DecToHex(second%60))<block_end><block_end><def_stmt>ram self reg dat=<none><block_start><if_stmt>dat<eq><none><block_start><return>self.getReg(DS1307_REG_RAM+(reg%56))<block_end><else_stmt><block_start>self.setReg(DS1307_REG_RAM+(reg%56) dat)<block_end><block_end><block_end> |
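# Usage sketch (hardware-specific; the I2C constructor and pin numbers below are
# assumptions and vary between MicroPython ports):
#
#   from machine import I2C, Pin
#   i2c = I2C(scl=Pin(5), sda=Pin(4))
#   rtc = DS1307(i2c)
#   # DT layout per datetime(): [year, month, day, weekday, hour, minute, second, 0]
#   rtc.datetime([2018, 3, 21, 3, 14, 30, 0, 0])
#   print(rtc.datetime())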
'''
Unit tests for wildcard
'''<import_stmt>os<import_stmt>sys<line_sep>MODULE_PATH=os.path.realpath(os.path.join(__file__ os.pardir os.pardir 'action_plugins'))<line_sep>sys.path.insert(0 MODULE_PATH)<line_sep># pylint: disable=import-error,wrong-import-position,missing-docstring
<import_from_stmt>sanity_checks is_registry_match# noqa: E402
<def_stmt>test_is_registry_match <block_start>'''
Test for is_registry_match
'''<line_sep>pat_allowall="*"<line_sep>pat_docker="docker.io"<line_sep>pat_subdomain="*.example.com"<line_sep>pat_matchport="registry:80"<assert_stmt>is_registry_match("docker.io/repo/my" pat_allowall)<assert_stmt>is_registry_match("example.com:4000/repo/my" pat_allowall)<assert_stmt>is_registry_match("192.168.127.12:4000/a/b/c" pat_allowall)<assert_stmt>is_registry_match("https://registry.com" pat_allowall)<assert_stmt>is_registry_match("example.com/openshift3/ose-${component}:${version}" pat_allowall)<assert_stmt>is_registry_match("docker.io/repo/my" pat_docker)<assert_stmt>is_registry_match("docker.io:443/repo/my" pat_docker)<assert_stmt>is_registry_match("docker.io/openshift3/ose-${component}:${version}" pat_allowall)<assert_stmt><not>is_registry_match("example.com:4000/repo/my" pat_docker)<assert_stmt><not>is_registry_match("index.docker.io/a/b/c" pat_docker)<assert_stmt><not>is_registry_match("https://registry.com" pat_docker)<assert_stmt><not>is_registry_match("example.com/openshift3/ose-${component}:${version}" pat_docker)<assert_stmt>is_registry_match("apps.foo.example.com/prefix" pat_subdomain)<assert_stmt>is_registry_match("sub.example.com:80" pat_subdomain)<assert_stmt><not>is_registry_match("https://example.com:443/prefix" pat_subdomain)<assert_stmt><not>is_registry_match("docker.io/library/my" pat_subdomain)<assert_stmt><not>is_registry_match("https://hello.example.bar" pat_subdomain)<assert_stmt>is_registry_match("registry:80/prefix" pat_matchport)<assert_stmt>is_registry_match("registry/myapp" pat_matchport)<assert_stmt>is_registry_match("registry:443/myap" pat_matchport)<assert_stmt><not>is_registry_match("https://example.com:443/prefix" pat_matchport)<assert_stmt><not>is_registry_match("docker.io/library/my" pat_matchport)<assert_stmt><not>is_registry_match("https://hello.registry/myapp" pat_matchport)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_is_registry_match()<block_end> |
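# The assertions above document the expected matching rules: the scheme and port are
# ignored and the host part is matched with shell-style wildcards. The function below
# is one plausible implementation consistent with those assertions; it is NOT the
# actual sanity_checks.is_registry_match imported above.
from fnmatch import fnmatch

def is_registry_match_sketch(item, pattern):
    host = item.split('://', 1)[-1].split('/', 1)[0].rsplit(':', 1)[0]
    return fnmatch(host, pattern.rsplit(':', 1)[0])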
__all__=['datasets' 'models']<line_sep> |
<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>os<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.contrib.eager.python tfe<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense Dropout Activation<import_from_stmt>keras.layers Embedding<import_from_stmt>keras.layers Conv1D GlobalMaxPooling1D<line_sep>tf.logging.set_verbosity(tf.logging.ERROR)<line_sep>max_features=20000<line_sep>maxlen=400<line_sep>embedding_dims=300<line_sep>filters=250<line_sep>kernel_size=3<line_sep>hidden_dims=250<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep># hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs' type=int default=1)<line_sep>parser.add_argument('--batch_size' type=int default=64)<line_sep># data directories
parser.add_argument('--train' type=str default=os.environ.get('SM_CHANNEL_TRAIN'))<line_sep>parser.add_argument('--test' type=str default=os.environ.get('SM_CHANNEL_TEST'))<line_sep># model directory: we will use the default set by SageMaker, /opt/ml/model
parser.add_argument('--model_dir' type=str default=os.environ.get('SM_MODEL_DIR'))<line_sep><return>parser.parse_known_args()<block_end><def_stmt>get_train_data train_dir<block_start>x_train=np.load(os.path.join(train_dir 'x_train.npy'))<line_sep>y_train=np.load(os.path.join(train_dir 'y_train.npy'))<line_sep>print('x train' x_train.shape 'y train' y_train.shape)<line_sep><return>x_train y_train<block_end><def_stmt>get_test_data test_dir<block_start>x_test=np.load(os.path.join(test_dir 'x_test.npy'))<line_sep>y_test=np.load(os.path.join(test_dir 'y_test.npy'))<line_sep>print('x test' x_test.shape 'y test' y_test.shape)<line_sep><return>x_test y_test<block_end><def_stmt>get_model <block_start>embedding_layer=tf.keras.layers.Embedding(max_features embedding_dims input_length=maxlen)<line_sep>sequence_input=tf.keras.Input(shape=(maxlen ) dtype='int32')<line_sep>embedded_sequences=embedding_layer(sequence_input)<line_sep>x=tf.keras.layers.Dropout(0.2)(embedded_sequences)<line_sep>x=tf.keras.layers.Conv1D(filters kernel_size padding='valid' activation='relu' strides=1)(x)<line_sep>x=tf.keras.layers.MaxPooling1D()(x)<line_sep>x=tf.keras.layers.GlobalMaxPooling1D()(x)<line_sep>x=tf.keras.layers.Dense(hidden_dims activation='relu')(x)<line_sep>x=tf.keras.layers.Dropout(0.2)(x)<line_sep>preds=tf.keras.layers.Dense(1 activation='sigmoid')(x)<line_sep><return>tf.keras.Model(sequence_input preds)<block_end><if_stmt>__name__<eq>"__main__"<block_start>args,_=parse_args()<line_sep>x_train,y_train=get_train_data(args.train)<line_sep>x_test,y_test=get_test_data(args.test)<line_sep>model=get_model()<line_sep>model.compile(loss='binary_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep>model.fit(x_train y_train batch_size=args.batch_size epochs=args.epochs validation_data=(x_test y_test))<line_sep># create a TensorFlow SavedModel for deployment to a SageMaker endpoint with TensorFlow Serving
tf.contrib.saved_model.save_keras_model(model args.model_dir)<block_end> |
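# The data and model locations come from SageMaker's standard environment variables,
# so the script can be smoke-tested locally before launching a real training job.
# Hedged example invocation (script name and paths are placeholders):
#
#   SM_CHANNEL_TRAIN=./data/train SM_CHANNEL_TEST=./data/test SM_MODEL_DIR=./model \
#       python train.py --epochs 1 --batch_size 64
#
# where ./data/train holds x_train.npy / y_train.npy and ./data/test holds
# x_test.npy / y_test.npy, matching get_train_data() and get_test_data() above.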
"""Gradient interface"""<import_stmt>torch<import_from_stmt>.modules.utils _single _pair _triple<import_stmt>warnings<def_stmt>_grad_input_padding grad_output input_size stride padding kernel_size dilation=<none><block_start><if_stmt>dilation<is><none># For backward compatibility
<block_start>warnings.warn("_grad_input_padding 'dilation' argument not provided. Default of 1 is used.")<line_sep>dilation=[1]<times>len(stride)<block_end>input_size=list(input_size)<line_sep>k=grad_output.dim()-2<if_stmt>len(input_size)<eq>k+2<block_start>input_size=input_size[-k:]<block_end><if_stmt>len(input_size)<ne>k<block_start><raise>ValueError("input_size must have {} elements (got {})".format(k+2 len(input_size)))<block_end><def_stmt>dim_size d<block_start><return>((grad_output.size(d+2)-1)<times>stride[d]-2<times>padding[d]+1+dilation[d]<times>(kernel_size[d]-1))<block_end>min_sizes=[dim_size(d)<for>d range(k)]<line_sep>max_sizes=[min_sizes[d]+stride[d]-1<for>d range(k)]<for_stmt>size,min_size,max_size zip(input_size min_sizes max_sizes)<block_start><if_stmt>size<l>min_size<or>size<g>max_size<block_start><raise>ValueError(("requested an input grad size of {}, but valid sizes range "<concat>"from {} to {} (for a grad_output of {})").format(input_size min_sizes max_sizes grad_output.size()[2:]))<block_end><block_end><return>tuple(input_size[d]-min_sizes[d]<for>d range(k))<block_end><def_stmt>conv1d_input input_size weight grad_output stride=1 padding=0 dilation=1 groups=1<block_start>r"""
Computes the gradient of conv1d with respect to the input of the convolution.
This is same as the 1D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kW)
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv1d_input(input.shape, weight, grad_output)
"""<line_sep>stride=_single(stride)<line_sep>padding=_single(padding)<line_sep>dilation=_single(dilation)<line_sep>kernel_size=[weight.shape[2]]<if_stmt>input_size<is><none><block_start><raise>ValueError("grad.conv1d_input requires specifying an input_size")<block_end>grad_input_padding=_grad_input_padding(grad_output input_size stride padding kernel_size dilation)<line_sep><return>torch.conv_transpose1d(grad_output weight <none> stride padding grad_input_padding groups dilation)<block_end><def_stmt>conv1d_weight input weight_size grad_output stride=1 padding=0 dilation=1 groups=1<block_start>r"""
Computes the gradient of conv1d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv1d_weight(input, weight.shape, grad_output)
"""<line_sep>stride=_single(stride)<line_sep>padding=_single(padding)<line_sep>dilation=_single(dilation)<line_sep>in_channels=input.shape[1]<line_sep>out_channels=grad_output.shape[1]<line_sep>min_batch=input.shape[0]<line_sep>grad_output=grad_output.contiguous().repeat(1 in_channels<floordiv>groups 1)<line_sep>grad_output=grad_output.contiguous().view(grad_output.shape[0]<times>grad_output.shape[1] 1 grad_output.shape[2])<line_sep>input=input.contiguous().view(1 input.shape[0]<times>input.shape[1] input.shape[2])<line_sep>grad_weight=torch.conv1d(input grad_output <none> dilation padding stride in_channels<times>min_batch)<line_sep>grad_weight=grad_weight.contiguous().view(min_batch grad_weight.shape[1]<floordiv>min_batch grad_weight.shape[2])<line_sep><return>grad_weight.sum(dim=0).view(in_channels<floordiv>groups out_channels grad_weight.shape[2]).transpose(0 1).narrow(2 0 weight_size[2])<block_end><def_stmt>conv2d_input input_size weight grad_output stride=1 padding=0 dilation=1 groups=1<block_start>r"""
Computes the gradient of conv2d with respect to the input of the convolution.
This is same as the 2D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv2d_input(input.shape, weight, grad_output)
"""<line_sep>stride=_pair(stride)<line_sep>padding=_pair(padding)<line_sep>dilation=_pair(dilation)<line_sep>kernel_size=(weight.shape[2] weight.shape[3])<if_stmt>input_size<is><none><block_start><raise>ValueError("grad.conv2d_input requires specifying an input_size")<block_end>grad_input_padding=_grad_input_padding(grad_output input_size stride padding kernel_size dilation)<line_sep><return>torch.conv_transpose2d(grad_output weight <none> stride padding grad_input_padding groups dilation)<block_end><def_stmt>conv2d_weight input weight_size grad_output stride=1 padding=0 dilation=1 groups=1<block_start>r"""
Computes the gradient of conv2d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv2d_weight(input, weight.shape, grad_output)
"""<line_sep>stride=_pair(stride)<line_sep>padding=_pair(padding)<line_sep>dilation=_pair(dilation)<line_sep>in_channels=input.shape[1]<line_sep>out_channels=grad_output.shape[1]<line_sep>min_batch=input.shape[0]<line_sep>grad_output=grad_output.contiguous().repeat(1 in_channels<floordiv>groups 1 1)<line_sep>grad_output=grad_output.contiguous().view(grad_output.shape[0]<times>grad_output.shape[1] 1 grad_output.shape[2] grad_output.shape[3])<line_sep>input=input.contiguous().view(1 input.shape[0]<times>input.shape[1] input.shape[2] input.shape[3])<line_sep>grad_weight=torch.conv2d(input grad_output <none> dilation padding stride in_channels<times>min_batch)<line_sep>grad_weight=grad_weight.contiguous().view(min_batch grad_weight.shape[1]<floordiv>min_batch grad_weight.shape[2] grad_weight.shape[3])<line_sep><return>grad_weight.sum(dim=0).view(in_channels<floordiv>groups out_channels grad_weight.shape[2] grad_weight.shape[3]).transpose(0 1).narrow(2 0 weight_size[2]).narrow(3 0 weight_size[3])<block_end><def_stmt>conv3d_input input_size weight grad_output stride=1 padding=0 dilation=1 groups=1<block_start>r"""
Computes the gradient of conv3d with respect to the input of the convolution.
This is same as the 3D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv3d_input(input.shape, weight, grad_output)
"""<line_sep>stride=_triple(stride)<line_sep>padding=_triple(padding)<line_sep>dilation=_triple(dilation)<line_sep>kernel_size=(weight.shape[2] weight.shape[3] weight.shape[4])<if_stmt>input_size<is><none><block_start><raise>ValueError("grad.conv3d_input requires specifying an input_size")<block_end>grad_input_padding=_grad_input_padding(grad_output input_size stride padding kernel_size dilation)<line_sep><return>torch.conv_transpose3d(grad_output weight <none> stride padding grad_input_padding groups dilation)<block_end><def_stmt>conv3d_weight input weight_size grad_output stride=1 padding=0 dilation=1 groups=1<block_start>r"""
Computes the gradient of conv3d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv3d_weight(input, weight.shape, grad_output)
"""<line_sep>stride=_triple(stride)<line_sep>padding=_triple(padding)<line_sep>dilation=_triple(dilation)<line_sep>in_channels=input.shape[1]<line_sep>out_channels=grad_output.shape[1]<line_sep>min_batch=input.shape[0]<line_sep>grad_output=grad_output.repeat(1 in_channels<floordiv>groups 1 1 1)<line_sep>grad_output=grad_output.contiguous().view(grad_output.shape[0]<times>grad_output.shape[1] 1 grad_output.shape[2] grad_output.shape[3] grad_output.shape[4])<line_sep>input=input.contiguous().view(1 input.shape[0]<times>input.shape[1] input.shape[2] input.shape[3] input.shape[4])<line_sep>grad_weight=torch.conv3d(input grad_output <none> dilation padding stride in_channels<times>min_batch)<line_sep>grad_weight=grad_weight.contiguous().view(min_batch grad_weight.shape[1]<floordiv>min_batch grad_weight.shape[2] grad_weight.shape[3] grad_weight.shape[4])<line_sep><return>grad_weight.sum(dim=0).view(in_channels<floordiv>groups out_channels grad_weight.shape[2] grad_weight.shape[3] grad_weight.shape[4]).transpose(0 1).narrow(2 0 weight_size[2]).narrow(3 0 weight_size[3]).narrow(4 0 weight_size[4])<block_end> |
# PyAlgoTrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""<import_stmt>pyalgotrade.logger<import_from_stmt>pyalgotrade.optimizer base<import_from_stmt>pyalgotrade.optimizer xmlrpcserver<line_sep>logger=pyalgotrade.logger.getLogger(__name__)<class_stmt>Results(object)<block_start>"""The results of the strategy executions."""<def_stmt>__init__ self parameters result<block_start>self.__parameters=parameters<line_sep>self.__result=result<block_end><def_stmt>getParameters self<block_start>"""Returns a sequence of parameter values."""<line_sep><return>self.__parameters<block_end><def_stmt>getResult self<block_start>"""Returns the result for a given set of parameters."""<line_sep><return>self.__result<block_end><block_end><def_stmt>serve barFeed strategyParameters address port<block_start>"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found or None if no results were obtained.
"""<line_sep>paramSource=base.ParameterSource(strategyParameters)<line_sep>resultSinc=base.ResultSinc()<line_sep>s=xmlrpcserver.Server(paramSource resultSinc barFeed address port)<line_sep>logger.info("Starting server")<line_sep>s.serve()<line_sep>logger.info("Server finished")<line_sep>ret=<none><line_sep>bestResult,bestParameters=resultSinc.getBest()<if_stmt>bestResult<is><not><none><block_start>logger.info("Best final result %s with parameters %s"%(bestResult bestParameters.args))<line_sep>ret=Results(bestParameters.args bestResult)<block_end><else_stmt><block_start>logger.error("No results. All jobs failed or no jobs were processed.")<block_end><return>ret<block_end> |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Configuration of a code signer which is specific to the code signing server.
#
# NOTE: DO NOT put any sensitive information here, put it in an actual
# configuration on the signing machine.
<import_from_stmt>pathlib Path<import_from_stmt>codesign.config_common *<line_sep># URL to the timestamping authority.
TIMESTAMP_AUTHORITY_URL='http://timestamp.digicert.com'<line_sep># Full path to the certificate used for signing.
#
# The path and expected file format might vary depending on a platform.
#
# On Windows it is usually is a PKCS #12 key (.pfx), so the path will look
# like Path('C:\\Secret\\Blender.pfx').
CERTIFICATE_FILEPATH:Path<line_sep># https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
LOGGING={'version':1 'formatters':{'default':{'format':'%(asctime)-15s %(levelname)8s %(name)s %(message)s'}} 'handlers':{'console':{'class':'logging.StreamHandler' 'formatter':'default' 'stream':'ext://sys.stderr' }} 'loggers':{'codesign':{'level':'INFO'} } 'root':{'level':'WARNING' 'handlers':['console' ] }}<line_sep> |
"""Prepare the ImageNet dataset"""<import_stmt>os<import_stmt>argparse<import_stmt>tarfile<import_stmt>pickle<import_stmt>gzip<import_stmt>subprocess<import_from_stmt>tqdm tqdm<import_stmt>subprocess<import_from_stmt>encoding.utils check_sha1 download mkdir<line_sep>_TARGET_DIR=os.path.expanduser('~/.encoding/data/ILSVRC2012')<line_sep>_TRAIN_TAR='ILSVRC2012_img_train.tar'<line_sep>_TRAIN_TAR_SHA1='43eda4fe35c1705d6606a6a7a633bc965d194284'<line_sep>_VAL_TAR='ILSVRC2012_img_val.tar'<line_sep>_VAL_TAR_SHA1='5f3f73da3395154b60528b2b2a2caf2374f5f178'<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Setup the ImageNet dataset.' formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument('--download-dir' required=<true> help="The directory that contains downloaded tar files")<line_sep>parser.add_argument('--target-dir' default=_TARGET_DIR help="The directory to store extracted images")<line_sep>parser.add_argument('--checksum' action='store_true' help="If check integrity before extracting.")<line_sep>parser.add_argument('--with-rec' action='store_true' help="If build image record files.")<line_sep>parser.add_argument('--num-thread' type=int default=1 help="Number of threads to use when building image record file.")<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>check_file filename checksum sha1<block_start><if_stmt><not>os.path.exists(filename)<block_start><raise>ValueError('File not found: '+filename)<block_end><if_stmt>checksum<and><not>check_sha1(filename sha1)<block_start><raise>ValueError('Corrupted file: '+filename)<block_end><block_end><def_stmt>extract_train tar_fname target_dir with_rec=<false> num_thread=1<block_start>mkdir(target_dir)<with_stmt>tarfile.open(tar_fname)<as>tar<block_start>print("Extracting "+tar_fname+"...")<line_sep># extract each class one-by-one
pbar=tqdm(total=len(tar.getnames()))<for_stmt>class_tar tar<block_start>pbar.set_description('Extract '+class_tar.name)<line_sep>tar.extract(class_tar target_dir)<line_sep>class_fname=os.path.join(target_dir class_tar.name)<line_sep>class_dir=os.path.splitext(class_fname)[0]<line_sep>os.mkdir(class_dir)<with_stmt>tarfile.open(class_fname)<as>f<block_start>f.extractall(class_dir)<block_end>os.remove(class_fname)<line_sep>pbar.update(1)<block_end>pbar.close()<block_end><block_end><def_stmt>extract_val tar_fname target_dir with_rec=<false> num_thread=1<block_start>mkdir(target_dir)<line_sep>print('Extracting '+tar_fname)<with_stmt>tarfile.open(tar_fname)<as>tar<block_start>tar.extractall(target_dir)<block_end># build rec file before images are moved into subfolders
# move images to proper subfolders
subprocess.call(["wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash"] cwd=target_dir shell=<true>)<block_end><def_stmt>main <block_start>args=parse_args()<line_sep>target_dir=os.path.expanduser(args.target_dir)<line_sep>#if os.path.exists(target_dir):
# raise ValueError('Target dir ['+target_dir+'] exists. Remove it first')
download_dir=os.path.expanduser(args.download_dir)<line_sep>train_tar_fname=os.path.join(download_dir _TRAIN_TAR)<line_sep>check_file(train_tar_fname args.checksum _TRAIN_TAR_SHA1)<line_sep>val_tar_fname=os.path.join(download_dir _VAL_TAR)<line_sep>check_file(val_tar_fname args.checksum _VAL_TAR_SHA1)<line_sep>build_rec=args.with_rec<if_stmt>build_rec<block_start>os.makedirs(os.path.join(target_dir 'rec'))<block_end>extract_train(train_tar_fname os.path.join(target_dir 'train') build_rec args.num_thread)<line_sep>extract_val(val_tar_fname os.path.join(target_dir 'val') build_rec args.num_thread)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# tests/test_provider_vmware_vmc.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:35 UTC)
<def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.vmware.vmc<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.vmware.vmc vmc_cluster<import_from_stmt>terrascript.resource.vmware.vmc vmc_public_ip<import_from_stmt>terrascript.resource.vmware.vmc vmc_sddc<import_from_stmt>terrascript.resource.vmware.vmc vmc_site_recovery<import_from_stmt>terrascript.resource.vmware.vmc vmc_srm_node<block_end><def_stmt>test_datasource_import <block_start><import_from_stmt>terrascript.data.vmware.vmc vmc_connected_accounts<import_from_stmt>terrascript.data.vmware.vmc vmc_customer_subnets<import_from_stmt>terrascript.data.vmware.vmc vmc_org<import_from_stmt>terrascript.data.vmware.vmc vmc_sddc<block_end># TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.vmware.vmc
#
# t = terrascript.provider.vmware.vmc.vmc()
# s = str(t)
#
# assert 'https://github.com/vmware/terraform-provider-vmc' in s
# assert '1.7.0' in s
|
# Script Name : sqlite_table_check.py
# Author : <NAME>
# Created : 07 June 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks the main SQLite database to ensure all the expected tables exist
<import_stmt>os<import_stmt>sqlite3<line_sep>dropbox=os.getenv("dropbox")<line_sep>config=os.getenv("my_config")<line_sep>dbfile=("Databases\\jarvis.db")<line_sep>listfile=("sqlite_master_table.lst")<line_sep>master_db=os.path.join(dropbox dbfile)<line_sep>config_file=os.path.join(config listfile)<line_sep>tablelist=open(config_file 'r')<line_sep>conn=sqlite3.connect(master_db)<line_sep>cursor=conn.cursor()<line_sep>cursor.execute('SELECT SQLITE_VERSION()')<line_sep>data=cursor.fetchone()<if_stmt>str(data)<eq>"(u'3.6.21',)"<block_start>print("\nCurrently "+master_db+" is on SQLite version: %s"%data+" - OK -\n")<block_end><else_stmt><block_start>print("\nDB on a different version than the master version - !!!!! \n")<block_end>conn.close()<line_sep>print("\nChecking "+master_db+" against "+config_file+"\n")<for_stmt>table tablelist.readlines()<block_start>conn=sqlite3.connect(master_db)<line_sep>cursor=conn.cursor()<line_sep>cursor.execute("select count(*) from sqlite_master where name = ?" (table.strip() ))<line_sep>res=cursor.fetchone()<if_stmt>(res[0])<block_start>print('[+] Table : '+table.strip()+' exists [+]')<block_end><else_stmt><block_start>print('[-] Table : '+table.strip()+' does not exist [-]')<block_end><block_end>
<import_stmt>datetime<import_stmt>json<import_stmt>requests<def_stmt>send_message webhook_url:str content_msg="" title="" title_url="" color=00000000 timestamp=datetime.datetime.now().isoformat() footer_icon="" footer="" thumbnail_url="" author="" author_url="" author_icon_url="" text_name="" text="" <block_start>payload={"content":content_msg "embeds":[{"title":title "url":title_url "color":color "timestamp":timestamp "footer":{"icon_url":footer_icon "text":footer } "thumbnail":{"url":thumbnail_url} "author":{"name":author "url":author_url "icon_url":author_icon_url } "fields":[{"name":text_name "value":text }] }] }<line_sep>print(">> Sending To WebHook...")<line_sep>payload=json.dumps(payload)<line_sep>headers={"Content-Type":"application/json"}<line_sep>response=requests.post(webhook_url headers=headers data=payload)<line_sep><return>response<block_end><def_stmt>example_calling <block_start>webhook_url="your_webhook_url"<line_sep>response=send_message(webhook_url content_msg="Some random text" title="Discord Embed example" title_url="https://discordjs.guide/popular-topics/embeds.html#embed-preview" color=15335679 footer_icon="https://github.githubassets.com/favicons/favicon-dark.png" footer="May the Force be with you" thumbnail_url="https://avatars.githubusercontent.com/u/55619686" author="OjusWiZard" author_url="https://github.com/OjusWiZard/" author_icon_url="https://avatars.githubusercontent.com/u/55619686" text_name=":point_down: :point_down: :point_down:" text="This is a test message" )<line_sep>print("Status: " response.status_code)<block_end><if_stmt>__name__<eq>"__main__"<block_start>example_calling()<block_end> |
# pylint: disable=W0611,W0613
<import_stmt>html<import_stmt>json<import_from_stmt>typing Any Callable Dict List<import_stmt>fugue_sql<import_stmt>pandas<as>pd<import_from_stmt>fugue ExecutionEngine NativeExecutionEngine make_execution_engine register_execution_engine <import_from_stmt>fugue.dataframe YieldedDataFrame<import_from_stmt>fugue.extensions._builtins.outputters Show<import_from_stmt>IPython.core.magic Magics cell_magic magics_class needs_local_scope<import_from_stmt>IPython.display HTML display<import_from_stmt>triad ParamDict Schema<import_from_stmt>triad.utils.convert to_instance<class_stmt>NotebookSetup(object)<block_start>"""Jupyter notebook environment customization template."""<def_stmt>get_pre_conf self<arrow>Dict[str Any]<block_start>"""The default config for all registered execution engine"""<line_sep><return>{}<block_end><def_stmt>get_post_conf self<arrow>Dict[str Any]<block_start>"""The enforced config for all registered execution engine.
Users should not set these configs manually, if they set, the values
must match this dict, otherwise, exceptions will be thrown
"""<line_sep><return>{}<block_end><def_stmt>get_pretty_print self<arrow>Callable<block_start>"""Fugue dataframe pretty print handler"""<line_sep><return>_default_pretty_print<block_end><def_stmt>register_execution_engines self<block_start>"""Register execution engines with names. This will also try to register
spark and dask engines if the dependent packages are available and they
are not registered"""<line_sep>register_execution_engine("native" <lambda>conf **kwargs:NativeExecutionEngine(conf=conf) on_dup="ignore" )<try_stmt><block_start><import_stmt>pyspark# noqa: F401
<import_stmt>fugue_spark# noqa: F401
<block_end><except_stmt>ImportError<block_start><pass><block_end><try_stmt><block_start><import_stmt>dask.dataframe# noqa: F401
<import_stmt>fugue_dask# noqa: F401
<block_end><except_stmt>ImportError<block_start><pass><block_end><block_end><block_end>@magics_class<class_stmt>_FugueSQLMagics(Magics)<block_start>"""Fugue SQL Magics"""<def_stmt>__init__ self shell:Any pre_conf:Dict[str Any] post_conf:Dict[str Any] fsql_ignore_case:bool=<false> # You must call the parent constructor
<block_start>super().__init__(shell)<line_sep>self._pre_conf=pre_conf<line_sep>self._post_conf=post_conf<line_sep>self._fsql_ignore_case=fsql_ignore_case<block_end>@needs_local_scope@cell_magic("fsql")<def_stmt>fsql self line:str cell:str local_ns:Any=<none><arrow><none><block_start>dag=fugue_sql.fsql(cell local_ns fsql_ignore_case=self._fsql_ignore_case)<line_sep>dag.run(self.get_engine(line {}<if>local_ns<is><none><else>local_ns))<for_stmt>k,v dag.yields.items()<block_start><if_stmt>isinstance(v YieldedDataFrame)<block_start>local_ns[k]=v.result# type: ignore
<block_end><else_stmt><block_start>local_ns[k]=v<block_end><block_end><block_end># type: ignore
<def_stmt>get_engine self line:str lc:Dict[str Any]<arrow>ExecutionEngine<block_start>line=line.strip()<line_sep>p=line.find("{")<if_stmt>p<ge>0<block_start>engine=line[:p].strip()<line_sep>conf=json.loads(line[p:])<block_end><else_stmt><block_start>parts=line.split(" " 1)<line_sep>engine=parts[0]<line_sep>conf=ParamDict(<none><if>len(parts)<eq>1<else>lc[parts[1]])<block_end>cf=dict(self._pre_conf)<line_sep>cf.update(conf)<for_stmt>k,v self._post_conf.items()<block_start><if_stmt>k<in>cf<and>cf[k]<ne>v<block_start><raise>ValueError(f"{k} must be {v}, but you set to {cf[k]}, you may unset it")<block_end>cf[k]=v<block_end><if_stmt>"+"<in>engine<block_start><return>make_execution_engine(tuple(engine.split("+" 1)) cf)<block_end><return>make_execution_engine(engine cf)<block_end><block_end><def_stmt>_default_pretty_print schema:Schema head_rows:List[List[Any]] title:Any rows:int count:int <block_start>components:List[Any]=[]<if_stmt>title<is><not><none><block_start>components.append(HTML(f"<h3>{html.escape(title)}</h3>"))<block_end>pdf=pd.DataFrame(head_rows columns=list(schema.names))<line_sep>components.append(pdf)<if_stmt>count<ge>0<block_start>components.append(HTML(f"<strong>total count: {count}</strong>"))<block_end>components.append(HTML(f"<small>schema: {schema}</small>"))<line_sep>display(*components)<block_end><def_stmt>_setup_fugue_notebook ipython:Any setup_obj:Any fsql_ignore_case:bool=<false><arrow><none><block_start>s=NotebookSetup()<if>setup_obj<is><none><else>to_instance(setup_obj NotebookSetup)<line_sep>magics=_FugueSQLMagics(ipython dict(s.get_pre_conf()) dict(s.get_post_conf()) fsql_ignore_case=fsql_ignore_case )<line_sep>ipython.register_magics(magics)<line_sep>s.register_execution_engines()<line_sep>Show.set_hook(s.get_pretty_print())<block_end> |
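# Usage sketch: once the magics above are registered (typically via the package's
# notebook setup hook; the exact entry point name is an assumption), a Jupyter cell
# can be written as Fugue SQL, e.g.
#
#   %%fsql native
#   CREATE [[0, "hello"], [1, "world"]] SCHEMA a:int, b:str
#   SELECT * WHERE a > 0
#   PRINT
#
# The word after %%fsql names a registered execution engine; a trailing JSON object
# or the name of a local dict supplies extra engine configuration, matching
# _FugueSQLMagics.get_engine above.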
<import_from_stmt>collections OrderedDict<import_from_stmt>datetime datetime<import_from_stmt>itertools chain product<import_from_stmt>scrapy.utils.spider arg_to_iter<import_stmt>six<try_stmt><block_start><import_from_stmt>itertools izip_longest<block_end><except_stmt>ImportError<block_start><import_from_stmt>itertools zip_longest<as>izip_longest<block_end><import_from_stmt>six.moves.urllib.parse urlencode<class_stmt>IdentityGenerator()<block_start><def_stmt>__call__ self spec<block_start><return>spec<block_end><block_end><class_stmt>UrlGenerator(object)<block_start><def_stmt>__init__ self settings=<none> spider_args=<none><block_start>self._processors={'date':self._process_date 'default':self._process_default 'options':self._process_option 'range':self._process_range 'settings':self._process_setting 'spider_args':self._process_args}<line_sep>self.settings=settings<line_sep>self.spider_args=spider_args<block_end><def_stmt>_process_date self values<block_start>now=datetime.now()<line_sep><return>[now.strftime(v)<for>v values]<block_end><def_stmt>_process_default self values<block_start><return>[str(values[0])]<block_end><def_stmt>_process_option self values<block_start><return>[str(v)<for>v values]<block_end><def_stmt>_process_range self values<block_start><if_stmt>len(values)<g>3<block_start><return>[]<block_end><return>six.moves.range(*values)<block_end><def_stmt>_process_setting self values<block_start><if_stmt>self.settings<is><none><block_start><return>[]<block_end>results=[]<for_stmt>value values<block_start>results.extend(self.settings.getlist(value))<block_end><return>results<block_end><def_stmt>_process_args self values<block_start><if_stmt>self.spider_args<is><none><block_start><return>[]<block_end>results=[]<for_stmt>value values<block_start>results.extend(arg_to_iter(self.spider_args.get(value [])))<block_end><return>results<block_end><def_stmt>_build_section self descriptor params=<false><block_start><if_stmt>'type'<not><in>descriptor<or>'values'<not><in>descriptor<block_start><return>[]# Malformed descriptor
<block_end>processor=self._processors.get(descriptor['type'])<if_stmt>processor<is><none><block_start><return>[]<block_end>processed=processor(descriptor['values'])<if_stmt><not>params<block_start><return>processed<block_end><if_stmt>'name'<not><in>descriptor<block_start><return>[]<block_end><return>izip_longest([] processed fillvalue=descriptor['name'])<block_end><def_stmt>_generate_urls self template paths params_template params<block_start>path_length=len(paths)<if_stmt>params<and><not>paths<block_start>components=product(*params)<block_end><else_stmt><block_start>components=product(*chain(paths params))<block_end><for_stmt>values components<block_start>url=template.format(*values[:path_length])<line_sep>params=values[path_length:]<if_stmt>params_template<or>params<block_start>url_params=OrderedDict(params_template)<for_stmt>name,value params<block_start>url_params[name]=value<block_end>url_params=urlencode(url_params)<line_sep><yield>'{}?{}'.format(url url_params)<block_end><else_stmt><block_start><yield>url<block_end><block_end><block_end><def_stmt>__call__ self spec<block_start>template=spec['template']<line_sep>param=spec.get('params_template' {})<line_sep>paths=[self._build_section(d)<for>d spec.get('paths' [])]<line_sep>params=[self._build_section(d <true>)<for>d spec.get('params' [])]<line_sep>url_generator=self._generate_urls(template paths param params)<line_sep><return>url_generator<block_end><block_end>generator=UrlGenerator()<line_sep> |
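# Example spec for the generator above; field values are illustrative and exercise
# the 'options' and 'range' descriptor types plus a fixed params_template.
if __name__ == '__main__':
    spec = {
        'template': 'http://example.com/{}/items',
        'paths': [{'type': 'options', 'values': ['books', 'music']}],
        'params': [{'type': 'range', 'name': 'page', 'values': [1, 3]}],
        'params_template': {'sort': 'asc'},
    }
    for url in UrlGenerator()(spec):
        print(url)  # e.g. http://example.com/books/items?sort=asc&page=1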
<import_stmt>os<import_stmt>pytest<import_stmt>subprocess<import_stmt>ssl<import_stmt>time<import_stmt>trustme<import_stmt>bmemcached<import_stmt>test_simple_functions<line_sep>ca=trustme.CA()<line_sep>server_cert=ca.issue_cert(os.environ["MEMCACHED_HOST"]+u"")<line_sep>@pytest.yield_fixture(scope="module" autouse=<true>)<def_stmt>memcached_tls <block_start>key=server_cert.private_key_pem<line_sep>cert=server_cert.cert_chain_pems[0]<with_stmt>cert.tempfile()<as>c key.tempfile()<as>k<block_start>p=subprocess.Popen(["memcached" "-p5001" "-Z" "-o" "ssl_key={}".format(k) "-o" "ssl_chain_cert={}".format(c) "-o" "ssl_verify_mode=1" ] stdout=subprocess.PIPE stderr=subprocess.PIPE )<line_sep>time.sleep(0.1)<if_stmt>p.poll()<is><not><none><block_start>pytest.skip("Memcached server is not built with TLS support.")<block_end><yield>p<line_sep>p.kill()<line_sep>p.wait()<block_end><block_end><class_stmt>TLSMemcachedTests(test_simple_functions.MemcachedTests)<block_start>"""
Same tests as above, just make sure it works with TLS.
"""<def_stmt>setUp self<block_start>ctx=ssl.create_default_context()<line_sep>ca.configure_trust(ctx)<line_sep>self.server="{}:5001".format(os.environ["MEMCACHED_HOST"])<line_sep>self.client=bmemcached.Client(self.server tls_context=ctx)<line_sep>self.reset()<block_end><block_end> |
<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense<import_from_stmt>keras.losses binary_crossentropy categorical_crossentropy<import_from_stmt>keras.optimizers SGD<import_from_stmt>keras.metrics top_k_categorical_accuracy<import_from_stmt>keras backend<as>K<import_stmt>numpy<as>np<import_stmt>sys os string random<line_sep>characters=string.printable<line_sep>char_indices=dict((c i)<for>i,c enumerate(characters))<line_sep>indices_char=dict((i c)<for>i,c enumerate(characters))<line_sep>INPUT_VOCAB_SIZE=len(characters)<line_sep>LINE_SIZE=80<line_sep>BATCH_SIZE=200<line_sep>STEPS_PER_EPOCH=5000<line_sep>EPOCHS=4<def_stmt>encode_one_hot line<block_start>x=np.zeros((1 LINE_SIZE INPUT_VOCAB_SIZE))<line_sep>sp_idx=char_indices[' ']<for_stmt>i,c enumerate(line)<block_start>index=char_indices[c]<if>c<in>characters<else>sp_idx<line_sep>x[0][i][index]=1<block_end># Pad with spaces
<for_stmt>i range(len(line) LINE_SIZE)<block_start>x[0][i][sp_idx]=1<block_end><return>x.reshape([1 LINE_SIZE<times>INPUT_VOCAB_SIZE])<block_end><def_stmt>decode_one_hot y<block_start>s=[]<line_sep>x=y.reshape([1 LINE_SIZE INPUT_VOCAB_SIZE])<for_stmt>onehot x[0]<block_start>one_index=np.argmax(onehot)<line_sep>s.append(indices_char[one_index])<block_end><return>''.join(s)<block_end><def_stmt>input_generator nsamples<block_start><def_stmt>generate_line <block_start>inline=[]<line_sep>outline=[]<for_stmt>_ range(LINE_SIZE)<block_start>c=random.choice(characters)<line_sep>expected=c.lower()<if>c<in>string.ascii_letters<else>' '<line_sep>inline.append(c)<line_sep>outline.append(expected)<block_end><for_stmt>i range(LINE_SIZE)<block_start><if_stmt>outline[i]<eq>' '<block_start><continue><block_end><if_stmt>i<g>0<and>i<l>LINE_SIZE-1<block_start>outline[i]=' '<if>outline[i-1]<eq>' '<and>outline[i+1]<eq>' '<else>outline[i]<block_end><if_stmt>(i<eq>0<and>outline[i+1]<eq>' ')<or>(i<eq>LINE_SIZE-1<and>outline[i-1]<eq>' ')<block_start>outline[i]=' '<block_end><block_end><return>''.join(inline) ''.join(outline)<block_end><while_stmt><true><block_start>data_in=np.zeros((nsamples LINE_SIZE<times>INPUT_VOCAB_SIZE))<line_sep>data_out=np.zeros((nsamples LINE_SIZE<times>INPUT_VOCAB_SIZE))<for_stmt>i range(nsamples)<block_start>input_data,expected=generate_line()<line_sep>data_in[i]=encode_one_hot(input_data)[0]<line_sep>data_out[i]=encode_one_hot(expected)[0]<block_end><yield>data_in data_out<block_end><block_end><def_stmt>train model<block_start>model.compile(loss='binary_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep>input_gen=input_generator(BATCH_SIZE)<line_sep>validation_gen=input_generator(BATCH_SIZE)<line_sep>model.fit_generator(input_gen epochs=EPOCHS workers=1 steps_per_epoch=STEPS_PER_EPOCH validation_data=validation_gen validation_steps=10)<block_end><def_stmt>build_model # Normalize characters using a dense layer
<block_start>model=Sequential()<line_sep>model.add(Dense(LINE_SIZE<times>INPUT_VOCAB_SIZE input_shape=(LINE_SIZE<times>INPUT_VOCAB_SIZE ) activation='sigmoid'))<line_sep><return>model<block_end><def_stmt>build_deep_model # Normalize characters using a dense layer
<block_start>model=Sequential()<line_sep>model.add(Dense(80 input_shape=(LINE_SIZE<times>INPUT_VOCAB_SIZE ) activation='sigmoid'))<line_sep>model.add(Dense(800 activation='sigmoid'))<line_sep>model.add(Dense(LINE_SIZE<times>INPUT_VOCAB_SIZE activation='sigmoid'))<line_sep><return>model<block_end>model=build_deep_model()<line_sep>model.summary()<line_sep>train(model)<line_sep>input("Network has been trained. Press <Enter> to run program.")<with_stmt>open(sys.argv[1])<as>f<block_start><for_stmt>line f<block_start><if_stmt>line.isspace()<block_start><continue><block_end>batch=encode_one_hot(line)<line_sep>preds=model.predict(batch)<line_sep>normal=decode_one_hot(preds)<line_sep>print(normal)<block_end><block_end> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_from_stmt>mmdet.datasets.builder PIPELINES<import_from_stmt>mmdet.datasets.pipelines LoadAnnotations LoadImageFromFile<import_from_stmt>mmtrack.core results2outs<line_sep>@PIPELINES.register_module()<class_stmt>LoadMultiImagesFromFile(LoadImageFromFile)<block_start>"""Load multi images from file.
Please refer to `mmdet.datasets.pipelines.loading.py:LoadImageFromFile`
for detailed docstring.
"""<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><def_stmt>__call__ self results<block_start>"""Call function.
For each dict in `results`, call the call function of
`LoadImageFromFile` to load image.
Args:
results (list[dict]): List of dict from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains loaded image.
"""<line_sep>outs=[]<for_stmt>_results results<block_start>_results=super().__call__(_results)<line_sep>outs.append(_results)<block_end><return>outs<block_end><block_end>@PIPELINES.register_module()<class_stmt>SeqLoadAnnotations(LoadAnnotations)<block_start>"""Sequence load annotations.
Please refer to `mmdet.datasets.pipelines.loading.py:LoadAnnotations`
for detailed docstring.
Args:
with_track (bool): If True, load instance ids of bboxes.
"""<def_stmt>__init__ self with_track=<false> *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.with_track=with_track<block_end><def_stmt>_load_track self results<block_start>"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:`mmtrack.CocoVideoDataset`.
Returns:
dict: The dict that contains loaded track annotations.
"""<line_sep>results['gt_instance_ids']=results['ann_info']['instance_ids'].copy()<line_sep><return>results<block_end><def_stmt>__call__ self results<block_start>"""Call function.
For each dict in results, call the call function of `LoadAnnotations`
to load annotation.
Args:
results (list[dict]): List of dict from
:obj:`mmtrack.CocoVideoDataset`.
Returns:
list[dict]: List of dict that contains loaded annotations, such as
bounding boxes, labels, instance ids, masks and semantic
segmentation annotations.
"""<line_sep>outs=[]<for_stmt>_results results<block_start>_results=super().__call__(_results)<if_stmt>self.with_track<block_start>_results=self._load_track(_results)<block_end>outs.append(_results)<block_end><return>outs<block_end><block_end>@PIPELINES.register_module()<class_stmt>LoadDetections(object)<block_start>"""Load public detections from MOT benchmark.
Args:
results (dict): Result dict from :obj:`mmtrack.CocoVideoDataset`.
"""<def_stmt>__call__ self results<block_start>outs_det=results2outs(bbox_results=results['detections'])<line_sep>bboxes=outs_det['bboxes']<line_sep>labels=outs_det['labels']<line_sep>results['public_bboxes']=bboxes[: :4]<if_stmt>bboxes.shape[1]<g>4<block_start>results['public_scores']=bboxes[: -1]<block_end>results['public_labels']=labels<line_sep>results['bbox_fields'].append('public_bboxes')<line_sep><return>results<block_end><block_end> |
<import_from_stmt>abc ABC abstractmethod<class_stmt>Creator(ABC)<block_start><def_stmt>some_operation self<block_start>product=self.create_product()<line_sep>product.do_stuff()<block_end>@abstractmethod<def_stmt>create_product self<block_start><pass><block_end><block_end><class_stmt>Product(ABC)<block_start>@abstractmethod<def_stmt>do_stuff self<block_start><pass><block_end><block_end><class_stmt>ConcreteProductAlpha(Product)<block_start><def_stmt>do_stuff self<block_start>print("Stuff of product Alpha")<block_end><block_end><class_stmt>ConcreteCreatorAlpha(Creator)<block_start><def_stmt>create_product self<block_start><return>ConcreteProductAlpha()<block_end><block_end><class_stmt>ConcreteProductBeta(Product)<block_start><def_stmt>do_stuff self<block_start>print("Stuff of product Beta")<block_end><block_end><class_stmt>ConcreteCreatorBeta(Creator)<block_start><def_stmt>create_product self<block_start><return>ConcreteProductBeta()<block_end><block_end><def_stmt>main <block_start>creatorAlpha=ConcreteCreatorAlpha()<line_sep>creatorAlpha.some_operation()<line_sep>creatorBeta=ConcreteCreatorBeta()<line_sep>creatorBeta.some_operation()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes for a test and validator which upload results
(reference images, error images) to cloud storage."""<import_stmt>os<import_stmt>re<import_stmt>tempfile<import_from_stmt>telemetry test<import_from_stmt>telemetry.core bitmap<import_from_stmt>telemetry.page cloud_storage<import_from_stmt>telemetry.page page_test<line_sep>test_data_dir=os.path.abspath(os.path.join(os.path.dirname(__file__) '..' '..' 'data' 'gpu'))<line_sep>default_generated_data_dir=os.path.join(test_data_dir 'generated')<line_sep>error_image_cloud_storage_bucket='chromium-browser-gpu-tests'<class_stmt>ValidatorBase(page_test.PageTest)<block_start><def_stmt>__init__ self test_method_name<block_start>super(ValidatorBase self).__init__(test_method_name)<line_sep># Parameters for cloud storage reference images.
self.vendor_id=<none><line_sep>self.device_id=<none><line_sep>self.vendor_string=<none><line_sep>self.device_string=<none><line_sep>self.msaa=<false><block_end>###
### Routines working with the local disk (only used for local
### testing without a cloud storage account -- the bots do not use
### this code path).
###
<def_stmt>_UrlToImageName self url<block_start>image_name=re.sub(r'^(http|https|file)://(/*)' '' url)<line_sep>image_name=re.sub(r'\.\./' '' image_name)<line_sep>image_name=re.sub(r'(\.|/|-)' '_' image_name)<line_sep><return>image_name<block_end><def_stmt>_WriteImage self image_path png_image<block_start>output_dir=os.path.dirname(image_path)<if_stmt><not>os.path.exists(output_dir)<block_start>os.makedirs(output_dir)<block_end>png_image.WritePngFile(image_path)<block_end><def_stmt>_WriteErrorImages self img_dir img_name screenshot ref_png<block_start>full_image_name=img_name+'_'+str(self.options.build_revision)<line_sep>full_image_name=full_image_name+'.png'<line_sep># Always write the failing image.
self._WriteImage(os.path.join(img_dir 'FAIL_'+full_image_name) screenshot)<if_stmt>ref_png# Save the reference image.
# This ensures that we get the right revision number.
<block_start>self._WriteImage(os.path.join(img_dir full_image_name) ref_png)<line_sep># Save the difference image.
diff_png=screenshot.Diff(ref_png)<line_sep>self._WriteImage(os.path.join(img_dir 'DIFF_'+full_image_name) diff_png)<block_end><block_end>###
### Cloud storage code path -- the bots use this.
###
<def_stmt>_ComputeGpuInfo self tab<block_start><if_stmt>((self.vendor_id<and>self.device_id)<or>(self.vendor_string<and>self.device_string))<block_start><return><block_end>browser=tab.browser<if_stmt><not>browser.supports_system_info<block_start><raise>Exception('System info must be supported by the browser')<block_end>system_info=browser.GetSystemInfo()<if_stmt><not>system_info.gpu<block_start><raise>Exception('GPU information was absent')<block_end>device=system_info.gpu.devices[0]<if_stmt>device.vendor_id<and>device.device_id<block_start>self.vendor_id=device.vendor_id<line_sep>self.device_id=device.device_id<block_end><elif_stmt>device.vendor_string<and>device.device_string<block_start>self.vendor_string=device.vendor_string<line_sep>self.device_string=device.device_string<block_end><else_stmt><block_start><raise>Exception('GPU device information was incomplete')<block_end>self.msaa=<not>('disable_multisampling'<in>system_info.gpu.driver_bug_workarounds)<block_end><def_stmt>_FormatGpuInfo self tab<block_start>self._ComputeGpuInfo(tab)<line_sep>msaa_string='_msaa'<if>self.msaa<else>'_non_msaa'<if_stmt>self.vendor_id<block_start><return>'%s_%04x_%04x%s'%(self.options.os_type self.vendor_id self.device_id msaa_string)<block_end><else_stmt><block_start><return>'%s_%s_%s%s'%(self.options.os_type self.vendor_string self.device_string msaa_string)<block_end><block_end><def_stmt>_FormatReferenceImageName self img_name page tab<block_start><return>'%s_v%s_%s.png'%(img_name page.revision self._FormatGpuInfo(tab))<block_end><def_stmt>_UploadBitmapToCloudStorage self bucket name bitmap public=<false># This sequence of steps works on all platforms to write a temporary
# PNG to disk, following the pattern in bitmap_unittest.py. The key to
# avoiding PermissionErrors seems to be to not actually try to write to
# the temporary file object, but to re-open its name for all operations.
<block_start>temp_file=tempfile.NamedTemporaryFile().name<line_sep>bitmap.WritePngFile(temp_file)<line_sep>cloud_storage.Insert(bucket name temp_file publicly_readable=public)<block_end><def_stmt>_ConditionallyUploadToCloudStorage self img_name page tab screenshot<block_start>"""Uploads the screenshot to cloud storage as the reference image
for this test, unless it already exists. Returns True if the
upload was actually performed."""<if_stmt><not>self.options.refimg_cloud_storage_bucket<block_start><raise>Exception('--refimg-cloud-storage-bucket argument is required')<block_end>cloud_name=self._FormatReferenceImageName(img_name page tab)<if_stmt><not>cloud_storage.Exists(self.options.refimg_cloud_storage_bucket cloud_name)<block_start>self._UploadBitmapToCloudStorage(self.options.refimg_cloud_storage_bucket cloud_name screenshot)<line_sep><return><true><block_end><return><false><block_end><def_stmt>_DownloadFromCloudStorage self img_name page tab<block_start>"""Downloads the reference image for the given test from cloud
storage, returning it as a Telemetry Bitmap object."""<line_sep># TODO(kbr): there's a race condition between the deletion of the
# temporary file and gsutil's overwriting it.
<if_stmt><not>self.options.refimg_cloud_storage_bucket<block_start><raise>Exception('--refimg-cloud-storage-bucket argument is required')<block_end>temp_file=tempfile.NamedTemporaryFile().name<line_sep>cloud_storage.Get(self.options.refimg_cloud_storage_bucket self._FormatReferenceImageName(img_name page tab) temp_file)<line_sep><return>bitmap.Bitmap.FromPngFile(temp_file)<block_end><def_stmt>_UploadErrorImagesToCloudStorage self image_name screenshot ref_img<block_start>"""For a failing run, uploads the failing image, reference image (if
supplied), and diff image (if reference image was supplied) to cloud
storage. This subsumes the functionality of the
archive_gpu_pixel_test_results.py script."""<line_sep>machine_name=re.sub('\W+' '_' self.options.test_machine_name)<line_sep>upload_dir='%s_%s_telemetry'%(self.options.build_revision machine_name)<line_sep>base_bucket='%s/runs/%s'%(error_image_cloud_storage_bucket upload_dir)<line_sep>image_name_with_revision='%s_%s.png'%(image_name self.options.build_revision)<line_sep>self._UploadBitmapToCloudStorage(base_bucket+'/gen' image_name_with_revision screenshot public=<true>)<if_stmt>ref_img<block_start>self._UploadBitmapToCloudStorage(base_bucket+'/ref' image_name_with_revision ref_img public=<true>)<line_sep>diff_img=screenshot.Diff(ref_img)<line_sep>self._UploadBitmapToCloudStorage(base_bucket+'/diff' image_name_with_revision diff_img public=<true>)<block_end>print('See http://%s.commondatastorage.googleapis.com/'<concat>'view_test_results.html?%s for this run\'s test results')%(error_image_cloud_storage_bucket upload_dir)<block_end><block_end><class_stmt>TestBase(test.Test)<block_start>@staticmethod<def_stmt>_AddTestCommandLineOptions parser option_group<block_start>option_group.add_option('--build-revision' help='Chrome revision being tested.' default="unknownrev")<line_sep>option_group.add_option('--upload-refimg-to-cloud-storage' dest='upload_refimg_to_cloud_storage' action='store_true' default=<false> help='Upload resulting images to cloud storage as reference images')<line_sep>option_group.add_option('--download-refimg-from-cloud-storage' dest='download_refimg_from_cloud_storage' action='store_true' default=<false> help='Download reference images from cloud storage')<line_sep>option_group.add_option('--refimg-cloud-storage-bucket' help='Name of the cloud storage bucket to use for reference images; '<concat>'required with --upload-refimg-to-cloud-storage and '<concat>'--download-refimg-from-cloud-storage. Example: '<concat>'"chromium-gpu-archive/reference-images"')<line_sep>option_group.add_option('--os-type' help='Type of operating system on which the pixel test is being run, '<concat>'used only to distinguish different operating systems with the same '<concat>'graphics card. Any value is acceptable, but canonical values are '<concat>'"win", "mac", and "linux", and probably, eventually, "chromeos" '<concat>'and "android").' default='')<line_sep>option_group.add_option('--test-machine-name' help='Name of the test machine. Specifying this argument causes this '<concat>'script to upload failure images and diffs to cloud storage directly, '<concat>'instead of relying on the archive_gpu_pixel_test_results.py script.' default='')<line_sep>option_group.add_option('--generated-dir' help='Overrides the default on-disk location for generated test images '<concat>'(only used for local testing without a cloud storage account)' default=default_generated_data_dir)<block_end><block_end> |
"""
Functions used to support drawing. No Pyglet/OpenGL here.
"""<import_stmt>math<import_from_stmt>typing Tuple Union cast<import_from_stmt>arcade Color<import_from_stmt>arcade RGBA RGB<def_stmt>get_points_for_thick_line start_x:float start_y:float end_x:float end_y:float line_width:float<block_start>"""
Function used internally for Arcade. OpenGL draws triangles only, so a thick
line must be two triangles that make up a rectangle. This calculates those
points.
"""<line_sep>vector_x=start_x-end_x<line_sep>vector_y=start_y-end_y<line_sep>perpendicular_x=vector_y<line_sep>perpendicular_y=-vector_x<line_sep>length=math.sqrt(vector_x<times>vector_x+vector_y<times>vector_y)<if_stmt>length<eq>0<block_start>normal_x=1.0<line_sep>normal_y=1.0<block_end><else_stmt><block_start>normal_x=perpendicular_x/length<line_sep>normal_y=perpendicular_y/length<block_end>r1_x=start_x+normal_x<times>line_width/2<line_sep>r1_y=start_y+normal_y<times>line_width/2<line_sep>r2_x=start_x-normal_x<times>line_width/2<line_sep>r2_y=start_y-normal_y<times>line_width/2<line_sep>r3_x=end_x+normal_x<times>line_width/2<line_sep>r3_y=end_y+normal_y<times>line_width/2<line_sep>r4_x=end_x-normal_x<times>line_width/2<line_sep>r4_y=end_y-normal_y<times>line_width/2<line_sep>points=(r1_x r1_y) (r2_x r2_y) (r4_x r4_y) (r3_x r3_y)<line_sep><return>points<block_end><def_stmt>get_four_byte_color color:Color<arrow>RGBA<block_start>"""
Given a RGB list, it will return RGBA.
Given a RGBA list, it will return the same RGBA.
:param Color color: Three or four byte tuple
:returns: Four byte RGBA tuple
"""<if_stmt>len(color)<eq>4<block_start><return>cast(RGBA color)<block_end><elif_stmt>len(color)<eq>3<block_start><return>color[0] color[1] color[2] 255<block_end><else_stmt><block_start><raise>ValueError("This isn't a 3 or 4 byte color")<block_end><block_end><def_stmt>get_four_float_color color:Color<arrow>Tuple[float float float float]<block_start>"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGBA tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Four floats as a RGBA tuple
"""<if_stmt>len(color)<eq>4<block_start><return>color[0]/255 color[1]/255 color[2]/255 color[3]/255# type: ignore
<block_end><elif_stmt>len(color)<eq>3<block_start><return>color[0]/255 color[1]/255 color[2]/255 1.0<block_end><else_stmt><block_start><raise>ValueError("This isn't a 3 or 4 byte color")<block_end><block_end><def_stmt>get_three_float_color color:Color<arrow>Tuple[float float float]<block_start>"""
Given a 3 or 4 RGB/RGBA color where each color goes 0-255, this
returns a RGB tuple where each item is a scaled float from 0 to 1.
:param Color color: Three or four byte tuple
:return: Three floats as a RGB tuple
"""<if_stmt>len(color)<eq>4<or>len(color)<eq>3<block_start><return>color[0]/255 color[1]/255 color[2]/255# type: ignore
<block_end><else_stmt><block_start><raise>ValueError("This isn't a 3 or 4 byte color")<block_end><block_end><def_stmt>make_transparent_color color:Color transparency:float<block_start>"""
Given a RGB color, along with an alpha, returns a RGBA color tuple.
:param Color color: Three or four byte RGBA color
:param float transparency: Transparency
"""<line_sep><return>color[0] color[1] color[2] transparency<block_end><def_stmt>uint24_to_three_byte_color color:int<arrow>RGB<block_start>"""
Given an int between 0 and 16777215, return a RGB color tuple.
:param int color: 3 byte int
"""<line_sep><return>(color&(255<lshift>16))<rshift>16 (color&(255<lshift>8))<rshift>8 color&255<block_end><def_stmt>uint32_to_four_byte_color color:int<arrow>RGBA<block_start>"""
Given an int between 0 and 4294967295, return a RGBA color tuple.
:param int color: 4 byte int
"""<line_sep><return>(color&(255<lshift>24))<rshift>24 (color&(255<lshift>16))<rshift>16 (color&(255<lshift>8))<rshift>8 color&255<block_end><def_stmt>color_from_hex_string code:str<arrow>RGBA<block_start>"""
Make a color from a hex code (3, 4, 6 or 8 characters of hex, normally with a hashtag)
"""<line_sep>code=code.lstrip("#")<if_stmt>len(code)<le>4<block_start>code="".join(i+"0"<for>i code)<block_end><if_stmt>len(code)<eq>6# full opacity if no alpha specified
<block_start><return>int(code[0:2] 16) int(code[2:4] 16) int(code[4:6] 16) 255<block_end><elif_stmt>len(code)<eq>8<block_start><return>int(code[2:4] 16) int(code[4:6] 16) int(code[6:8] 16) int(code[0:2] 16)<block_end><raise>ValueError("Improperly formatted color passed to color_from_hex")<block_end><def_stmt>float_to_byte_color color:Union[Tuple[float float float float] Tuple[float float float]] <arrow>Color<block_start>"""
Converts a float color to a byte color.
This works for 3 or 4-component colors.
"""<if_stmt>len(color)<eq>3<block_start><return>int(color[0]<times>255) int(color[1]<times>255) int(color[2]<times>255)<block_end><elif_stmt>len(color)<eq>4<block_start>color=cast(Tuple[float float float float] color)<line_sep><return>int(color[0]<times>255) int(color[1]<times>255) int(color[2]<times>255) int(color[3]<times>255)<block_end><else_stmt><block_start><raise>ValueError(f"color needs to have 3 or 4 components, not {color}")<block_end><block_end> |
<import_from_stmt>collections defaultdict<import_from_stmt>django.db.models Exists OuterRef<import_from_stmt>...channel.models Channel<import_from_stmt>...order.models Order<import_from_stmt>...shipping.models ShippingZone<import_from_stmt>..checkout.dataloaders CheckoutByIdLoader CheckoutLineByIdLoader<import_from_stmt>..core.dataloaders DataLoader<import_from_stmt>..order.dataloaders OrderByIdLoader OrderLineByIdLoader<import_from_stmt>..shipping.dataloaders ShippingZoneByIdLoader<class_stmt>ChannelByIdLoader(DataLoader)<block_start>context_key="channel_by_id"<def_stmt>batch_load self keys<block_start>channels=Channel.objects.in_bulk(keys)<line_sep><return>[channels.get(channel_id)<for>channel_id keys]<block_end><block_end><class_stmt>ChannelBySlugLoader(DataLoader)<block_start>context_key="channel_by_slug"<def_stmt>batch_load self keys<block_start>channels=Channel.objects.in_bulk(keys field_name="slug")<line_sep><return>[channels.get(slug)<for>slug keys]<block_end><block_end><class_stmt>ChannelByCheckoutLineIDLoader(DataLoader)<block_start>context_key="channel_by_checkout_line"<def_stmt>batch_load self keys<block_start><def_stmt>channel_by_lines checkout_lines<block_start>checkout_ids=[line.checkout_id<for>line checkout_lines]<def_stmt>channels_by_checkout checkouts<block_start>channel_ids=[checkout.channel_id<for>checkout checkouts]<line_sep><return>ChannelByIdLoader(self.context).load_many(channel_ids)<block_end><return>(CheckoutByIdLoader(self.context).load_many(checkout_ids).then(channels_by_checkout))<block_end><return>(CheckoutLineByIdLoader(self.context).load_many(keys).then(channel_by_lines))<block_end><block_end><class_stmt>ChannelByOrderLineIdLoader(DataLoader)<block_start>context_key="channel_by_orderline"<def_stmt>batch_load self keys<block_start><def_stmt>channel_by_lines order_lines<block_start>order_ids=[line.order_id<for>line order_lines]<def_stmt>channels_by_checkout orders<block_start>channel_ids=[order.channel_id<for>order orders]<line_sep><return>ChannelByIdLoader(self.context).load_many(channel_ids)<block_end><return>(OrderByIdLoader(self.context).load_many(order_ids).then(channels_by_checkout))<block_end><return>OrderLineByIdLoader(self.context).load_many(keys).then(channel_by_lines)<block_end><block_end><class_stmt>ChannelWithHasOrdersByIdLoader(DataLoader)<block_start>context_key="channel_with_has_orders_by_id"<def_stmt>batch_load self keys<block_start>orders=Order.objects.filter(channel=OuterRef("pk"))<line_sep>channels=Channel.objects.annotate(has_orders=Exists(orders)).in_bulk(keys)<line_sep><return>[channels.get(channel_id)<for>channel_id keys]<block_end><block_end><class_stmt>ShippingZonesByChannelIdLoader(DataLoader)<block_start>context_key="shippingzone_by_channel"<def_stmt>batch_load self keys<block_start>zone_and_channel_is_pairs=ShippingZone.objects.filter(channels__id__in=keys).values_list("pk" "channels__id")<line_sep>channel_shipping_zone_map=defaultdict(list)<for_stmt>zone_id,channel_id zone_and_channel_is_pairs<block_start>channel_shipping_zone_map[channel_id].append(zone_id)<block_end><def_stmt>map_shipping_zones shipping_zones<block_start>zone_map={zone.pk:zone<for>zone shipping_zones}<line_sep><return>[[zone_map[zone_id]<for>zone_id channel_shipping_zone_map[channel_id]]<for>channel_id keys]<block_end><return>(ShippingZoneByIdLoader(self.context).load_many({pk<for>pk,_ zone_and_channel_is_pairs}).then(map_shipping_zones))<block_end><block_end> |
<def_stmt>_reset_sys_path # Clear generic sys.path[0]
<block_start><import_stmt>os<import_stmt>sys<line_sep>resources=os.environ["RESOURCEPATH"]<while_stmt>sys.path[0]<eq>resources<block_start><del_stmt>sys.path[0]<block_end><block_end>_reset_sys_path()<line_sep> |
<import_from_stmt>seedwork.infrastructure.repository InMemoryRepository<import_from_stmt>seedwork.domain.entities Entity<class_stmt>Person(Entity)<block_start>first_name:str<line_sep>last_name:str<block_end><def_stmt>test_InMemoryRepository_persist_one # arrange
<block_start>person=Person(first_name="John" last_name="Doe")<line_sep>repository=InMemoryRepository()<line_sep># act
repository.insert(person)<line_sep># assert
<assert_stmt>repository.get_by_id(person.id)<eq>person<block_end><def_stmt>test_InMemoryRepository_persist_two # arrange
<block_start>person1=Person(first_name="John" last_name="Doe")<line_sep>person2=Person(first_name="Mary" last_name="Doe")<line_sep>repository=InMemoryRepository()<line_sep># act
repository.insert(person1)<line_sep>repository.insert(person2)<line_sep># assert
<assert_stmt>repository.get_by_id(person1.id)<eq>person1<assert_stmt>repository.get_by_id(person2.id)<eq>person2<block_end> |
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
<import_stmt>os<import_stmt>logging<import_stmt>isceobj<import_from_stmt>isceobj.Alos2Proc.runSwathMosaic swathMosaic<import_from_stmt>isceobj.Alos2Proc.runSwathMosaic swathMosaicParameters<import_from_stmt>isceobj.Alos2Proc.Alos2ProcPublic create_xml<line_sep>logger=logging.getLogger('isce.alos2burstinsar.runSwathMosaic')<def_stmt>runSwathMosaic self<block_start>'''mosaic subswaths
'''<line_sep>catalog=isceobj.Catalog.createCatalog(self._insar.procDoc.name)<line_sep>self.updateParamemetersFromUser()<line_sep>referenceTrack=self._insar.loadTrack(reference=<true>)<line_sep>secondaryTrack=self._insar.loadTrack(reference=<false>)<for_stmt>i,frameNumber enumerate(self._insar.referenceFrames)<block_start>frameDir='f{}_{}'.format(i+1 frameNumber)<line_sep>os.chdir(frameDir)<line_sep>mosaicDir='mosaic'<line_sep>os.makedirs(mosaicDir exist_ok=<true>)<line_sep>os.chdir(mosaicDir)<if_stmt>self._insar.endingSwath-self._insar.startingSwath+1<eq>1<block_start><import_stmt>shutil<line_sep>swathDir='s{}'.format(referenceTrack.frames[i].swaths[0].swathNumber)<if_stmt><not>os.path.isfile(self._insar.interferogram)<block_start>os.symlink(os.path.join('../' swathDir self._insar.interferogram) self._insar.interferogram)<block_end>shutil.copy2(os.path.join('../' swathDir self._insar.interferogram+'.vrt') self._insar.interferogram+'.vrt')<line_sep>shutil.copy2(os.path.join('../' swathDir self._insar.interferogram+'.xml') self._insar.interferogram+'.xml')<if_stmt><not>os.path.isfile(self._insar.amplitude)<block_start>os.symlink(os.path.join('../' swathDir self._insar.amplitude) self._insar.amplitude)<block_end>shutil.copy2(os.path.join('../' swathDir self._insar.amplitude+'.vrt') self._insar.amplitude+'.vrt')<line_sep>shutil.copy2(os.path.join('../' swathDir self._insar.amplitude+'.xml') self._insar.amplitude+'.xml')<line_sep># os.rename(os.path.join('../', swathDir, self._insar.interferogram), self._insar.interferogram)
# os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
# os.rename(os.path.join('../', swathDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
# os.rename(os.path.join('../', swathDir, self._insar.amplitude), self._insar.amplitude)
# os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
# os.rename(os.path.join('../', swathDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
#update frame parameters
#########################################################
frame=referenceTrack.frames[i]<line_sep>infImg=isceobj.createImage()<line_sep>infImg.load(self._insar.interferogram+'.xml')<line_sep>#mosaic size
frame.numberOfSamples=infImg.width<line_sep>frame.numberOfLines=infImg.length<line_sep>#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
frame.startingRange=frame.swaths[0].startingRange<line_sep>frame.rangeSamplingRate=frame.swaths[0].rangeSamplingRate<line_sep>frame.rangePixelSize=frame.swaths[0].rangePixelSize<line_sep>#azimuth parameters
frame.sensingStart=frame.swaths[0].sensingStart<line_sep>frame.prf=frame.swaths[0].prf<line_sep>frame.azimuthPixelSize=frame.swaths[0].azimuthPixelSize<line_sep>frame.azimuthLineInterval=frame.swaths[0].azimuthLineInterval<line_sep>#update frame parameters, secondary
#########################################################
frame=secondaryTrack.frames[i]<line_sep>#mosaic size
frame.numberOfSamples=int(frame.swaths[0].numberOfSamples/self._insar.numberRangeLooks1)<line_sep>frame.numberOfLines=int(frame.swaths[0].numberOfLines/self._insar.numberAzimuthLooks1)<line_sep>#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
frame.startingRange=frame.swaths[0].startingRange<line_sep>frame.rangeSamplingRate=frame.swaths[0].rangeSamplingRate<line_sep>frame.rangePixelSize=frame.swaths[0].rangePixelSize<line_sep>#azimuth parameters
frame.sensingStart=frame.swaths[0].sensingStart<line_sep>frame.prf=frame.swaths[0].prf<line_sep>frame.azimuthPixelSize=frame.swaths[0].azimuthPixelSize<line_sep>frame.azimuthLineInterval=frame.swaths[0].azimuthLineInterval<line_sep>os.chdir('../')<line_sep>#save parameter file
self._insar.saveProduct(referenceTrack.frames[i] self._insar.referenceFrameParameter)<line_sep>self._insar.saveProduct(secondaryTrack.frames[i] self._insar.secondaryFrameParameter)<line_sep>os.chdir('../')<line_sep><continue><block_end>#choose offsets
numberOfFrames=len(referenceTrack.frames)<line_sep>numberOfSwaths=len(referenceTrack.frames[i].swaths)<if_stmt>self.swathOffsetMatching#no need to do this as the API supports 2-d lists
#rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
<block_start>rangeOffsets=self._insar.swathRangeOffsetMatchingReference<line_sep>azimuthOffsets=self._insar.swathAzimuthOffsetMatchingReference<block_end><else_stmt>#rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
<block_start>rangeOffsets=self._insar.swathRangeOffsetGeometricalReference<line_sep>azimuthOffsets=self._insar.swathAzimuthOffsetGeometricalReference<block_end>rangeOffsets=rangeOffsets[i]<line_sep>azimuthOffsets=azimuthOffsets[i]<line_sep>#list of input files
inputInterferograms=[]<line_sep>inputAmplitudes=[]<for_stmt>j,swathNumber enumerate(range(self._insar.startingSwath self._insar.endingSwath+1))<block_start>swathDir='s{}'.format(swathNumber)<line_sep>inputInterferograms.append(os.path.join('../' swathDir self._insar.interferogram))<line_sep>inputAmplitudes.append(os.path.join('../' swathDir self._insar.amplitude))<block_end>#note that frame parameters are updated after mosaicking
#mosaic amplitudes
swathMosaic(referenceTrack.frames[i] inputAmplitudes self._insar.amplitude rangeOffsets azimuthOffsets self._insar.numberRangeLooks1 self._insar.numberAzimuthLooks1 resamplingMethod=0)<line_sep>#mosaic interferograms
swathMosaic(referenceTrack.frames[i] inputInterferograms self._insar.interferogram rangeOffsets azimuthOffsets self._insar.numberRangeLooks1 self._insar.numberAzimuthLooks1 updateFrame=<true> resamplingMethod=1)<line_sep>create_xml(self._insar.amplitude referenceTrack.frames[i].numberOfSamples referenceTrack.frames[i].numberOfLines 'amp')<line_sep>create_xml(self._insar.interferogram referenceTrack.frames[i].numberOfSamples referenceTrack.frames[i].numberOfLines 'int')<line_sep>#update secondary frame parameters here
#no matching for secondary, always use geometry
rangeOffsets=self._insar.swathRangeOffsetGeometricalSecondary<line_sep>azimuthOffsets=self._insar.swathAzimuthOffsetGeometricalSecondary<line_sep>rangeOffsets=rangeOffsets[i]<line_sep>azimuthOffsets=azimuthOffsets[i]<line_sep>swathMosaicParameters(secondaryTrack.frames[i] rangeOffsets azimuthOffsets self._insar.numberRangeLooks1 self._insar.numberAzimuthLooks1)<line_sep>os.chdir('../')<line_sep>#save parameter file
self._insar.saveProduct(referenceTrack.frames[i] self._insar.referenceFrameParameter)<line_sep>self._insar.saveProduct(secondaryTrack.frames[i] self._insar.secondaryFrameParameter)<line_sep>os.chdir('../')<block_end>#mosaic spectral diversity interferograms
<for_stmt>i,frameNumber enumerate(self._insar.referenceFrames)<block_start>frameDir='f{}_{}'.format(i+1 frameNumber)<line_sep>os.chdir(frameDir)<line_sep>mosaicDir='mosaic'<line_sep>os.makedirs(mosaicDir exist_ok=<true>)<line_sep>os.chdir(mosaicDir)<if_stmt>self._insar.endingSwath-self._insar.startingSwath+1<eq>1<block_start><import_stmt>shutil<line_sep>swathDir='s{}'.format(referenceTrack.frames[i].swaths[0].swathNumber)<for_stmt>sdFile self._insar.interferogramSd<block_start><if_stmt><not>os.path.isfile(sdFile)<block_start>os.symlink(os.path.join('../' swathDir 'spectral_diversity' sdFile) sdFile)<block_end>shutil.copy2(os.path.join('../' swathDir 'spectral_diversity' sdFile+'.vrt') sdFile+'.vrt')<line_sep>shutil.copy2(os.path.join('../' swathDir 'spectral_diversity' sdFile+'.xml') sdFile+'.xml')<block_end>os.chdir('../')<line_sep>os.chdir('../')<line_sep><continue><block_end>#choose offsets
numberOfFrames=len(referenceTrack.frames)<line_sep>numberOfSwaths=len(referenceTrack.frames[i].swaths)<if_stmt>self.swathOffsetMatching#no need to do this as the API supports 2-d lists
#rangeOffsets = (np.array(self._insar.swathRangeOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetMatchingReference)).reshape(numberOfFrames, numberOfSwaths)
<block_start>rangeOffsets=self._insar.swathRangeOffsetMatchingReference<line_sep>azimuthOffsets=self._insar.swathAzimuthOffsetMatchingReference<block_end><else_stmt>#rangeOffsets = (np.array(self._insar.swathRangeOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
#azimuthOffsets = (np.array(self._insar.swathAzimuthOffsetGeometricalReference)).reshape(numberOfFrames, numberOfSwaths)
<block_start>rangeOffsets=self._insar.swathRangeOffsetGeometricalReference<line_sep>azimuthOffsets=self._insar.swathAzimuthOffsetGeometricalReference<block_end>rangeOffsets=rangeOffsets[i]<line_sep>azimuthOffsets=azimuthOffsets[i]<line_sep>#list of input files
inputSd=[[] [] []]<for_stmt>j,swathNumber enumerate(range(self._insar.startingSwath self._insar.endingSwath+1))<block_start>swathDir='s{}'.format(swathNumber)<for_stmt>k,sdFile enumerate(self._insar.interferogramSd)<block_start>inputSd[k].append(os.path.join('../' swathDir 'spectral_diversity' sdFile))<block_end><block_end>#mosaic spectral diversity interferograms
<for_stmt>inputSdList,outputSdFile zip(inputSd self._insar.interferogramSd)<block_start>swathMosaic(referenceTrack.frames[i] inputSdList outputSdFile rangeOffsets azimuthOffsets self._insar.numberRangeLooks1 self._insar.numberAzimuthLooks1 updateFrame=<false> phaseCompensation=<true> pcRangeLooks=5 pcAzimuthLooks=5 filt=<true> resamplingMethod=1)<block_end><for_stmt>sdFile self._insar.interferogramSd<block_start>create_xml(sdFile referenceTrack.frames[i].numberOfSamples referenceTrack.frames[i].numberOfLines 'int')<block_end>os.chdir('../')<line_sep>os.chdir('../')<block_end>catalog.printToLog(logger "runSwathMosaic")<line_sep>self._insar.procDoc.addAllFromCatalog(catalog)<block_end> |
<import_stmt>pytest<import_from_stmt>manubot.cite.pubmed get_pmcid_and_pmid_for_doi get_pmid_for_doi get_pubmed_ids_for_doi <line_sep>@pytest.mark.parametrize(("doi" "pmid") [("10.1098/rsif.2017.0387" "29618526") # in PubMed and PMC
("10.1161/CIRCGENETICS.115.001181" "27094199") # in PubMed but not PMC
("10.7717/peerj-cs.134" <none>) # DOI in journal not indexed by PubMed
("10.1161/CIRC" <none>) # invalid DOI
] )<def_stmt>test_get_pmid_for_doi doi pmid<block_start>output=get_pmid_for_doi(doi)<assert_stmt>pmid<eq>output<block_end>@pytest.mark.parametrize(("doi" "id_dict") [("10.1098/rsif.2017.0387" {"PMCID":"PMC5938574" "PMID":"29618526"}) ("10.7554/ELIFE.32822" {"PMCID":"PMC5832410" "PMID":"29424689"}) ("10.1161/CIRCGENETICS.115.001181" {}) # only in PubMed, not in PMC
("10.7717/peerj.000" {}) # Non-existent DOI
("10.peerj.000" {}) # malformed DOI
] )<def_stmt>test_get_pmcid_and_pmid_for_doi doi id_dict<block_start>output=get_pmcid_and_pmid_for_doi(doi)<assert_stmt>id_dict<eq>output<block_end>@pytest.mark.parametrize(("doi" "id_dict") [("10.1098/rsif.2017.0387" {"PMCID":"PMC5938574" "PMID":"29618526"}) ("10.7554/ELIFE.32822" {"PMCID":"PMC5832410" "PMID":"29424689"}) ("10.1161/CIRCGENETICS.115.001181" {"PMID":"27094199"} ) # only in PubMed, not in PMC
("10.7717/peerj.000" {}) # Non-existent DOI
] )<def_stmt>test_get_pubmed_ids_for_doi doi id_dict<block_start>output=get_pubmed_ids_for_doi(doi)<assert_stmt>id_dict<eq>output<block_end> |
<class_stmt>Solution<block_start>"""
Time Complexity: O(N)
Space Complexity: O(1)
"""<def_stmt>balanced_string_split self s:str<arrow>int# initialize variables
<block_start>L_count,R_count=0 0<line_sep>balanced_substring_count=0<line_sep># parse the string
<for_stmt>char s# update the number of Ls and the number of Rs so far
<block_start><if_stmt>char<eq>'L'<block_start>L_count<augadd>1<block_end><elif_stmt>char<eq>'R'<block_start>R_count<augadd>1<block_end># if the string is balanced, increment the balanced substrings count and reset the counters
<if_stmt>L_count<eq>R_count<block_start>balanced_substring_count<augadd>1<line_sep>L_count,R_count=0 0<block_end><block_end><return>balanced_substring_count<block_end><block_end> |
<import_stmt>datetime<import_from_stmt>.. db<line_sep>tags=db.Table('post_tags' db.Column('post_id' db.Integer db.ForeignKey('post.id')) db.Column('tag_id' db.Integer db.ForeignKey('tag.id')))<class_stmt>Post(db.Model)<block_start>id=db.Column(db.Integer() primary_key=<true>)<line_sep>title=db.Column(db.String(255) nullable=<false>)<line_sep>text=db.Column(db.Text() nullable=<false>)<line_sep>publish_date=db.Column(db.DateTime() default=datetime.datetime.now)<line_sep>user_id=db.Column(db.Integer() db.ForeignKey('user.id'))<line_sep>comments=db.relationship('Comment' backref='post' lazy='dynamic')<line_sep>tags=db.relationship('Tag' secondary=tags backref=db.backref('posts' lazy='dynamic'))<def_stmt>__init__ self title=""<block_start>self.title=title<block_end><def_stmt>__repr__ self<block_start><return>"<Post '{}'>".format(self.title)<block_end><block_end><class_stmt>Comment(db.Model)<block_start>id=db.Column(db.Integer() primary_key=<true>)<line_sep>name=db.Column(db.String(255) nullable=<false>)<line_sep>text=db.Column(db.Text() nullable=<false>)<line_sep>date=db.Column(db.DateTime() default=datetime.datetime.now)<line_sep>post_id=db.Column(db.Integer() db.ForeignKey('post.id'))<def_stmt>__repr__ self<block_start><return>"<Comment '{}'>".format(self.text[:15])<block_end><block_end><class_stmt>Tag(db.Model)<block_start>id=db.Column(db.Integer() primary_key=<true>)<line_sep>title=db.Column(db.String(255) nullable=<false> unique=<true>)<def_stmt>__init__ self title=""<block_start>self.title=title<block_end><def_stmt>__repr__ self<block_start><return>"<Tag '{}'>".format(self.title)<block_end><block_end><class_stmt>Reminder(db.Model)<block_start>id=db.Column(db.Integer() primary_key=<true>)<line_sep>date=db.Column(db.DateTime())<line_sep>email=db.Column(db.String())<line_sep>text=db.Column(db.Text())<def_stmt>__repr__ self<block_start><return>"<Reminder '{}'>".format(self.text[:20])<block_end><block_end> |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>collections defaultdict<import_from_stmt>stellargraph.data.unsupervised_sampler UnsupervisedSampler<import_from_stmt>stellargraph.data.explorer UniformRandomWalk<import_from_stmt>..test_utils.graphs line_graph<def_stmt>test_init_parameters line_graph# if no graph is provided
<block_start><with_stmt>pytest.raises(ValueError)<block_start>UnsupervisedSampler(G=<none>)<block_end># walk must have length strictly greater than 1
<with_stmt>pytest.raises(ValueError)<block_start>UnsupervisedSampler(G=line_graph length=1)<block_end># at least 1 walk from each root node
<with_stmt>pytest.raises(ValueError)<block_start>UnsupervisedSampler(G=line_graph number_of_walks=0)<block_end># nodes parameter should be an iterable of node IDs
<with_stmt>pytest.raises(ValueError)<block_start>UnsupervisedSampler(G=line_graph nodes=1)<block_end># if no root nodes are provided for sampling, default to using all nodes as root nodes
sampler=UnsupervisedSampler(G=line_graph nodes=<none>)<assert_stmt>sampler.nodes<eq>list(line_graph.nodes())<block_end><def_stmt>test_run_batch_sizes line_graph<block_start>batch_size=4<line_sep>sampler=UnsupervisedSampler(G=line_graph length=2 number_of_walks=2)<line_sep>batches=sampler.run(batch_size)<line_sep># check batch sizes
<assert_stmt>len(batches)<eq>np.ceil(len(line_graph.nodes())<times>4/batch_size)<for_stmt>ids,labels batches[:-1]<block_start><assert_stmt>len(ids)<eq>len(labels)<eq>batch_size<block_end># last batch can be smaller
ids,labels=batches[-1]<assert_stmt>len(ids)<eq>len(labels)<assert_stmt>len(ids)<le>batch_size<block_end><def_stmt>test_run_context_pairs line_graph<block_start>batch_size=4<line_sep>sampler=UnsupervisedSampler(G=line_graph length=2 number_of_walks=2)<line_sep>batches=sampler.run(batch_size)<line_sep>grouped_by_target=defaultdict(list)<for_stmt>ids,labels batches<block_start><for_stmt>(target context),label zip(ids labels)<block_start>grouped_by_target[target].append((context label))<block_end><block_end><assert_stmt>len(grouped_by_target)<eq>len(line_graph.nodes())<for_stmt>target,sampled grouped_by_target.items()# exactly 2 positive and 2 negative context pairs for each target node
<block_start><assert_stmt>len(sampled)<eq>4<line_sep># since each walk has length = 2, there must be an edge between each positive context pair
<for_stmt>context,label sampled<block_start><if_stmt>label<eq>1<block_start><assert_stmt>context<in>set(line_graph.neighbors(target))<block_end><block_end><block_end><block_end><def_stmt>test_walker_uniform_random line_graph<block_start>length=3<line_sep>number_of_walks=2<line_sep>batch_size=4<line_sep>walker=UniformRandomWalk(line_graph n=number_of_walks length=length)<line_sep>sampler=UnsupervisedSampler(line_graph walker=walker)<line_sep>batches=sampler.run(batch_size)<line_sep># batches should match the parameters used to create the walker object, instead of the defaults
# for UnsupervisedSampler
expected_num_batches=np.ceil(line_graph.number_of_nodes()<times>number_of_walks<times>(length-1)<times>2/batch_size)<assert_stmt>len(batches)<eq>expected_num_batches<block_end><class_stmt>CustomWalker<block_start><def_stmt>run self nodes<block_start><return>[[node node]<for>node nodes]<block_end><block_end><def_stmt>test_walker_custom line_graph<block_start>walker=CustomWalker()<line_sep>sampler=UnsupervisedSampler(line_graph walker=walker)<line_sep>batches=sampler.run(2)<assert_stmt>len(batches)<eq>line_graph.number_of_nodes()<line_sep># all positive examples should be self loops, since we defined our custom walker this way
<for_stmt>context_pairs,labels batches<block_start><for_stmt>node,neighbour context_pairs[labels<eq>1]<block_start><assert_stmt>node<eq>neighbour<block_end><block_end><block_end><def_stmt>test_ignored_param_warning line_graph<block_start>walker=UniformRandomWalk(line_graph n=2 length=3)<with_stmt>pytest.raises(ValueError match="cannot specify both 'walker' and 'length'")<block_start>UnsupervisedSampler(line_graph walker=walker length=5)<block_end><with_stmt>pytest.raises(ValueError match="cannot specify both 'walker' and 'number_of_walks'")<block_start>UnsupervisedSampler(line_graph walker=walker number_of_walks=5)<block_end><with_stmt>pytest.raises(ValueError match="cannot specify both 'walker' and 'seed'")<block_start>UnsupervisedSampler(line_graph walker=walker seed=1)<block_end><block_end> |
#What is the error and how do we fix it?
#A: A TypeError means you are using the wrong type in an operation. Change print(a+b) to return a+b
<def_stmt>foo a b<block_start>print(a+b)<block_end>x=foo(2 3)<times>10<line_sep> |
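#A corrected sketch following the answer above (foo_fixed is an illustrative name): returning the sum instead of printing it gives the caller a value to multiply.
<def_stmt>foo_fixed a b<block_start><return>a+b<block_end>y=foo_fixed(2 3)<times>10<line_sep>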
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>sys<line_sep>sys.path.append('../')<import_from_stmt>auto_scan_test AutoScanTest IgnoreReasons<import_from_stmt>program_config TensorConfig ProgramConfig OpConfig CxxConfig TargetType PrecisionType DataLayoutType Place<import_stmt>unittest<import_stmt>hypothesis<import_from_stmt>hypothesis given settings seed example assume<import_stmt>hypothesis.strategies<as>st<import_stmt>argparse<import_stmt>numpy<as>np<import_from_stmt>functools partial<class_stmt>TestLinspaceOp(AutoScanTest)<block_start><def_stmt>__init__ self *args **kwargs<block_start>AutoScanTest.__init__(self *args **kwargs)<line_sep>self.enable_testing_on_place(TargetType.Host PrecisionType.FP32 DataLayoutType.NCHW thread=[1 2])<block_end><def_stmt>is_program_valid self program_config:ProgramConfig predictor_config:CxxConfig<arrow>bool<block_start><return><true><block_end><def_stmt>sample_program_configs self draw<block_start>start_id=draw(st.integers(min_value=0 max_value=5))<line_sep>stop_id=draw(st.integers(min_value=50 max_value=60))<line_sep>num_data=draw(st.integers(min_value=1 max_value=10))<line_sep>op_type_str=draw(st.sampled_from([5]))<line_sep>#2:int 5:float, lite only support float
<def_stmt>generate_start1 *args **kwargs<block_start><return>np.array([float(start_id)]).astype(np.float32)<block_end><def_stmt>generate_start2 *args **kwargs<block_start><return>np.array([int(start_id)]).astype(np.int32)<block_end><def_stmt>generate_stop1 *args **kwargs<block_start><return>np.array([float(stop_id)]).astype(np.float32)<block_end><def_stmt>generate_stop2 *args **kwargs<block_start><return>np.array([int(stop_id)]).astype(np.int32)<block_end><def_stmt>generate_num *args **kwargs<block_start><return>np.array([int(num_data)]).astype(np.int32)<block_end>build_ops=OpConfig(type="linspace" inputs={"Start":["start_data"] "Stop":["stop_data"] "Num":["num_data"] } outputs={"Out":["output_data"] } attrs={"dtype":int(op_type_str)})<if_stmt>op_type_str<eq>2<block_start>program_config=ProgramConfig(ops=[build_ops] weights={} inputs={"start_data":TensorConfig(data_gen=partial(generate_start2)) "stop_data":TensorConfig(data_gen=partial(generate_stop2)) "num_data":TensorConfig(data_gen=partial(generate_num)) } outputs=["output_data"])<block_end><elif_stmt>op_type_str<eq>5<block_start>program_config=ProgramConfig(ops=[build_ops] weights={} inputs={"start_data":TensorConfig(data_gen=partial(generate_start1)) "stop_data":TensorConfig(data_gen=partial(generate_stop1)) "num_data":TensorConfig(data_gen=partial(generate_num)) } outputs=["output_data"])<block_end><return>program_config<block_end><def_stmt>sample_predictor_configs self<block_start><return>self.get_predictor_configs() ["linspace"] (1e-5 1e-5)<block_end><def_stmt>add_ignore_pass_case self<block_start><pass><block_end><def_stmt>test self *args **kwargs<block_start>self.run_and_statis(quant=<false> max_examples=25)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main(argv=[''])<block_end> |
"""Space switching without constraints or extra DAG nodes.
Contains functions to create a space switching network as well as seamlessly switching
between spaces.
Example Usage
=============
::
import cmt.rig.spaceswitch as spaceswitch
# Create the space switch
spaceswitch.create_space_switch(
pole_vector_control,
[(ik_control, "foot"), (root_control, "root"), (world_control, "world")],
switch_attribute="space",
use_rotate=False,
)
# Seamless switch
spaceswitch.switch_space(pole_vector_control, "space", 1, create_keys=False)
"""<import_stmt>maya.cmds<as>cmds<import_stmt>maya.api.OpenMaya<as>OpenMaya<import_from_stmt>cmt.dge dge<import_stmt>cmt.rig.common<as>common<import_stmt>cmt.shortcuts<as>shortcuts<def_stmt>create_space_switch node drivers switch_attribute=<none> use_translate=<true> use_rotate=<true><block_start>"""Creates a space switch network.
The network uses the offsetParentMatrix attribute and does not create any
constraints or new dag nodes.
:param node: Transform to drive
:param drivers: List of tuples: [(driver1, "spaceName1"), (driver2, "spaceName2")]
:param switch_attribute: Name of the switch attribute to create on the target node.
"""<if_stmt>switch_attribute<is><none><block_start>switch_attribute="space"<block_end><if_stmt>cmds.objExists("{}.{}".format(node switch_attribute))<block_start>cmds.deleteAttr(node at=switch_attribute)<block_end>names=[d[1]<for>d drivers]<line_sep>cmds.addAttr(node ln=switch_attribute at="enum" en=":".join(names) keyable=<true>)<line_sep># Create attribute to toggle translation in the matrices
enable_translate_attr=_create_bool_attribute(node "{}UseTranslate".format(switch_attribute) use_translate)<line_sep># Create attribute to toggle rotation in the matrices
enable_rotate_attr=_create_bool_attribute(node "{}UseRotate".format(switch_attribute) use_rotate)<line_sep>blend=cmds.createNode("blendMatrix" name="{}_spaceswitch".format(node))<line_sep># Get the current offset parent matrix. This is used as the starting blend point
m=OpenMaya.MMatrix(cmds.getAttr("{}.offsetParentMatrix".format(node)))<line_sep>cmds.setAttr("{}.inputMatrix".format(blend) list(m) type="matrix")<line_sep>parent=cmds.listRelatives(node parent=<true> path=<true>)<line_sep>to_parent_local="{}.worldInverseMatrix[0]".format(parent[0])<if>parent<else><none><for_stmt>i,driver enumerate(drivers)<block_start>driver=driver[0]<line_sep>_connect_driver_matrix_network(blend node driver i to_parent_local)<line_sep>target_attr="{}.target[{}]".format(blend i)<line_sep># Hook up the weight toggle when switching spaces
dge("x = switch == {} ? 1 : 0".format(i) x="{}.weight".format(target_attr) switch="{}.{}".format(node switch_attribute) )<line_sep># Connect the translation, rotation toggles
cmds.connectAttr(enable_translate_attr "{}.useTranslate".format(target_attr))<line_sep>cmds.connectAttr(enable_rotate_attr "{}.useRotate".format(target_attr i))<block_end>cmds.connectAttr("{}.outputMatrix".format(blend) "{}.offsetParentMatrix".format(node))<block_end><def_stmt>_create_bool_attribute node attribute default_value<block_start>cmds.addAttr(node ln=attribute at="bool" defaultValue=default_value keyable=<true>)<line_sep><return>"{}.{}".format(node attribute)<block_end><def_stmt>_connect_driver_matrix_network blend node driver index to_parent_local# The multMatrix node will calculate the transformation to blend to when driven
# by this driver transform
<block_start>mult=cmds.createNode("multMatrix" name="spaceswitch_{}_to_{}".format(node driver))<line_sep>offset=(shortcuts.get_dag_path2(node).exclusiveMatrix()<times>OpenMaya.MMatrix(cmds.getAttr("{}.worldInverseMatrix[0]".format(driver))))<line_sep>cmds.setAttr("{}.matrixIn[0]".format(mult) list(offset) type="matrix")<line_sep>cmds.connectAttr("{}.worldMatrix[0]".format(driver) "{}.matrixIn[1]".format(mult))<if_stmt>to_parent_local<block_start>cmds.connectAttr(to_parent_local "{}.matrixIn[2]".format(mult))<block_end>cmds.connectAttr("{}.matrixSum".format(mult) "{}.target[{}].targetMatrix".format(blend index))<block_end><def_stmt>switch_space node attribute space create_keys=<false><block_start>"""Seamlessly switch between spaces
:param node: Node to switch
:param attribute: Space switching attribute on node
:param space: Space index in the space attribute
:param create_keys: True to create switching keys
"""<line_sep>m=cmds.xform(node q=<true> ws=<true> m=<true>)<line_sep>cmds.setAttr("{}.{}".format(node attribute) space)<line_sep>cmds.xform(node ws=<true> m=m)<block_end> |
<import_from_stmt>amaranth.hdl.mem *<import_from_stmt>amaranth.hdl.mem __all__<import_stmt>warnings<line_sep>warnings.warn("instead of nmigen.hdl.mem, use amaranth.hdl.mem" DeprecationWarning stacklevel=2)<line_sep> |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code was generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
<import_stmt>argparse<import_stmt>textwrap<import_from_stmt>starthinker.util.configuration Configuration<import_from_stmt>starthinker.task.smartsheet.run smartsheet<def_stmt>recipe_smartsheet_report_to_bigquery config auth_read auth_write token report dataset table schema<block_start>"""Move report data into a BigQuery table.
Args:
auth_read (authentication) - Credentials used for reading data.
auth_write (authentication) - Credentials used for writing data.
token (string) - Retrieve from SmartSheet account settings.
report (string) - Retrieve from report properties.
dataset (string) - Existing BigQuery dataset.
table (string) - Table to create from this report.
schema (json) - Schema provided in JSON list format or leave empty to auto detect.
"""<line_sep>smartsheet(config {'auth':auth_read 'token':token 'report':report 'out':{'bigquery':{'auth':auth_write 'dataset':dataset 'table':table 'schema':schema}}})<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter description=textwrap.dedent("""
Move report data into a BigQuery table.
1. Specify <a href='https://smartsheet-platform.github.io/api-docs/' target='_blank'>SmartSheet Report</a> token.
2. Locate the ID of a report by viewing its properties.
3. Provide a BigQuery dataset ( must exist ) and table to write the data into.
4. StarThinker will automatically map the correct schema.
"""))<line_sep>parser.add_argument("-project" help="Cloud ID of Google Cloud Project." default=<none>)<line_sep>parser.add_argument("-key" help="API Key of Google Cloud Project." default=<none>)<line_sep>parser.add_argument("-client" help="Path to CLIENT credentials json file." default=<none>)<line_sep>parser.add_argument("-user" help="Path to USER credentials json file." default=<none>)<line_sep>parser.add_argument("-service" help="Path to SERVICE credentials json file." default=<none>)<line_sep>parser.add_argument("-verbose" help="Print all the steps as they happen." action="store_true")<line_sep>parser.add_argument("-auth_read" help="Credentials used for reading data." default='user')<line_sep>parser.add_argument("-auth_write" help="Credentials used for writing data." default='service')<line_sep>parser.add_argument("-token" help="Retrieve from SmartSheet account settings." default='')<line_sep>parser.add_argument("-report" help="Retrieve from report properties." default=<none>)<line_sep>parser.add_argument("-dataset" help="Existing BigQuery dataset." default='')<line_sep>parser.add_argument("-table" help="Table to create from this report." default='')<line_sep>parser.add_argument("-schema" help="Schema provided in JSON list format or leave empty to auto detect." default=<none>)<line_sep>args=parser.parse_args()<line_sep>config=Configuration(project=args.project user=args.user service=args.service client=args.client key=args.key verbose=args.verbose)<line_sep>recipe_smartsheet_report_to_bigquery(config args.auth_read args.auth_write args.token args.report args.dataset args.table args.schema)<block_end> |
"""
Provide tests for atomic swap handler initialization method implementation.
"""<import_stmt>datetime<import_stmt>time<import_stmt>pytest<import_from_stmt>sawtooth_sdk.processor.exceptions InvalidTransaction<import_from_stmt>sawtooth_sdk.protobuf.processor_pb2 TpProcessRequest<import_from_stmt>sawtooth_sdk.protobuf.setting_pb2 Setting<import_from_stmt>sawtooth_sdk.protobuf.transaction_pb2 Transaction TransactionHeader <import_from_stmt>testing.conftest create_signer<import_from_stmt>testing.mocks.stub StubContext<import_from_stmt>testing.utils.client proto_error_msg<import_from_stmt>remme.clients.block_info CONFIG_ADDRESS BlockInfoClient <import_from_stmt>remme.protos.account_pb2 Account<import_from_stmt>remme.protos.atomic_swap_pb2 AtomicSwapInfo AtomicSwapInitPayload AtomicSwapMethod <import_from_stmt>remme.protos.block_info_pb2 BlockInfo BlockInfoConfig<import_from_stmt>remme.protos.transaction_pb2 TransactionPayload<import_from_stmt>remme.shared.utils hash512<import_from_stmt>remme.settings SETTINGS_KEY_ZERO_ADDRESS_OWNERS SETTINGS_SWAP_COMMISSION ZERO_ADDRESS <import_from_stmt>remme.settings.helper _make_settings_key<import_from_stmt>remme.tp.atomic_swap AtomicSwapHandler<import_from_stmt>remme.tp.basic BasicHandler<line_sep>TOKENS_AMOUNT_TO_SWAP=200<line_sep>SWAP_COMMISSION_AMOUNT=100<line_sep>BOT_ETHEREUM_ADDRESS='0xe6ca0e7c974f06471759e9a05d18b538c5ced11e'<line_sep>BOT_PRIVATE_KEY='<KEY>'<line_sep>BOT_PUBLIC_KEY='03ecc5cb4094eb05319be6c7a63ebf17133d4ffaea48cdcfd1d5fc79dac7db7b6b'<line_sep>BOT_ADDRESS='112007b9433e1da5c624ff926477141abedfd57585a36590b0a8edc4104ef28093ee30'<line_sep>ALICE_ETHEREUM_ADDRESS='0x8dfe0f55a1cf9b22b8c85a9ff7a85a28a3879f71'<line_sep>ALICE_ADDRESS='112007db8a00c010402e2e3a7d03491323e761e0ea612481c518605648ceeb5ed454f7'<line_sep>ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR='0x6f4d5666332f5a575a714d4245624455612f2b4345424f704b4256704f5'<line_sep>BOT_IT_IS_INITIATOR_MARK=''<line_sep>SWAP_ID='033102e41346242476b15a3a7966eb5249271025fc7fb0b37ed3fdb4bcce3884'<line_sep>ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY=_make_settings_key(SETTINGS_SWAP_COMMISSION)<line_sep>ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY=_make_settings_key(SETTINGS_KEY_ZERO_ADDRESS_OWNERS)<line_sep>ADDRESS_TO_STORE_SWAP_INFO_BY=BasicHandler(name=AtomicSwapHandler().family_name versions=AtomicSwapHandler()._family_versions[0]).make_address_from_data(data=SWAP_ID)<line_sep>TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS={'family_name':AtomicSwapHandler().family_name 'family_version':AtomicSwapHandler()._family_versions[0] }<line_sep>RANDOM_NODE_PUBLIC_KEY='<KEY>'<line_sep>RANDOM_PUBLIC_KEY='<KEY>'<line_sep>CURRENT_TIMESTAMP=int(datetime.datetime.now().timestamp())<line_sep>BLOCK_INFO_CONFIG_ADDRESS=CONFIG_ADDRESS<line_sep>BLOCK_INFO_ADDRESS=BlockInfoClient.create_block_address(1000)<line_sep>block_info_config=BlockInfoConfig()<line_sep>block_info_config.latest_block=1000<line_sep>SERIALIZED_BLOCK_INFO_CONFIG=block_info_config.SerializeToString()<line_sep>block_info=BlockInfo()<line_sep>block_info.timestamp=CURRENT_TIMESTAMP<line_sep>SERIALIZED_BLOCK_INFO=block_info.SerializeToString()<line_sep>INPUTS=[ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY BLOCK_INFO_CONFIG_ADDRESS BLOCK_INFO_ADDRESS BOT_ADDRESS ZERO_ADDRESS ADDRESS_TO_STORE_SWAP_INFO_BY ]<line_sep>OUTPUTS=[ADDRESS_TO_STORE_SWAP_INFO_BY ZERO_ADDRESS BOT_ADDRESS ]<def_stmt>test_atomic_swap_init_with_empty_proto <block_start>"""
Case: send empty proto for init
Expect: invalid transaction error
"""<line_sep>inputs=outputs=[ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY BLOCK_INFO_CONFIG_ADDRESS BLOCK_INFO_ADDRESS BOT_ADDRESS ZERO_ADDRESS ADDRESS_TO_STORE_SWAP_INFO_BY ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY ]<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload()<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=inputs outputs=outputs dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>mock_context=StubContext(inputs=inputs outputs=outputs initial_state={})<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end><assert_stmt>proto_error_msg(AtomicSwapInitPayload {'receiver_address':['Missed address'] 'sender_address_non_local':['This field is required.'] 'amount':['This field is required.'] 'swap_id':['Missed swap_id'] 'created_at':['This field is required.'] })<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens.
Expect: bot sends commission to the zero account address, swap amount is decreased from bot account.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>bot_account=Account()<line_sep>bot_account.balance=5000<line_sep>serialized_bot_account=bot_account.SerializeToString()<line_sep>zero_account=Account()<line_sep>zero_account.balance=0<line_sep>serialized_zero_account=zero_account.SerializeToString()<line_sep>swap_commission_setting=Setting()<line_sep>swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION value=str(SWAP_COMMISSION_AMOUNT))<line_sep>serialized_swap_commission_setting=swap_commission_setting.SerializeToString()<line_sep>genesis_members_setting=Setting()<line_sep>genesis_members_setting.entries.add(key=SETTINGS_KEY_ZERO_ADDRESS_OWNERS value=f'{BOT_PUBLIC_KEY},')<line_sep>serialized_genesis_members_setting=genesis_members_setting.SerializeToString()<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={BLOCK_INFO_CONFIG_ADDRESS:SERIALIZED_BLOCK_INFO_CONFIG BLOCK_INFO_ADDRESS:SERIALIZED_BLOCK_INFO BOT_ADDRESS:serialized_bot_account ZERO_ADDRESS:serialized_zero_account ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:serialized_swap_commission_setting ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY:serialized_genesis_members_setting })<line_sep>swap_info=AtomicSwapInfo()<line_sep>swap_info.swap_id=SWAP_ID<line_sep>swap_info.state=AtomicSwapInfo.OPENED<line_sep>swap_info.amount=TOKENS_AMOUNT_TO_SWAP<line_sep>swap_info.created_at=CURRENT_TIMESTAMP<line_sep>swap_info.email_address_encrypted_optional=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR<line_sep>swap_info.sender_address=BOT_ADDRESS<line_sep>swap_info.sender_address_non_local=BOT_ETHEREUM_ADDRESS<line_sep>swap_info.receiver_address=ALICE_ADDRESS<line_sep>swap_info.is_initiator=<true><line_sep>serialized_swap_info=swap_info.SerializeToString()<line_sep>expected_bot_account=Account()<line_sep>expected_bot_account.balance=5000-TOKENS_AMOUNT_TO_SWAP-SWAP_COMMISSION_AMOUNT<line_sep>serialized_expected_bot_account=expected_bot_account.SerializeToString()<line_sep>expected_zero_account=Account()<line_sep>expected_zero_account.balance=SWAP_COMMISSION_AMOUNT<line_sep>serialized_expected_zero_account=expected_zero_account.SerializeToString()<line_sep>expected_state={BOT_ADDRESS:serialized_expected_bot_account ZERO_ADDRESS:serialized_expected_zero_account 
ADDRESS_TO_STORE_SWAP_INFO_BY:serialized_swap_info }<line_sep>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<line_sep>state_as_list=mock_context.get_state(addresses=[ADDRESS_TO_STORE_SWAP_INFO_BY BOT_ADDRESS ZERO_ADDRESS ])<line_sep>state_as_dict={entry.address:entry.data<for>entry state_as_list}<assert_stmt>expected_state<eq>state_as_dict<block_end><def_stmt>test_atomic_swap_init_already_taken_id <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with already existing swap id.
Expect: invalid transaction error is raised with atomic swap id has already been taken error message.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>swap_info=AtomicSwapInfo()<line_sep>swap_info.swap_id=SWAP_ID<line_sep>swap_info.state=AtomicSwapInfo.OPENED<line_sep>swap_info.amount=TOKENS_AMOUNT_TO_SWAP<line_sep>swap_info.created_at=CURRENT_TIMESTAMP<line_sep>swap_info.email_address_encrypted_optional=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR<line_sep>swap_info.sender_address=BOT_ADDRESS<line_sep>swap_info.sender_address_non_local=BOT_ETHEREUM_ADDRESS<line_sep>swap_info.receiver_address=ALICE_ADDRESS<line_sep>serialized_swap_info=swap_info.SerializeToString()<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={ADDRESS_TO_STORE_SWAP_INFO_BY:serialized_swap_info })<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end><assert_stmt>'Atomic swap ID has already been taken, please use a different one.'<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init_swap_no_block_config_info <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when there are no block config settings.
Expect: invalid transaction error is raised with block config not found error message.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={})<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end><assert_stmt>'Block config not found.'<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init_swap_no_block_info <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when the needed block information is missing.
Expect: invalid transaction error is raised with block not found error message.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={BLOCK_INFO_CONFIG_ADDRESS:SERIALIZED_BLOCK_INFO_CONFIG })<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end><assert_stmt>f'Block {block_info_config.latest_block+1} not found.'<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init_swap_receiver_address_invalid_type <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with invalid Alice node address.
Expect: invalid transaction error is raised with address is not of a blockchain token type error message.
"""<line_sep>invalid_receiver_address='112934y*(J#QJ3UH*PD(:9B&TYDB*I0b0a8edc4104ef28093ee30'<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=invalid_receiver_address sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={BLOCK_INFO_CONFIG_ADDRESS:SERIALIZED_BLOCK_INFO_CONFIG BLOCK_INFO_ADDRESS:SERIALIZED_BLOCK_INFO })<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end><assert_stmt>proto_error_msg(AtomicSwapInitPayload {'receiver_address':['Address is not of a blockchain token type.']})<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init_swap_wrong_commission_address <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with wrong commission settings.
Expect: invalid transaction error is raised with wrong commission address error message.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>swap_commission_setting=Setting()<line_sep>swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION value='-1')<line_sep>serialized_swap_commission_setting=swap_commission_setting.SerializeToString()<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={BLOCK_INFO_CONFIG_ADDRESS:SERIALIZED_BLOCK_INFO_CONFIG BLOCK_INFO_ADDRESS:SERIALIZED_BLOCK_INFO ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:serialized_swap_commission_setting })<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end><assert_stmt>'Wrong commission address.'<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init_swap_no_account_in_state <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens from non-existent bot address.
Expect: invalid transaction error is raised with not enough balance error message.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>swap_commission_setting=Setting()<line_sep>swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION value=str(SWAP_COMMISSION_AMOUNT))<line_sep>serialized_swap_commission_setting=swap_commission_setting.SerializeToString()<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={BLOCK_INFO_CONFIG_ADDRESS:SERIALIZED_BLOCK_INFO_CONFIG BLOCK_INFO_ADDRESS:SERIALIZED_BLOCK_INFO ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:serialized_swap_commission_setting })<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end>total_amount=TOKENS_AMOUNT_TO_SWAP+SWAP_COMMISSION_AMOUNT<assert_stmt>f'Not enough balance to perform the transaction in the amount (with a commission) {total_amount}.'<eq>str(error.value)<block_end><def_stmt>test_atomic_swap_init_swap_not_enough_balance <block_start>"""
Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when the bot address balance is not enough.
Expect: invalid transaction error is raised with not enough balance error message.
"""<line_sep>atomic_swap_init_payload=AtomicSwapInitPayload(receiver_address=ALICE_ADDRESS sender_address_non_local=BOT_ETHEREUM_ADDRESS amount=TOKENS_AMOUNT_TO_SWAP swap_id=SWAP_ID secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR created_at=CURRENT_TIMESTAMP )<line_sep>transaction_payload=TransactionPayload()<line_sep>transaction_payload.method=AtomicSwapMethod.INIT<line_sep>transaction_payload.data=atomic_swap_init_payload.SerializeToString()<line_sep>serialized_transaction_payload=transaction_payload.SerializeToString()<line_sep>transaction_header=TransactionHeader(signer_public_key=BOT_PUBLIC_KEY family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name') family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version') inputs=INPUTS outputs=OUTPUTS dependencies=[] payload_sha512=hash512(data=serialized_transaction_payload) batcher_public_key=RANDOM_NODE_PUBLIC_KEY nonce=time.time().hex().encode() )<line_sep>serialized_header=transaction_header.SerializeToString()<line_sep>transaction_request=TpProcessRequest(header=transaction_header payload=serialized_transaction_payload signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header) )<line_sep>bot_account=Account()<line_sep>bot_account.balance=0<line_sep>serialized_bot_account_balance=bot_account.SerializeToString()<line_sep>swap_commission_setting=Setting()<line_sep>swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION value=str(SWAP_COMMISSION_AMOUNT))<line_sep>serialized_swap_commission_setting=swap_commission_setting.SerializeToString()<line_sep>mock_context=StubContext(inputs=INPUTS outputs=OUTPUTS initial_state={BLOCK_INFO_CONFIG_ADDRESS:SERIALIZED_BLOCK_INFO_CONFIG BLOCK_INFO_ADDRESS:SERIALIZED_BLOCK_INFO BOT_ADDRESS:serialized_bot_account_balance ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:serialized_swap_commission_setting })<with_stmt>pytest.raises(InvalidTransaction)<as>error<block_start>AtomicSwapHandler().apply(transaction=transaction_request context=mock_context)<block_end>total_amount=TOKENS_AMOUNT_TO_SWAP+SWAP_COMMISSION_AMOUNT<assert_stmt>f'Not enough balance to perform the transaction in the amount (with a commission) {total_amount}.'<eq>str(error.value)<block_end> |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unitests for automatic variable tracing."""<import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>jax.numpy<as>jn<import_stmt>objax<import_from_stmt>objax.zoo.dnnet DNNet<line_sep>global_w=objax.TrainVar(jn.zeros(5))<line_sep>global_b=objax.TrainVar(jn.zeros(1))<line_sep>global_m=objax.nn.Sequential([objax.nn.Conv2D(2 4 3) objax.nn.BatchNorm2D(4)])<class_stmt>TestTracing(unittest.TestCase)<block_start>"""Unit tests for variable tracing using."""<def_stmt>test_function_global_vars self<block_start><def_stmt>loss x y<block_start>pred=jn.dot(x global_w.value)+global_b.value<line_sep><return>0.5<times>((y-pred)<power>2).mean()<block_end>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc {'global_w':global_w 'global_b':global_b})<block_end><def_stmt>test_function_global_module self<block_start><def_stmt>loss x<block_start><return>jn.sum(global_m(x training=<true>))<block_end>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc global_m.vars(scope='global_m.'))<block_end><def_stmt>test_function_closure_vars self<block_start>w=objax.TrainVar(jn.zeros(5))<line_sep>b=objax.TrainVar(jn.zeros(1))<def_stmt>loss x y<block_start>pred=jn.dot(x w.value)+b.value<line_sep><return>0.5<times>((y-pred)<power>2).mean()<block_end>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc {'w':w 'b':b})<block_end><def_stmt>test_function_closure_module self<block_start>m=objax.nn.Sequential([objax.nn.Conv2D(1 2 3) objax.nn.BatchNorm2D(2)])<def_stmt>loss x<block_start><return>jn.sum(m(x training=<true>))<block_end>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc m.vars(scope='m.'))<block_end><def_stmt>test_lambda_with_closure_vars self<block_start>w=objax.TrainVar(jn.zeros(5))<line_sep>b=objax.TrainVar(jn.zeros(1))<line_sep>loss=<lambda>x y:0.5<times>((y-jn.dot(x w.value)+b.value)<power>2).mean()<line_sep>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc {'w':w 'b':b})<block_end><def_stmt>test_multiline_lambda_with_closure_vars self<block_start>w=objax.TrainVar(jn.zeros(5))<line_sep>b=objax.TrainVar(jn.zeros(1))<line_sep>loss=<lambda>x y:(0.5<times>((y-jn.dot(x w.value)+b.value)<power>2).mean())<line_sep>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc {'w':w 'b':b})<block_end><def_stmt>test_closure_overrides_global_vars self# Make sure that global variables are what we expect them to be
<block_start>np.testing.assert_allclose(global_w.value np.zeros(5))<line_sep>np.testing.assert_allclose(global_b.value np.zeros(1))<def_stmt>_do_test # define local variable with the same name as existing global
<block_start>global_w=objax.TrainVar(jn.ones(10))<line_sep># verify that global_w and global_b are what we expect them to be
np.testing.assert_allclose(global_w.value np.ones(10))<line_sep>np.testing.assert_allclose(global_b.value np.zeros(1))<line_sep># loss function which mixes closure vars and global vars, and where a closure var hides a global var
<def_stmt>loss x y<block_start>pred=jn.dot(x global_w.value)+global_b.value<line_sep><return>0.5<times>((y-pred)<power>2).mean()<block_end>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc {'global_w':global_w 'global_b':global_b})<block_end>_do_test()<line_sep># Make sure that global variables didn't change, in other words
# that _do_test operated on local variables
np.testing.assert_allclose(global_w.value np.zeros(5))<line_sep>np.testing.assert_allclose(global_b.value np.zeros(1))<block_end><def_stmt>test_typical_training_loop self# Define model and optimizer
<block_start>model=DNNet((32 10) objax.functional.leaky_relu)<line_sep>opt=objax.optimizer.Momentum(model.vars() nesterov=<true>)<line_sep># Predict op
predict_op=<lambda>x:objax.functional.softmax(model(x training=<false>))<line_sep>self.assertDictEqual(objax.util.find_used_variables(predict_op) model.vars(scope='model.'))<line_sep># Loss function
<def_stmt>loss x label<block_start>logit=model(x training=<true>)<line_sep>xe_loss=objax.functional.loss.cross_entropy_logits_sparse(logit label).mean()<line_sep><return>xe_loss<block_end>self.assertDictEqual(objax.util.find_used_variables(loss) model.vars(scope='model.'))<line_sep># Gradients and loss function
loss_gv=objax.GradValues(loss objax.util.find_used_variables(loss))<def_stmt>train_op x y learning_rate<block_start>grads,loss=loss_gv(x y)<line_sep>opt(learning_rate grads)<line_sep><return>loss<block_end>self.assertDictEqual(objax.util.find_used_variables(train_op) {**model.vars(scope='loss_gv.model.') **opt.vars(scope='opt.')})<block_end><def_stmt>test_lambda_inside_function self<block_start>m=objax.nn.Sequential([objax.nn.Conv2D(1 2 3) objax.nn.BatchNorm2D(2)])<def_stmt>loss x<block_start>get_logits=<lambda>inp:m(inp training=<true>)<line_sep><return>jn.sum(get_logits(x))<block_end>vc=objax.util.find_used_variables(loss)<line_sep>self.assertDictEqual(vc m.vars(scope='m.'))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>..testing_utils DummyConverter DummyLoad DummyNoise DummyOdeSolver DummyVoltageSupply DummyElectricMotor mock_instantiate instantiate_dict<import_from_stmt>gym_electric_motor.physical_systems physical_systems<as>ps converters<as>cv electric_motors<as>em mechanical_loads<as>ml voltage_supplies<as>vs solvers<as>sv<import_from_stmt>gym.spaces Box<import_stmt>pytest<class_stmt>TestSCMLSystem<block_start>"""
Base Class to test all PhysicalSystems that derive from SCMLSystem
"""<line_sep>class_to_test=ps.SCMLSystem<def_stmt>mock_build_state self motor_state torque u_in u_sup<block_start>"""Function to mock an arbitrary build_state function to test the SCMLSystem
"""<line_sep>self.motor_state=motor_state<line_sep>self.torque=torque<line_sep>self.u_in=u_in<line_sep>self.u_sup=u_sup<line_sep><return>np.concatenate((self.motor_state[:len(DummyLoad.state_names)] [torque] self.motor_state[len(DummyLoad.state_names):] [u_sup]))<block_end>@pytest.fixture<def_stmt>scml_system self monkeypatch<block_start>"""
Returns an instantiated SCMLSystem with Dummy Components and mocked abstract functions
"""<line_sep>monkeypatch.setattr(self.class_to_test '_build_state_names' <lambda>_:DummyLoad.state_names+['torque']+DummyElectricMotor.CURRENTS+DummyElectricMotor.VOLTAGES+['u_sup'])<line_sep>monkeypatch.setattr(self.class_to_test '_build_state_space' <lambda>_ state_names:Box(low=np.zeros_like(state_names dtype=float) high=np.zeros_like(state_names dtype=float)))<line_sep><return>self.class_to_test(converter=DummyConverter() motor=DummyElectricMotor() load=DummyLoad() supply=DummyVoltageSupply() ode_solver=DummyOdeSolver() noise_generator=DummyNoise())<block_end><def_stmt>test_reset self scml_system<block_start>"""Test the reset function in the physical system"""<line_sep>scml_system._t=12<line_sep>scml_system._k=33<line_sep>state_space=scml_system.state_space<line_sep>state_positions=scml_system.state_positions<line_sep>initial_state=scml_system.reset()<line_sep>target=(np.array([0 0 0 0 0 0 560])+scml_system._noise_generator.reset())/scml_system.limits<assert_stmt>np.all(initial_state<eq>target) 'Initial states of the system are incorrect'<assert_stmt>scml_system._t<eq>0 'Time of the system was not set to zero after reset'<assert_stmt>scml_system._k<eq>0 'Episode step of the system was not set to zero after reset'<assert_stmt>scml_system.converter.reset_counter<eq>scml_system.electrical_motor.reset_counter<eq>scml_system.mechanical_load.reset_counter<eq>scml_system.supply.reset_counter 'The reset was not passed to all components of the SCMLSystem'<assert_stmt>scml_system._ode_solver.t<eq>0 'The ode solver was not reset correctly'<assert_stmt>all(scml_system._ode_solver.y<eq>np.zeros_like(scml_system.mechanical_load.state_names+scml_system.electrical_motor.CURRENTS dtype=float)) ' The ode solver was not reset correctly'<block_end><def_stmt>test_system_equation self scml_system<block_start>"""Tests the system equation function"""<line_sep>state=np.random.rand(4)<line_sep>currents=state[[2 3]]<line_sep>torque=scml_system.electrical_motor.torque(currents)<line_sep>u_in=np.random.rand(2)<line_sep>t=np.random.rand()<line_sep>derivative=scml_system._system_equation(t state u_in)<assert_stmt>all(derivative<eq>np.array([torque -torque currents[0]-u_in[0] currents[1]-u_in[1]])) 'The system equation return differs from the expected'<assert_stmt>scml_system.mechanical_load.t<eq>t 'The time t was not passed through to the mech. load equation'<assert_stmt>np.all(scml_system.mechanical_load.mechanical_state<eq>state[:2]) 'The mech. state was not returned correctly'<block_end><def_stmt>test_simulate self scml_system<block_start>"""Test the simulation function of the SCMLSystem"""<line_sep># Reset the system and take a random action
scml_system.reset()<line_sep>action=scml_system.action_space.sample()<line_sep># Set a defined initial state
ode_state=np.array([3 4 5 6])<line_sep>scml_system._ode_solver.set_initial_value(ode_state)<line_sep># Perform the action on the system
next_state=scml_system.simulate(action)<line_sep>solver_state_me=scml_system._ode_solver.y[:len(DummyLoad.state_names)]<line_sep>solver_state_el=scml_system._ode_solver.y[len(DummyLoad.state_names):]<line_sep>torque=[scml_system.electrical_motor.torque(solver_state_el)]<line_sep>u_sup=[scml_system.supply.u_nominal]<line_sep>u_in=[u<times>u_sup[0]<for>u scml_system.converter.u_in]<line_sep># Calculate the next state
desired_next_state=(np.concatenate((solver_state_me torque solver_state_el u_in u_sup))+scml_system._noise_generator.noise())/scml_system.limits<line_sep># Assertions for correct simulation
<assert_stmt>all(desired_next_state<eq>next_state) 'The calculated next state differs from the expected one'<assert_stmt>scml_system.converter.action<eq>action 'The action was not passed correctly to the converter'<assert_stmt>scml_system.converter.action_set_time<eq>0 'The action start time was passed incorrectly to the converter'<assert_stmt>scml_system.converter.last_i_out<eq>scml_system.electrical_motor.i_in(scml_system._ode_solver.last_y[2:])<block_end><def_stmt>test_system_jacobian self scml_system<block_start>"""Tests for the system jacobian function"""<line_sep>el_jac=np.arange(4).reshape(2 2)<line_sep>el_over_omega=np.arange(4 6)<line_sep>torque_over_el=np.arange(6 8)<line_sep># Set the el. jacobian returns to specified values
scml_system.electrical_motor.electrical_jac_return=(el_jac el_over_omega torque_over_el)<line_sep>me_jac=np.arange(8 12).reshape(2 2)<line_sep>me_over_torque=np.arange(12 14)<line_sep># Set the mech. jacobian returns to specified values
scml_system.mechanical_load.mechanical_jac_return=me_jac me_over_torque<line_sep>sys_jac=scml_system._system_jacobian(0 np.array([0 1 2 3]) [0 -1])<line_sep>#
<assert_stmt>np.all(sys_jac[-2: -2:]<eq>el_jac) 'The el. jacobian is false'<assert_stmt>np.all(sys_jac[:2 :2]<eq>me_jac) 'The mech. jacobian is false'<assert_stmt>np.all(sys_jac[2: 0]<eq>el_over_omega) 'the derivative of the el.state over omega is false'<assert_stmt>np.all(sys_jac[2: 1]<eq>np.zeros(2))<assert_stmt>np.all(sys_jac[:-2 2:]<eq>np.array([[72 84] [78 91]])) 'The derivative of the mech.state '<concat>'over the currents is false'<block_end><block_end> |
<import_stmt>os.path<as>op<import_stmt>random<import_stmt>time<import_from_stmt>keras.callbacks TensorBoard ModelCheckpoint LearningRateScheduler<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_addons.callbacks TQDMProgressBar<import_from_stmt>fastmri_recon.data.sequences.oasis_sequences Masked2DSequence KIKISequence<import_from_stmt>fastmri_recon.models.functional_models.kiki_sep kiki_sep_net<import_from_stmt>fastmri_recon.models.utils.data_consistency MultiplyScalar<import_from_stmt>fastmri_recon.models.utils.non_linearities lrelu<line_sep>random.seed(0)<line_sep># paths
train_path='/media/Zaccharie/UHRes/OASIS_data/'<line_sep>n_train=1000<line_sep>n_val=200<line_sep># generators
AF=4<line_sep>train_gen_last=Masked2DSequence(train_path af=AF inner_slices=32 rand=<true> scale_factor=1e-2 seed=0 val_split=0.1)<line_sep>val_gen_last=train_gen_last.val_sequence<line_sep>train_gen_last.filenames=random.sample(train_gen_last.filenames n_train)<line_sep>val_gen_last.filenames=random.sample(val_gen_last.filenames n_val)<line_sep>random.seed(0)<line_sep>train_gen_i=KIKISequence(train_path af=AF inner_slices=32 rand=<true> scale_factor=1e-2 space='I' seed=0 val_split=0.1)<line_sep>val_gen_i=train_gen_i.val_sequence<line_sep>train_gen_i.filenames=random.sample(train_gen_i.filenames n_train)<line_sep>val_gen_i.filenames=random.sample(val_gen_i.filenames n_val)<line_sep>random.seed(0)<line_sep>train_gen_k=KIKISequence(train_path af=AF inner_slices=32 rand=<true> scale_factor=1e-2 space='K' seed=0 val_split=0.1)<line_sep>val_gen_k=train_gen_k.val_sequence<line_sep>train_gen_k.filenames=random.sample(train_gen_k.filenames n_train)<line_sep>val_gen_k.filenames=random.sample(val_gen_k.filenames n_val)<line_sep>random.seed(0)<line_sep>run_params={'n_convs':16 'n_filters':48 'noiseless':<true> 'lr':1e-3 'activation':lrelu 'input_size':(<none> <none> 1) }<line_sep>multiply_scalar=MultiplyScalar()<line_sep>n_epochs=50<def_stmt>learning_rate_from_epoch epoch<block_start><return>10<power>(-(epoch<floordiv>(n_epochs/3))-3)<block_end><def_stmt>train_model model space='K' n=1<block_start>print(model.summary(line_length=150))<line_sep>run_id=f'kikinet_sep_{space}{n}_af{AF}_oasis_{int(time.time())}'<line_sep>chkpt_path=f'checkpoints/{run_id}'+'-{epoch:02d}.hdf5'<line_sep>print(run_id)<line_sep>chkpt_cback=ModelCheckpoint(chkpt_path period=n_epochs<floordiv>2)<line_sep>log_dir=op.join('logs' run_id)<line_sep>tboard_cback=TensorBoard(profile_batch=0 log_dir=log_dir histogram_freq=0 write_graph=<true> write_images=<false> )<line_sep>lrate_cback=LearningRateScheduler(learning_rate_from_epoch)<line_sep>tqdm_cb=TQDMProgressBar()<if_stmt>space<eq>'K'<block_start>train_gen=train_gen_k<line_sep>val_gen=val_gen_k<block_end><elif_stmt>space<eq>'I'<block_start><if_stmt>n<eq>2<block_start>train_gen=train_gen_last<line_sep>val_gen=val_gen_last<block_end><elif_stmt>n<eq>1<block_start>train_gen=train_gen_i<line_sep>val_gen=val_gen_i<block_end><block_end>model.fit_generator(train_gen steps_per_epoch=n_train epochs=n_epochs validation_data=val_gen validation_steps=1 verbose=0 callbacks=[tqdm_cb tboard_cback chkpt_cback lrate_cback ] # max_queue_size=35,
use_multiprocessing=<true> workers=35 shuffle=<true> )<line_sep><return>model<block_end># first K net training
model=kiki_sep_net(<none> multiply_scalar to_add='K' last=<false> **run_params)<line_sep>train_model(model space='K' n=1)<line_sep>model=kiki_sep_net(model multiply_scalar to_add='I' last=<false> **run_params)<line_sep>train_model(model space='I' n=1)<line_sep>model=kiki_sep_net(model multiply_scalar to_add='K' last=<false> **run_params)<line_sep>train_model(model space='K' n=2)<line_sep>model=kiki_sep_net(model multiply_scalar to_add='I' last=<true> fastmri=<false> **run_params)<line_sep>train_model(model space='I' n=2)<line_sep> |
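# Note on the schedule used above (derived from the code, not stated in the original source): with n_epochs=50,
# learning_rate_from_epoch steps the learning rate through 1e-3, 1e-4 and 1e-5, dropping one decade after each third of the epochs.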
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>json<import_stmt>logging<import_stmt>ochopod<import_stmt>pykka<import_stmt>time<import_stmt>uuid<import_from_stmt>flask Flask request<import_from_stmt>kazoo.exceptions ConnectionClosedError NodeExistsError<import_from_stmt>kazoo.client KazooClient KazooState<import_from_stmt>kazoo.recipe.lock LockTimeout<import_from_stmt>ochopod.core.fsm shutdown spin_lock Aborted FSM<import_from_stmt>pykka ThreadingFuture Timeout<import_from_stmt>threading Event<line_sep>#: Our ochopod logger
logger=logging.getLogger('ochopod')<line_sep>#: Root zookeeper node path (under which we store the pod data for each cluster). This path will prefix any node
#: we read or write (including the lock).
ROOT='/ochopod/clusters'<line_sep>#: We use the same tick for all our state-machines (namely one second). This quantity can be scaled up or
#: down depending on the actor
SAMPLING=1.0<class_stmt>ZK(FSM)<block_start>"""
Base layer dealing with zookeeper and in charge of writing the pod ephemeral node upon connection. The
reset() state will by default loop back to initial() and properly de-allocate the kazoo driver. Once connected
the machine will spin() until we raise something.
Please note we support an explicit reset request which will trip the machine. This is used from the CLI to
force a pod to completely disconnect/reconnect/reconfigure.
"""<def_stmt>__init__ self brokers scope tag breadcrumbs hints<block_start>super(ZK self).__init__()<line_sep>self.breadcrumbs=breadcrumbs<line_sep>self.connected=0<line_sep>self.brokers=brokers<line_sep>self.force_reset=0<line_sep>self.hints=hints<line_sep>self.hints['state']='follower'<line_sep>self.id=uuid.uuid4()<line_sep>self.prefix='%s/%s.%s'%(ROOT scope tag)<line_sep>self.scope=scope<line_sep>self.seq=<none><line_sep>self.tag=tag<block_end><def_stmt>feedback self state#
# - forward the state change to the actor via a message
# - the specialized() hook will process this safely
#
<block_start>self.actor_ref.tell({'request':'state change' 'state':state})<block_end><def_stmt>reset self data<block_start>self.connected=0<line_sep>self.force_reset=0<line_sep>self.hints['state']='follower'<line_sep>logger.warning('%s : actor reset (%s)'%(self.path data.cause))<if_stmt>hasattr(data 'zk')#
# - gracefully shut our client down
#
<block_start>data.zk.stop()<line_sep>logger.debug('%s : zk client stopped, releasing resources'%self.path)<line_sep>data.zk.close()<block_end><if_stmt>self.terminate<block_start>super(ZK self).reset(data)<block_end><return>'initial' data 0<block_end><def_stmt>initial self data#
# - setup a new kazoo client
#
<block_start>cnx_string=','.join(self.brokers)<line_sep>logger.debug('%s : connecting @ %s'%(self.path cnx_string))<line_sep>data.zk=KazooClient(hosts=cnx_string timeout=5.0 read_only=0 randomize_hosts=1)<line_sep>data.zk.add_listener(self.feedback)<line_sep>data.zk.start()<line_sep>data.n=0<line_sep><return>'wait_for_cnx' data 0<block_end><def_stmt>wait_for_cnx self data<block_start><if_stmt>self.force_reset<or>self.terminate<block_start><raise>Aborted('resetting')<block_end>#
# - loop back if we haven't received a CONNECTED event from the driver
#
<if_stmt><not>self.connected<block_start><return>'wait_for_cnx' data SAMPLING<block_end>#
# - the /pods node holds all our ephemeral per-container data (one container == one child node)
# - the /hash node stores the last recorded md5 hash (local pods + dependencies), which we use to
# flag any change amongst the pods or their dependencies
#
data.zk.ensure_path('%s/pods'%self.prefix)<line_sep>data.zk.ensure_path('%s/hash'%self.prefix)<try_stmt>#
# - register ourselves by creating an ephemeral
# - this is where we can store arbitrary information (e.g our breadcrumbs)
# - we ask for a sequence counter as well which we then keep (e.g in case of connection loss or reset
# we guarantee the pod won't get assigned a new index)
# - this is *critical* for some use-cases (e.g Kafka where the broker index must remain the same)
#
<block_start>path=data.zk.create('%s/pods/%s.'%(self.prefix self.id) ephemeral=<true> sequence=<true>)<line_sep>tokens=path.split('.')<if_stmt>self.seq<is><none><block_start>self.seq=int(tokens[-1])<block_end>self.breadcrumbs['seq']=self.seq<line_sep>js=json.dumps(self.breadcrumbs)<line_sep>data.zk.set(path js)<block_end><except_stmt>NodeExistsError#
# - if the node is already there we just recovered from a zookeeper connection loss
# and /snapshot has not been phased out yet .. this is not an issue, simply pause a bit
# to re-attempt later
#
<block_start>logger.debug('%s : pod %s is already there (probably a zk reconnect)'%(self.path self.id))<line_sep><return>'wait_for_cnx' data 5.0<times>SAMPLING<block_end>logger.debug('%s : registered as %s (#%d)'%(self.path self.id self.seq))<line_sep>data.connected_at=time.time()<line_sep><return>'spin' data 0<block_end><def_stmt>spin self data<block_start><raise>NotImplementedError<block_end><def_stmt>specialized self msg<block_start><assert_stmt>'request'<in>msg 'bogus message received ?'<line_sep>req=msg['request']<if_stmt>req<eq>'state change'#
# - we got a zk state change
# - we only use the switch to CONNECTED to go from wait_for_cnx() to spin()
# - ZK disconnects (LOST or SUSPENDED) are simply flagged when exceptions are raised
#
<block_start>state=msg['state']<line_sep>current='connected'<if>self.connected<else>'disconnected'<line_sep>logger.debug('%s : zk state change -> "%s" (%s)'%(self.path str(state) current))<if_stmt>self.connected<and>state<ne>KazooState.CONNECTED<block_start>logger.warning('%s : lost connection (%s) / forcing a reset'%(self.path str(state)))<line_sep>self.force_reset=1<line_sep>self.connected=0<block_end><elif_stmt>state<eq>KazooState.CONNECTED<block_start>self.connected=1<block_end><block_end><elif_stmt>req<eq>'reset'#
# - we got a request to explicitly force a reset
# - this is typically invoked from the CLI
#
<block_start>self.force_reset=1<block_end><else_stmt><block_start>super(ZK self).specialized(msg)<block_end><block_end><block_end><class_stmt>Coordinator(ZK)<block_start>"""
Leader lock implementation logic, based on :class:`ZK`. The spin() state will attempt to grab a lock (we
simply use the Kazoo recipe). If we obtain the lock we boot the controller actor (e.g the clustering model)
and then stay there by spin-locking on its latch. If the controller goes down for any reason (typically a
zookeeper error or a shutdown request) we'll reset (and disconnect from zookeeper).
"""<def_stmt>__init__ self brokers scope tag port breadcrumbs model hints<block_start>super(Coordinator self).__init__(brokers scope tag breadcrumbs hints)<line_sep>self.model=model<line_sep>self.path='coordinator'<line_sep>self.port=port<block_end><def_stmt>reset self data<block_start><if_stmt>hasattr(data 'controller')#
# - don't forget to nuke our controller before resetting
#
<block_start>shutdown(data.controller)<block_end><if_stmt>hasattr(data 'lock')#
# - make sure to remove the lock attribute
# - it's useless to release the lock as we'll release the client altogether
#
<block_start>delattr(data 'lock')<block_end><return>super(Coordinator self).reset(data)<block_end><def_stmt>spin self data#
# - if the termination trigger is set, abort immediately
#
<block_start><if_stmt>self.force_reset<or>self.terminate<block_start><raise>Aborted('resetting')<block_end>#
# - attempt to fetch the lock
# - allocate it if not already done
# - it is *important* to just allocate one lock as there is a leak in kazoo
#
<if_stmt><not>hasattr(data 'lock')<block_start>data.lock=data.zk.Lock('%s/coordinator'%self.prefix)<block_end><try_stmt>#
# - attempt to lock within a 5 seconds timeout to avoid stalling in some cases
#
<block_start><if_stmt>data.lock.acquire(timeout=5.0<times>SAMPLING)<block_start><return>'start_controller' data 0<block_end><block_end><except_stmt>LockTimeout<block_start><pass><block_end><return>'spin' data 0<block_end><def_stmt>start_controller self data#
# - if the termination trigger is set, abort immediately
# - this is important as it is possible to somehow get the lock after a suspend (acquire() returns
# true in that case which is misleading)
#
<block_start><if_stmt>self.force_reset<or>self.terminate<block_start><raise>Aborted('resetting')<block_end>#
# - we have the lock (e.g we are the leader)
# - start the controller actor
#
data.latch=ThreadingFuture()<line_sep>logger.debug('%s : lock acquired @ %s, now leading'%(self.path self.prefix))<line_sep>data.controller=self.model.start(data.zk self.id self.hints self.scope self.tag self.port data.latch)<line_sep><return>'lock' data 0<block_end><def_stmt>lock self data#
# - if the termination trigger is set, abort immediately
#
<block_start><if_stmt>self.force_reset<or>self.terminate<block_start><raise>Aborted('resetting')<block_end>#
# - spin-lock on the controller latch
# - any catastrophic plug failure will be trapped that way
#
<try_stmt><block_start>Event()<line_sep>out=data.latch.get(SAMPLING)<if_stmt>isinstance(out Exception)<block_start><raise>out<block_end><block_end><except_stmt>Timeout<block_start><pass><block_end><return>'lock' data 0<block_end><block_end> |
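# Illustrative sketch (not part of the original module): the leader election performed in Coordinator.spin() is the
# stock kazoo Lock recipe shown below; the broker address and cluster path are placeholder assumptions.
<import_from_stmt>kazoo.client KazooClient<import_from_stmt>kazoo.recipe.lock LockTimeout<def_stmt>try_lead <block_start>zk=KazooClient(hosts='127.0.0.1:2181')<line_sep>zk.start()<line_sep>lock=zk.Lock('/ochopod/clusters/demo.web/coordinator')<try_stmt><block_start><if_stmt>lock.acquire(timeout=5.0)<block_start>print('lock acquired, now leading')<line_sep>lock.release()<block_end><block_end><except_stmt>LockTimeout<block_start>print('another pod is leading')<block_end>zk.stop()<line_sep>zk.close()<block_end>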
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>bmt path<block_start>"""data from Section 1.3
The `bmt` data frame has 137 rows and 22 columns.
This data frame contains the following columns:
group
Disease Group 1-ALL, 2-AML Low Risk, 3-AML High Risk
t1
Time To Death Or On Study Time
t2
Disease Free Survival Time (Time To Relapse, Death Or End Of Study)
d1
Death Indicator 1-Dead 0-Alive
d2
Relapse Indicator 1-Relapsed, 0-Disease Free
d3
Disease Free Survival Indicator 1-Dead Or Relapsed, 0-Alive Disease
Free
ta
Time To Acute Graft-Versus-Host Disease
da
Acute GVHD Indicator 1-Developed Acute GVHD 0-Never Developed Acute
GVHD
tc
Time To Chronic Graft-Versus-Host Disease
dc
Chronic GVHD Indicator 1-Developed Chronic GVHD 0-Never Developed
Chronic GVHD
tp
Time To Platelet Recovery
dp
Platelet Recovery Indicator 1-Platelets Returned To Normal,
0-Platelets Never Returned to Normal
z1
Patient Age In Years
z2
Donor Age In Years
z3
Patient Sex: 1-Male, 0-Female
z4
Donor Sex: 1-Male, 0-Female
z5
Patient CMV Status: 1-CMV Positive, 0-CMV Negative
z6
Donor CMV Status: 1-CMV Positive, 0-CMV Negative
z7
Waiting Time to Transplant In Days
z8
FAB: 1-FAB Grade 4 Or 5 and AML, 0-Otherwise
z9
Hospital: 1-The Ohio State University, 2-Alfred, 3-St. Vincent,
4-Hahnemann
z10
MTX Used as a Graft-Versus-Host-Disease Prophylactic: 1-Yes 0-No
Klein and Moeschberger (1997) *Survival Analysis: Techniques for Censored
and Truncated Data*, Springer.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bmt.csv`.
Returns:
Tuple of np.ndarray `x_train` with 137 rows and 22 columns and
dictionary `metadata` of column headers (feature names).
"""<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='bmt.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/KMsurv/bmt.csv'<line_sep>maybe_download_and_extract(path url save_file_name='bmt.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end> |
<import_from_stmt>typing List Optional<import_from_stmt>pydantic BaseModel<import_from_stmt>.music MusicTrack<import_from_stmt>.request BaseResponseData CursorOffsetRequestParams CursorOffsetResponseParams ListRequestParams ListResponseData <import_from_stmt>.user CommonUserDetails<import_from_stmt>.video Video<class_stmt>PostStatistics(BaseModel)# The ID of the post
<block_start>aweme_id:str<line_sep># The number of comments on the post
comment_count:int<line_sep># The number of times the post has been liked
digg_count:int<line_sep># The number of times the post has been forwarded (looks unused?)
forward_count:Optional[int]<line_sep># The number of times the post has been viewed - doesn't appear to be public, so always 0
play_count:int<line_sep># The number of times the post has been shared
share_count:int<block_end><class_stmt>PostStatus(BaseModel)# True if the post allows comments
<block_start>allow_comment:bool<line_sep># True if the post allows sharing
allow_share:bool<line_sep># 0 if the post can be downloaded
download_status:int<line_sep># True if the post is currently being reviewed
in_reviewing:Optional[bool]<line_sep># True if the post has been deleted
is_delete:bool<line_sep># True if the post is private
is_private:bool<line_sep># True if the post contains content that is not allowed on the platform
is_prohibited:Optional[bool]<line_sep># 0 if the post is public
private_status:Optional[int]<line_sep># 1 if the post has been reviewed
reviewed:Optional[int]<block_end><class_stmt>PostTags(BaseModel)# 0 if the tag is for a user; 1 if the tag is for a hashtag
<block_start>type:int<line_sep># The name of the hashtag
hashtag_name:Optional[str]<line_sep># The ID of the tagged user
user_id:Optional[str]<block_end><class_stmt>RiskInfo(BaseModel)# The text shown if the post has been flagged
<block_start>content:str<line_sep># ???
risk_sink:bool=<false><line_sep># The risk type associated with the post - 0 if no risk; 1 if low; 2 if high
type:int<line_sep># ??? - only present if the post has been flagged
vote:Optional[bool]<line_sep># True if a warning should be shown to the user
warn:bool<block_end><class_stmt>ShareInfo(BaseModel)# ???
<block_start>bool_persist:Optional[int]<line_sep># The description used when sharing (if set)
share_desc:str<line_sep># The description used when sharing a link only (if set)
share_link_desc:Optional[str]<line_sep># The quote used when sharing (if set)
share_quote:Optional[str]<line_sep># The signature used when sharing (if set)
share_signature_desc:Optional[str]<line_sep># The signature URL used when sharing (if set)
share_signature_url:Optional[str]<line_sep># The title used when sharing
share_title:str<line_sep># The link to share
share_url:str<line_sep># The description used when sharing on Weibo
share_weibo_desc:str<block_end><class_stmt>StickerInfo(BaseModel)# The ID of the sticker, e.g. 22094
<block_start>id:str<line_sep># The display name of the sticker, e.g. Long Face
name:str<block_end><class_stmt>Post(BaseModel)# Details about the author
<block_start>author:Optional[CommonUserDetails]<line_sep># The ID of the author
author_user_id:str<line_sep># The ID of the post
aweme_id:str<line_sep># The type of post - 0 for a musical.ly
aweme_type:int<line_sep># The timestamp in seconds when the post was created
create_time:int<line_sep># A description of the post
desc:str<line_sep># Details about the music used in the post
music:Optional[MusicTrack]<line_sep># True if the end user should not be provided the option to download the video
prevent_download:Optional[bool]<line_sep># An age rating for the post, e.g. 12
rate:int<line_sep># The 2-letter region the post was created in, e.g. US
region:str<line_sep># Risk information about the post
risk_infos:Optional[RiskInfo]<line_sep># Information used when sharing the post
share_info:Optional[ShareInfo]<line_sep># A link to the video on the musical.ly website that is used when sharing
share_url:str<line_sep># Statistics about the post
statistics:PostStatistics<line_sep># Status information about the post
status:PostStatus<line_sep># Information about the sticker used in the post
sticker_detail:Optional[StickerInfo]<line_sep># The ID of the sticker used in the post (looks to be deprecated by sticker_detail)
stickers:Optional[str]<line_sep># Tagged users and hashtags used in the description
text_extra:List[PostTags]<line_sep># 1 if the logged in user has liked this post
user_digged:int<line_sep># Details about the video in the post
video:Video<line_sep>@property<def_stmt>video_url self<block_start>url=filter(<lambda>url:"watermark"<in>url self.video.download_addr.url_list)<line_sep><return>next(url)<block_end>@property<def_stmt>video_url_without_watermark self<block_start><return>self.video_url.replace("watermark=1" "watermark=0")<block_end><block_end><class_stmt>GetPostResponse(BaseResponseData)<block_start>aweme_detail:Post<block_end><class_stmt>ListPostsRequest(ListRequestParams CursorOffsetRequestParams)# The id of the user whose posts to retrieve
<block_start>user_id:str<block_end><class_stmt>ListPostsResponse(ListResponseData CursorOffsetResponseParams)<block_start>aweme_list:List[Post]<block_end> |
<import_from_stmt>..transitions Transition<import_from_stmt>..suffixes *<line_sep>__all__=["State"]<class_stmt>State(object)<block_start><def_stmt>__init__ self initialState finalState *suffixes<block_start>self.initialState=initialState<line_sep>self.finalState=finalState<if_stmt>suffixes<is><none><block_start>self.suffixes=()<block_end><else_stmt><block_start>self.suffixes=suffixes<block_end><block_end><def_stmt>AddTransitions self word transitions marked<block_start><for_stmt>suffix self.suffixes<block_start><if_stmt>suffix.Match(word)<block_start>transitions.append(Transition(self self.NextState(suffix) word suffix marked))<block_end><block_end><block_end><def_stmt>NextState self suffix<block_start><raise>NotImplementedError("Feature is not implemented.")<block_end><block_end> |
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for PixelCNN Modules."""<import_stmt>pixelcnn<import_from_stmt>flax linen<as>nn<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>numpy.testing<as>np_testing<import_from_stmt>jax random<import_stmt>jax.numpy<as>np<import_from_stmt>jax.config config<class_stmt>ModelTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.rng=random.PRNGKey(0)<line_sep>self.x=np.arange(24).reshape(1 4 3 2)<block_end><def_stmt>get_weightnorm self params<block_start><return>[params[k]<for>k ('direction' 'scale' 'bias')]<block_end><def_stmt>assert_mean_and_variance self out# Weightnorm should ensure that, at initialization time, the outputs of the
# module have mean 0 and variance 1 over the non-feature dimensions.
<block_start>np_testing.assert_allclose(np.mean(out (0 1 2)) 0. atol=1e-5)<line_sep>np_testing.assert_allclose(np.var(out (0 1 2)) 1. atol=1e-5)<block_end><def_stmt>test_conv self<block_start>model=pixelcnn.ConvWeightNorm(features=4 kernel_size=(3 2))<line_sep>out,variables=model.init_with_output(self.rng self.x)<line_sep>params=variables['params']['weightnorm_params']<line_sep>direction,scale,bias=self.get_weightnorm(params)<line_sep>self.assertEqual(direction.shape (3 2 2 4))<line_sep>self.assertEqual(scale.shape (4 ))<line_sep>self.assertEqual(bias.shape (4 ))<line_sep>self.assertEqual(out.shape (1 2 2 4))<line_sep>self.assert_mean_and_variance(out)<block_end><def_stmt>test_conv_down self<block_start>model=pixelcnn.ConvDown(features=4)<line_sep>out,variables=model.init_with_output(self.rng self.x)<line_sep>params=variables['params']['ConvWeightNorm_0']['weightnorm_params']<line_sep>direction,scale,bias=self.get_weightnorm(params)<line_sep>self.assertEqual(direction.shape (2 3 2 4))<line_sep>self.assertEqual(scale.shape (4 ))<line_sep>self.assertEqual(bias.shape (4 ))<line_sep>self.assertEqual(out.shape (1 4 3 4))<line_sep>self.assert_mean_and_variance(out)<block_end><def_stmt>test_conv_down_right self<block_start>model=pixelcnn.ConvDownRight(features=4)<line_sep>out,variables=model.init_with_output(self.rng self.x)<line_sep>params=variables['params']['ConvWeightNorm_0']['weightnorm_params']<line_sep>direction,scale,bias=self.get_weightnorm(params)<line_sep>self.assertEqual(direction.shape (2 2 2 4))<line_sep>self.assertEqual(scale.shape (4 ))<line_sep>self.assertEqual(bias.shape (4 ))<line_sep>self.assertEqual(out.shape (1 4 3 4))<line_sep>self.assert_mean_and_variance(out)<block_end><def_stmt>test_conv_transpose self<block_start>model=pixelcnn.ConvTranspose(features=4 kernel_size=(3 2))<line_sep>out,variables=model.init_with_output(self.rng self.x)<line_sep>params=variables['params']['weightnorm_params']<line_sep>direction,scale,bias=self.get_weightnorm(params)<line_sep>self.assertEqual(direction.shape (3 2 2 4))<line_sep>self.assertEqual(scale.shape (4 ))<line_sep>self.assertEqual(bias.shape (4 ))<line_sep>self.assertEqual(out.shape (1 6 4 4))<line_sep>self.assert_mean_and_variance(out)<block_end><def_stmt>test_conv_transpose_down self<block_start>model=pixelcnn.ConvTransposeDown(features=4)<line_sep>out,variables=model.init_with_output(self.rng self.x)<line_sep>params=variables['params']["ConvWeightNorm_0"]["weightnorm_params"]<line_sep>direction,scale,bias=self.get_weightnorm(params)<line_sep>self.assertEqual(direction.shape (2 3 2 4))<line_sep>self.assertEqual(scale.shape (4 ))<line_sep>self.assertEqual(bias.shape (4 ))<line_sep>self.assertEqual(out.shape (1 8 6 4))<block_end><def_stmt>test_conv_transpose_down_right self<block_start>model=pixelcnn.ConvTransposeDownRight(features=4)<line_sep>out,variables=model.init_with_output(self.rng self.x)<line_sep>params=variables['params']['ConvWeightNorm_0']['weightnorm_params']<line_sep>direction,scale,bias=self.get_weightnorm(params)<line_sep>self.assertEqual(direction.shape (2 2 2 4))<line_sep>self.assertEqual(scale.shape (4 ))<line_sep>self.assertEqual(bias.shape (4 ))<line_sep>self.assertEqual(out.shape (1 8 6 4))<block_end><def_stmt>test_pcnn_shape self<block_start>x=random.normal(self.rng (2 4 4 3))<line_sep>model=pixelcnn.PixelCNNPP(depth=0 features=2 dropout_p=0)<line_sep>out,_=model.init_with_output(self.rng x train=<false>)<line_sep>self.assertEqual(out.shape (2 4 4 
100))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
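The assert_mean_and_variance helper above encodes the weight-norm guarantee that data-dependent init leaves each output feature with zero mean and unit variance over the non-feature axes. A standalone numpy sketch of that property (random data standing in for a module output; none of this comes from the record):

import numpy as np

rng = np.random.default_rng(0)
out = rng.normal(size=(1, 2, 2, 4))                         # (batch, H, W, features)
out = (out - out.mean(axis=(0, 1, 2))) / out.std(axis=(0, 1, 2))
print(out.mean(axis=(0, 1, 2)))                             # ~0 for every feature
print(out.var(axis=(0, 1, 2)))                              # ~1 for every feature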
<import_from_stmt>copy copy<import_from_stmt>django.forms formsets<import_from_stmt>django.contrib messages<import_from_stmt>django.db.models Q<import_from_stmt>django.forms.formsets formset_factory BaseFormSet all_valid<import_from_stmt>detail *<import_from_stmt>edit *<class_stmt>SearchFormViewMixin(BaseFormView)<block_start>ignore_get_keys=("page" )# TODO this should be ignored in search form?
<def_stmt>get_form_kwargs self<block_start>"""Returns the keyword arguments for instantiating the form."""<line_sep>req=self.request<line_sep>kwargs=dict(initial=self.get_initial())<if_stmt>req.method<in>("POST" "PUT")<block_start>kwargs.update(dict(data=req.POST files=req.FILES))<block_end><elif_stmt>req.GET# do get form processing if there's get data that's not in ignore list
<block_start>get=dict((k v)<for>k,v req.GET.items()<if>k<not><in>self.ignore_get_keys)<if_stmt>get<block_start>kwargs=dict(kwargs initial=get data=get)<block_end><block_end><return>kwargs<block_end><def_stmt>form_get self request<block_start>form=self.get_form()<line_sep>context=self.get_context_data(form=form)<if_stmt>self.request.GET<block_start><if_stmt>form.is_valid()<block_start>context.update(self.form_valid(form))<block_end><else_stmt><block_start>context.update(self.form_invalid(form))<block_end><block_end><return>context<block_end><block_end><class_stmt>SearchFormView(FormView SearchFormViewMixin)<block_start>"""FormView for search pages."""<block_end><class_stmt>OwnObjMixin(SingleObjectMixin)<block_start>"""Access object, checking that it belongs to current user."""<line_sep>item_name=<none># used in permissions error message
owner_field="creator"# object's field to compare to current user to check permission
<def_stmt>permission_error self<block_start>name=self.item_name<or>self.object.__class__.__name__<line_sep><return>HttpResponse("You don't have permissions to access this %s."%name)<block_end><def_stmt>validate self obj<block_start><if_stmt>getattr(obj self.owner_field)<eq>self.request.user<block_start><return><true><block_end><block_end><def_stmt>get_object self queryset=<none><block_start>obj=super(OwnObjMixin self).get_object(queryset)<line_sep><return>obj<if>self.validate(obj)<else><none><block_end><block_end><class_stmt>DeleteOwnObjView(OwnObjMixin DeleteView)<block_start>"""Delete object, checking that it belongs to current user."""<block_end><class_stmt>UpdateOwnObjView(OwnObjMixin UpdateView)<block_start>"""Update object, checking that it belongs to current user."""<block_end><class_stmt>UpdateRelatedView(DetailView UpdateView)<block_start>"""Update object related to detail object; create if does not exist."""<line_sep>detail_model=<none><line_sep>form_model=<none><line_sep>fk_attr=<none><line_sep>related_name=<none><def_stmt>get_modelform_object self queryset=<none><block_start>""" Get related object: detail_model.<related_name>
If does not exist, create: form_model.<fk_attr>
"""<line_sep>obj=self.get_detail_object()<line_sep>kwargs={self.fk_attr:obj}<try_stmt><block_start>related_obj=getattr(obj self.related_name)<block_end><except_stmt>self.form_model.DoesNotExist<block_start>related_obj=self.form_model.obj.create(**kwargs)<line_sep>setattr(obj self.related_name related_obj)<block_end><return>related_obj<block_end><block_end><class_stmt>SearchEditFormset(SearchFormView)<block_start>"""Search form filtering a formset of items to be updated."""<line_sep>model=<none><line_sep>formset_class=<none><line_sep>form_class=<none><def_stmt>get_form_class self<block_start><if_stmt>self.request.method<eq>"GET"<block_start><return>self.form_class<block_end><else_stmt><block_start><return>self.formset_class<block_end><block_end><def_stmt>get_queryset self form=<none><block_start><return>self.model.objects.filter(self.get_query(form))<block_end><def_stmt>get_query self form<block_start>"""This method should always be overridden, applying search from the `form`."""<line_sep><return>Q()<block_end><def_stmt>form_valid self form<block_start>formset=<none><if_stmt>self.request.method<eq>"GET"<block_start>formset=self.formset_class(queryset=self.get_queryset(form))<block_end><else_stmt><block_start>form.save()<line_sep>messages.success(self.request "%s(s) were updated successfully"%self.model.__name__.capitalize())<line_sep>formset=form<line_sep>form=self.form_class(self.request.GET)<block_end><return>self.render_to_response(self.get_context_data(form=form formset=formset))<block_end><def_stmt>form_invalid self form<block_start>formset=form<line_sep>form=self.form_class(self.request.GET)<line_sep><return>self.render_to_response(self.get_context_data(form=form formset=formset))<block_end><def_stmt>get self request *args **kwargs<block_start>form=self.get_form()<if_stmt>form.is_bound<block_start><if_stmt>form.is_valid()<block_start><return>self.form_valid(form)<block_end><else_stmt><block_start><return>self.form_invalid(form)<block_end><block_end><return>self.render_to_response(self.get_context_data(form=form))<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>benedict.dicts.parse parse_util<import_stmt>unittest<class_stmt>parse_util_test_case(unittest.TestCase)<block_start><def_stmt>test_parse_bool self<block_start>f=parse_util.parse_bool<line_sep>self.assertTrue(f(1))<line_sep>self.assertTrue(f(<true>))<line_sep>self.assertTrue(f('1'))<line_sep>self.assertTrue(f('True'))<line_sep>self.assertTrue(f('Yes'))<line_sep>self.assertTrue(f('Ok'))<line_sep>self.assertTrue(f('On'))<line_sep>self.assertFalse(f(<none>))<line_sep>self.assertFalse(f(0))<line_sep>self.assertFalse(f(<false>))<line_sep>self.assertFalse(f('0'))<line_sep>self.assertFalse(f('False'))<line_sep>self.assertFalse(f('No'))<line_sep>self.assertFalse(f('Ko'))<line_sep>self.assertFalse(f('Off'))<block_end><def_stmt>test_parse_date self# TODO
<block_start><pass><block_end><def_stmt>test_parse_datetime self# TODO
<block_start><pass><block_end><def_stmt>test_parse_decimal self# TODO
<block_start><pass><block_end><def_stmt>test_parse_dict self# TODO
<block_start><pass><block_end><def_stmt>test_parse_float self# TODO
<block_start><pass><block_end><def_stmt>test_parse_email self# TODO
<block_start><pass><block_end><def_stmt>test_parse_int self# TODO
<block_start><pass><block_end><def_stmt>test_parse_list self<block_start>f=<lambda>value:parse_util.parse_list(value separator=',')<line_sep>self.assertEqual(f(['0' '1' '2' 'Hello World']) ['0' '1' '2' 'Hello World'])<line_sep>self.assertEqual(f('0,1,2') ['0' '1' '2'])<line_sep>self.assertEqual(f('0') ['0'])<line_sep>self.assertEqual(f('1') ['1'])<line_sep>self.assertEqual(f('') <none>)<line_sep>self.assertEqual(f(<none>) <none>)<block_end><def_stmt>test_parse_list_with_valid_json self<block_start>f=<lambda>value:parse_util.parse_list(value separator=<none>)<line_sep>self.assertEqual(f('[0,1,2,3]') [0 1 2 3])<block_end><def_stmt>test_parse_list_with_invalid_json_with_separator self<block_start>f=<lambda>value:parse_util.parse_list(value separator=',')<line_sep>self.assertEqual(f('[a,b,c]') ['[a' 'b' 'c]'])<block_end><def_stmt>test_parse_list_with_invalid_json_without_separator self<block_start>f=<lambda>value:parse_util.parse_list(value separator=<none>)<line_sep>self.assertEqual(f('[a,b,c]') <none>)<block_end><def_stmt>test_parse_phonenumber self# TODO
<block_start><pass><block_end><def_stmt>test_parse_slug self# TODO
<block_start><pass><block_end><def_stmt>test_parse_str self# TODO
<block_start><pass><block_end><def_stmt>test_parse_uuid self# TODO
<block_start><pass><block_end><block_end> |
<import_from_stmt>.environnement Local_env<import_from_stmt>.environnement Live_env<import_from_stmt>.worker Local_Worker<import_from_stmt>.worker Live_Worker<import_from_stmt>.session Local_session<import_from_stmt>.session Live_session<line_sep> |
"""
Support for local Luftdaten sensors.
Copyright (c) 2019 <NAME>
Licensed under MIT. All rights reserved.
https://github.com/lichtteil/local_luftdaten/
"""<line_sep> |
# queens.py
# From Classic Computer Science Problems in Python Chapter 3
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>csp Constraint CSP<import_from_stmt>typing Dict List Optional<class_stmt>QueensConstraint(Constraint[int int])<block_start><def_stmt>__init__ self columns:List[int]<arrow><none><block_start>super().__init__(columns)<line_sep>self.columns:List[int]=columns<block_end><def_stmt>satisfied self assignment:Dict[int int]<arrow>bool# q1c = queen 1 column, q1r = queen 1 row
<block_start><for_stmt>q1c,q1r assignment.items()# q2c = queen 2 column
<block_start><for_stmt>q2c range(q1c+1 len(self.columns)+1)<block_start><if_stmt>q2c<in>assignment<block_start>q2r:int=assignment[q2c]# q2r = queen 2 row
<if_stmt>q1r<eq>q2r# same row?
<block_start><return><false><block_end><if_stmt>abs(q1r-q2r)<eq>abs(q1c-q2c)# same diagonal?
<block_start><return><false><block_end><block_end><block_end><block_end><return><true><block_end><block_end># no conflict
<if_stmt>__name__<eq>"__main__"<block_start>columns:List[int]=[1 2 3 4 5 6 7 8]<line_sep>rows:Dict[int List[int]]={}<for_stmt>column columns<block_start>rows[column]=[1 2 3 4 5 6 7 8]<block_end>csp:CSP[int int]=CSP(columns rows)<line_sep>csp.add_constraint(QueensConstraint(columns))<line_sep>solution:Optional[Dict[int int]]=csp.backtracking_search()<if_stmt>solution<is><none><block_start>print("No solution found!")<block_end><else_stmt><block_start>print(solution)<block_end><block_end> |
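The row and diagonal comments inside QueensConstraint.satisfied above carry the whole conflict rule; a self-contained restatement of that check, with a few spot tests (columns and rows 1-based as in the record):

def queens_conflict(q1c, q1r, q2c, q2r):
    # two queens clash if they share a row or sit on the same diagonal
    return q1r == q2r or abs(q1r - q2r) == abs(q1c - q2c)

assert queens_conflict(1, 1, 3, 3)       # (1,1) vs (3,3): same diagonal
assert queens_conflict(2, 5, 7, 5)       # same row
assert not queens_conflict(1, 1, 2, 3)   # offset by (1,2): no conflict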
<import_stmt>json<as>stdlib_json# Don't conflict with `corehq.util.json`
<import_from_stmt>traceback format_exception_only<import_from_stmt>django.utils.functional Promise<import_from_stmt>.couch get_document_or_404# noqa: F401
<import_from_stmt>.view_utils reverse# noqa: F401
<def_stmt>flatten_list elements<block_start><return>[item<for>sublist elements<for>item sublist]<block_end><def_stmt>flatten_non_iterable_list elements# iterate over the list and check each element, to avoid converting strings to chars
# ['abc'] => ['a', 'b', 'c']
<block_start>items=[]<for_stmt>element elements<block_start><if_stmt>isinstance(element list)<block_start>items.extend(flatten_non_iterable_list(element))<block_end><else_stmt><block_start>items.append(element)<block_end><block_end><return>items<block_end><def_stmt>eval_lazy value<block_start><if_stmt>isinstance(value Promise)<block_start>value=value._proxy____cast()<block_end><return>value<block_end><def_stmt>cmp a b<block_start>"""Comparison function for Python 3
https://stackoverflow.com/a/22490617/10840
"""<line_sep><return>(a<g>b)-(a<l>b)<block_end><def_stmt>as_text value<block_start>"""Safely convert object to text"""<if_stmt>isinstance(value str)<block_start><return>value<block_end><if_stmt>isinstance(value bytes)<block_start><return>value.decode("utf8" errors="backslashreplace")<block_end><if_stmt>isinstance(value BaseException)<block_start>lines=format_exception_only(type(value) value)<line_sep><return>"\n".join(x.rstrip("\n")<for>x lines)<block_end><return>repr(value)<block_end><def_stmt>as_json_text value<block_start><if_stmt>value<is><none><block_start><return>''<block_end><if_stmt>isinstance(value dict)<block_start><try_stmt><block_start><return>stdlib_json.dumps(value indent=2)<block_end><except_stmt>TypeError<block_start><pass><block_end><block_end><return>as_text(value)<block_end> |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
<import_from_future_stmt> absolute_import<import_stmt>distutils<import_stmt>awkward<as>ak<line_sep>np=ak.nplike.NumpyMetadata.instance()<def_stmt>_import_pyarrow name# move this to _util
<block_start><try_stmt><block_start><import_stmt>pyarrow<block_end><except_stmt>ImportError<block_start><raise>ImportError("""to use {0}, you must install pyarrow:
pip install pyarrow
or
conda install -c conda-forge pyarrow
""".format(name))<block_end><else_stmt><block_start><if_stmt>distutils.version.LooseVersion(pyarrow.__version__)<l>distutils.version.LooseVersion("5.0.0")<block_start><raise>ImportError("pyarrow 5.0.0 or later required for {0}".format(name))<block_end><return>pyarrow<block_end><block_end><def_stmt>to_arrow array list_to32=<false> string_to32=<true> bytestring_to32=<true> allow_tensor=<true><block_start><pass><block_end># """
# Args:
# array: Data to convert to an Apache Arrow array.
# list_to32 (bool): If True, convert Awkward lists into 32-bit Arrow lists
# if they're small enough, even if it means an extra conversion. Otherwise,
# signed 32-bit #ak.layout.ListOffsetArray maps to Arrow `ListType` and
# all others map to Arrow `LargeListType`.
# string_to32 (bool): Same as the above for Arrow `string` and `large_string`.
# bytestring_to32 (bool): Same as the above for Arrow `binary` and `large_binary`.
# allow_tensor (bool): If True, convert regular-length lists to `pyarrow.lib.Tensor`;
# otherwise, make `pyarrow.lib.ListArray` (generating offsets). This is used
# by #ak.to_parquet, since Parquet files can't contain regular-length tensors.
# Converts an Awkward Array into an Apache Arrow array.
# This produces arrays of type `pyarrow.Array`. You might need to further
# manipulations (using the pyarrow library) to build a `pyarrow.ChunkedArray`,
# a `pyarrow.RecordBatch`, or a `pyarrow.Table`.
# Arrow arrays can maintain the distinction between "option-type but no elements are
# missing" and "not option-type" at all levels except the top level. Also, there is
# no distinction between `?union[X, Y, Z]]` type and `union[?X, ?Y, ?Z]` type. Be
# aware of these type distinctions when passing data through Arrow or Parquet.
# See also #ak.from_arrow, #ak.to_arrow_table, #ak.to_parquet.
# """
# pyarrow = _import_pyarrow("ak.to_arrow")
# layout = to_layout(array, allow_record=False, allow_other=False)
# def recurse(layout, mask, is_option):
# if isinstance(layout, ak.layout.NumpyArray):
# numpy_arr = numpy.asarray(layout)
# length = len(numpy_arr)
# arrow_type = pyarrow.from_numpy_dtype(numpy_arr.dtype)
# if issubclass(numpy_arr.dtype.type, (bool, np.bool_)):
# if numpy_arr.ndim == 1:
# if len(numpy_arr) % 8 == 0:
# ready_to_pack = numpy_arr
# else:
# ready_to_pack = numpy.empty(
# int(numpy.ceil(len(numpy_arr) / 8.0)) * 8,
# dtype=numpy_arr.dtype,
# )
# ready_to_pack[: len(numpy_arr)] = numpy_arr
# ready_to_pack[len(numpy_arr) :] = 0
# numpy_arr = numpy.packbits(
# ready_to_pack.reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# else:
# return recurse(
# from_numpy(numpy_arr, regulararray=True, highlevel=False),
# mask,
# is_option,
# )
# if numpy_arr.ndim == 1:
# if mask is not None:
# return pyarrow.Array.from_buffers(
# arrow_type,
# length,
# [pyarrow.py_buffer(mask), pyarrow.py_buffer(numpy_arr)],
# )
# else:
# return pyarrow.Array.from_buffers(
# arrow_type, length, [None, pyarrow.py_buffer(numpy_arr)]
# )
# elif allow_tensor:
# return pyarrow.Tensor.from_numpy(numpy_arr)
# else:
# return recurse(
# from_numpy(numpy_arr, regulararray=True, highlevel=False),
# mask,
# is_option,
# )
# elif isinstance(layout, ak.layout.EmptyArray):
# return pyarrow.Array.from_buffers(pyarrow.null(), 0, [None])
# elif isinstance(layout, ak.layout.ListOffsetArray32):
# offsets = numpy.asarray(layout.offsets, dtype=np.int32)
# if layout.parameter("__array__") == "bytestring":
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.binary(),
# len(offsets) - 1,
# [
# None,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.binary(),
# len(offsets) - 1,
# [
# pyarrow.py_buffer(mask),
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# return arrow_arr
# if layout.parameter("__array__") == "string":
# if mask is None:
# arrow_arr = pyarrow.StringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# )
# else:
# arrow_arr = pyarrow.StringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# pyarrow.py_buffer(mask),
# )
# return arrow_arr
# content_buffer = recurse(layout.content[: offsets[-1]], None, False)
# content_type = pyarrow.list_(content_buffer.type).value_field.with_nullable(
# isinstance(
# ak.operations.describe.type(layout.content), ak.types.OptionType
# )
# )
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.list_(content_type),
# len(offsets) - 1,
# [None, pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.list_(content_type),
# len(offsets) - 1,
# [pyarrow.py_buffer(mask), pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# return arrow_arr
# elif isinstance(
# layout,
# (ak.layout.ListOffsetArray64, ak.layout.ListOffsetArrayU32),
# ):
# if layout.parameter("__array__") == "bytestring":
# downsize = bytestring_to32
# elif layout.parameter("__array__") == "string":
# downsize = string_to32
# else:
# downsize = list_to32
# offsets = numpy.asarray(layout.offsets)
# if downsize and offsets[-1] <= np.iinfo(np.int32).max:
# small_layout = ak.layout.ListOffsetArray32(
# ak.layout.Index32(offsets.astype(np.int32)),
# layout.content,
# parameters=layout.parameters,
# )
# return recurse(small_layout, mask, is_option)
# offsets = numpy.asarray(layout.offsets, dtype=np.int64)
# if layout.parameter("__array__") == "bytestring":
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_binary(),
# len(offsets) - 1,
# [
# None,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_binary(),
# len(offsets) - 1,
# [
# pyarrow.py_buffer(mask),
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# ],
# children=[],
# )
# return arrow_arr
# if layout.parameter("__array__") == "string":
# if mask is None:
# arrow_arr = pyarrow.LargeStringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# )
# else:
# arrow_arr = pyarrow.LargeStringArray.from_buffers(
# len(offsets) - 1,
# pyarrow.py_buffer(offsets),
# pyarrow.py_buffer(layout.content),
# pyarrow.py_buffer(mask),
# )
# return arrow_arr
# content_buffer = recurse(layout.content[: offsets[-1]], None, False)
# content_type = pyarrow.list_(content_buffer.type).value_field.with_nullable(
# isinstance(
# ak.operations.describe.type(layout.content), ak.types.OptionType
# )
# )
# if mask is None:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_list(content_type),
# len(offsets) - 1,
# [None, pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# else:
# arrow_arr = pyarrow.Array.from_buffers(
# pyarrow.large_list(content_type),
# len(offsets) - 1,
# [pyarrow.py_buffer(mask), pyarrow.py_buffer(offsets)],
# children=[content_buffer],
# )
# return arrow_arr
# elif isinstance(layout, ak.layout.RegularArray):
# return recurse(
# layout.broadcast_tooffsets64(layout.compact_offsets64()),
# mask,
# is_option,
# )
# elif isinstance(
# layout,
# (
# ak.layout.ListArray32,
# ak.layout.ListArrayU32,
# ak.layout.ListArray64,
# ),
# ):
# return recurse(
# layout.broadcast_tooffsets64(layout.compact_offsets64()),
# mask,
# is_option,
# )
# elif isinstance(layout, ak.layout.RecordArray):
# values = [
# recurse(x[: len(layout)], mask, is_option) for x in layout.contents
# ]
# min_list_len = min(map(len, values))
# types = pyarrow.struct(
# [
# pyarrow.field(layout.key(i), values[i].type).with_nullable(
# isinstance(ak.operations.describe.type(x), ak.types.OptionType)
# )
# for i, x in enumerate(layout.contents)
# ]
# )
# if mask is not None:
# return pyarrow.Array.from_buffers(
# types, min_list_len, [pyarrow.py_buffer(mask)], children=values
# )
# else:
# return pyarrow.Array.from_buffers(
# types, min_list_len, [None], children=values
# )
# elif isinstance(
# layout,
# (
# ak.layout.UnionArray8_32,
# ak.layout.UnionArray8_64,
# ak.layout.UnionArray8_U32,
# ),
# ):
# tags = numpy.asarray(layout.tags)
# index = numpy.asarray(layout.index)
# copied_index = False
# if mask is not None:
# bytemask = (
# numpy.unpackbits(mask)
# .reshape(-1, 8)[:, ::-1]
# .reshape(-1)
# .view(np.bool_)
# )[: len(tags)]
# values = []
# for tag, content in enumerate(layout.contents):
# selected_tags = tags == tag
# this_index = index[selected_tags]
# if mask is not None:
# length = int(numpy.ceil(len(this_index) / 8.0)) * 8
# if len(numpy.unique(this_index)) == len(this_index):
# this_bytemask = numpy.zeros(length, dtype=np.uint8)
# this_bytemask[this_index] = bytemask[selected_tags]
# else:
# this_bytemask = numpy.empty(length, dtype=np.uint8)
# this_bytemask[: len(this_index)] = bytemask[selected_tags]
# this_bytemask[len(this_index) :] = 0
# content = content[this_index]
# this_index = numpy.arange(len(this_index))
# if not copied_index:
# copied_index = True
# index = numpy.array(index, copy=True)
# index[selected_tags] = this_index
# this_mask = numpy.packbits(
# this_bytemask.reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# else:
# this_mask = None
# values.append(recurse(content, this_mask, is_option))
# types = pyarrow.union(
# [
# pyarrow.field(str(i), values[i].type).with_nullable(
# is_option
# or isinstance(layout.content(i).type, ak.types.OptionType)
# )
# for i in range(len(values))
# ],
# "dense",
# list(range(len(values))),
# )
# return pyarrow.Array.from_buffers(
# types,
# len(layout.tags),
# [
# None,
# pyarrow.py_buffer(tags),
# pyarrow.py_buffer(index.astype(np.int32)),
# ],
# children=values,
# )
# elif isinstance(
# layout,
# (
# ak.layout.IndexedArray32,
# ak.layout.IndexedArrayU32,
# ak.layout.IndexedArray64,
# ),
# ):
# index = numpy.asarray(layout.index)
# if layout.parameter("__array__") == "categorical":
# dictionary = recurse(layout.content, None, False)
# if mask is None:
# return pyarrow.DictionaryArray.from_arrays(index, dictionary)
# else:
# bytemask = (
# numpy.unpackbits(~mask)
# .reshape(-1, 8)[:, ::-1]
# .reshape(-1)
# .view(np.bool_)
# )[: len(index)]
# return pyarrow.DictionaryArray.from_arrays(
# index, dictionary, bytemask
# )
# else:
# layout_content = layout.content
# if len(layout_content) == 0:
# empty = recurse(layout_content, None, False)
# if mask is None:
# return empty
# else:
# return pyarrow.array([None] * len(index)).cast(empty.type)
# elif isinstance(layout_content, ak.layout.RecordArray):
# values = [
# recurse(x[: len(layout_content)][index], mask, is_option)
# for x in layout_content.contents
# ]
# min_list_len = min(map(len, values))
# types = pyarrow.struct(
# [
# pyarrow.field(
# layout_content.key(i), values[i].type
# ).with_nullable(
# isinstance(
# ak.operations.describe.type(x), ak.types.OptionType
# )
# )
# for i, x in enumerate(layout_content.contents)
# ]
# )
# if mask is not None:
# return pyarrow.Array.from_buffers(
# types,
# min_list_len,
# [pyarrow.py_buffer(mask)],
# children=values,
# )
# else:
# return pyarrow.Array.from_buffers(
# types, min_list_len, [None], children=values
# )
# else:
# return recurse(layout_content[index], mask, is_option)
# elif isinstance(
# layout,
# (ak.layout.IndexedOptionArray32, ak.layout.IndexedOptionArray64),
# ):
# index = numpy.array(layout.index, copy=True)
# nulls = index < 0
# index[nulls] = 0
# if layout.parameter("__array__") == "categorical":
# dictionary = recurse(layout.content, None, False)
# if mask is None:
# bytemask = nulls
# else:
# bytemask = (
# numpy.unpackbits(~mask)
# .reshape(-1, 8)[:, ::-1]
# .reshape(-1)
# .view(np.bool_)
# )[: len(index)]
# bytemask[nulls] = True
# return pyarrow.DictionaryArray.from_arrays(index, dictionary, bytemask)
# else:
# if len(nulls) % 8 == 0:
# this_bytemask = (~nulls).view(np.uint8)
# else:
# length = int(numpy.ceil(len(nulls) / 8.0)) * 8
# this_bytemask = numpy.empty(length, dtype=np.uint8)
# this_bytemask[: len(nulls)] = ~nulls
# this_bytemask[len(nulls) :] = 0
# this_bitmask = numpy.packbits(
# this_bytemask.reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# if isinstance(layout, ak.layout.IndexedOptionArray32):
# next = ak.layout.IndexedArray32(
# ak.layout.Index32(index), layout.content
# )
# else:
# next = ak.layout.IndexedArray64(
# ak.layout.Index64(index), layout.content
# )
# if mask is None:
# return recurse(next, this_bitmask, True)
# else:
# return recurse(next, mask & this_bitmask, True)
# elif isinstance(layout, ak.layout.BitMaskedArray):
# bitmask = numpy.asarray(layout.mask, dtype=np.uint8)
# if layout.lsb_order is False:
# bitmask = numpy.packbits(
# numpy.unpackbits(bitmask).reshape(-1, 8)[:, ::-1].reshape(-1)
# )
# if layout.valid_when is False:
# bitmask = ~bitmask
# return recurse(layout.content[: len(layout)], bitmask, True).slice(
# length=min(len(bitmask) * 8, len(layout.content))
# )
# elif isinstance(layout, ak.layout.ByteMaskedArray):
# mask = numpy.asarray(layout.mask, dtype=np.bool_) == layout.valid_when
# bytemask = numpy.zeros(
# 8 * math.ceil(len(layout.content) / 8), dtype=np.bool_
# )
# bytemask[: len(mask)] = mask
# bytemask[len(mask) :] = 0
# bitmask = numpy.packbits(bytemask.reshape(-1, 8)[:, ::-1].reshape(-1))
# return recurse(layout.content[: len(layout)], bitmask, True).slice(
# length=len(mask)
# )
# elif isinstance(layout, (ak.layout.UnmaskedArray)):
# return recurse(layout.content, None, True)
# elif isinstance(layout, (ak.layout.VirtualArray)):
# return recurse(layout.array, None, False)
# elif isinstance(layout, (ak.partition.PartitionedArray)):
# return pyarrow.chunked_array(
# [recurse(x, None, False) for x in layout.partitions]
# )
# else:
# raise TypeError(
# "unrecognized array type: {0}".format(repr(layout))
# + ak._util.exception_suffix(__file__)
# )
# return recurse(layout, None, False)
|
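The commented-out docstring above describes the intended ak.to_arrow conversion even though the body in this record is stubbed to pass; a minimal usage sketch, assuming a released awkward 1.x build where to_arrow is implemented and pyarrow >= 5.0.0 is installed:

import awkward as ak

array = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])
arrow_array = ak.to_arrow(array)            # keyword defaults match the stub's signature
print(type(arrow_array), arrow_array.type)  # a pyarrow list-typed Array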
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>deployd.types.deploy_stage DeployStage<import_from_stmt>deployd.types.agent_status AgentStatus<class_stmt>PingReport(object)<block_start><def_stmt>__init__ self jsonValue=<none><block_start>self.deployId=<none><line_sep>self.envId=<none><line_sep>self.envName=<none><line_sep>self.stageName=<none><line_sep>self.deployStage=<none><line_sep>self.status=<none><line_sep>self.errorCode=0<line_sep>self.errorMessage=<none><line_sep>self.failCount=0<line_sep>self.extraInfo=<none><line_sep>self.deployAlias=<none><if_stmt>jsonValue<block_start>self.deployId=jsonValue.get('deployId')<line_sep>self.envId=jsonValue.get('envId')<if_stmt>isinstance(jsonValue.get('deployStage') int)<block_start>self.deployStage=DeployStage._VALUES_TO_NAMES[jsonValue.get('deployStage')]<block_end><else_stmt><block_start>self.deployStage=jsonValue.get('deployStage')<block_end><if_stmt>isinstance(jsonValue.get('status') int)<block_start>self.status=AgentStatus._VALUES_TO_NAMES[jsonValue.get('status')]<block_end><else_stmt><block_start>self.status=jsonValue.get('status')<block_end>self.envName=jsonValue.get('envName')<line_sep>self.stageName=jsonValue.get('stageName')<line_sep>self.errorCode=jsonValue.get('errorCode')<line_sep>self.errorMessage=jsonValue.get('errorMessage')<line_sep>self.failCount=jsonValue.get('failCount')<line_sep>self.extraInfo=jsonValue.get('extraInfo')<line_sep>self.deployAlias=jsonValue.get('deployAlias')<block_end><block_end><def_stmt>__str__ self<block_start><return>"PingReport(deployId={}, envId={}, deployStage={}, status={}, "<concat>"errorCode={}), errorMessage={}, failCount={}, extraInfo={}, "<concat>"deployAlias={})".format(self.deployId self.envId self.deployStage self.status self.errorCode self.errorMessage self.failCount self.extraInfo self.deployAlias )<block_end><block_end> |
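A minimal construction sketch for PingReport, with made-up field values (the stage and status strings are illustrative, not taken from the record); string values are passed through as-is and skip the int-to-name enum lookups:

payload = {
    "deployId": "d-123",
    "envId": "e-1",
    "envName": "myenv",
    "stageName": "prod",
    "deployStage": "SERVING_BUILD",   # hypothetical stage name
    "status": "SUCCEEDED",            # hypothetical status name
    "errorCode": 0,
    "failCount": 0,
}
report = PingReport(jsonValue=payload)
print(report)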