# -*- coding: utf-8 -*-

#   Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import math

import pytest

import bayeslite.stats as stats

from bayeslite.math_util import relerr


def abserr(expected, actual):
    """Computes the absolute error between `expected` and `actual`.

    :param float expected: The expected value.
    :param float actual: The actual value.

    :return: ``abs(actual-expected)``
    :rtype: float
    """
    return abs(actual - expected)


def test_pearsonr():
    assert math.isnan(stats.pearsonr([], []))
    assert stats.pearsonr([1, 2, 3], [2, 4, 6]) == +1.0
    assert stats.pearsonr([1, 2, 3], [-2, -4, -6]) == -1.0
    assert stats.pearsonr([1, 2, 3], [6, 4, 2]) == -1.0
    assert stats.pearsonr([1, 2, 3], [+1, -1, +1]) == 0.0


def test_chi2_contingency():
    assert stats.chi2_contingency([[42]]) == 0.
    assert relerr(7.66, stats.chi2_contingency([[4, 2, 3], [3, 16, 2]])) < 0.01


def test_f_oneway():
    data = [[6, 8, 4, 5, 3, 4], [8, 12, 9, 11, 6, 8], [13, 9, 11, 8, 7, 12]]
    assert relerr(9.3, stats.f_oneway(data)) < 0.01


def test_chi2_sf():
    # Non-positive degrees of freedom should throw an error.
    with pytest.raises(ValueError):
        stats.chi2_sf(0, 0)
    with pytest.raises(ValueError):
        stats.chi2_sf(2, -10)

    # Survival of x = 0 should be 1.
    assert relerr(1., stats.chi2_sf(0, 12)) < .05
    assert relerr(1., stats.chi2_sf(0, 6)) < .05
    assert relerr(1., stats.chi2_sf(0, 130)) < .05

    # Test x < 1, x >= df against reference values.
    assert relerr(.0357175, stats.chi2_sf(.8, .1)) < .05
    assert relerr(.2730426, stats.chi2_sf(.6, .6)) < .05
    assert relerr(.0602823, stats.chi2_sf(.1, .05)) < .05

    # Test x >= 1, x <= df against reference values.
    assert relerr(.7029304, stats.chi2_sf(9, 12)) < .05
    assert relerr(.5934191, stats.chi2_sf(1.9, 3)) < .05
    assert relerr(.9238371, stats.chi2_sf(1, 4.2)) < .05

    # Test x >= 1, x > df against reference values.
    assert relerr(.3325939, stats.chi2_sf(8, 7)) < .05
    assert relerr(.0482861, stats.chi2_sf(3.9, 1)) < .05
    assert relerr(.3464377e-4, stats.chi2_sf(193, 121)) < .05


def test_f_sf():
    # Non-positive degrees of freedom should throw an error.
    with pytest.raises(ValueError):
        stats.f_sf(0, 0, 0)
    with pytest.raises(ValueError):
        stats.f_sf(2, -10, 0)
    with pytest.raises(ValueError):
        stats.f_sf(2, 0, -10)
    with pytest.raises(ValueError):
        stats.f_sf(2, -1, 1)
    with pytest.raises(ValueError):
        stats.f_sf(2, 1, -1)

    # Survival of x = 0 should be 1.
    assert relerr(1, stats.f_sf(0, 1, 12)) < .05
    assert relerr(1, stats.f_sf(0, 6, 0.5)) < .05
    assert relerr(1, stats.f_sf(0, 130, 121)) < .05

    # Survival of x < 0 should be 1.
    assert relerr(1, stats.f_sf(-1, 1, 12)) < .05
    assert relerr(1, stats.f_sf(-100, 6, 0.5)) < .05
    assert relerr(1, stats.f_sf(-0.02, 130, 121)) < .05

    # Test against reference values.
    assert relerr(.5173903, stats.f_sf(1, 12, 8)) < .05
    assert relerr(.2618860, stats.f_sf(1.9, 1, 3)) < .05
    assert relerr(.5000000, stats.f_sf(1, 100, 100)) < .05
    assert relerr(.1781364, stats.f_sf(19, 14, 1)) < .05
    assert relerr(.7306588, stats.f_sf(0.76, 23, 15)) < .05
    assert relerr(.0602978, stats.f_sf(4.3, 1, 12)) < .05
    assert relerr(.5590169, stats.f_sf(1.1, 2, 1)) < .05
    assert relerr(.1111111, stats.f_sf(8, 2, 2)) < .05
    assert relerr(.9999999, stats.f_sf(0.2, 432, 123)) < .05
    assert relerr(.9452528, stats.f_sf(0.8, 432, 123)) < .05
    assert relerr(.0434186, stats.f_sf(10, 5, 3)) < .05

    # Test against reference very close to zero.
    assert abserr(.0158130, stats.f_sf(11, 19, 4)) < .01
    assert abserr(.0022310, stats.f_sf(14, 9, 6)) < .01
    assert abserr(.1458691e-112, stats.f_sf(200, 432, 123)) < .01
    assert abserr(.2489256e-13, stats.f_sf(29, 23, 29)) < .01
    assert abserr(.1656276e-06, stats.f_sf(31, 11, 13)) < .01
    assert abserr(.6424023e-5, stats.f_sf(18, 14, 12)) < .01


def test_t_cdf():
    # Non-positive degrees of freedom should throw an error.
    with pytest.raises(ValueError):
        stats.t_cdf(0, 0)
    with pytest.raises(ValueError):
        stats.t_cdf(2, -10)

    # CDF of x = 0 should be 0.5.
    assert relerr(.5, stats.t_cdf(0, 12)) < .01
    assert relerr(.5, stats.t_cdf(0, 6)) < .01
    assert relerr(.5, stats.t_cdf(0, 130)) < .01

    # Test against various reference values.
    assert relerr(.57484842931039226, stats.t_cdf(.8, .1)) < .05
    assert relerr(.64922051214061649, stats.t_cdf(.6, .6)) < .05
    assert relerr(.51046281131211058, stats.t_cdf(.1, .05)) < .05
    assert relerr(.99999944795492968, stats.t_cdf(9, 12)) < .05
    assert relerr(.92318422834700042, stats.t_cdf(1.9, 3)) < .05
    assert relerr(.81430689864299455, stats.t_cdf(1, 4.2)) < .05
    assert relerr(.99995442539414559, stats.t_cdf(8, 7)) < .05
    assert relerr(.92010336338282994, stats.t_cdf(3.9, 1)) < .05
    assert relerr(1.0, stats.t_cdf(193, 121)) < .05
    assert relerr(.42515157068960779, stats.t_cdf(-.8, .1)) < .05
    assert relerr(.35077948785938345, stats.t_cdf(-.6, .6)) < .05
    assert relerr(.48953718868788948, stats.t_cdf(-.1, .05)) < .05
    assert relerr(.076815771652999562, stats.t_cdf(-1.9, 3)) < .05
    assert relerr(.18569310135700545, stats.t_cdf(-1, 4.2)) < .05
    assert relerr(.17530833141010374, stats.t_cdf(-1, 7)) < .05
    assert relerr(.079896636617170003, stats.t_cdf(-3.9, 1)) < .05
    assert relerr(.30899158341328747, stats.t_cdf(-0.5, 121)) < .05

    # Test against reference very close to zero.
    # XXX Why are we testing chi2_sf here?
    assert relerr(.346437e-4, stats.chi2_sf(193, 121)) < .01


def test_gauss_suff_stats():
    # High mean, tiny variance would lead to catastrophic cancellation
    # in a naive implementation that maintained the sum of squares.
    big = 400
    small = 0.0000001
    data = [big - small, big, big + small]
    true_sigma = math.sqrt(2 * small**2 / 3)
    (ct, mean, sigma) = stats.gauss_suff_stats(data)
    assert ct == 3
    assert mean == big
    assert relerr(true_sigma, sigma) < 1e-5
from jesse.strategies import Strategy


# test_average_take_profit_exception
class Test38(Strategy):
    def should_long(self) -> bool:
        return self.index == 0

    def should_short(self) -> bool:
        return False

    def go_long(self):
        qty = 1
        self.buy = qty, 2
        self.stop_loss = qty, 1

    def go_short(self):
        pass

    def should_cancel(self):
        return False

    def filters(self):
        return [self.filter_1]

    def filter_1(self):
        # trying to access average_take_profit without setting it first
        return self.average_take_profit > 1
import random
from typing import Union, Tuple, Any, Dict

import cv2
import numpy as np
from skimage.measure import label

from ...core.transforms_interface import DualTransform
from ...core.transforms_interface import to_tuple

__all__ = ["MaskDropout"]


class MaskDropout(DualTransform):
    """
    Image & mask augmentation that zeroes out mask and image regions corresponding
    to a randomly chosen object instance from the mask.

    Mask must be a single-channel image; zero values are treated as background.
    Image can have any number of channels.

    Inspired by https://www.kaggle.com/c/severstal-steel-defect-detection/discussion/114254

    Args:
        max_objects: Maximum number of labels that can be zeroed out. Can be a tuple, in which case it's [min, max].
        image_fill_value: Fill value to use when filling the image.
            Can be 'inpaint' to apply inpainting (works only for 3-channel images).
        mask_fill_value: Fill value to use when filling the mask.

    Targets:
        image, mask

    Image types:
        uint8, float32
    """

    def __init__(
        self,
        max_objects: int = 1,
        image_fill_value: Union[int, float, str] = 0,
        mask_fill_value: Union[int, float] = 0,
        always_apply: bool = False,
        p: float = 0.5,
    ):
        super(MaskDropout, self).__init__(always_apply, p)
        self.max_objects = to_tuple(max_objects, 1)
        self.image_fill_value = image_fill_value
        self.mask_fill_value = mask_fill_value

    @property
    def targets_as_params(self):
        return ["mask"]

    def get_params_dependent_on_targets(self, params) -> Dict[str, Any]:
        mask = params["mask"]

        label_image, num_labels = label(mask, return_num=True)

        if num_labels == 0:
            dropout_mask = None
        else:
            objects_to_drop = random.randint(self.max_objects[0], self.max_objects[1])
            objects_to_drop = min(num_labels, objects_to_drop)

            if objects_to_drop == num_labels:
                dropout_mask = mask > 0
            else:
                labels_index = random.sample(range(1, num_labels + 1), objects_to_drop)
                dropout_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=bool)
                for label_index in labels_index:
                    dropout_mask |= label_image == label_index

        params.update({"dropout_mask": dropout_mask})
        return params

    def apply(self, img: np.ndarray, dropout_mask: np.ndarray = None, **params) -> np.ndarray:
        if dropout_mask is None:
            return img

        if self.image_fill_value == "inpaint":
            dropout_mask = dropout_mask.astype(np.uint8)
            _, _, w, h = cv2.boundingRect(dropout_mask)
            radius = min(3, max(w, h) // 2)
            img = cv2.inpaint(img, dropout_mask, radius, cv2.INPAINT_NS)
        else:
            img = img.copy()
            img[dropout_mask] = self.image_fill_value

        return img

    def apply_to_mask(self, img: np.ndarray, dropout_mask: np.ndarray = None, **params) -> np.ndarray:
        if dropout_mask is None:
            return img

        img = img.copy()
        img[dropout_mask] = self.mask_fill_value
        return img

    def get_transform_init_args_names(self) -> Tuple[str, ...]:
        return "max_objects", "image_fill_value", "mask_fill_value"
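# A minimal usage sketch for the transform above, assuming it is exposed as
# albumentations.MaskDropout; the image/mask arrays are synthetic placeholders.
import albumentations as A
import numpy as np

augment = A.Compose([A.MaskDropout(max_objects=2, image_fill_value=0, mask_fill_value=0, p=1.0)])

image = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[5:15, 5:15] = 1    # one object instance; zero stays background
mask[30:40, 30:40] = 2  # a second instance

out = augment(image=image, mask=mask)
dropped_image, dropped_mask = out["image"], out["mask"]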
# coding:utf-8
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
from torch.nn import utils as nn_utils
from .embedding import WordEmbedding, EntityEmbedding, use_cuda, VERY_SMALL_NUMBER, VERY_NEG_NUMBER


class CentralEncoder(nn.Module):
    def __init__(self, config, gnn_layers, embed_units, trans_units, word_embedding, entity_embedding):
        super(CentralEncoder, self).__init__()
        self.k = 2 + 1
        self.gnn_layers = gnn_layers
        self.WordEmbedding = word_embedding
        self.EntityEmbedding = entity_embedding
        self.embed_units = embed_units
        self.trans_units = trans_units
        self.pagerank_lambda = config.pagerank_lambda
        self.fact_scale = config.fact_scale

        self.node_encoder = nn.LSTM(input_size=self.embed_units, hidden_size=self.trans_units,
                                    batch_first=True, bidirectional=False)
        self.lstm_drop = nn.Dropout(p=config.lstm_dropout)
        self.softmax_d1 = nn.Softmax(dim=1)
        self.linear_drop = nn.Dropout(p=config.linear_dropout)
        self.relu = nn.ReLU()

        for i in range(self.gnn_layers):
            self.add_module('q2e_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
            self.add_module('d2e_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
            self.add_module('e2q_linear' + str(i), nn.Linear(in_features=self.k * self.trans_units, out_features=self.trans_units))
            self.add_module('e2d_linear' + str(i), nn.Linear(in_features=self.k * self.trans_units, out_features=self.trans_units))
            self.add_module('e2e_linear' + str(i), nn.Linear(in_features=self.k * self.trans_units, out_features=self.trans_units))

            # use kb
            self.add_module('kb_head_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
            self.add_module('kb_tail_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))
            self.add_module('kb_self_linear' + str(i), nn.Linear(in_features=self.trans_units, out_features=self.trans_units))

    def forward(self, batch_size, max_local_entity, max_fact, query_text, local_entity,
                q2e_adj_mat, kb_adj_mat, kb_fact_rel, query_mask):
        # normalized adj matrix
        pagerank_f = use_cuda(Variable(torch.from_numpy(q2e_adj_mat).type('torch.FloatTensor'), requires_grad=True))
        q2e_adj_mat = use_cuda(Variable(torch.from_numpy(q2e_adj_mat).type('torch.FloatTensor'), requires_grad=False))
        assert pagerank_f.requires_grad == True

        # encode query
        query_word_emb = self.WordEmbedding(query_text)
        query_hidden_emb, (query_node_emb, _) = self.node_encoder(
            self.lstm_drop(query_word_emb), self.init_hidden(1, batch_size, self.trans_units))
        query_node_emb = query_node_emb.squeeze(dim=0).unsqueeze(dim=1)
        query_rel_emb = query_node_emb

        # build kb_adj_matrix from sparse matrix
        (e2f_batch, e2f_f, e2f_e, e2f_val), (f2e_batch, f2e_e, f2e_f, f2e_val) = kb_adj_mat
        entity2fact_index = torch.LongTensor([e2f_batch, e2f_f, e2f_e])
        entity2fact_val = torch.FloatTensor(e2f_val)
        entity2fact_mat = use_cuda(torch.sparse.FloatTensor(
            entity2fact_index, entity2fact_val, torch.Size([batch_size, max_fact, max_local_entity])))

        fact2entity_index = torch.LongTensor([f2e_batch, f2e_e, f2e_f])
        fact2entity_val = torch.FloatTensor(f2e_val)
        fact2entity_mat = use_cuda(torch.sparse.FloatTensor(
            fact2entity_index, fact2entity_val, torch.Size([batch_size, max_local_entity, max_fact])))

        local_fact_emb = self.EntityEmbedding(kb_fact_rel)

        # attention fact2question
        div = float(np.sqrt(self.trans_units))
        fact2query_sim = torch.bmm(query_hidden_emb, local_fact_emb.transpose(1, 2)) / div
        fact2query_sim = self.softmax_d1(fact2query_sim + (1 - query_mask.unsqueeze(dim=2)) * VERY_NEG_NUMBER)
        fact2query_att = torch.sum(fact2query_sim.unsqueeze(dim=3) * query_hidden_emb.unsqueeze(dim=2), dim=1)

        W = torch.sum(fact2query_att * local_fact_emb, dim=2) / div
        W_max = torch.max(W, dim=1, keepdim=True)[0]
        W_tilde = torch.exp(W - W_max)
        e2f_softmax = self.sparse_bmm(entity2fact_mat.transpose(1, 2), W_tilde.unsqueeze(dim=2)).squeeze(dim=2)
        e2f_softmax = torch.clamp(e2f_softmax, min=VERY_SMALL_NUMBER)
        e2f_out_dim = use_cuda(Variable(torch.sum(entity2fact_mat.to_dense(), dim=1), requires_grad=False))

        # load entity embedding
        local_entity_emb = self.EntityEmbedding(local_entity)

        # label propagation on entities
        for i in range(self.gnn_layers):
            # get linear transformation functions for each layer
            q2e_linear = getattr(self, 'q2e_linear' + str(i))
            d2e_linear = getattr(self, 'd2e_linear' + str(i))
            e2q_linear = getattr(self, 'e2q_linear' + str(i))
            e2d_linear = getattr(self, 'e2d_linear' + str(i))
            e2e_linear = getattr(self, 'e2e_linear' + str(i))
            kb_self_linear = getattr(self, 'kb_self_linear' + str(i))
            kb_head_linear = getattr(self, 'kb_head_linear' + str(i))
            kb_tail_linear = getattr(self, 'kb_tail_linear' + str(i))

            # start propagation
            next_local_entity_emb = local_entity_emb

            # STEP 1: propagate from question, documents, and facts to entities
            # question -> entity
            q2e_emb = q2e_linear(self.linear_drop(query_node_emb)).expand(batch_size, max_local_entity, self.trans_units)
            next_local_entity_emb = torch.cat((next_local_entity_emb, q2e_emb), dim=2)

            # fact -> entity
            e2f_emb = self.relu(kb_self_linear(local_fact_emb) +
                                self.sparse_bmm(entity2fact_mat, kb_head_linear(self.linear_drop(local_entity_emb))))
            e2f_softmax_normalized = W_tilde.unsqueeze(dim=2) * self.sparse_bmm(
                entity2fact_mat, (pagerank_f / e2f_softmax).unsqueeze(dim=2))
            e2f_emb = e2f_emb * e2f_softmax_normalized
            f2e_emb = self.relu(kb_self_linear(local_entity_emb) +
                                self.sparse_bmm(fact2entity_mat, kb_tail_linear(self.linear_drop(e2f_emb))))

            pagerank_f = self.pagerank_lambda * self.sparse_bmm(fact2entity_mat, e2f_softmax_normalized).squeeze(dim=2) + \
                (1 - self.pagerank_lambda) * pagerank_f

            # STEP 2: combine embeddings from fact
            next_local_entity_emb = torch.cat((next_local_entity_emb, self.fact_scale * f2e_emb), dim=2)

            # STEP 3: propagate from entities to update question, documents, and facts
            # entity -> query
            query_node_emb = torch.bmm(pagerank_f.unsqueeze(dim=1), e2q_linear(self.linear_drop(next_local_entity_emb)))

            # update entity
            local_entity_emb = self.relu(e2e_linear(self.linear_drop(next_local_entity_emb)))

        return local_entity_emb

    def init_hidden(self, num_layer, batch_size, hidden_size):
        return (use_cuda(Variable(torch.zeros(num_layer, batch_size, hidden_size))),
                use_cuda(Variable(torch.zeros(num_layer, batch_size, hidden_size))))

    def sparse_bmm(self, X, Y):
        """Batch multiply X and Y where X is sparse, Y is dense.

        Args:
            X: Sparse tensor of size BxMxN.  Consists of two tensors,
                I:3xZ indices, and V:1xZ values.
            Y: Dense tensor of size BxNxK.

        Returns:
            batched-matmul(X, Y): BxMxK
        """
        class LeftMMFixed(torch.autograd.Function):
            """
            Implementation of matrix multiplication of a Sparse Variable with a Dense Variable,
            returning a Dense one. This is added because there's no autograd for sparse yet.
            No gradient computed on the sparse weights.
            """
            @staticmethod
            def forward(ctx, sparse_weights, x):
                ctx.sparse_weights = sparse_weights
                return torch.mm(ctx.sparse_weights, x)

            @staticmethod
            def backward(ctx, grad_output):
                sparse_weights = ctx.sparse_weights
                return None, torch.mm(sparse_weights.t(), grad_output)

        I = X._indices()
        V = X._values()
        B, M, N = X.size()
        _, _, K = Y.size()
        Z = I.size()[1]
        lookup = Y[I[0, :], I[2, :], :]
        X_I = torch.stack((I[0, :] * M + I[1, :], use_cuda(torch.arange(Z).type(torch.LongTensor))), 0)
        S = use_cuda(Variable(torch.sparse.FloatTensor(X_I, V, torch.Size([B * M, Z])), requires_grad=False))
        prod = LeftMMFixed.apply(S, lookup)
        return prod.view(B, M, K)
#
# (c) FFRI Security, Inc., 2021 / Author: FFRI Security, Inc.
#
import mmap
import os
from ctypes import Structure, c_uint32, c_uint64, sizeof
from typing import Iterable, Optional, cast

import typer

app = typer.Typer()

AOT_SHARED_CACHE_MAGIC = 0x6568636143746F41


def show_err(msg: str) -> None:
    typer.secho(msg, err=True, fg=typer.colors.RED)


def show_warn(msg: str) -> None:
    typer.secho(msg, err=True, fg=typer.colors.YELLOW)


def show_log(msg: str) -> None:
    typer.secho(msg, err=True, fg=typer.colors.GREEN)


class AotMappingInfo(Structure):
    """
    struct AotMappingInfo {
        uint64_t address;
        uint64_t size;
        uint64_t file_offset;
        uint32_t init_prot;
        uint32_t max_prot;
    };
    """

    _fields_ = (
        ("address", c_uint64),
        ("size", c_uint64),
        ("file_offset", c_uint64),
        ("init_prot", c_uint32),
        ("max_prot", c_uint32),
    )

    def __str__(self) -> str:
        return f"""\tAotMappingInfo:
\t\taddress: {hex(self.address)}
\t\tsize: {hex(self.size)}
\t\tfile_offset: {hex(self.file_offset)}
\t\tinit_prot: {hex(self.init_prot)}
\t\tmax_prot: {hex(self.max_prot)}
"""


class AotSharedCacheHeader(Structure):
    """
    struct AotSharedCacheHeader {
        uint64_t magic;
        uint64_t field_0x8;
        uint64_t field_0x10;
        uint64_t uuid[2];
        uint64_t version[4];
        uint64_t offset_to_codesig;
        uint64_t size_of_codesig;
        uint32_t n_entries;
        uint32_t offset_to_metadata_seg;
        struct AotMappingInfo mapping[3];
    };
    """

    _fields_ = (
        ("magic", c_uint64),
        ("field_0x8", c_uint64),
        ("field_0x10", c_uint64),
        ("uuid", c_uint64 * 2),
        ("version", c_uint64 * 4),
        ("offset_to_codesig", c_uint64),
        ("size_of_codesig", c_uint64),
        ("n_entries", c_uint32),
        ("offset_to_metadata_seg", c_uint32),
        ("mapping", AotMappingInfo * 3),
    )

    def __str__(self) -> str:
        return f"""AotSharedCacheHeader:
\tmagic: {hex(self.magic)}
\tfield_0x8: {hex(self.field_0x8)}
\tfield_0x10: {hex(self.field_0x10)}
\tuuid: {[hex(self.uuid[i]) for i in range(2)]}
\tversion: {[hex(self.version[i]) for i in range(4)]}
\toffset_to_codesig: {hex(self.offset_to_codesig)}
\tsize_of_codesig: {hex(self.size_of_codesig)}
\tn_entries: {hex(self.n_entries)}
\toffset_to_metadata_seg: {hex(self.offset_to_metadata_seg)}
\tmapping:\n{''.join(str(self.mapping[i]) for i in range(3))}"""


class CodeFragmentMetaData(Structure):
    _fields_ = (
        ("type", c_uint32),
        ("offset_to_path_name", c_uint32),
        ("offset_to_x64_code", c_uint32),
        ("size_of_x64_code", c_uint32),
        ("offset_to_arm64_code", c_uint32),
        ("size_of_arm64_code", c_uint32),
        ("offset_to_branch_data", c_uint32),
        ("size_of_branch_data", c_uint32),
        ("offset_to_insn_map", c_uint32),
        ("size_of_insn_map", c_uint32),
    )

    def __str__(self) -> str:
        return f"""CodeFragmentMetaData:
\ttype: {hex(self.type)}
\toffset_to_path_name: {hex(self.offset_to_path_name)}
\toffset_to_x64_code: {hex(self.offset_to_x64_code)}
\tsize_of_x64_code: {hex(self.size_of_x64_code)}
\toffset_to_arm64_code: {hex(self.offset_to_arm64_code)}
\tsize_of_arm64_code: {hex(self.size_of_arm64_code)}
\toffset_to_branch_data: {hex(self.offset_to_branch_data)}
\tsize_of_branch_data: {hex(self.size_of_branch_data)}
\toffset_to_insn_map: {hex(self.offset_to_insn_map)}
\tsize_of_insn_map: {hex(self.size_of_insn_map)}"""


def load_aot_mapped_module_names(mapped_module_file: str) -> Optional[Iterable[str]]:
    if not os.path.exists(mapped_module_file):
        show_err(f"{mapped_module_file} does not exist")
        return None
    with open(mapped_module_file, "r") as fin:
        for line in fin.readlines():
            yield line.strip()


@app.command()
def extract_codesig(aot_shared_cache_path: str, output_file_path: str) -> None:
    if not os.path.exists(aot_shared_cache_path):
        show_err(f"{aot_shared_cache_path} does not exist")
        return
    with open(aot_shared_cache_path, "r+b") as fin:
        mm = mmap.mmap(fin.fileno(), 0)
        header = AotSharedCacheHeader.from_buffer_copy(cast(bytes, mm[0:sizeof(AotSharedCacheHeader)]), 0)
        if header.magic != AOT_SHARED_CACHE_MAGIC:
            show_err("magic should be AotCache")
            return
        codesig_beg = header.offset_to_codesig
        codesig_end = codesig_beg + header.size_of_codesig
        show_log(f"Will extract a code signature located at [{hex(codesig_beg)}, {hex(codesig_end)}]")
        with open(output_file_path, "wb") as fout:
            fout.write(cast(bytes, mm[codesig_beg:codesig_end]))
        show_log(f"The extracted code signature is saved to {output_file_path}")


@app.command()
def dump(aot_shared_cache_path: str) -> None:
    if not os.path.exists(aot_shared_cache_path):
        show_err(f"{aot_shared_cache_path} does not exist")
        return
    if (mapped_module_names := load_aot_mapped_module_names("aot_mapped_module_names")) is None:
        return
    with open(aot_shared_cache_path, "r+b") as fin:
        mm = mmap.mmap(fin.fileno(), 0)
        header = AotSharedCacheHeader.from_buffer_copy(cast(bytes, mm[0:sizeof(AotSharedCacheHeader)]), 0)
        if header.magic != AOT_SHARED_CACHE_MAGIC:
            show_err("magic should be AotCache")
            return
        typer.echo(header)
        aot_seg_beg = header.mapping[2].file_offset
        metadata_seg_beg = header.offset_to_metadata_seg
        typer.echo(f"metadata segment starts from {hex(metadata_seg_beg)}")
        cur_seek = header.offset_to_metadata_seg
        typer.echo(f"number of entries is {header.n_entries}")
        for _ in range(header.n_entries):
            entry = CodeFragmentMetaData.from_buffer_copy(
                cast(bytes, mm[cur_seek:cur_seek + sizeof(CodeFragmentMetaData)]))
            typer.echo(entry)
            cur_seek += sizeof(CodeFragmentMetaData)
            if entry.type == 0:
                if cur_seek != metadata_seg_beg + entry.offset_to_branch_data:
                    show_err("branch data does not follow")
                    show_err(f"{hex(cur_seek)} {hex(metadata_seg_beg)} {hex(entry.offset_to_branch_data)}")
                    return
                branch_data_beg, branch_data_end = cur_seek, cur_seek + entry.size_of_branch_data
                cur_seek += entry.size_of_branch_data
                if cur_seek != metadata_seg_beg + entry.offset_to_insn_map:
                    show_err("instruction map data does not follow")
                    show_err(f"{hex(cur_seek)} {hex(metadata_seg_beg)} {hex(entry.offset_to_insn_map)}")
                    return
                insn_map_beg, insn_map_end = cur_seek, cur_seek + entry.size_of_insn_map
                cur_seek += entry.size_of_insn_map
                arm64_code_beg = aot_seg_beg + entry.offset_to_arm64_code
                cache_code_end = arm64_code_beg + entry.size_of_arm64_code
                typer.echo(f"[{hex(arm64_code_beg)}, {hex(cache_code_end)}] {next(mapped_module_names)}")
                typer.echo(f"\tbranch data: [{hex(branch_data_beg)}, {hex(branch_data_end)}]")
                typer.echo(f"\tinstruction map: [{hex(insn_map_beg)}, {hex(insn_map_end)}]")
            elif entry.type == 1:
                runtime_begin = aot_seg_beg + entry.offset_to_arm64_code
                runtime_end = runtime_begin + entry.size_of_arm64_code
                typer.echo(f"[{hex(runtime_begin)}, {hex(runtime_end)}] RuntimeRoutines")
            else:
                show_err(f"Unknown CodeSegmentMetadata entry ({hex(entry.type)})")
                return


if __name__ == "__main__":
    app()
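# Hedged command-line sketch for the typer app above. The script filename is an
# assumption; the sub-command names follow from the decorated functions (typer
# renames extract_codesig to "extract-codesig" by default):
#
#   python aot_cache_tool.py dump /path/to/aot_shared_cache
#   python aot_cache_tool.py extract-codesig /path/to/aot_shared_cache codesig.bin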
from .genomes import GenomeContainer, GtfInterface
from .utils import translate_id_to_symbols, translate_symbols_to_id
# -*- coding: utf-8 -*-
#: Following the versioning system at http://semver.org/
#: See also docs/contributing.rst, section ``Versioning``

#: MAJOR: incremented for incompatible API changes
MAJOR = 1
#: MINOR: incremented for adding functionality in a backwards-compatible manner
MINOR = 0
#: PATCH: incremented for backward-compatible bug fixes and minor capability improvements
PATCH = 0

#: Latest release version of MOE
__version__ = "{0:d}.{1:d}.{2:d}".format(MAJOR, MINOR, PATCH)
import pandas as pd
from datetime import datetime
from rich import box
from rich.table import Table
from rich.spinner import Spinner
from rich.console import Console
from rich.align import Align
from rich.progress import BarColumn, Progress, TextColumn


def protocol_summary(db, all_experiment_ids, tail: int = 5, verbose: bool = True, full: bool = False):
    """Construct a summary dataframe of previous experiments."""
    # Set pandas df format option to print
    pd.set_option("display.max_columns", 5)
    pd.set_option("max_colwidth", 30)
    if len(all_experiment_ids) > 0:
        purposes, project_names, exp_paths = [], [], []
        num_seeds, statuses, start_times, experiment_types = [], [], [], []
        resource, num_cpus, num_gpus, total_jobs, completed_jobs = [], [], [], [], []
        if tail is None:
            tail = len(all_experiment_ids)
        # Loop over experiment ids and extract data to retrieve
        for int_e_id in all_experiment_ids[-tail:]:
            e_id = str(int_e_id)
            purposes.append(db.dget(e_id, "purpose"))
            project_names.append(db.dget(e_id, "project_name"))
            exp_paths.append(db.dget(e_id, "experiment_dir"))
            statuses.append(db.dget(e_id, "job_status"))
            start_times.append(db.dget(e_id, "start_time"))
            resource.append(db.dget(e_id, "exec_resource"))
            num_seeds.append(db.dget(e_id, "num_seeds"))
            num_cpus.append(db.dget(e_id, "num_cpus"))
            num_gpus.append(db.dget(e_id, "num_gpus"))
            experiment_types.append(db.dget(e_id, "experiment_type"))
            total_jobs.append(db.dget(e_id, "num_total_jobs"))
            completed_jobs.append(db.dget(e_id, "completed_jobs"))

        d = {
            "ID": [str(e_id) for e_id in all_experiment_ids[-tail:]],
            "Date": start_times,
            "Project": project_names,
            "Purpose": purposes,
            "Experiment Dir": exp_paths,
            "Status": statuses,
            "Seeds": num_seeds,
            "Resource": resource,
            "CPUs": num_cpus,
            "GPUs": num_gpus,
            "Type": experiment_types,
            "Jobs": total_jobs,
            "Completed Jobs": completed_jobs,
        }
        df = pd.DataFrame(d)
        df["Date"] = df["Date"].map("{:.5}".format)
        df["Purpose"] = df["Purpose"].map("{:.30}".format)
        # Print a nice table overview (no job resources)
        if verbose:
            Console().print(Align.left(protocol_table(df, full)))
        return df
    else:
        if verbose:
            time_t = datetime.now().strftime("%m/%d/%Y %I:%M:%S %p")
            print(time_t, "No previously recorded experiments")
        return None


def get_progress_bar(total_jobs: int, completed_jobs: int):
    progress = Progress(
        TextColumn("{task.completed:^3.0f}/{task.total:^3.0f}", justify="left", style="white"),
        BarColumn(bar_width=10, style="red"),
        TextColumn("[progress.percentage]{task.percentage:>3.0f}%", style="white"),
        auto_refresh=False,
    )
    task = progress.add_task("queue", total=total_jobs)
    progress.update(task, completed=completed_jobs, refresh=True)
    return progress


def protocol_table(df, full: bool = True):
    """Generate pretty table of experiment protocol db - preselected db."""
    table = Table(show_header=True, show_footer=False, header_style="bold blue")
    table.add_column(":bookmark:", justify="center")
    table.add_column(":id:", justify="center")
    table.add_column(":spiral_calendar:", justify="center")
    table.add_column("Project")
    table.add_column("Purpose")
    table.add_column("Type")
    table.add_column("[yellow]:arrow_forward:", justify="center")
    table.add_column("[yellow]:recycle:", justify="center")
    table.add_column("CPU", justify="center")
    table.add_column("GPU", justify="center")
    # Full option prints also resource requirements of jobs
    if full:
        table.add_column(
            ":hourglass_flowing_sand: Completed Jobs [yellow]:heavy_check_mark:",
            justify="center",
        )
    # Add rows of info if dataframe exists (previously recorded experiments)
    if df is not None:
        for index in reversed(df.index):
            row = df.iloc[index]
            if row["Resource"] == "sge-cluster":
                resource = "SGE"
            elif row["Resource"] == "slurm-cluster":
                resource = "Slurm"
            elif row["Resource"] == "gcp-cloud":
                resource = "GCP"
            else:
                resource = "Local"

            if row["Type"] == "hyperparameter-search":
                exp_type = "search"
            elif row["Type"] == "multiple-configs":
                exp_type = "config"
            elif row["Type"] == "single-config":
                exp_type = "single"
            else:
                exp_type = row["Type"]

            if row["Status"] == "running":
                status = Spinner("dots", style="magenta")
            elif row["Status"] == "completed":
                status = "[green]:heavy_check_mark:"
            else:
                status = "[red]:heavy_multiplication_x:"

            if full:
                bar = get_progress_bar(int(row["Jobs"]), int(row["Completed Jobs"]))
                table.add_row(status, row["ID"], row["Date"], row["Project"][:10],
                              row["Purpose"][:15], exp_type, resource, str(row["Seeds"]),
                              str(row["CPUs"]), str(row["GPUs"]), bar)
            else:
                table.add_row(status, row["ID"], row["Date"], row["Project"][:10],
                              row["Purpose"][:25], exp_type, resource, str(row["Seeds"]),
                              str(row["CPUs"]), str(row["GPUs"]))

    table.border_style = "blue"
    table.box = box.SIMPLE_HEAD
    return table
#
# SPDX-License-Identifier: Apache-2.0
#

from .fabric import FabricNetwork
import torch.nn as nn
from .helper import init, make_standard_block
import torch


class PAFModel(nn.Module):
    def __init__(self, backend, backend_outp_feats, n_joints, n_paf, n_stages=7):
        super(PAFModel, self).__init__()
        assert (n_stages > 0)
        self.backend = backend
        stages = [Stage(backend_outp_feats, n_joints, n_paf, True)]
        for i in range(n_stages - 1):
            stages.append(Stage(backend_outp_feats, n_joints, n_paf, False))
        self.stages = nn.ModuleList(stages)

    def forward(self, x):
        img_feats = self.backend(x)
        cur_feats = img_feats
        heatmap_outs = []
        paf_outs = []
        for i, stage in enumerate(self.stages):
            heatmap_out, paf_out = stage(cur_feats)
            heatmap_outs.append(heatmap_out)
            paf_outs.append(paf_out)
            cur_feats = torch.cat([img_feats, heatmap_out, paf_out], 1)
        return heatmap_outs, paf_outs


class Stage(nn.Module):
    def __init__(self, backend_outp_feats, n_joints, n_paf, stage1):
        super(Stage, self).__init__()
        inp_feats = backend_outp_feats
        if stage1:
            self.block1 = make_paf_block_stage1(inp_feats, n_joints)
            self.block2 = make_paf_block_stage1(inp_feats, n_paf)
        else:
            inp_feats = backend_outp_feats + n_joints + n_paf
            self.block1 = make_paf_block_stage2(inp_feats, n_joints)
            self.block2 = make_paf_block_stage2(inp_feats, n_paf)
        init(self.block1)
        init(self.block2)

    def forward(self, x):
        y1 = self.block1(x)
        y2 = self.block2(x)
        return y1, y2


def make_paf_block_stage1(inp_feats, output_feats):
    layers = [make_standard_block(inp_feats, 128, 3),
              make_standard_block(128, 128, 3),
              make_standard_block(128, 128, 3),
              make_standard_block(128, 512, 1, 1, 0)]
    layers += [nn.Conv2d(512, output_feats, 1, 1, 0)]
    return nn.Sequential(*layers)


def make_paf_block_stage2(inp_feats, output_feats):
    layers = [make_standard_block(inp_feats, 128, 7, 1, 3),
              make_standard_block(128, 128, 7, 1, 3),
              make_standard_block(128, 128, 7, 1, 3),
              make_standard_block(128, 128, 7, 1, 3),
              make_standard_block(128, 128, 7, 1, 3),
              make_standard_block(128, 128, 1, 1, 0)]
    layers += [nn.Conv2d(128, output_feats, 1, 1, 0)]
    return nn.Sequential(*layers)
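# A construction sketch for PAFModel, assuming this module and its .helper are importable.
# The backend, channel count, and joint/PAF numbers below are illustrative assumptions
# (19 joints / 38 PAF channels echo the common OpenPose COCO setup, not this repo's config).
import torch
import torch.nn as nn

dummy_backend = nn.Sequential(nn.Conv2d(3, 128, kernel_size=3, padding=1), nn.ReLU())
model = PAFModel(dummy_backend, backend_outp_feats=128, n_joints=19, n_paf=38, n_stages=7)
# forward returns one heatmap tensor and one PAF tensor per stage, e.g.:
# heatmaps, pafs = model(torch.randn(1, 3, 368, 368))
# (output spatial sizes depend on the padding used inside make_standard_block)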
import argparse
import os
import torch
from repmlpnet import *

parser = argparse.ArgumentParser(description='RepMLPNet Conversion')
parser.add_argument('load', metavar='LOAD', help='path to the source weights file')
parser.add_argument('save', metavar='SAVE', help='path to the target weights file')
parser.add_argument('-a', '--arch', metavar='ARCH', default='RepMLPNet-B224')


def convert():
    args = parser.parse_args()
    if args.arch == 'RepMLPNet-B224':
        model = create_RepMLPNet_B224(deploy=False)
    elif args.arch == 'RepMLPNet-B256':
        model = create_RepMLPNet_B256(deploy=False)
    else:
        raise ValueError('TODO')

    if os.path.isfile(args.load):
        print("=> loading checkpoint '{}'".format(args.load))
        checkpoint = torch.load(args.load, map_location='cpu')
        if 'state_dict' in checkpoint:
            checkpoint = checkpoint['state_dict']
        elif 'model' in checkpoint:
            checkpoint = checkpoint['model']
        ckpt = {k.replace('module.', ''): v for k, v in checkpoint.items()}  # strip the names
        print(ckpt.keys())
        model.load_state_dict(ckpt)
    else:
        print("=> no checkpoint found at '{}'".format(args.load))

    model.locality_injection()

    torch.save(model.state_dict(), args.save)


if __name__ == '__main__':
    convert()
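# Hedged invocation sketch (the script name and checkpoint filenames are assumptions;
# the positional LOAD/SAVE arguments and the -a/--arch flag come from the argparse setup above):
#
#   python convert.py RepMLPNet-B224-train.pth RepMLPNet-B224-deploy.pth -a RepMLPNet-B224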
'''OpenGL extension ANGLE.pack_reverse_row_order

This module customises the behaviour of the
OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order to provide a more
Python-friendly API

Overview (from the spec)

	This extension introduces a mechanism to allow reversing the order
	in which image rows are written into a pack destination. This
	effectively allows an application to flip the results of a
	ReadPixels operation in the y direction without having to render
	upside down.

	The coordinate system of OpenGL is vertically reversed in comparison to
	a number of other graphics systems such as native windowing APIs.
	Applications that perform ReadPixels may have to either render to an
	intermediate color buffer before calling ReadPixels or perform a flip
	in software after ReadPixels. In some systems the GL can perform the
	row reversal during ReadPixels without incurring additional cost.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ANGLE/pack_reverse_row_order.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order import *
from OpenGL.raw.GLES2.ANGLE.pack_reverse_row_order import _EXTENSION_NAME


def glInitPackReverseRowOrderANGLE():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)

### END AUTOGENERATED SECTION
from vedastr.utils import Registry

RECTIFICATORS = Registry('Rectificator')
from napari import Viewer


def test_multi_viewers_dont_clash(qapp):
    v1 = Viewer(show=False, title='v1')
    v2 = Viewer(show=False, title='v2')
    assert not v1.grid.enabled
    assert not v2.grid.enabled

    v1.window.activate()  # a click would do this in the actual gui
    v1.window._qt_viewer.viewerButtons.gridViewButton.click()

    assert not v2.grid.enabled
    assert v1.grid.enabled
    v1.close()
    v2.close()
__version__ = "1.1.19"
from .pykeyvi_autowrap_conversion_providers import *
from autowrap.ConversionProvider import special_converters


def register_converters():
    special_converters.append(MatchIteratorPairConverter())
from GridCal.Engine.calculation_engine import *
from GridCal.Engine.Simulations.Dynamics.dynamic_modules import *

grid = MultiCircuit()
# grid.load_file('lynn5buspv.xlsx')
grid.load_file('IEEE30.xlsx')
grid.compile()

circuit = grid.circuits[0]

options = PowerFlowOptions(SolverType.NR, verbose=False, robust=False, tolerance=1e-9)

power_flow = PowerFlow(grid, options)
power_flow.run()

dynamic_devices = circuit.get_generators()
bus_indices = [circuit.buses_dict[elm.bus] for elm in dynamic_devices]

res = dynamic_simulation(n=len(circuit.buses),
                         Vbus=power_flow.results.voltage,
                         Sbus=circuit.power_flow_input.Sbus,
                         Ybus=circuit.power_flow_input.Ybus,
                         Sbase=circuit.Sbase,
                         fBase=50,
                         t_sim=50,
                         h=0.001,
                         dynamic_devices=dynamic_devices,
                         bus_indices=bus_indices)

from matplotlib import pyplot as plt

plt.figure()
plt.plot(res.time, abs(res.voltage), linewidth=1)
plt.title('Generator voltages')

plt.figure()
plt.plot(res.time, abs(res.omegas), linewidth=1)
plt.title('Angular speeds')

plt.show()
"""Common response models."""<import_from_stmt>typing Any<import_stmt>simplejson<as>json<import_from_stmt>starlette responses<class_stmt>XMLResponse(responses.Response)<block_start>"""XML Response"""<line_sep>media_type="application/xml"<block_end><class_stmt>JSONResponse(responses.JSONResponse)<block_start>"""Custom JSON Response."""<def_stmt>render self content:Any<arrow>bytes<block_start>"""Render JSON. Same defaults as starlette.responses.JSONResponse.render but allow NaN to be replaced by null using simplejson """<line_sep><return>json.dumps(content ensure_ascii=<false> allow_nan=<false> indent=<none> ignore_nan=<true> separators=("," ":") ).encode("utf-8")<block_end><block_end><class_stmt>GeoJSONResponse(JSONResponse)<block_start>"""GeoJSON Response"""<line_sep>media_type="application/geo+json"<block_end>
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# pylint: disable=invalid-name, unused-import
"""
Function namespace.
Acknowledgement: This file originates from incubator-tvm
"""
import os
import sys
import ctypes
from ..base import _LIB, check_call
from .base import py_str, c_str

try:
    if int(os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
        from ._ctypes.function import FunctionBase as _FunctionBase
        # To set RETURN_SWITCH for OBJECT_HANDLE
        from . import object
    else:
        from ._cy3.core import FunctionBase as _FunctionBase
except ImportError:
    if int(os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
        raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
    from ._ctypes.function import FunctionBase as _FunctionBase
    # To set RETURN_SWITCH for OBJECT_HANDLE
    from . import object

FunctionHandle = ctypes.c_void_p


class Function(_FunctionBase):
    """The PackedFunc object used in TVM.

    Function plays a key role in bridging the front end and backend in TVM.
    Function provides a type-erased interface: you can call a function with
    positional arguments.

    The compiled module returns Function.
    TVM backend also registers and exposes its API as Functions.
    For example, the developer functions exposed in tvm.ir_pass are actually
    C++ functions that are registered as PackedFunc

    The following are common usage scenarios of tvm.Function.

    - Automatic exposure of C++ API into python
    - To call PackedFunc from python side
    - To call python callbacks to inspect results in generated code
    - Bring python hook into C++ backend

    See Also
    --------
    tvm.register_func: How to register global function.
    tvm.get_global_func: How to get global function.
    """


def get_global_func(name, allow_missing=False):
    """Get a global function by name

    Parameters
    ----------
    name : str
       The name of the global function

    allow_missing : bool
        Whether allow missing function or raise an error.

    Returns
    -------
    func : tvm.Function
        The function to be returned, None if function is missing.
    """
    handle = FunctionHandle()
    check_call(_LIB.MXNetFuncGetGlobal(c_str(name), ctypes.byref(handle)))
    if handle.value:
        return Function(handle, False)

    if allow_missing:
        return None

    raise ValueError("Cannot find global function %s" % name)


def list_global_func_names():
    """Get list of global functions registered.

    Returns
    -------
    names : list
       List of global functions names.
    """
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()

    check_call(_LIB.MXNetFuncListGlobalNames(ctypes.byref(size), ctypes.byref(plist)))
    fnames = []
    for i in range(size.value):
        fnames.append(py_str(plist[i]))
    return fnames


def _get_api(f):
    flocal = f
    flocal.is_global = True
    return flocal


def _init_api(namespace, target_module_name=None):
    """Initialize api for a given module name

    namespace : str
       The namespace of the source registry

    target_module_name : str
       The target module name if different from namespace
    """
    target_module_name = (target_module_name if target_module_name else namespace)
    if namespace.startswith("mxnet."):
        _init_api_prefix(target_module_name, namespace[6:])
    else:
        _init_api_prefix(target_module_name, namespace)


def _init_api_prefix(module_name, prefix):
    module = sys.modules[module_name]

    for name in list_global_func_names():
        if prefix == "api":
            fname = name
            if name.startswith("_"):
                target_module = sys.modules["mxnet._api_internal"]
            else:
                target_module = module
        else:
            if not name.startswith(prefix):
                continue
            fname = name[len(prefix) + 1:]
            target_module = module

        if fname.find(".") != -1:
            continue
        f = get_global_func(name)
        ff = _get_api(f)
        ff.__name__ = fname
        ff.__doc__ = ("MXNet PackedFunc %s. " % fname)
        setattr(target_module, ff.__name__, ff)
import pytest
from salt.exceptions import CommandExecutionError


@pytest.fixture(scope="module", autouse=True)
def install_npm(sminion):
    try:
        sminion.functions.state.single("pkg.installed", name="npm")
        # Just name the thing we're looking for
        sminion.functions.npm  # pylint: disable=pointless-statement
    except (CommandExecutionError, AttributeError) as e:
        pytest.skip("Unable to install npm - " + str(e))


@pytest.mark.requires_network  # additional markers on this test were lost to e-mail redaction in the dump
def test_removed_installed_cycle(states, modules):
    project_version = "[email protected]"  # redacted in the dump; originally an npm "package@version" spec
    success = modules.npm.uninstall("pm2")
    assert success, "Unable to uninstall pm2 in prep for tests"

    ret = states.npm.installed(name=project_version)
    assert ret.result is True, "Failed to states.npm.installed {} - {}".format(
        project_version, ret.comment
    )

    ret = states.npm.removed(name=project_version)
    assert ret.result is True, "Failed to states.npm.removed {} - {}".format(
        project_version, ret.comment
    )
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

from builtins import bytes

import unittest
import mock
import datetime
import httplib2

from arvados_cwl.util import *
from arvados.errors import ApiError


class MockDateTime(datetime.datetime):
    @classmethod
    def utcnow(cls):
        return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)


datetime.datetime = MockDateTime


class TestUtil(unittest.TestCase):
    def test_get_intermediate_collection_info(self):
        name = "one"
        current_container = {"uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}
        intermediate_output_ttl = 120

        info = get_intermediate_collection_info(name, current_container, intermediate_output_ttl)

        self.assertEqual(info["name"], "Intermediate collection for step one")
        self.assertEqual(info["trash_at"], datetime.datetime(2018, 1, 1, 0, 2, 0, 0))
        self.assertEqual(info["properties"], {"type": "intermediate", "container": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})

    def test_get_current_container_success(self):
        api = mock.MagicMock()
        api.containers().current().execute.return_value = {"uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"}

        current_container = get_current_container(api)

        self.assertEqual(current_container, {"uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"})

    def test_get_current_container_error(self):
        api = mock.MagicMock()
        api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 300}), bytes(b""))
        logger = mock.MagicMock()

        with self.assertRaises(ApiError):
            get_current_container(api, num_retries=0, logger=logger)

    def test_get_current_container_404_error(self):
        api = mock.MagicMock()
        api.containers().current().execute.side_effect = ApiError(httplib2.Response({"status": 404}), bytes(b""))
        logger = mock.MagicMock()

        current_container = get_current_container(api, num_retries=0, logger=logger)

        self.assertEqual(current_container, None)
""" Copyright (c) 2017-2022 Red Hat, Inc All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the LICENSE file for details. """<import_stmt>dataclasses<import_stmt>functools<import_stmt>hashlib<import_stmt>os<import_from_stmt>pathlib Path<import_from_stmt>typing Iterator List Sequence Dict<import_stmt>koji<import_from_stmt>atomic_reactor util<import_from_stmt>atomic_reactor.constants PLUGIN_FETCH_MAVEN_KEY REPO_FETCH_ARTIFACTS_URL REPO_FETCH_ARTIFACTS_KOJI <import_from_stmt>atomic_reactor.config get_koji_session<import_from_stmt>atomic_reactor.dirs BuildDir<import_from_stmt>atomic_reactor.download download_url<import_from_stmt>atomic_reactor.plugin Plugin<import_from_stmt>atomic_reactor.utils.koji NvrRequest<import_from_stmt>atomic_reactor.utils.pnc PNCUtil<try_stmt><block_start><import_from_stmt>urlparse urlparse<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib.parse urlparse<block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>DownloadRequest<block_start>url:str<line_sep>dest:str<line_sep>checksums:Dict[str str]<block_end><class_stmt>FetchMavenArtifactsPlugin(Plugin)<block_start>key=PLUGIN_FETCH_MAVEN_KEY<line_sep>is_allowed_to_fail=<false><line_sep>DOWNLOAD_DIR='artifacts'<def_stmt>__init__ self workflow<block_start>""" :param workflow: DockerBuildWorkflow instance """<line_sep>super(FetchMavenArtifactsPlugin self).__init__(workflow)<line_sep>self.path_info=self.workflow.conf.koji_path_info<line_sep>all_allowed_domains=self.workflow.conf.artifacts_allowed_domains<line_sep>self.allowed_domains=set(domain.lower()<for>domain all_allowed_domains<or>[])<line_sep>self.session=<none><line_sep>self._pnc_util=<none><line_sep>self.no_source_artifacts=[]<line_sep>self.source_url_to_artifacts={}<block_end>@property<def_stmt>pnc_util self<block_start><if_stmt><not>self._pnc_util<block_start>pnc_map=self.workflow.conf.pnc<if_stmt><not>pnc_map<block_start><raise>RuntimeError('No PNC configuration found in reactor config map')<block_end>self._pnc_util=PNCUtil(pnc_map)<block_end><return>self._pnc_util<block_end><def_stmt>process_by_nvr self nvr_requests:List[NvrRequest]# components are metadata about nvr artifacts that we're going to fetch <block_start>components=[]<line_sep>download_queue=[]<line_sep>errors=[]<for_stmt>nvr_request nvr_requests<block_start>build_info=self.session.getBuild(nvr_request.nvr)<if_stmt><not>build_info<block_start>errors.append('Build {} not found.'.format(nvr_request.nvr))<line_sep><continue><block_end>maven_build_path=self.path_info.mavenbuild(build_info)<line_sep>build_archives=self.session.listArchives(buildID=build_info['id'] type='maven')<line_sep>build_archives=nvr_request.match_all(build_archives)<for_stmt>build_archive build_archives<block_start>maven_file_path=self.path_info.mavenfile(build_archive)<line_sep># NOTE: Don't use urljoin here because maven_build_path does # not contain a trailing slash, which causes the last dir to # be dropped. 
url=maven_build_path+'/'+maven_file_path<line_sep>checksum_type=koji.CHECKSUM_TYPES[build_archive['checksum_type']]<line_sep>checksums={checksum_type:build_archive['checksum']}<line_sep>download_queue.append(DownloadRequest(url maven_file_path checksums))<line_sep>components.append({'type':'kojifile' 'filename':build_archive['filename'] 'filesize':build_archive['size'] 'checksum':build_archive['checksum'] 'checksum_type':checksum_type 'nvr':nvr_request.nvr 'archive_id':build_archive['id'] })<block_end>unmatched_archive_requests=nvr_request.unmatched()<if_stmt>unmatched_archive_requests<block_start>errors.append('NVR request for "{}", failed to find archives for: "{}"'.format(nvr_request.nvr unmatched_archive_requests))<line_sep><continue><block_end><block_end><if_stmt>errors<block_start><raise>ValueError('Errors found while processing {}: {}'.format(REPO_FETCH_ARTIFACTS_KOJI ', '.join(errors)))<block_end><return>components download_queue<block_end><def_stmt>process_by_url self url_requests<block_start>download_queue=[]<line_sep># we'll capture all source artifacts of url artifacts in a source_download_queue # later on maven_url_sources_metadata plugin will process this queue to generate # remote source files that are later used in source container build to get sources # of url artifacts. # we have to do this in post build to avoid having source artifacts in build_dir # during binary build source_download_queue=[]<line_sep>errors=[]<for_stmt>url_request url_requests<block_start>url=url_request['url']<if_stmt>self.allowed_domains<block_start>parsed_file_url=urlparse(url.lower())<line_sep>file_url=parsed_file_url.netloc+parsed_file_url.path<if_stmt><not>any(file_url.startswith(prefix)<for>prefix self.allowed_domains)<block_start>errors.append('File URL {} is not in list of allowed domains: {}'.format(file_url self.allowed_domains))<line_sep><continue><block_end><block_end>checksums={algo:url_request[algo]<for>algo hashlib.algorithms_guaranteed<if>algo<in>url_request}<line_sep>target=url_request.get('target' url.rsplit('/' 1)[-1])<line_sep>download_queue.append(DownloadRequest(url target checksums))<line_sep>artifact={'url':url_request['url'] 'checksums':checksums 'filename':os.path.basename(url_request['url'])}<if_stmt>'source-url'<not><in>url_request<block_start>self.no_source_artifacts.append(artifact)<line_sep>msg=f"No source-url found for {url_request['url']}.\n"<line_sep>self.log.warning(msg)<line_sep>msg<augadd>'fetch-artifacts-url without source-url is deprecated\n'<line_sep>msg<augadd>'to fix this please provide the source-url according to '<concat>'https://osbs.readthedocs.io/en/latest/users.html#fetch-artifacts-url-yaml'<line_sep>self.log.user_warning(msg)<line_sep><continue><block_end>source_url=url_request['source-url']<line_sep>checksums={algo:url_request[('source-'+algo)]<for>algo hashlib.algorithms_guaranteed<if>('source-'+algo)<in>url_request}<if_stmt>source_url<not><in>self.source_url_to_artifacts<block_start>self.source_url_to_artifacts[source_url]=[artifact]<line_sep># source_url will mostly be gerrit URLs that don't have filename # in the URL itself, so we'll have to get filename from URL response target=os.path.basename(source_url)<line_sep>source_download_queue.append(dataclasses.asdict(DownloadRequest(source_url target checksums)))<block_end><else_stmt><block_start>self.source_url_to_artifacts[source_url].append(artifact)<block_end><block_end><if_stmt>errors<block_start><raise>ValueError('Errors found while processing {}: {}'.format(REPO_FETCH_ARTIFACTS_URL ', 
'.join(errors)))<block_end><return>download_queue source_download_queue<block_end><def_stmt>process_pnc_requests self pnc_requests<block_start>download_queue=[]<line_sep>artifact_ids=[]<line_sep>builds=pnc_requests.get('builds' [])<if_stmt>builds<block_start>pnc_build_metadata={'builds':[]}<block_end><else_stmt><block_start>pnc_build_metadata={}<block_end><for_stmt>build builds<block_start>pnc_build_metadata['builds'].append({'id':build['build_id']})<for_stmt>artifact build['artifacts']<block_start>artifact_ids.append(artifact['id'])<line_sep>url,checksums=self.pnc_util.get_artifact(artifact['id'])<line_sep>download_queue.append(DownloadRequest(url artifact['target'] checksums))<block_end><block_end><return>artifact_ids download_queue pnc_build_metadata<block_end><def_stmt>download_files self downloads:Sequence[DownloadRequest] build_dir:BuildDir<arrow>Iterator[Path]<block_start>"""Download maven artifacts to a build dir."""<line_sep>artifacts_path=build_dir.path/self.DOWNLOAD_DIR<line_sep>koji_config=self.workflow.conf.koji<line_sep>insecure=koji_config.get('insecure_download' <false>)<line_sep>self.log.debug('%d files to download' len(downloads))<line_sep>session=util.get_retrying_requests_session()<for_stmt>index,download enumerate(downloads)<block_start>dest_path=artifacts_path/download.dest<line_sep>dest_dir=dest_path.parent<line_sep>dest_filename=dest_path.name<if_stmt><not>dest_dir.exists()<block_start>dest_dir.mkdir(parents=<true>)<block_end>self.log.debug('%d/%d downloading %s' index+1 len(downloads) download.url)<line_sep>download_url(url=download.url dest_dir=dest_dir insecure=insecure session=session dest_filename=dest_filename expected_checksums=download.checksums)<line_sep><yield>dest_path<block_end><block_end><def_stmt>run self<block_start>self.session=get_koji_session(self.workflow.conf)<line_sep>nvr_requests=[NvrRequest(**nvr_request)<for>nvr_request util.read_fetch_artifacts_koji(self.workflow)<or>[]]<line_sep>pnc_requests=util.read_fetch_artifacts_pnc(self.workflow)<or>{}<line_sep>url_requests=util.read_fetch_artifacts_url(self.workflow)<or>[]<line_sep>components,nvr_download_queue=self.process_by_nvr(nvr_requests)<line_sep>url_download_queue,source_download_queue=self.process_by_url(url_requests)<line_sep>pnc_artifact_ids,pnc_download_queue,pnc_build_metadata=self.process_pnc_requests(pnc_requests)<line_sep>download_queue=pnc_download_queue+nvr_download_queue+url_download_queue<line_sep>download_to_build_dir=functools.partial(self.download_files download_queue)<line_sep>self.workflow.build_dir.for_all_platforms_copy(download_to_build_dir)<line_sep><return>{'components':components 'download_queue':[dataclasses.asdict(download)<for>download download_queue] 'no_source':self.no_source_artifacts 'pnc_artifact_ids':pnc_artifact_ids 'pnc_build_metadata':pnc_build_metadata 'source_download_queue':source_download_queue 'source_url_to_artifacts':self.source_url_to_artifacts }<block_end><block_end>
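# Hedged illustration of the shape of a single fetch-artifacts-url entry as
# process_by_url() above reads it: 'url', an optional 'target', any hashlib-guaranteed
# checksum keys, plus 'source-url' and matching 'source-*' checksums. The URL and
# digest values here are made-up placeholders.
example_url_request = {
    "url": "https://example.com/artifacts/app-1.0.jar",
    "target": "app-1.0.jar",
    "sha256": "0" * 64,
    "source-url": "https://example.com/artifacts/app-1.0-sources.tar.gz",
    "source-sha256": "0" * 64,
}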
# -*- coding: utf-8 -*- <import_from_stmt>odoo models fields api<class_stmt>ResUsers(models.Model)<block_start>_inherit='res.users'<line_sep>sub_domain=fields.Char('Subdomain' help='Subdomain used for the mini-program API.' index=<true>)<line_sep>@api.model<def_stmt>create self vals<block_start><import_from_stmt>uuid uuid1<line_sep>vals['sub_domain']=uuid1().hex<line_sep><return>super(ResUsers self).create(vals)<block_end><block_end>
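# Quick standalone illustration of the value stored in sub_domain above: uuid1().hex is
# the 32-character lowercase hex form of the UUID (no dashes), which is what the
# create() override assigns as the user's mini-program API subdomain. No Odoo required.
from uuid import uuid1

token = uuid1().hex
assert len(token) == 32 and all(c in "0123456789abcdef" for c in token)
print(token)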
# strip the ASCII word characters and keep the CJK text; re.L (LOCALE) is invalid with str patterns on Python 3, re.A limits \w to ASCII <import_stmt>re<line_sep>word="jofwjoifA级哦啊接我金佛安fewfae慰剂serge"<line_sep>p=re.compile(r'\w' re.A)<line_sep>result=p.sub("" word)<line_sep>print(result)<line_sep>
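# Side-by-side check of why an ASCII-only \w is needed in the snippet above: the default
# Unicode-aware \w also matches the CJK characters and would empty the string, while
# re.A restricts \w to [a-zA-Z0-9_] and keeps them.
import re

word = "jofwjoifA级哦啊接我金佛安fewfae慰剂serge"
print(re.sub(r"\w", "", word))              # "" -- Unicode \w strips every character here
print(re.sub(r"\w", "", word, flags=re.A))  # 级哦啊接我金佛安慰剂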
# coding=utf-8 # Copyright (c) 2019 Alibaba PAI team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>tensorflow.python.layers.base Layer<import_from_stmt>.activations gelu_new<import_from_stmt>.attention Attention CrossAttention<import_from_stmt>.core dense_dropoutput_layernorm Dense<import_from_stmt>.utils get_initializer<class_stmt>EncoderBlock(Layer)<block_start><def_stmt>__init__ self config **kwargs<block_start>super(EncoderBlock self).__init__(**kwargs)<line_sep>self.attention=Attention(config name="attention")<line_sep># Use gelu_new, then match results self.intermediate=Dense(units=config.intermediate_size activation=gelu_new kernel_initializer=get_initializer(config.initializer_range) name="intermediate/dense")<line_sep>self.bert_output=dense_dropoutput_layernorm(config name="output")<block_end><def_stmt>call self inputs training=<false><block_start>hidden_states,attention_mask=inputs<line_sep>attention_output=self.attention([hidden_states attention_mask] training=training)<line_sep>intermediate_output=self.intermediate(attention_output)<line_sep>layer_output=self.bert_output([intermediate_output attention_output] training=training)<line_sep><return>layer_output attention_output<block_end><block_end><class_stmt>DecoderBlock(Layer)<block_start><def_stmt>__init__ self config **kwargs<block_start>super(DecoderBlock self).__init__(**kwargs)<line_sep>self.attention=Attention(config name="decoder_attention")<line_sep>self.cross_attention=CrossAttention(config name="decoder_cross_attention")<line_sep># Use gelu_new, then match results self.intermediate=Dense(units=config.intermediate_size activation=gelu_new kernel_initializer=get_initializer(config.initializer_range) name="intermediate/dense")<line_sep>self.output_1=dense_dropoutput_layernorm(config name="output_1")<line_sep>self.output_2=dense_dropoutput_layernorm(config name="output_2")<block_end><def_stmt>call self inputs training=<false><block_start>hidden_states,encoder_hidden_states,attention_mask,encoder_attention_mask=inputs<line_sep>attention_output=self.attention([hidden_states attention_mask] training=training)<line_sep>cross_attention_output=self.cross_attention([hidden_states encoder_hidden_states encoder_attention_mask])<line_sep>attention_output=self.output_1([attention_output cross_attention_output] training=training)<line_sep>intermediate_output=self.intermediate(attention_output)<line_sep>layer_output=self.output_2([intermediate_output attention_output] training=training)<line_sep><return>layer_output attention_output<block_end><block_end><class_stmt>Encoder(Layer)<block_start><def_stmt>__init__ self config **kwargs<block_start>super(Encoder self).__init__(**kwargs)<line_sep>self.layer=[EncoderBlock(config name="layer_{}".format(i))<for>i range(config.num_hidden_layers)]<block_end><def_stmt>call self inputs training=<false><block_start>hidden_states,attention_mask=inputs<line_sep>all_hidden_states=()<line_sep>all_att_outputs=()<for_stmt>i,layer_module 
enumerate(self.layer)<block_start>layer_output,att_output=layer_module([hidden_states attention_mask] training=training)<line_sep>hidden_states=layer_output<line_sep>all_hidden_states=all_hidden_states+(hidden_states )<line_sep>all_att_outputs=all_att_outputs+(att_output )<block_end>final_outputs=[]<for_stmt>hidden_states all_hidden_states<block_start>final_outputs.append(hidden_states)<block_end><return>final_outputs all_att_outputs<block_end><block_end><class_stmt>Decoder(Layer)<block_start><def_stmt>__init__ self config **kwargs<block_start>super(Decoder self).__init__(**kwargs)<line_sep>self.layer=[DecoderBlock(config name="decoder_layer_{}".format(i))<for>i range(config.num_hidden_layers)]<block_end><def_stmt>call self inputs training=<false><block_start>hidden_states,encoder_hidden_states,attention_mask,encoder_attention_mask=inputs<line_sep>all_hidden_states=()<line_sep>all_att_outputs=()<for_stmt>i,layer_module enumerate(self.layer)<block_start>layer_output,att_output=layer_module([hidden_states encoder_hidden_states attention_mask encoder_attention_mask] training=training)<line_sep>hidden_states=layer_output<line_sep>all_hidden_states=all_hidden_states+(hidden_states )<line_sep>all_att_outputs=all_att_outputs+(att_output )<block_end>final_outputs=[]<for_stmt>hidden_states all_hidden_states<block_start>final_outputs.append(hidden_states)<block_end><return>final_outputs all_att_outputs<block_end><block_end>
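# Illustrative-only sketch (plain NumPy, not the EasyTransfer layers above) of the data
# flow inside one EncoderBlock: self-attention output -> intermediate dense with the
# gelu_new activation -> output dense + residual + layer norm (dropout omitted). Shapes
# assume [batch, seq_len, hidden]; all weights here are random stand-ins.
import numpy as np

def gelu(x):
    # the "gelu_new" tanh approximation used by BERT-style models
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

def layer_norm(x, eps=1e-12):
    mu = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mu) / np.sqrt(var + eps)

hidden, intermediate = 8, 32
rng = np.random.default_rng(0)
w_in = rng.normal(size=(hidden, intermediate))
w_out = rng.normal(size=(intermediate, hidden))

attention_output = rng.normal(size=(2, 5, hidden))          # stand-in for Attention(...) output
intermediate_output = gelu(attention_output @ w_in)          # self.intermediate (Dense + gelu_new)
layer_output = layer_norm(attention_output + intermediate_output @ w_out)  # output + residual + layer norm
print(layer_output.shape)                                    # (2, 5, 8)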
<import_from_stmt>async_io Event Queue<import_from_stmt>evaluator.loader from_object<import_from_stmt>evaluator.sourcemaps TraceEntry<import_from_stmt>rpython.rlib.objectmodel specialize always_inline<import_from_stmt>rpython.rlib.rstring UnicodeBuilder<import_from_stmt>rpython.rtyper.lltypesystem rffi<import_from_stmt>space.customobject CustomObject_instantiate<import_from_stmt>space *<import_stmt>core<import_stmt>naming<import_stmt>os<import_stmt>pathobj<line_sep>#import stdlib <import_stmt>sys<import_stmt>time<import_stmt>uv_handle<import_stmt>uv_stream<import_stmt>uv_timer<import_stmt>uv_util<import_stmt>vectormath<import_stmt>vector<line_sep># The base environment module=Module(u'base' {u'builtin':Builtin.interface u'greenlet':core.Greenlet.interface u'interface':Interface.interface u'Id':Id.interface u'dict':Dict.interface u'Module':Module.interface u'module':Module.interface # TODO: deprecate and then remove u'object':Object.interface u'list':List.interface u'multimethod':Multimethod.interface u'float':Float.interface u'float_repr':FloatRepr.interface u'int':Integer.interface u'bool':Boolean.interface u'str':String.interface u'null':null u'true':true u'false':false u'path':pathobj.Path.interface u'property':Property.interface u'Uint8Data':Uint8Data.interface u'Uint8Array':Uint8Array.interface u'Uint8Slice':Uint8Slice.interface u'Uint8Builder':Uint8Builder.interface u'Utf8Decoder':Utf8Decoder.interface u'StringBuilder':StringBuilder_.interface u'schedule':Builtin(core.schedule u'schedule') u'set':Set.interface u'slice':Slice.interface u'DocRef':naming.DocRef.interface u'Event':Event.interface u'Queue':Queue.interface u'Timer':uv_timer.Timer.interface u'Handle':uv_handle.Handle.interface u'Stream':uv_stream.Stream.interface u'TTY':uv_stream.TTY.interface u'Pipe':uv_stream.Pipe.interface # The new vector interface, lets see how it fares. u'Numeric':vector.Numeric.interface } frozen=<true>)<line_sep>@Module.instantiator@signature(String Module optional=1)<def_stmt>module_instantiate name extends<block_start><return>Module(name.string {} extends)<block_end># we may later want to do the same for the stuff you see above. 
<for_stmt>error all_errors<block_start>module.setattr_force(error.interface.name error.interface)<block_end><for_stmt>_,error uv_util.errors<block_start>module.setattr_force(error.interface.name error.interface)<block_end><for_stmt>name,value operators.by_symbol.iteritems()<block_start>module.setattr_force(name value)<block_end><for_stmt>name,value vectormath.by_symbol.iteritems()<block_start>module.setattr_force(name value)<block_end><def_stmt>builtin fn<block_start>name=fn.__name__.rstrip('_').decode('utf-8')<line_sep>module.setattr_force(name Builtin(fn name))<line_sep><return>fn<block_end>@builtin@signature(Object Float optional=1)<def_stmt>get_name obj stale<block_start><if_stmt>stale<is><none><block_start>name=naming.get_name(obj)<block_end><else_stmt><block_start>name=naming.get_name(obj stale.number)<block_end><if_stmt>name<is><not><none><block_start><return>String(name)<block_end><else_stmt><block_start><return>null<block_end><block_end>@builtin@signature(Object Object optional=1)<def_stmt>load program path<block_start><if_stmt>path<is><none><block_start>path=null<block_end><else_stmt><block_start>path=pathobj.to_path(path)<block_end><return>from_object(program path)<block_end>@builtin<def_stmt>class_ argv<block_start>exnihilo=argv[0]<line_sep>parent=Object.interface<line_sep>name=String(u"customobject")<assert_stmt>1<le>len(argv)<le>3<if_stmt>len(argv)<g>1<block_start>parent=argv[1]<block_end><if_stmt>len(argv)<g>2<block_start>name=argv[2]<block_end><assert_stmt>isinstance(exnihilo Exnihilo)<line_sep>methods={}<for_stmt>key,index exnihilo.map.attribute_indexes.items()<block_start>methods[key]=exnihilo.storage[index]<block_end>interface=Interface(cast(parent Interface u"parent") cast(name String u"name").string methods CustomObject_instantiate)<line_sep>core.g.finalizer_queue.register_finalizer(interface)<line_sep><return>interface<block_end>@builtin@signature(Object)<def_stmt>iter_ obj<block_start><return>obj.iter()<block_end>@builtin@signature(Object)<def_stmt>hash_ obj<block_start><return>Integer(obj.hash())<block_end>@builtin@signature(Object)<def_stmt>repr_ obj<block_start><return>String(obj.repr())<block_end>@builtin@signature(List)<def_stmt>reversed_ obj<block_start><return>ReversedListIterator(reversed(obj.contents))<block_end><class_stmt>ReversedListIterator(Object)<block_start>_immutable_fields_=['iterator']<def_stmt>__init__ self iterator<block_start>self.iterator=iterator<block_end><def_stmt>iter self<block_start><return>self<block_end><block_end>@ReversedListIterator.method(u"next" signature(ReversedListIterator))<def_stmt>ReversedListIterator_next self<block_start><return>self.iterator.next()<block_end>@builtin@signature(Object Object)<def_stmt>getitem obj index<block_start><return>obj.getitem(index)<block_end>@builtin@signature(Object Object Object)<def_stmt>setitem obj index value<block_start><return>obj.setitem(index value)<block_end>@builtin@signature(Object)<def_stmt>listattr obj<block_start><return>List(obj.listattr())<block_end>## The interface for analysing the interface. 
@builtin@signature(Interface)<def_stmt>list_methods interface<block_start>out=[]<for_stmt>name interface.methods<block_start>out.append(String(name))<block_end><return>List(out)<block_end>@builtin@signature(Interface)<def_stmt>list_multimethods interface<block_start>out=[]<for_stmt>record interface.multimethods<block_start>types=[]<for_stmt>ref record.vec<block_start>interface=ref.weakref()<if_stmt>interface<is><not><none><block_start>types.append(interface)<block_end><block_end><if_stmt>len(types)<eq>len(record.vec)<block_start>row=Exnihilo()<line_sep>row.setattr(u'multimethod' record.multimethod)<line_sep>row.setattr(u'types' List(types))<line_sep>out.append(row)<block_end><block_end><return>List(out)<block_end>@builtin@signature(Object String)<def_stmt>getattr obj index<block_start><return>obj.getattr(index.string)<block_end>@builtin@signature(Object String Object optional=1)<def_stmt>getattr_or obj index default<block_start><if_stmt>default<is><none><block_start>default=null<block_end><return>obj.getattr_or(index.string default)<block_end>@builtin@signature(Object String Object)<def_stmt>setattr obj index value<block_start><return>obj.setattr(index.string value)<block_end>@builtin@signature(String)<def_stmt>ord_ string<block_start><if_stmt>len(string.string)<ne>1<block_start><raise>unwind(LError(u"ord expects a char"))<block_end><return>Integer(ord(string.string[0]))<block_end>@builtin@signature(Integer)<def_stmt>chr_ value<block_start><return>String(unichr(value.value))<block_end>@builtin@signature(Object Object)<def_stmt>isinstance_ value which_list<block_start><if_stmt>isinstance(which_list List)<block_start>whichs=which_list.contents<block_end><else_stmt><block_start>whichs=[which_list]<block_end>interface=get_interface(value)<while_stmt>interface<is><not>null<block_start><if_stmt>interface<in>whichs<block_start><return>true<block_end># There should be exactly one recursively defined interface. <if_stmt>interface.parent<is>interface<block_start><return>false<block_end>interface=interface.parent<block_end><return>false<block_end>@builtin@signature(String Integer optional=1)<def_stmt>parse_int string base<block_start><return>Integer(parse_int_(string base))<block_end>@builtin@signature(String)<def_stmt>parse_float string<block_start><return>FloatRepr(string)<block_end># And and or are macros in the compiler. These are # convenience functions, likely not often used. # erm. Actually 'and' function is used by chaining. 
@builtin@signature(Object Object)<def_stmt>and_ a b<block_start><return>boolean(is_true(a)<and>is_true(b))<block_end>@builtin@signature(Object Object)<def_stmt>or_ a b<block_start><return>boolean(is_true(a)<or>is_true(b))<block_end>@builtin@signature(Object)<def_stmt>len_ obj<block_start><return>obj.getattr(u'length')<block_end>@builtin@signature(Object)<def_stmt>not_ a<block_start><return>boolean(is_false(a))<block_end>@builtin@signature(String)<def_stmt>encode_utf8 value<block_start><return>to_uint8array(value.string.encode('utf-8'))<block_end>@builtin@signature(Uint8Data)<def_stmt>decode_utf8 value<block_start><try_stmt><block_start><return>String(value.to_str().decode('utf-8'))<block_end><except_stmt>UnicodeDecodeError<as>error<block_start><raise>space.unwind(space.LError(u"unicode decode failed"))<block_end><block_end>@builtin<def_stmt>time_ argv<block_start><return>Float(time.time())<block_end>@builtin@signature()<def_stmt>getcwd <block_start><return>pathobj.getcwd()<block_end>@builtin@signature(Object)<def_stmt>chdir obj<block_start>pathobj.chdir(obj)<line_sep><return>null<block_end>@builtin@signature(Integer Integer Integer optional=2)<def_stmt>range_ start stop step<block_start><if_stmt>stop<is><none><block_start>stop=start.value<line_sep>start=0<block_end><else_stmt><block_start>start=start.value<line_sep>stop=stop.value<block_end><if_stmt>step<is><none><block_start>step=1<block_end><else_stmt><block_start>step=step.value<block_end><if_stmt>step<eq>0<block_start><raise>unwind(LTypeError(u"step==0"))<block_end><return>Range(start stop step)<block_end><class_stmt>Range(Object)<block_start>__slots__=['start' 'stop' 'step' 'sign' 'current']<line_sep>_immutable_fields_=['start' 'stop' 'step' 'sign']<def_stmt>__init__ self start stop step<block_start>self.current=start<line_sep>self.stop=stop<line_sep>self.step=step<line_sep>self.sign=+1<if>step<ge>0<else>-1<block_end><def_stmt>iter self<block_start><return>self<block_end><block_end>@Range.method(u"next" signature(Range))<def_stmt>Range_next self<block_start><if_stmt>self.current<times>self.sign<l>self.stop<times>self.sign<block_start>i=self.current<line_sep>self.current<augadd>self.step<line_sep><return>Integer(i)<block_end><raise>StopIteration()<block_end>@builtin@signature(Interface)<def_stmt>super_ interface<block_start><return>interface.parent<block_end>#@builtin #@signature(Object) #def attach_debugger(debugger): # ec = main.get_ec() # ec.debug_hook = debugger # return null <import_stmt>rlibuv<as>uv<line_sep>@builtin@signature(space.Integer optional=1)<def_stmt>exit obj<block_start>ec=core.get_ec()<line_sep>ec.exit_status=0<if>obj<is><none><else>int(obj.value)<line_sep>uv.stop(ec.uv_loop)<line_sep>ec.enqueue(ec.current)# Trick to ensure we get Discard -exception here <return>core.switch([ec.eventloop])<block_end># Once they are created. 
@builtin@signature()<def_stmt>getcurrent <block_start><return>core.get_ec().current<block_end>@builtin@signature()<def_stmt>new_log <block_start>queue=Queue()<if_stmt>queue<in>core.g.log.loggers<block_start><raise>unwind(LError(u"queue has been registered twice."))<block_end>core.g.log.loggers.append(queue)<line_sep><return>queue<block_end>@builtin<def_stmt>print_ argv<block_start>core.g.log.other(u"info" List(argv))<line_sep><return>null<block_end>@builtin@signature(Object String optional=1)<def_stmt>info value type<block_start><if_stmt>type<is><none><block_start>core.g.log.other(u"info" value)<block_end><else_stmt><block_start>core.g.log.other(type.string value)<block_end><return>null<block_end>@builtin@signature(Object)<def_stmt>print_traceback exception<block_start>core.g.log.exception(exception)<line_sep><return>null<block_end>@builtin@signature(Object)<def_stmt>format_traceback exception<block_start><return>String(format_traceback_raw(exception))<block_end><def_stmt>format_traceback_raw exception in_exception_repr=<false><block_start>traceback=exception.getattr(u"traceback")<if_stmt><not>isinstance(traceback space.List)<block_start><raise>space.unwind(space.LError(u"Expected null or list as .traceback: %s"%traceback.repr()))<block_end>out=u""<if_stmt>len(traceback.contents)<g>0<block_start>out=u"\033[31mTraceback:\033[36m\n"<block_end><for_stmt>entry reversed(traceback.contents)<block_start><if_stmt><not>isinstance(entry TraceEntry)<block_start><continue><block_end>name,col0,lno0,col1,lno1=entry.pc_location()<line_sep>out<augadd>u" %s: %d,%d : %d,%d\n"%(name.repr() lno0 col0 lno1 col1)<block_end>out<augadd>u"\033[31m"<line_sep>out<augadd>space.get_interface(exception).name<line_sep>out<augadd>u":\033[0m"<try_stmt><block_start><return>out+u" "+exception.repr()<block_end><except_stmt>Unwinder<as>unwinder<block_start><if_stmt>in_exception_repr<block_start><return>out+u" ... Second error during exception repr"<block_end><return>(out+u" ... Error during exception repr\n"+format_traceback_raw(unwinder.exception <true>))<block_end><block_end><import_from_stmt>rpython.rtyper.lltypesystem rffi lltype llmemory<import_from_stmt>rpython.rlib rgil<import_stmt>rlibuv<as>uv<import_stmt>uv_callback<line_sep>@builtin@signature(Object variadic=<true>)<def_stmt>work func args<block_start><if_stmt><not>core.g.work_pool# The function will be called in separate thread, # so allocate GIL here <block_start>rgil.allocate()<line_sep>core.g.work_pool=WorkPool()<block_end>req=lltype.malloc(uv.work_ptr.TO flavor='raw' zero=<true>)<line_sep>work=Work(func args)<line_sep>core.g.work_pool.push(req work)<try_stmt><block_start>response=uv_callback.after_work(req)<line_sep>response.wait(uv.queue_work(response.ec.uv_loop req work_cb uv_callback.after_work.cb))<if_stmt>work.unwinder<block_start><raise>work.unwinder<block_end><return>work.retval<block_end><finally_stmt><block_start>core.g.work_pool.pop(req)<line_sep>lltype.free(req flavor='raw')<block_end><block_end><def_stmt>work_cb handle<block_start>work=core.g.work_pool.peek(handle)<line_sep>#must_leave = False # must_leave = space.threadlocals.try_enter_thread(space) # Should check for separate threads here and crash # if the callback comes from a thread that has no execution context. 
<try_stmt><block_start>work.retval=work.func.call(work.args)<block_end><except_stmt>Unwinder<as>unwinder<block_start>work.unwinder=unwinder<block_end><except_stmt>Exception<as>e<block_start><try_stmt><block_start>os.write(2 "SystemError: callback raised ")<line_sep>os.write(2 str(e))<line_sep>os.write(2 "\n")<block_end><except_stmt><block_start><pass><block_end><block_end><block_end># if must_leave: # space.threadlocals.leave_thread(space) <class_stmt>WorkPool<block_start><def_stmt>__init__ self<block_start>self.table={}<block_end>@jit.dont_look_inside<def_stmt>peek self handle<block_start><return>self.table[rffi.cast_ptr_to_adr(handle)]<block_end>@jit.dont_look_inside<def_stmt>push self handle value<block_start>self.table[rffi.cast_ptr_to_adr(handle)]=value<block_end>@jit.dont_look_inside<def_stmt>pop self handle<block_start><return>self.table.pop(rffi.cast_ptr_to_adr(handle))<block_end><block_end><class_stmt>Work<block_start><def_stmt>__init__ self func args<block_start>self.func=func<line_sep>self.args=args<line_sep>self.retval=null<line_sep>self.unwinder=<none><block_end><block_end>@builtin@signature(Integer)<def_stmt>guess_handle num<block_start><return>Integer(uv_stream.uv.guess_handle(num.value))<block_end>@builtin@signature(Object)<def_stmt>instantiate_ i<block_start><if_stmt>i<eq>Object.interface<block_start><return>Exnihilo()<block_end><if_stmt>isinstance(i Interface)<block_start><return>CustomObject(i)<block_end><raise>OldError(u"Cannot instantiate from non-interface")<block_end>@builtin@signature(Object)<def_stmt>register_finalizer obj<block_start>core.g.finalizer_queue.register_finalizer(obj)<line_sep><return>null<block_end># @builtin # @signature(Object) # def finalize_on_exit(obj): # ec = core.get_ec() # ec.must_finalize_on_quit[obj] = true # return null @builtin<def_stmt>on_exit argv<block_start>ec=core.get_ec()<line_sep>ec.on_exit.append(argv)<line_sep><return>null<block_end>
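# Host-Python sketch of the iteration rule used by the Range object defined earlier in
# this module: the sign trick (current * sign < stop * sign) gives a single loop
# condition that works for both ascending and descending steps. This is not the RPython
# code itself, just its semantics in ordinary Python.
def range_values(start, stop=None, step=1):
    if stop is None:
        start, stop = 0, start
    if step == 0:
        raise ValueError("step==0")
    sign = +1 if step >= 0 else -1
    current = start
    while current * sign < stop * sign:
        yield current
        current += step

print(list(range_values(5)))          # [0, 1, 2, 3, 4]
print(list(range_values(5, 0, -2)))   # [5, 3, 1]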
<import_from_stmt>django.test TestCase<import_from_stmt>suggestion.algorithm.abstract_algorithm AbstractSuggestionAlgorithm<import_from_stmt>suggestion.algorithm.base_hyperopt_algorithm BaseHyperoptAlgorithm<class_stmt>BaseHyperoptAlgorithmTest(TestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_init self<block_start>instance=BaseHyperoptAlgorithm()<line_sep>self.assertTrue(isinstance(instance AbstractSuggestionAlgorithm))<line_sep>self.assertEqual(instance.__class__ BaseHyperoptAlgorithm)<block_end><block_end>
<import_from_stmt>._aiohttp AIOHTTPCallbackExtension<line_sep>
# # Copyright (c) 2019, <NAME> # This file is licensed under the terms of the MIT license. # # # xds110 support # <import_stmt>sys<import_stmt>time<import_stmt>array<import_from_stmt>lib.ports *<import_from_stmt>lib.utility *<import_from_stmt>lib.shell *<line_sep># ------------------------------------------ XDS_USB=(0x0451 0xbef3)<line_sep># ------------------------------------------ <def_stmt>xds_reset dev delay=100#_ = {0:'CDC Communication', # 1:'CDC Data', 2:'Vendor Specific', 3:'CDC Communication', # 4:'CDC Data', 5:'Human Interface Device', 6:'Vendor Specific'} <block_start>ep=usb_point(dev 2 2)<if_stmt>ep<is><none><block_start><return><false><block_end><for_stmt>v ('00' '01')<times>2<block_start>ep.write(hex2dec('{} {} {} {}'.format('2a' '02' '00' '0e {}'.format(v))))<line_sep>time.sleep(delay/1000)<block_end><return><true><block_end># ------------------------------------------ __scan_test__=('2a 01 00 01' '2a 01 00 03' '2a 05 00 04 00 00 00 00' '2a 01 00 06' '2a 02 00 05 00' '2a 05 00 07 88 13 00 00' '2a 02 00 05 01' '2a 05 00 07 a0 86 01 00' '2a 05 00 2b 01 00 00 00' '2a 01 00 06' '2a 02 00 05 00' '2a 05 00 07 88 13 00 00' '2a 02 00 05 01' '2a 05 00 07 a0 86 01 00' '2a 09 00 09 01 00 00 00 01 00 00 00' '2a 01 00 1a' '2a 01 00 2f' '2a 01 00 02' '2a 01 00 01' '2a 01 00 03' '2a 05 00 04 00 00 00 00' '2a 01 00 06' '2a 02 00 05 00' '2a 05 00 07 88 13 00 00' '2a 02 00 05 01' '2a 05 00 07 a0 86 01 00' '2a 05 00 2b 01 00 00 00' '2a 10 00 0a 00 08 04 01 06 01 00 00 00 00 00 00 01 00 01' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff' )<times>4<times>16)) '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00' )<times>4<times>16)) '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00' )<times>4<times>16)) '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff' )<times>4<times>16)) '2a 10 00 0a 00 08 03 01 05 01 00 00 00 00 00 00 01 00 01' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff' )<times>4<times>16)) '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00' )<times>4<times>16)) '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00' )<times>4<times>16)) '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff' )<times>4<times>16)) '2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 ff ff ff ff' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff' )<times>4<times>16)) '2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 00 00 00 00' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00' )<times>4<times>16)) '2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 e2 e0 03 fe' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('e2 e0 03 fe' )<times>4<times>16)) '2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 1d 1f fc 01' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('1d 1f fc 01' )<times>4<times>16)) '2a 15 00 0b 20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 aa cc 33 55' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('aa cc 33 55' )<times>4<times>16)) '2a 15 00 0b 
20 00 04 01 06 01 00 00 00 00 00 00 01 00 04 00 55 33 cc aa' '2a 13 01 0c 00 08 04 01 06 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('55 33 cc aa' )<times>4<times>16)) '2a 10 00 0a 00 08 04 01 06 01 00 00 00 00 00 00 01 00 01' '2a 01 00 08' '2a 09 00 09 05 00 00 00 02 00 00 00' '2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 ff ff ff ff' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('ff ff ff ff' )<times>4<times>16)) '2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 00 00 00 00' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('00 00 00 00' )<times>4<times>16)) '2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 e2 e0 03 fe' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('e2 e0 03 fe' )<times>4<times>16)) '2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 1d 1f fc 01' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('1d 1f fc 01' )<times>4<times>16)) '2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 aa cc 33 55' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('aa cc 33 55' )<times>4<times>16)) '2a 15 00 0b 20 00 03 01 05 01 00 00 00 00 00 00 01 00 04 00 55 33 cc aa' '2a 13 01 0c 00 08 03 01 05 01 00 00 00 00 00 00 01 00 00 01 00 01 {}'.format(' '.join(('55 33 cc aa' )<times>4<times>16)) '2a 01 00 1a' '2a 01 00 2f' '2a 01 00 02')<line_sep># ------------------------------------------ <def_stmt>xds_test dev reset=<true><block_start><if_stmt>reset<block_start>xds_reset(dev)<block_end>ep2o=usb_point(dev 2 2)<line_sep>ep2i=usb_point(dev 2 3)<line_sep>_=dev.read(ep2i.bEndpointAddress 1024)<def_stmt>send epo msg epi=<none><block_start>_=epo.write(hex2dec(msg))<if_stmt>epi<is><not><none><block_start>buf=dev.read(epi.bEndpointAddress 1024)<line_sep><return>buf<block_end><return><none><block_end><def_stmt>collect v<block_start>res=send(ep2o v ep2i)<if_stmt>res<is><not><none><block_start><if_stmt>len(res)<g>21<block_start>res=set(res[8:])<if_stmt>len(res)%3<ne>1# super-lazy check <block_start><return><false><block_end><block_end><block_end><return><true><block_end><for_stmt>entry __scan_test__<block_start><if_stmt><not>collect(entry)<block_start><raise>Exception('integrity scan-test on the JTAG DR/IR has failed')<block_end><block_end><block_end>
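# The helper hex2dec() comes from lib.utility and is not shown here; from the call
# sites above it is assumed to turn a space-separated hex string into the list of byte
# values handed to ep.write(). A stand-in with that assumed behaviour:
def hex2dec(text):
    return [int(tok, 16) for tok in text.split()]

# e.g. the two reset frames written by xds_reset ('0e 00' then '0e 01'):
print(hex2dec('2a 02 00 0e 00'))   # [42, 2, 0, 14, 0]
print(hex2dec('2a 02 00 0e 01'))   # [42, 2, 0, 14, 1]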
<import_stmt>os<import_stmt>pytest<import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>django.core.files.uploadedfile UploadedFile<import_from_stmt>hs_core.hydroshare add_file_to_resource ResourceFile add_resource_files<import_from_stmt>hs_core.views.utils move_or_rename_file_or_folder<import_from_stmt>hs_file_types.forms ModelInstanceMetadataValidationForm<import_from_stmt>hs_file_types.models ModelInstanceLogicalFile ModelProgramLogicalFile NetCDFLogicalFile GeoRasterLogicalFile GeoFeatureLogicalFile GenericLogicalFile TimeSeriesLogicalFile RefTimeseriesLogicalFile FileSetLogicalFile <line_sep>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_link_model_aggregations_same_resource composite_resource_with_mi_aggregation mock_irods<block_start>"""Test that we can link one model instance aggregation to one model program aggregation within the same resource"""<line_sep>res,user=composite_resource_with_mi_aggregation<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is not related to any model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><none><line_sep># create a model program aggregation file_path='pytest/assets/logan.vrt'<line_sep>upload_folder=''<line_sep>file_to_upload=UploadedFile(file=open(file_path 'rb') name=os.path.basename(file_path))<line_sep>res_file=add_file_to_resource(res file_to_upload folder=upload_folder check_target_folder=<true>)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>0<line_sep># set file to model program aggregation type ModelProgramLogicalFile.set_file_type(res user res_file.id)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>1<line_sep>mp_aggr=ModelProgramLogicalFile.objects.first()<line_sep># link model instance aggregation to model program aggregation mi_validation_form=ModelInstanceMetadataValidationForm(data={"executed_by":mp_aggr.id} user=user resource=res)<assert_stmt>mi_validation_form.is_valid()<line_sep>mi_validation_form.update_metadata(metadata=mi_aggr.metadata)<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is related to model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><not><none><assert_stmt><not>res.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_model_instance_on_model_program_delete composite_resource_with_mi_aggregation mock_irods<block_start>"""Test that when we remove/delete a model program aggregation that the linked model instance aggregation does not get deleted and the metadata of the model instance aggregation is set to dirty"""<line_sep>res,user=composite_resource_with_mi_aggregation<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is not related to any model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><none><line_sep># create a model program aggregation file_path='pytest/assets/logan.vrt'<line_sep>upload_folder=''<line_sep>file_to_upload=UploadedFile(file=open(file_path 'rb') name=os.path.basename(file_path))<line_sep>res_file=add_file_to_resource(res file_to_upload folder=upload_folder check_target_folder=<true>)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>0<line_sep># set file to model program aggregation type ModelProgramLogicalFile.set_file_type(res user 
res_file.id)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>1<line_sep>mp_aggr=ModelProgramLogicalFile.objects.first()<line_sep># link model instance aggregation to model program aggregation mi_validation_form=ModelInstanceMetadataValidationForm(data={"executed_by":mp_aggr.id} user=user resource=res)<assert_stmt>mi_validation_form.is_valid()<line_sep>mi_validation_form.update_metadata(metadata=mi_aggr.metadata)<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is related to model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><not><none><assert_stmt>mi_aggr.metadata.is_dirty<is><true><line_sep># remove/delete mp_aggregation mp_aggr.remove_aggregation()<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>0<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is not related to any model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><none><line_sep># check that mi_aggr metadata is set to dirty <assert_stmt>mi_aggr.metadata.is_dirty<is><true><assert_stmt><not>res.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_model_instance_on_model_program_rename_1 composite_resource_with_mi_aggregation mock_irods<block_start>"""Test that when we rename a file that represents a model program aggregation then the linked model instance aggregation metadata is set to dirty"""<line_sep>res,user=composite_resource_with_mi_aggregation<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is not related to any model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><none><line_sep># create a model program aggregation file_path='pytest/assets/logan.vrt'<line_sep>upload_folder=''<line_sep>file_to_upload=UploadedFile(file=open(file_path 'rb') name=os.path.basename(file_path))<line_sep>res_file=add_file_to_resource(res file_to_upload folder=upload_folder check_target_folder=<true>)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>0<line_sep># set file to model program aggregation type ModelProgramLogicalFile.set_file_type(res user res_file.id)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>1<line_sep>mp_aggr=ModelProgramLogicalFile.objects.first()<line_sep># link model instance aggregation to model program aggregation mi_validation_form=ModelInstanceMetadataValidationForm(data={"executed_by":mp_aggr.id} user=user resource=res)<assert_stmt>mi_validation_form.is_valid()<line_sep>mi_validation_form.update_metadata(metadata=mi_aggr.metadata)<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is related to model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><not><none><assert_stmt>mi_aggr.metadata.is_dirty<is><true><line_sep># rename the model program file name src_path='data/contents/{}'.format(res_file.file_name)<line_sep>tgt_path='data/contents/{}'.format("logan_1.vrt")<line_sep>move_or_rename_file_or_folder(user res.short_id src_path tgt_path)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>1<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr metadata is set to dirty 
<assert_stmt>mi_aggr.metadata.is_dirty<is><true><assert_stmt><not>res.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_model_instance_on_model_program_rename_2 composite_resource_with_mi_aggregation mock_irods<block_start>"""Test that when we rename a folder that represents a model program aggregation then the linked model instance aggregation metadata is set to dirty"""<line_sep>res,user=composite_resource_with_mi_aggregation<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is not related to any model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><none><line_sep># create a model program aggregation file_path='pytest/assets/logan.vrt'<line_sep>mp_folder="mp_folder"<line_sep>ResourceFile.create_folder(res mp_folder)<line_sep>file_to_upload=UploadedFile(file=open(file_path 'rb') name=os.path.basename(file_path))<line_sep>add_file_to_resource(res file_to_upload folder=mp_folder check_target_folder=<true>)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>0<line_sep># set file to model program aggregation type ModelProgramLogicalFile.set_file_type(res user folder_path=mp_folder)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>1<line_sep>mp_aggr=ModelProgramLogicalFile.objects.first()<line_sep># link model instance aggregation to model program aggregation mi_validation_form=ModelInstanceMetadataValidationForm(data={"executed_by":mp_aggr.id} user=user resource=res)<assert_stmt>mi_validation_form.is_valid()<line_sep>mi_validation_form.update_metadata(metadata=mi_aggr.metadata)<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr is related to model program aggregation <assert_stmt>mi_aggr.metadata.executed_by<is><not><none><assert_stmt>mi_aggr.metadata.is_dirty<is><true><line_sep># rename the model program file name src_path='data/contents/{}'.format(mp_folder)<line_sep>tgt_path='data/contents/{}'.format("{}_1".format(mp_folder))<line_sep>move_or_rename_file_or_folder(user res.short_id src_path tgt_path)<assert_stmt>ModelProgramLogicalFile.objects.count()<eq>1<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># check that mi_aggr metadata is set to dirty <assert_stmt>mi_aggr.metadata.is_dirty<is><true><assert_stmt><not>res.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_set_metadata composite_resource_with_mi_aggregation mock_irods<block_start>"""Test that we can store all metadata items for a model instance aggregation"""<line_sep>res,_=composite_resource_with_mi_aggregation<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># test extra metadata <assert_stmt><not>mi_aggr.metadata.extra_metadata<line_sep>extra_meta={'key1':'value 1' 'key2':'value 2'}<line_sep>mi_aggr.metadata.extra_metadata=extra_meta<line_sep>mi_aggr.metadata.save()<assert_stmt>mi_aggr.metadata.extra_metadata<eq>extra_meta<line_sep># test keywords <assert_stmt><not>mi_aggr.metadata.keywords<line_sep>keywords=['kw-1' 'kw-2']<line_sep>mi_aggr.metadata.keywords=keywords<line_sep>mi_aggr.metadata.save()<assert_stmt>mi_aggr.metadata.keywords<eq>keywords<line_sep># test coverage metadata <assert_stmt><not>mi_aggr.metadata.coverages.all()<line_sep>value_dict={'name':'Name for period coverage' 'start':'1/1/2000' 'end':'12/12/2012'}<line_sep>temp_cov=mi_aggr.metadata.create_element('coverage' 
type='period' value=value_dict)<assert_stmt>temp_cov.value['name']<eq>'Name for period coverage'<assert_stmt>temp_cov.value['start']<eq>'1/1/2000'<assert_stmt>temp_cov.value['end']<eq>'12/12/2012'<assert_stmt>mi_aggr.metadata.coverages.all().count()<eq>1<line_sep>value_dict={'east':'56.45678' 'north':'12.6789' 'units':'Decimal degree'}<line_sep>spatial_cov=mi_aggr.metadata.create_element('coverage' type='point' value=value_dict)<assert_stmt>spatial_cov.value['projection']<eq>'WGS 84 EPSG:4326'<assert_stmt>spatial_cov.value['units']<eq>'Decimal degree'<assert_stmt>spatial_cov.value['north']<eq>12.6789<assert_stmt>spatial_cov.value['east']<eq>56.45678<assert_stmt>mi_aggr.metadata.coverages.all().count()<eq>2<line_sep># test model output metadata <assert_stmt><not>mi_aggr.metadata.has_model_output<line_sep>mi_aggr.metadata.has_model_output=<true><line_sep>mi_aggr.metadata.save()<line_sep># test setting metadata json <assert_stmt><not>mi_aggr.metadata.metadata_json<line_sep># set mi metadata json from the content of the following file schema_file_path='pytest/assets/mi_metadata.json'<with_stmt>open(schema_file_path 'r')<as>file_obj<block_start>meta_json=file_obj.read()<block_end><assert_stmt>len(meta_json)<g>0<line_sep>mi_aggr.metadata.metadata_json=meta_json<line_sep>mi_aggr.metadata.save()<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<assert_stmt>mi_aggr.metadata.metadata_json<assert_stmt><not>res.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_auto_netcdf_aggregation_creation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that when a netcdf file is uploaded to a folder that represents a model instance aggregation, a netcdf aggregation is created automatically"""<line_sep>resource,_=composite_resource_with_mi_aggregation_folder<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<assert_stmt>NetCDFLogicalFile.objects.count()<eq>0<line_sep># upload a netcdf file to the mi_aggr_path - folder that represents the model instance aggregation nc_file_name="netcdf_valid.nc"<line_sep>netcdf_file_path="hs_file_types/tests/{}".format(nc_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[netcdf_file_path] upload_folder=mi_aggr_path)<line_sep># there should be three resource file - one generated by netcdf aggregation <assert_stmt>resource.files.all().count()<eq>3<assert_stmt>NetCDFLogicalFile.objects.count()<eq>1<line_sep># the netcdf file added to the model instance folder should be part of a new netcdf aggregation nc_res_file=ResourceFile.get(resource=resource file=nc_file_name folder=mi_aggr_path)<assert_stmt>nc_res_file.has_logical_file<line_sep># the netcdf aggregation should contain 2 files - nc and the txt files <assert_stmt>NetCDFLogicalFile.objects.first().files.count()<eq>2<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_auto_raster_aggregation_creation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that when a raster file (.tif) is uploaded to a folder that represents a model instance aggregation, a raster aggregation is created automatically"""<line_sep>resource,_=composite_resource_with_mi_aggregation_folder<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<assert_stmt>GeoRasterLogicalFile.objects.count()<eq>0<line_sep># upload a raster file to the mi_aggr_path - folder that represents the model instance aggregation 
raster_file_name='small_logan.tif'<line_sep>raster_file_path='hs_file_types/tests/{}'.format(raster_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[raster_file_path] upload_folder=mi_aggr_path)<line_sep># there should be three resource files ( one extra vrt file added as part of raster aggregation creation) <assert_stmt>resource.files.all().count()<eq>3<line_sep># there should be one raster aggregation now <assert_stmt>GeoRasterLogicalFile.objects.count()<eq>1<line_sep># the tif file added to the model instance folder should be part of a new raster aggregation raster_res_file=ResourceFile.get(resource=resource file=raster_file_name folder=mi_aggr_path)<assert_stmt>raster_res_file.has_logical_file<line_sep># the raster aggregation should contain 2 files (tif and vrt) <assert_stmt>GeoRasterLogicalFile.objects.first().files.count()<eq>2<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_auto_geofeature_aggregation_creation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that when files that represents a geofeature are uploaded to a folder that represents a model instance, a geofeature aggregation is created automatically"""<line_sep>resource,_=composite_resource_with_mi_aggregation_folder<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<assert_stmt>GeoFeatureLogicalFile.objects.count()<eq>0<line_sep># upload all 4 geo feature files the mi_aggr_ptah - folder that represents the model instance aggregation base_data_file_path='hs_file_types/tests/data/{}'<line_sep>shp_file_name="states.shp"<line_sep>shp_file_path=base_data_file_path.format(shp_file_name)<line_sep>shx_file_name="states.shx"<line_sep>shx_file_path=base_data_file_path.format(shx_file_name)<line_sep>dbf_file_name="states.dbf"<line_sep>dbf_file_path=base_data_file_path.format(dbf_file_name)<line_sep>prj_file_name="states.prj"<line_sep>prj_file_path=base_data_file_path.format(prj_file_name)<line_sep>geo_feature_files=[shp_file_path shx_file_path dbf_file_path prj_file_path]<line_sep>_add_files_to_resource(resource=resource files_to_add=geo_feature_files upload_folder=mi_aggr_path)<line_sep># there should be five resource files <assert_stmt>resource.files.all().count()<eq>5<line_sep># the shp file added to the model instance folder should be part of a new geo feature aggregation shp_res_file=ResourceFile.get(resource=resource file=shp_file_name folder=mi_aggr_path)<assert_stmt>shp_res_file.has_logical_file<line_sep># the geo feature aggregation should contain 4 files that we uploaded <assert_stmt>GeoFeatureLogicalFile.objects.first().files.count()<eq>4<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_auto_timeseries_aggregation_creation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that when a timeseries sqlite file is uploaded to a folder that represents a model instance, a timeseries aggregation is created automatically from that sqlite file"""<line_sep>resource,_=composite_resource_with_mi_aggregation_folder<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<assert_stmt>TimeSeriesLogicalFile.objects.count()<eq>0<line_sep># upload a sqlite file to the mi_aggr_path - folder that represents the model instance aggregation 
sqlite_file_name='ODM2_Multi_Site_One_Variable.sqlite'<line_sep>sqlite_file_path='hs_file_types/tests/data/{}'.format(sqlite_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[sqlite_file_path] upload_folder=mi_aggr_path)<line_sep># there should be 2 resource files <assert_stmt>resource.files.all().count()<eq>2<line_sep># the sqlite file added to the model instance folder should be part of a new timeseries aggregation sqlite_res_file=ResourceFile.get(resource=resource file=sqlite_file_name folder=mi_aggr_path)<assert_stmt>sqlite_res_file.has_logical_file<assert_stmt>TimeSeriesLogicalFile.objects.count()<eq>1<assert_stmt>ModelInstanceLogicalFile.objects.first().files.count()<eq>1<line_sep># the timeseries aggregation should contain 1 file <assert_stmt>TimeSeriesLogicalFile.objects.first().files.count()<eq>1<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_auto_ref_timeseries_aggregation_creation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that when a ref timeseries json file is uploaded to a folder that represents a model instance aggregation, a ref timeseries aggregation is created automatically from that json file"""<line_sep>resource,_=composite_resource_with_mi_aggregation_folder<assert_stmt>ModelInstanceLogicalFile.objects.first().files.count()<eq>1<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<assert_stmt>RefTimeseriesLogicalFile.objects.count()<eq>0<line_sep># upload a ref timeseries json file to the mi_aggr_path - folder that represents the model instance aggregation ref_timeseries_file_name='multi_sites_formatted_version1.0.refts.json'<line_sep>ref_timeseries_file_path='hs_file_types/tests/{}'.format(ref_timeseries_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[ref_timeseries_file_path] upload_folder=mi_aggr_path)<line_sep># there should be 2 resource files <assert_stmt>resource.files.all().count()<eq>2<line_sep># the json file added to the model instance folder should be part of a new ref timeseries aggregation ref_ts_res_file=ResourceFile.get(resource=resource file=ref_timeseries_file_name folder=mi_aggr_path)<assert_stmt>ref_ts_res_file.has_logical_file<assert_stmt>RefTimeseriesLogicalFile.objects.count()<eq>1<assert_stmt>ModelInstanceLogicalFile.objects.first().files.count()<eq>1<line_sep># ref timeseries aggregation should contain 1 file <assert_stmt>RefTimeseriesLogicalFile.objects.first().files.count()<eq>1<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_canot_create_fileset_within_mi_aggregation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that one can't create a fileset aggregation inside a folder that represents a model instance aggregation"""<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<line_sep>file_path='pytest/assets/logan.vrt'<line_sep>fs_folder='fileset_folder'<line_sep>fs_folder_path=os.path.join(mi_aggr_path fs_folder)<line_sep>ResourceFile.create_folder(resource fs_folder)<line_sep>_add_files_to_resource(resource=resource files_to_add=[file_path] upload_folder=fs_folder_path)<line_sep># trying to set folder to fileset logical file type (aggregation) should fail 
<assert_stmt>FileSetLogicalFile.objects.count()<eq>0<with_stmt>pytest.raises(ValidationError)<block_start>FileSetLogicalFile.set_file_type(resource user folder_path=fs_folder_path)<block_end><assert_stmt>FileSetLogicalFile.objects.count()<eq>0<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_canot_create_mi_aggregation_within_mi_aggregation composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Test that one can't create a model instance aggregation inside a folder that represents a model instance aggregation"""<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<line_sep>mi_aggr_path=ModelInstanceLogicalFile.objects.first().aggregation_name<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>file_path='pytest/assets/logan.vrt'<line_sep>mi_sub_folder='mi_sub_folder'<line_sep>mi_sub_folder_path=os.path.join(mi_aggr_path mi_sub_folder)<line_sep>ResourceFile.create_folder(resource mi_sub_folder)<line_sep>_add_files_to_resource(resource=resource files_to_add=[file_path] upload_folder=mi_sub_folder_path)<line_sep># trying to set folder to model instance should fail <assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<with_stmt>pytest.raises(ValidationError)<block_start>ModelInstanceLogicalFile.set_file_type(resource user folder_path=mi_sub_folder_path)<block_end><assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_move_single_file_aggr_into_model_instance_aggregation composite_resource mock_irods<block_start>""" test that we can move a single file aggregation into a folder that represents a model instance aggregation"""<line_sep>res,user=composite_resource<line_sep>file_path='pytest/assets/generic_file.txt'<line_sep>mi_folder='mi_folder'<line_sep>ResourceFile.create_folder(res mi_folder)<line_sep>file_to_upload=UploadedFile(file=open(file_path 'rb') name=os.path.basename(file_path))<line_sep>add_file_to_resource(res file_to_upload folder=mi_folder check_target_folder=<true>)<assert_stmt>res.files.count()<eq>1<line_sep># at this point there should not be any model instance aggregation <assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>0<line_sep># set folder to model instance aggregation type ModelInstanceLogicalFile.set_file_type(resource=res user=user folder_path=mi_folder)<line_sep>res_file=res.files.first()<assert_stmt>res_file.has_logical_file<line_sep># file has folder <assert_stmt>res_file.file_folder<eq>mi_folder<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep># create a single file aggregation single_file_name='logan.vrt'<line_sep>file_path='pytest/assets/{}'.format(single_file_name)<line_sep>file_to_upload=UploadedFile(file=open(file_path 'rb') name=os.path.basename(file_path))<line_sep>res_file=add_file_to_resource(res file_to_upload check_target_folder=<true>)<line_sep># set file to generic logical file type (aggregation) GenericLogicalFile.set_file_type(res user res_file.id)<assert_stmt>GenericLogicalFile.objects.count()<eq>1<line_sep># moving the logan.vrt file into mi_folder should be successful src_path='data/contents/{}'.format(single_file_name)<line_sep>tgt_path='data/contents/{}/{}'.format(mi_folder single_file_name)<line_sep>move_or_rename_file_or_folder(user res.short_id src_path 
tgt_path)<assert_stmt><not>res.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_update_spatial_coverage_from_children composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Here we are testing fileset level spatial coverage update using the spatial data from the contained (children) aggregations - two child aggregations"""<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># model aggr should not have any spatial coverage <assert_stmt>mi_aggr.metadata.spatial_coverage<is><none><line_sep># auto create a raster aggregation inside the model instance aggregation <assert_stmt>GeoRasterLogicalFile.objects.count()<eq>0<line_sep># upload a raster file to the mi_aggr_path - folder that represents the model instance aggregation raster_file_name='small_logan.tif'<line_sep>raster_file_path='hs_file_types/tests/{}'.format(raster_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[raster_file_path] upload_folder=mi_aggr.folder)<line_sep># there should be three resource files ( one extra vrt file added as part of raster aggregation creation) <assert_stmt>resource.files.all().count()<eq>3<line_sep># there should be one raster aggregation now <assert_stmt>GeoRasterLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># model aggr should now have spatial coverage <assert_stmt>mi_aggr.metadata.spatial_coverage<is><not><none><assert_stmt>mi_aggr.metadata.spatial_coverage.value['northlimit']<eq>42.0500269597691<assert_stmt>mi_aggr.metadata.spatial_coverage.value['eastlimit']<eq>-111.57773718106195<assert_stmt>mi_aggr.metadata.spatial_coverage.value['southlimit']<eq>41.98722286029891<assert_stmt>mi_aggr.metadata.spatial_coverage.value['westlimit']<eq>-111.69756293084055<line_sep># auto create a netcdf aggregation inside the model instance aggregation <assert_stmt>NetCDFLogicalFile.objects.count()<eq>0<line_sep># upload a netcdf file to the folder that represents the model instance aggregation nc_file_name="netcdf_valid.nc"<line_sep>netcdf_file_path="hs_file_types/tests/{}".format(nc_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[netcdf_file_path] upload_folder=mi_aggr.folder)<assert_stmt>NetCDFLogicalFile.objects.count()<eq>1<line_sep>nc_aggr=NetCDFLogicalFile.objects.first()<line_sep># netcdf aggr should have spatial coverage <assert_stmt>nc_aggr.metadata.spatial_coverage<is><not><none><line_sep># update model instance aggregation spatial coverage from the contained 2 aggregations mi_aggr.update_spatial_coverage()<line_sep># test model instance aggregation spatial coverage data <assert_stmt>mi_aggr.metadata.spatial_coverage.value['northlimit']<eq>42.0500269597691<assert_stmt>mi_aggr.metadata.spatial_coverage.value['eastlimit']<eq>-111.50594036845686<assert_stmt>mi_aggr.metadata.spatial_coverage.value['southlimit']<eq>41.8639080745171<assert_stmt>mi_aggr.metadata.spatial_coverage.value['westlimit']<eq>-111.69756293084055<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_no_auto_update_spatial_coverage_from_children composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Here we are testing model instance level spatial coverage auto update does not happen when a contained aggregation spatial coverage gets created as part of 
that aggregation creation since the model instance aggregation has spatial coverage prior to the child aggregation creation """<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># model aggr should not have any spatial coverage <assert_stmt>mi_aggr.metadata.spatial_coverage<is><none><line_sep># create spatial coverage for model instance value_dict={'east':'56.45678' 'north':'12.6789' 'units':'Decimal degree'}<line_sep>mi_aggr.metadata.create_element('coverage' type='point' value=value_dict)<line_sep># model aggr should now have any spatial coverage <assert_stmt>mi_aggr.metadata.spatial_coverage<is><not><none><line_sep># auto create a raster aggregation inside the model instance aggregation <assert_stmt>GeoRasterLogicalFile.objects.count()<eq>0<line_sep># upload a raster file to the mi_aggr_path - folder that represents the model instance aggregation raster_file_name='small_logan.tif'<line_sep>raster_file_path='hs_file_types/tests/{}'.format(raster_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[raster_file_path] upload_folder=mi_aggr.folder)<line_sep># there should be three resource files ( one extra vrt file added as part of raster aggregation creation) <assert_stmt>resource.files.all().count()<eq>3<line_sep># there should be one raster aggregation now <assert_stmt>GeoRasterLogicalFile.objects.count()<eq>1<line_sep>gr_aggr=GeoRasterLogicalFile.objects.first()<line_sep># raster aggr should have spatial coverage <assert_stmt>gr_aggr.metadata.spatial_coverage<is><not><none><assert_stmt>gr_aggr.metadata.spatial_coverage.value['northlimit']<eq>42.0500269597691<assert_stmt>gr_aggr.metadata.spatial_coverage.value['eastlimit']<eq>-111.57773718106195<assert_stmt>gr_aggr.metadata.spatial_coverage.value['southlimit']<eq>41.98722286029891<assert_stmt>gr_aggr.metadata.spatial_coverage.value['westlimit']<eq>-111.69756293084055<line_sep># check model instance spatial coverage has not been updated <assert_stmt>mi_aggr.metadata.spatial_coverage.value['east']<eq>value_dict['east']<assert_stmt>mi_aggr.metadata.spatial_coverage.value['north']<eq>value_dict['north']<assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_auto_update_temporal_coverage_from_children composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Here we are testing model instance level temporal coverage auto update when a contained aggregation temporal coverage gets created as part of that aggregation creation provided the model instance aggregation has no temporal coverage prior to the child aggregation creation """<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># model aggr should not have any temporal coverage <assert_stmt>mi_aggr.metadata.temporal_coverage<is><none><line_sep># auto create a netcdf aggregation inside the model instance aggregation <assert_stmt>NetCDFLogicalFile.objects.count()<eq>0<line_sep># upload a netcdf file to the folder that represents the model instance aggregation nc_file_name="netcdf_valid.nc"<line_sep>netcdf_file_path="hs_file_types/tests/{}".format(nc_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[netcdf_file_path] 
upload_folder=mi_aggr.folder)<assert_stmt>NetCDFLogicalFile.objects.count()<eq>1<line_sep>nc_aggr=NetCDFLogicalFile.objects.first()<line_sep># netcdf aggr should have temporal coverage <assert_stmt>nc_aggr.metadata.temporal_coverage<is><not><none><line_sep># model aggr should now have temporal coverage <assert_stmt>mi_aggr.metadata.temporal_coverage<is><not><none><line_sep># temporal coverage of the model instance aggregation should match with that of the contained # netcdf aggregation <for_stmt>temp_date ('start' 'end')<block_start><assert_stmt>mi_aggr.metadata.temporal_coverage.value[temp_date]<eq>nc_aggr.metadata.temporal_coverage.value[temp_date]<block_end><assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_no_auto_update_temporal_coverage_from_children composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Here we are testing model instance level temporal coverage auto update does not happen when a contained aggregation temporal coverage gets created as part of that aggregation creation since the model instance aggregation has temporal coverage prior to the child aggregation creation """<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># model aggr should not have any temporal coverage <assert_stmt>mi_aggr.metadata.temporal_coverage<is><none><line_sep># create temporal coverage for model instance value_dict={'name':'Name for period coverage' 'start':'1/1/2018' 'end':'12/12/2018'}<line_sep>mi_aggr.metadata.create_element('coverage' type='period' value=value_dict)<line_sep># model aggr should now have temporal coverage <assert_stmt>mi_aggr.metadata.temporal_coverage<is><not><none><line_sep># auto create a netcdf aggregation inside the model instance aggregation <assert_stmt>NetCDFLogicalFile.objects.count()<eq>0<line_sep># upload a netcdf file to the folder that represents the model instance aggregation nc_file_name="netcdf_valid.nc"<line_sep>netcdf_file_path="hs_file_types/tests/{}".format(nc_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[netcdf_file_path] upload_folder=mi_aggr.folder)<assert_stmt>NetCDFLogicalFile.objects.count()<eq>1<line_sep>nc_aggr=NetCDFLogicalFile.objects.first()<line_sep># netcdf aggr should have temporal coverage <assert_stmt>nc_aggr.metadata.temporal_coverage<is><not><none><line_sep># temporal coverage of the model instance aggregation should NOT match with that of the contained # netcdf aggregation <for_stmt>temp_date ('start' 'end')<block_start><assert_stmt>mi_aggr.metadata.temporal_coverage.value[temp_date]<ne>nc_aggr.metadata.temporal_coverage.value[temp_date]<block_end><assert_stmt><not>resource.dangling_aggregations_exist()<block_end>@pytest.mark.django_db(transaction=<true>)<def_stmt>test_update_temporal_coverage_from_children composite_resource_with_mi_aggregation_folder mock_irods<block_start>"""Here we are testing model instance level temporal coverage can be updated by user if the contained aggregations have temporal coverage """<line_sep>resource,user=composite_resource_with_mi_aggregation_folder<assert_stmt>ModelInstanceLogicalFile.objects.count()<eq>1<line_sep>mi_aggr=ModelInstanceLogicalFile.objects.first()<line_sep># model aggr should not have any temporal coverage <assert_stmt>mi_aggr.metadata.temporal_coverage<is><none><line_sep># create temporal coverage for model instance 
value_dict={'name':'Name for period coverage' 'start':'1/1/2018' 'end':'12/12/2018'}<line_sep>mi_aggr.metadata.create_element('coverage' type='period' value=value_dict)<line_sep># model aggr should now have temporal coverage <assert_stmt>mi_aggr.metadata.temporal_coverage<is><not><none><line_sep># auto create a netcdf aggregation inside the model instance aggregation <assert_stmt>NetCDFLogicalFile.objects.count()<eq>0<line_sep># upload a netcdf file to the folder that represents the model instance aggregation nc_file_name="netcdf_valid.nc"<line_sep>netcdf_file_path="hs_file_types/tests/{}".format(nc_file_name)<line_sep>_add_files_to_resource(resource=resource files_to_add=[netcdf_file_path] upload_folder=mi_aggr.folder)<assert_stmt>NetCDFLogicalFile.objects.count()<eq>1<line_sep>nc_aggr=NetCDFLogicalFile.objects.first()<line_sep># netcdf aggr should have temporal coverage <assert_stmt>nc_aggr.metadata.temporal_coverage<is><not><none><line_sep># temporal coverage of the model instance aggregation should NOT match with that of the contained # netcdf aggregation <for_stmt>temp_date ('start' 'end')<block_start><assert_stmt>mi_aggr.metadata.temporal_coverage.value[temp_date]<ne>nc_aggr.metadata.temporal_coverage.value[temp_date]<block_end># update temporal coverage for model instance from contained aggregations mi_aggr.update_temporal_coverage()<line_sep># temporal coverage of the model instance aggregation should now match with that of the contained # netcdf aggregation <for_stmt>temp_date ('start' 'end')<block_start><assert_stmt>mi_aggr.metadata.temporal_coverage.value[temp_date]<eq>nc_aggr.metadata.temporal_coverage.value[temp_date]<block_end><assert_stmt><not>resource.dangling_aggregations_exist()<block_end><def_stmt>_add_files_to_resource resource files_to_add upload_folder=<none><block_start>files_to_upload=[]<for_stmt>fl files_to_add<block_start>file_to_upload=UploadedFile(file=open(fl 'rb') name=os.path.basename(fl))<line_sep>files_to_upload.append(file_to_upload)<block_end>added_resource_files=add_resource_files(resource.short_id *files_to_upload folder=upload_folder)<line_sep><return>added_resource_files<block_end>
<import_from_future_stmt> annotations<import_from_stmt>typing List Optional<import_from_stmt>pydantic BaseModel<class_stmt>Pet(BaseModel)<block_start>id:int<line_sep>name:str<line_sep>tag:Optional[str]=<none><block_end><class_stmt>Pets(BaseModel)<block_start>__root__:List[Pet]<block_end><class_stmt>Error(BaseModel)<block_start>code:int<line_sep>message:str<block_end><class_stmt>Event(BaseModel)<block_start>name:Optional[str]=<none><block_end><class_stmt>Result(BaseModel)<block_start>event:Optional[Event]=<none><block_end><class_stmt>Events(BaseModel)<block_start>__root__:List[Event]<block_end><class_stmt>EventRoot(BaseModel)<block_start>__root__:Event<block_end><class_stmt>EventObject(BaseModel)<block_start>event:Optional[Event]=<none><block_end><class_stmt>DuplicateObject1(BaseModel)<block_start>event:Optional[List[Event]]=<none><block_end><class_stmt>Event1(BaseModel)<block_start>event:Optional[Event]=<none><block_end><class_stmt>DuplicateObject2(BaseModel)<block_start>event:Optional[Event1]=<none><block_end><class_stmt>DuplicateObject3(BaseModel)<block_start>__root__:Event<block_end>
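# --- Editorial sketch, not part of the original file above. It illustrates the
# custom-root-type pattern used by Pets/Events/EventRoot: a model whose __root__
# field wraps a bare list or object. This assumes pydantic v1 (the __root__
# mechanism was replaced by RootModel in v2); model names mirror the ones above.
from typing import List, Optional
from pydantic import BaseModel

class Pet(BaseModel):
    id: int
    name: str
    tag: Optional[str] = None

class Pets(BaseModel):
    __root__: List[Pet]

# parse_obj accepts the bare list directly; the parsed items live under __root__
pets = Pets.parse_obj([{"id": 1, "name": "Rex"}, {"id": 2, "name": "Mia", "tag": "cat"}])
print(pets.__root__[1].tag)  # -> "cat"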
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>enum Enum<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>backend.utils.basic ChoicesEnum<line_sep># 管理员标示 SUPER_ROLE='manager'<class_stmt>Policy(Enum)# 项目管理 <block_start>PROJECT='modify:project:btn'<line_sep># 集群管理 CLUSTER='cluster:menu'<line_sep># 节点管理 NODE='node:menu'<line_sep># 应用管理 APP='app:menu'<line_sep># 配置管理 CONFIGURATION='configuration:menu'<line_sep># 网络管理 NETWORK='network:menu'<line_sep># 资源管理 RESOURCE='resource:menu'<line_sep># 仓库管理 REPO='repo:menu'<line_sep># 仓库按钮 REPO_MODIFY='modify:repo:btn'<block_end><class_stmt>PolicyEffect(Enum)# 正常 <block_start>NORMAL=0<line_sep># 隐藏 HIDDEN=1<line_sep># 按钮置灰 DISABLED=2<block_end>PolicyLabelOrdering=[_("容器服务") _("仓库管理") _("项目管理")]<line_sep>PolicyOrdering={'jfrog':[_("prod环境拉取") _("prod环境推送") _("test环境拉取") _("test环境推送") _("dev环境拉取") _("dev环境推送")] 'paas_backend':[_("集群管理") _("节点管理") _("应用管理") _("网络管理") _("仓库管理") _("资源管理")] 'apigw':[] }<class_stmt>StaffInfoStatus(ChoicesEnum)# 审批中,默认 <block_start>NORMAL=0<line_sep>INCUMBENCY=1<line_sep>RESIGN=2<line_sep>TRIAL=3<line_sep>WAITING_ENTRY=8<line_sep>NOT_ENTRY=9<line_sep>_choices_labels=((NORMAL _("正常")) (INCUMBENCY _("在职")) # 现在都返回显示正常 (RESIGN _("已离职")) (TRIAL _("试用")) (WAITING_ENTRY _("待入职")) (NOT_ENTRY _("待入职")) )<block_end>
<import_stmt>os<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>external.pytorch_pretrained_bert BertTokenizer<import_from_stmt>common.module Module<import_from_stmt>common.fast_rcnn FastRCNN<import_from_stmt>common.visual_linguistic_bert VisualLinguisticBert<import_from_stmt>common.utils.misc soft_cross_entropy<line_sep>BERT_WEIGHTS_NAME='pytorch_model.bin'<class_stmt>ResNetVLBERTForAttentionVis(Module)<block_start><def_stmt>__init__ self config<block_start>super(ResNetVLBERTForAttentionVis self).__init__(config)<line_sep>self.image_feature_extractor=FastRCNN(config average_pool=<true> final_dim=config.NETWORK.IMAGE_FINAL_DIM enable_cnn_reg_loss=<false>)<line_sep>self.object_linguistic_embeddings=nn.Embedding(1 config.NETWORK.VLBERT.hidden_size)<if_stmt>config.NETWORK.IMAGE_FEAT_PRECOMPUTED<or>(<not>config.NETWORK.MASK_RAW_PIXELS)<block_start>self.object_mask_visual_embedding=nn.Embedding(1 2048)<block_end><if_stmt>config.NETWORK.WITH_MVRC_LOSS<block_start>self.object_mask_word_embedding=nn.Embedding(1 config.NETWORK.VLBERT.hidden_size)<block_end>self.aux_text_visual_embedding=nn.Embedding(1 config.NETWORK.VLBERT.hidden_size)<line_sep>self.image_feature_bn_eval=config.NETWORK.IMAGE_FROZEN_BN<line_sep>self.tokenizer=BertTokenizer.from_pretrained(config.NETWORK.BERT_MODEL_NAME)<line_sep>language_pretrained_model_path=<none><if_stmt>config.NETWORK.BERT_PRETRAINED<ne>''<block_start>language_pretrained_model_path='{}-{:04d}.model'.format(config.NETWORK.BERT_PRETRAINED config.NETWORK.BERT_PRETRAINED_EPOCH)<block_end><elif_stmt>os.path.isdir(config.NETWORK.BERT_MODEL_NAME)<block_start>weight_path=os.path.join(config.NETWORK.BERT_MODEL_NAME BERT_WEIGHTS_NAME)<if_stmt>os.path.isfile(weight_path)<block_start>language_pretrained_model_path=weight_path<block_end><block_end><if_stmt>language_pretrained_model_path<is><none><block_start>print("Warning: no pretrained language model found, training from scratch!!!")<block_end>self.vlbert=VisualLinguisticBert(config.NETWORK.VLBERT language_pretrained_model_path=<none><if>config.NETWORK.VLBERT.from_scratch<else>language_pretrained_model_path)<line_sep># init weights self.init_weight()<line_sep>self.fix_params()<block_end><def_stmt>init_weight self<block_start><if_stmt>self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED<or>(<not>self.config.NETWORK.MASK_RAW_PIXELS)<block_start>self.object_mask_visual_embedding.weight.data.fill_(0.0)<block_end><if_stmt>self.config.NETWORK.WITH_MVRC_LOSS<block_start>self.object_mask_word_embedding.weight.data.normal_(mean=0.0 std=self.config.NETWORK.VLBERT.initializer_range)<block_end>self.aux_text_visual_embedding.weight.data.normal_(mean=0.0 std=self.config.NETWORK.VLBERT.initializer_range)<line_sep>self.image_feature_extractor.init_weight()<if_stmt>self.object_linguistic_embeddings<is><not><none><block_start>self.object_linguistic_embeddings.weight.data.normal_(mean=0.0 std=self.config.NETWORK.VLBERT.initializer_range)<block_end><block_end><def_stmt>train self mode=<true><block_start>super(ResNetVLBERTForAttentionVis self).train(mode)<line_sep># turn some frozen layers to eval mode <if_stmt>self.image_feature_bn_eval<block_start>self.image_feature_extractor.bn_eval()<block_end><block_end><def_stmt>fix_params self<block_start><pass><block_end><def_stmt>_collect_obj_reps self span_tags object_reps<block_start>""" Collect span-level object representations :param span_tags: [batch_size, ..leading_dims.., L] :param object_reps: [batch_size, max_num_objs_per_batch, obj_dim] :return: 
"""<line_sep>span_tags_fixed=torch.clamp(span_tags min=0)# In case there were masked values here row_id=span_tags_fixed.new_zeros(span_tags_fixed.shape)<line_sep>row_id_broadcaster=torch.arange(0 row_id.shape[0] step=1 device=row_id.device)[: <none>]<line_sep># Add extra diminsions to the row broadcaster so it matches row_id leading_dims=len(span_tags.shape)-2<for_stmt>i range(leading_dims)<block_start>row_id_broadcaster=row_id_broadcaster[<ellipsis> <none>]<block_end>row_id<augadd>row_id_broadcaster<line_sep><return>object_reps[row_id.view(-1) span_tags_fixed.view(-1)].view(*span_tags_fixed.shape -1)<block_end><def_stmt>forward self image boxes im_info text relationship_label mlm_labels mvrc_ops mvrc_labels *aux# concat aux texts from different dataset # assert len(aux) > 0 and len(aux) % 2 == 0 <block_start>aux_text_list=aux[0::2]<line_sep>aux_text_mlm_labels_list=aux[1::2]<line_sep>num_aux_text=sum([_text.shape[0]<for>_text aux_text_list])<line_sep>max_aux_text_len=max([_text.shape[1]<for>_text aux_text_list])<if>len(aux_text_list)<g>0<else>0<line_sep>aux_text=text.new_zeros((num_aux_text max_aux_text_len))<line_sep>aux_text_mlm_labels=mlm_labels.new_zeros((num_aux_text max_aux_text_len)).fill_(-1)<line_sep>_cur=0<for_stmt>_text,_mlm_labels zip(aux_text_list aux_text_mlm_labels_list)<block_start>_num=_text.shape[0]<line_sep>aux_text[_cur:(_cur+_num) :_text.shape[1]]=_text<line_sep>aux_text_mlm_labels[_cur:(_cur+_num) :_text.shape[1]]=_mlm_labels<line_sep>_cur<augadd>_num<block_end>########################################### # visual feature extraction images=image<line_sep>box_mask=(boxes[: : 0]<g>-1.5)<line_sep>origin_len=boxes.shape[1]<line_sep>max_len=int(box_mask.sum(1).max().item())<line_sep>box_mask=box_mask[: :max_len]<line_sep>boxes=boxes[: :max_len]<line_sep>mvrc_ops=mvrc_ops[: :max_len]<line_sep>mvrc_labels=mvrc_labels[: :max_len]<if_stmt>self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED<block_start>box_features=boxes[: : 4:]<line_sep>box_features[mvrc_ops<eq>1]=self.object_mask_visual_embedding.weight[0]<line_sep>boxes[: : 4:]=box_features<block_end>obj_reps=self.image_feature_extractor(images=images boxes=boxes box_mask=box_mask im_info=im_info classes=<none> segms=<none> mvrc_ops=mvrc_ops mask_visual_embed=self.object_mask_visual_embedding.weight[0]<if>(<not>self.config.NETWORK.IMAGE_FEAT_PRECOMPUTED)<and>(<not>self.config.NETWORK.MASK_RAW_PIXELS)<else><none>)<line_sep>############################################ # prepare text text_input_ids=text<line_sep>text_tags=text.new_zeros(text.shape)<line_sep>text_visual_embeddings=self._collect_obj_reps(text_tags obj_reps['obj_reps'])<line_sep>object_linguistic_embeddings=self.object_linguistic_embeddings(boxes.new_zeros((boxes.shape[0] boxes.shape[1])).long())<if_stmt>self.config.NETWORK.WITH_MVRC_LOSS<block_start>object_linguistic_embeddings[mvrc_ops<eq>1]=self.object_mask_word_embedding.weight[0]<block_end>object_vl_embeddings=torch.cat((obj_reps['obj_reps'] object_linguistic_embeddings) -1)<line_sep># add auxiliary text max_text_len=max(text_input_ids.shape[1] aux_text.shape[1])<line_sep>text_input_ids_multi=text_input_ids.new_zeros((text_input_ids.shape[0]+aux_text.shape[0] max_text_len))<line_sep>text_input_ids_multi[:text_input_ids.shape[0] :text_input_ids.shape[1]]=text_input_ids<line_sep>text_input_ids_multi[text_input_ids.shape[0]: 
:aux_text.shape[1]]=aux_text<line_sep>text_token_type_ids_multi=text_input_ids_multi.new_zeros(text_input_ids_multi.shape)<line_sep>text_mask_multi=(text_input_ids_multi<g>0)<line_sep>text_visual_embeddings_multi=text_visual_embeddings.new_zeros((text_input_ids.shape[0]+aux_text.shape[0] max_text_len text_visual_embeddings.shape[-1]))<line_sep>text_visual_embeddings_multi[:text_visual_embeddings.shape[0] :text_visual_embeddings.shape[1]]=text_visual_embeddings<line_sep>text_visual_embeddings_multi[text_visual_embeddings.shape[0]:]=self.aux_text_visual_embedding.weight[0]<line_sep>object_vl_embeddings_multi=object_vl_embeddings.new_zeros((text_input_ids.shape[0]+aux_text.shape[0] *object_vl_embeddings.shape[1:]))<line_sep>object_vl_embeddings_multi[:object_vl_embeddings.shape[0]]=object_vl_embeddings<line_sep>box_mask_multi=box_mask.new_zeros((text_input_ids.shape[0]+aux_text.shape[0] *box_mask.shape[1:]))<line_sep>box_mask_multi[:box_mask.shape[0]]=box_mask<line_sep>########################################### # Visual Linguistic BERT encoder_layers,_,attention_probs=self.vlbert(text_input_ids_multi text_token_type_ids_multi text_visual_embeddings_multi text_mask_multi object_vl_embeddings_multi box_mask_multi output_all_encoded_layers=<true> output_attention_probs=<true>)<line_sep>hidden_states=torch.stack(encoder_layers dim=0).transpose(0 1).contiguous()<line_sep>attention_probs=torch.stack(attention_probs dim=0).transpose(0 1).contiguous()<line_sep><return>{'attention_probs':attention_probs 'hidden_states':hidden_states}<block_end><block_end>
<import_stmt>torch<import_from_stmt>ignite.utils to_onehot<class_stmt>ClassificationOutputTransform<block_start><def_stmt>__init__ self num_classes=<none><block_start>self._num_classes=num_classes<block_end><def_stmt>__call__ self output<block_start><if_stmt>isinstance(output tuple)<block_start>y_pred,y=output<block_end><elif_stmt>isinstance(output dict)<block_start>y_pred=output["y_pred"]<line_sep>y=output["y"]<block_end><else_stmt><block_start><raise>ValueError<block_end><if_stmt>self._num_classes<block_start>y_pred=y_pred.clamp(min=0 max=self._num_classes-1).long()<line_sep>y=y.clamp(min=0 max=self._num_classes-1).long()<line_sep>y_pred=to_onehot(y_pred self._num_classes)<block_end><else_stmt><block_start>y_pred=y_pred.long()<line_sep>y=y.long()<block_end><return>y_pred y<block_end><block_end>
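# --- Editorial sketch, not part of the original file above. It shows what the
# ClassificationOutputTransform above returns for a dict-style engine output when
# num_classes is set: predictions are clamped, cast to long, and one-hot encoded,
# while the targets stay as class indices. The tensors below are made-up values.
import torch
from ignite.utils import to_onehot

num_classes = 3
output = {"y_pred": torch.tensor([0, 2, 1]), "y": torch.tensor([0, 1, 1])}

y_pred = output["y_pred"].clamp(min=0, max=num_classes - 1).long()
y = output["y"].clamp(min=0, max=num_classes - 1).long()
print(to_onehot(y_pred, num_classes))  # shape (3, 3): one one-hot row per sample
print(y)                               # remains a 1-D LongTensor of class indices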
# This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Module for interfacing with a remote extractor."""<import_stmt>logging<import_from_stmt>typing Optional Callable List<import_stmt>numpy<as>np<import_from_stmt>.utils generate_wsr bitarray_to_bytes<import_from_stmt>.baserandomservice BaseRandomService<import_from_stmt>.cqcextractorjob CQCExtractorJob<line_sep>logger=logging.getLogger(__name__)<class_stmt>CQCExtractor(BaseRandomService)<block_start>"""Class for interfacing with a CQC remote extractor. There are two extractor methods - Dodis (extractor 1) and Hayashi (extractor 2). These methods can be invoked synchronously or asynchronously. To invoke them synchronously:: random_bits = extractor.run(*cqc_parameters) To invoke them asynchronously:: import numpy as np extractor1_out = extractor.run_async_ext1(*ext1_parameters).block_until_ready() extractor2_out = extractor.run_async_ext2( ext2_seed=extractor1_out, *ext2_parameters).block_until_ready() random_bits = np.append(extractor1_out, extractor2_out) Running them asynchronously takes more steps because extractor 2 uses the output of extractor 1 as its seed, so it must wait for extractor 1 to finish first. """<def_stmt>run # type: ignore[override] self ext1_input_num_bits:int ext1_output_num_bits:int ext1_raw_bytes:bytes ext1_wsr_bytes:bytes ext2_seed_num_bits:int ext2_wsr_multiplier:int ext2_wsr_generator:Optional[Callable]=<none><arrow>List[int]<block_start>"""Process input data synchronously. Args: ext1_input_num_bits: Number of input bits, for extractor 1. ext1_output_num_bits: Number of output bits, for extractor 1. ext1_raw_bytes: Initial random numbers, in bytes, for extractor 1. ext1_wsr_bytes: Initial WSRs, in bytes, for extractor 1. ext2_seed_num_bits: Number of bits in the seed, for extractor 2. ext2_wsr_multiplier: WSR multiplier, for extractor 2. The number of bits used by extractor 2 is ext2_seed_num_bits*ext2_wsr_multiplier. ext2_wsr_generator: WSR generator used for extractor 2. It must take the number of bits as the input and a list of random bits (0s and 1s) as the output. If ``None``, :func:``generate_wsr`` is used. Returns: An instance of ``CQCExtractorJob`` which can be used to retrieve the results later. """<line_sep># pylint: disable=arguments-differ # Run ext1 output=self.run_async_ext1(ext1_input_num_bits ext1_output_num_bits ext1_raw_bytes ext1_wsr_bytes).block_until_ready()<line_sep># Run ext2 if requested. <if_stmt>ext2_wsr_multiplier<ne>0<block_start>ext2_out=self.run_async_ext2(output ext2_seed_num_bits ext2_wsr_multiplier ext2_wsr_generator).block_until_ready()<line_sep>output=np.append(output ext2_out).tolist()<block_end><return>output<block_end><def_stmt>run_async_ext1 self ext1_input_num_bits:int ext1_output_num_bits:int ext1_raw_bytes:bytes ext1_wsr_bytes:bytes<arrow>CQCExtractorJob<block_start>"""Run the first extractor asynchronously. Args: ext1_input_num_bits: Number of input bits, for extractor 1. ext1_output_num_bits: Number of output bits, for extractor 1. ext1_raw_bytes: Initial random numbers, in bytes, for extractor 1. ext1_wsr_bytes: Initial WSRs, in bytes, for extractor 1. 
Returns: An instance of ``CQCExtractorJob`` which can be used to retrieve the results later. Raises: ValueError: If an invalid argument values are specified. """<if_stmt><not>ext1_input_num_bits<or><not>ext1_output_num_bits<block_start><raise>ValueError("Invalid input arguments. ext1_input_num_bits and "<concat>"ext1_output_num_bits must be non-zero.")<block_end>logger.info("Starting first extraction.")<line_sep># Run ext1 ext1_data={"n":ext1_input_num_bits "m":ext1_output_num_bits}<line_sep>ext1_files={"x":ext1_raw_bytes "y":ext1_wsr_bytes}<line_sep>response=self._client.extract(name='cqc' method='ext1' data=ext1_data files=ext1_files)<line_sep>parameters={'ext1_input_num_bits':ext1_input_num_bits 'ext1_output_num_bits':ext1_output_num_bits 'ext1_raw_bytes':ext1_raw_bytes 'ext1_wsr_bytes':ext1_wsr_bytes}<line_sep><return>CQCExtractorJob(job_id=response['id'] client=self._client parameters=parameters)<block_end><def_stmt>run_async_ext2 self ext2_seed:List[int] ext2_seed_num_bits:int ext2_wsr_multiplier:int ext2_wsr_generator:Optional[Callable]=<none><arrow>CQCExtractorJob<block_start>"""Run the second extractor asynchronously. Args: ext2_seed: Seed used for extractor 2, such as the output of extractor 1. ext2_seed_num_bits: Number of bits in the seed, for extractor 2. ext2_wsr_multiplier: WSR multiplier, for extractor 2. The number of bits used by extractor 2 is ext2_seed_num_bits*ext2_wsr_multiplier. ext2_wsr_generator: WSR generator used for extractor 2. It must take the number of bits as the input and a list of random bits (0s and 1s) as the output. If ``None``, :func:``generate_wsr`` is used. Returns: An instance of ``CQCExtractorJob`` which can be used to retrieve the results later. Raises: ValueError: If an invalid argument values are specified. """<if_stmt><not>ext2_seed_num_bits<or><not>ext2_wsr_multiplier<block_start><raise>ValueError("Invalid input arguments. ext2_seed_num_bits and "<concat>"ext2_wsr_multiplier must be non-zero.")<block_end>logger.info("Starting second extraction.")<line_sep>ext2_seed=bitarray_to_bytes(ext2_seed[:ext2_seed_num_bits])# type: ignore[assignment] <if_stmt>ext2_wsr_generator<is><none><block_start>ext2_wsr_generator=generate_wsr<block_end>ext2_wsr=ext2_wsr_generator(ext2_seed_num_bits<times>ext2_wsr_multiplier)<line_sep>ext2_wsr=bitarray_to_bytes(ext2_wsr)<line_sep>ext2_data={"a":ext2_seed_num_bits "b":ext2_wsr_multiplier}<line_sep>ext2_files={"r":ext2_seed "x":ext2_wsr}<line_sep>response=self._client.extract(name='cqc' method='ext2' data=ext2_data files=ext2_files)<line_sep>parameters={'ext2_seed_num_bits':ext2_seed_num_bits 'ext2_wsr_multiplier':ext2_wsr_multiplier 'ext2_seed_bytes':ext2_seed 'ext2_wsr':ext2_wsr}<line_sep><return>CQCExtractorJob(job_id=response['id'] client=self._client parameters=parameters)<block_end><def_stmt>retrieve_job self job_id:str<arrow>CQCExtractorJob<block_start>"""Retrieve a previously submitted job. Args: job_id: Job ID. Returns: A ``CQCExtractorJob`` instance. """<line_sep><return>CQCExtractorJob(job_id self._client)<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>"<{}('{}') from {}>".format(self.__class__.__name__ self.name self._provider)<block_end><block_end>
TEMPLATES=[]<line_sep>PLATFORMS={}<line_sep>MIDDLEWARES=[]<line_sep>
<import_stmt>pytest<import_from_stmt>pathlib Path<import_from_stmt>testbook testbook<line_sep>TEST_DIR=Path(__file__).parent.parent.resolve()<line_sep>NB_DIR=TEST_DIR.parent<line_sep>NB_PATH=NB_DIR/"Frequently_used_code"/"Polygon_drill.ipynb"<line_sep>@pytest.fixture(scope="module")<def_stmt>tb <block_start><with_stmt>testbook(NB_PATH execute=<true>)<as>tb<block_start><yield>tb<block_end><block_end><def_stmt>test_ok tb<block_start><assert_stmt><true><block_end># ok <def_stmt>test_geometry tb<block_start>gdf=tb.ref("polygon_to_drill")<assert_stmt>"geometry"<in>gdf.columns<block_end><def_stmt>test_vars tb<block_start>ds=tb.ref("data")<line_sep>expected_vars=["time" "y" "x" "spatial_ref" "nbart_red" "nbart_green" "nbart_blue" ]<for_stmt>var expected_vars<block_start><assert_stmt>var<in>ds.variables<block_end><block_end><def_stmt>test_shape tb<block_start>ds=tb.ref("mask")<assert_stmt>len(ds.x)<eq>97<assert_stmt>len(ds.y)<eq>120<block_end><def_stmt>test_masked tb<block_start>ds=tb.ref("data_masked")<assert_stmt>ds.nbart_red.isnull().any().item()<block_end>
<import_from_stmt>.authorize_result AuthorizeResult<line_sep>
""" README ====== This file contains Python codes. ====== """<line_sep>""" Store common attributes of a stock option """<import_stmt>math<class_stmt>StockOption(object)<block_start><def_stmt>__init__ self S0 K r T N params<block_start>self.S0=S0<line_sep>self.K=K<line_sep>self.r=r<line_sep>self.T=T<line_sep>self.N=max(1 N)# Ensure N have at least 1 time step self.STs=<none># Declare the stock prices tree """ Optional parameterss used by derived classes """<line_sep>self.pu=params.get("pu" 0)# Probability of up state self.pd=params.get("pd" 0)# Probability of down state self.div=params.get("div" 0)# Divident yield self.sigma=params.get("sigma" 0)# Volatility self.is_call=params.get("is_call" <true>)# Call or put self.is_european=params.get("is_eu" <true>)# Eu or Am """ Computed values """<line_sep>self.dt=T/float(N)# Single time step, in years self.df=math.exp(-(r-self.div)<times>self.dt)<block_end><block_end># Discount factor
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>Configuration.Eras.Era_Run3_dd4hep_cff Run3_dd4hep<line_sep>process=cms.Process("GeometryWriter" Run3_dd4hep)<import_from_stmt>Configuration.ProcessModifiers.dd4hep_cff dd4hep<line_sep>process.load('CondCore.CondDB.CondDB_cfi')<line_sep>process.load('Configuration.Geometry.GeometryDD4hepExtended2021_cff')<line_sep>process.load('Geometry.CaloEventSetup.CaloGeometryDBWriter_cfi')<line_sep>process.load('CondTools.Geometry.HcalParametersWriter_cff')<line_sep>process.load("Geometry.MuonNumbering.muonGeometryConstants_cff")<line_sep>process.CaloGeometryBuilder=cms.ESProducer("CaloGeometryBuilder" SelectedCalos=cms.vstring('HCAL' 'ZDC' 'EcalBarrel' 'EcalEndcap' 'EcalPreshower' 'TOWER'))<line_sep>process.source=cms.Source("EmptyIOVSource" lastValue=cms.uint64(1) timetype=cms.string('runnumber') firstValue=cms.uint64(1) interval=cms.uint64(1))<line_sep># This reads the big XML file and the only way to fill the # nonreco part of the database is to read this file. process.XMLGeometryWriter=cms.EDAnalyzer("XMLGeometryBuilder" XMLFileName=cms.untracked.string("./geSingleBigFile.xml") ZIP=cms.untracked.bool(<true>))<line_sep>process.TrackerGeometryWriter=cms.EDAnalyzer("PGeometricDetBuilder" fromDD4hep=cms.bool(<true>))<line_sep>process.TrackerParametersWriter=cms.EDAnalyzer("PTrackerParametersDBBuilder" fromDD4hep=cms.bool(<true>))<line_sep>process.CaloGeometryWriter=cms.EDAnalyzer("PCaloGeometryBuilder" fromDD4Hep=cms.untracked.bool(<true>))<line_sep>process.CSCGeometryWriter=cms.EDAnalyzer("CSCRecoIdealDBLoader" fromDD4Hep=cms.untracked.bool(<true>))<line_sep>process.DTGeometryWriter=cms.EDAnalyzer("DTRecoIdealDBLoader" fromDD4Hep=cms.untracked.bool(<true>))<line_sep>process.RPCGeometryWriter=cms.EDAnalyzer("RPCRecoIdealDBLoader" fromDD4Hep=cms.untracked.bool(<true>))<line_sep>process.GEMGeometryWriter=cms.EDAnalyzer("GEMRecoIdealDBLoader" fromDD4Hep=cms.untracked.bool(<true>))<line_sep>process.CondDB.timetype=cms.untracked.string('runnumber')<line_sep>process.CondDB.connect=cms.string('sqlite_file:myfile.db')<line_sep>process.PoolDBOutputService=cms.Service("PoolDBOutputService" process.CondDB toPut=cms.VPSet(cms.PSet(record=cms.string('GeometryFileRcd') tag=cms.string('XMLFILE_Geometry_TagXX_Extended2021_mc')) cms.PSet(record=cms.string('IdealGeometryRecord') tag=cms.string('TKRECO_Geometry_TagXX')) cms.PSet(record=cms.string('PTrackerParametersRcd') tag=cms.string('TKParameters_Geometry_TagXX')) cms.PSet(record=cms.string('PEcalBarrelRcd') tag=cms.string('EBRECO_Geometry_TagXX')) cms.PSet(record=cms.string('PEcalEndcapRcd') tag=cms.string('EERECO_Geometry_TagXX')) cms.PSet(record=cms.string('PEcalPreshowerRcd') tag=cms.string('EPRECO_Geometry_TagXX')) cms.PSet(record=cms.string('PHcalRcd') tag=cms.string('HCALRECO_Geometry_TagXX')) cms.PSet(record=cms.string('HcalParametersRcd') tag=cms.string('HCALParameters_Geometry_TagXX')) cms.PSet(record=cms.string('PCaloTowerRcd') tag=cms.string('CTRECO_Geometry_TagXX')) cms.PSet(record=cms.string('PZdcRcd') tag=cms.string('ZDCRECO_Geometry_TagXX')) cms.PSet(record=cms.string('PCastorRcd') tag=cms.string('CASTORRECO_Geometry_TagXX')) cms.PSet(record=cms.string('CSCRecoGeometryRcd') tag=cms.string('CSCRECO_Geometry_TagXX')) cms.PSet(record=cms.string('CSCRecoDigiParametersRcd') tag=cms.string('CSCRECODIGI_Geometry_TagXX')) cms.PSet(record=cms.string('DTRecoGeometryRcd') tag=cms.string('DTRECO_Geometry_TagXX')) cms.PSet(record=cms.string('RPCRecoGeometryRcd') 
tag=cms.string('RPCRECO_Geometry_TagXX')) cms.PSet(record=cms.string('GEMRecoGeometryRcd') tag=cms.string('GEMRECO_Geometry_TagXX'))))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.p1=cms.Path(process.XMLGeometryWriter+process.TrackerGeometryWriter+process.TrackerParametersWriter+process.CaloGeometryWriter+process.HcalParametersWriter+process.CSCGeometryWriter+process.DTGeometryWriter+process.RPCGeometryWriter+process.GEMGeometryWriter)<line_sep>
<import_stmt>os<import_from_stmt>flask Flask<line_sep>this_file_dir=os.path.dirname(os.path.realpath(__file__))<line_sep>data_dir='{}/data'.format(this_file_dir)<line_sep>output_dir='{}/out'.format(this_file_dir)<line_sep>downloads_dir='{}/downloads'.format(data_dir)<def_stmt>create_app <block_start>app=Flask(__name__)<line_sep><return>app<block_end>
# -*- coding: utf-8 -*- # Copyright (C) 2013 <NAME> """ Implements a parser for the Valve Data Format (VDF), often referred to as KeyValues. Provides parsing and serialisation functionality. API designed to mirror that of the built-in JSON module. https://developer.valvesoftware.com/wiki/KeyValues """<import_stmt>string<import_stmt>re<line_sep>_KV_KEY=0<line_sep>_KV_BLOCK=1<line_sep>_KV_BLOCKEND=2<line_sep>_KV_PAIR=3<line_sep>ALWAYS=0<line_sep>UNQUOTED=1<line_sep>NEVER=2<def_stmt>coerce_type token<block_start>""" Attempts to convert a token to a native Python object by matching it against various regexes. Will silently fall back to string if no conversion can be made. Currently only capable of converting integers and floating point numbers. """<line_sep>regexes=[# regex, converter (r"^-?[0-9]+$" int) (r"^[-+]?[0-9]*\.?[0-9]+$" float) # TODO: ("rgb", pass), # TODO: ("hex triplet", pass), ]<for_stmt>regex,converter regexes<block_start>print(regex converter token re.match(regex token re.UNICODE))<if_stmt>re.match(regex token re.UNICODE)<block_start><return>converter(token)<block_end><block_end># Fallback to string <return>token<block_end># Largely based on necavi's https://github.com/necavi/py-keyvalues <def_stmt>loads src encoding=<none> coerce_=UNQUOTED<block_start>""" Loads a VDF string into a series of nested dictionaries. encoding -- The encoding of the given source string if not Unicode. If this is not set and a bytestring is given, ASCII will be the assumed encoding. coerce_ -- can be set to determine whether an attempt should be made to convert values to native Python type equivalents. If set to UNQUOTED (default), only values that are not enclosed in double quotes will be converted. If set to ALWAYS, an attempt will be made to convert regardless of whether the value is quoted or not. Not recommended. If set to NEVER, no attempt will be made to convert. Should produce the most reliable behaviour. 
"""<if_stmt>isinstance(src str)<and>encoding<is><none><block_start>encoding="ascii"<block_end><if_stmt>encoding<is><not><none><block_start>src=src.decode(encoding)<block_end># else: # assume unicode # pair type, pair key, pair value, coerce pairs=[[_KV_BLOCK "" <none> <false>]]<line_sep># _KV_KEY -- all tokens begin as this # _KV_BLOCK -- is for when a _KV_KEY is followed by a { # _KV_PAIR -- is for when a _KV_KEY is followed by another token extended_alphanumeric=set(string.ascii_letters.decode("ascii")+unicode(string.digits)+u".-_")<line_sep>i=0<line_sep>line=1<line_sep>col=0<line_sep>token=<none><try_stmt><block_start><while_stmt>i<l>len(src)<block_start>char=src[i]<line_sep># Whitespace <if_stmt>char<in>{u" " u"\t"}<block_start><pass><block_end># End-of-line <elif_stmt>char<eq>u"\n"<block_start><try_stmt><block_start><if_stmt>src[i+1]<eq>u"\r"# Will IndexError at EOF <block_start>i<augadd>1<line_sep>col<augadd>1<block_end>line<augadd>1<line_sep>col=0<block_end><except_stmt>IndexError<block_start><pass><block_end><block_end># End-of-line <elif_stmt>char<eq>u"\r"<block_start><try_stmt><block_start><if_stmt>src[i+1]<eq>u"\n"# Will IndexError at EOF <block_start>i<augadd>1<line_sep>col<augadd>1<block_end>line<augadd>1<line_sep>col=0<block_end><except_stmt>IndexError<block_start><pass><block_end><block_end># Double-quotes enclosed token <elif_stmt>char<eq>u"\""<block_start>token=u""<while_stmt><true><block_start>i<augadd>1<line_sep>col<augadd>1<line_sep>char=src[i]<line_sep># I don't agree with the assertion in py-keyvalues # that \n or \r should also terminate a token if # its quoted. <if_stmt>char<eq>u"\""<block_start><break><block_end><elif_stmt>char<in>{"\r" "\n"}<block_start><raise>SyntaxError("End-of-line quoted token")<block_end><elif_stmt>char<eq>u"\\"<block_start>i<augadd>1<try_stmt><block_start>escaped_char=src[i]<block_end><except_stmt>IndexError<block_start><raise>SyntaxError("EOF in escaped character")<block_end><try_stmt><block_start>char={u"n":u"\n" u"r":u"\r" u"t":u"\t" u"\"":u"\"" u"\\":u"\\" }[escaped_char]<block_end><except_stmt>KeyError<block_start><raise>SyntaxError("Invalid escape character")<block_end><block_end>token<augadd>char<block_end><if_stmt>pairs[-1][0]<eq>_KV_KEY<block_start>pairs[-1][0]=_KV_PAIR<line_sep>pairs[-1][2]=token<line_sep>pairs[-1][3]=coerce_<in>[ALWAYS]<block_end><else_stmt><block_start>pairs.append([_KV_KEY token <none> <false>])<block_end><block_end># Unquoted token <elif_stmt>char<in>extended_alphanumeric<block_start>token=u""<while_stmt><true><block_start>token<augadd>char<line_sep>i<augadd>1<line_sep>col<augadd>1<line_sep>char=src[i]<if_stmt>char<not><in>extended_alphanumeric# Assume end of token; in most cases this will # white space or a new line # If newline, rewind 1 char so it can be # properly handled by the end-of-line processors <block_start><if_stmt>char<in>{u"\n" u"\r"}<block_start>i<augsub>1<line_sep>col<augsub>1<line_sep>char=src[i]<block_end><break><block_end><block_end><if_stmt>pairs[-1][0]<eq>_KV_KEY<block_start>pairs[-1][0]=_KV_PAIR<line_sep>pairs[-1][2]=token<line_sep>pairs[-1][3]=coerce_<in>[ALWAYS UNQUOTED]<block_end><else_stmt><block_start>pairs.append([_KV_KEY token <none> <false>])<block_end># I don't know if there are any cases where an unquoted # key may be illegal, e.g. if it contains only digits. # I assume it is, but I won't handle it for now. 
<block_end># Block start <elif_stmt>char<eq>u"{"<block_start><if_stmt>pairs[-1][0]<ne>_KV_KEY<block_start><raise>SyntaxError("Block doesn't follow block name")<block_end>pairs[-1][0]=_KV_BLOCK<block_end><elif_stmt>char<eq>u"}"<block_start>pairs.append([_KV_BLOCKEND <none> <none> <false>])<block_end><else_stmt><block_start><raise>SyntaxError("Unexpected character")<block_end>i<augadd>1<line_sep>col<augadd>1<block_end><block_end><except_stmt>SyntaxError<as>exc<block_start><raise>ValueError("{} '{}'; line {} column {}".format(exc.message src[i] line col))<block_end>dict_={}<line_sep>dict_stack=[dict_]<line_sep>CURRENT=-1<line_sep>PREVIOUS=-2<for_stmt>type,key,value,should_coerce pairs[1:]<block_start><if_stmt>type<eq>_KV_BLOCK<block_start>dict_stack.append({})<line_sep>dict_stack[PREVIOUS][key]=dict_stack[CURRENT]<block_end><elif_stmt>type<eq>_KV_BLOCKEND<block_start>dict_stack=dict_stack[:CURRENT]<block_end><elif_stmt>type<eq>_KV_PAIR<block_start>dict_stack[CURRENT][key]=(coerce_type(value)<if>should_coerce<else>value)<block_end># else: # should never occur, but would be caused by a token not being # followed by a block or value <block_end><return>dict_<block_end><def_stmt>load fp encoding=<none> coerce_=UNQUOTED<block_start>""" Same as loads but takes a file-like object as the source. """<line_sep><return>loads(fp.read() encoding coerce_)<block_end><def_stmt>dumps obj encoding=<none> indent=u" " object_encoders={}<block_start>""" Serialises a series of nested dictionaries to the VDF/KeyValues format and returns it as a string. If 'encoding' isn't specified a Unicode string will be returned, else an ecoded bytestring will be. 'indent' is the string to be used to indent nested blocks. The string given should be Unicode and represent one level of indentation. Four spaces by default. 'object_encoders' maps a series of types onto serialisers, which convert objects to their VDF equivalent. If no encoder is specified for a type it'll fall back to using __unicode__. Note that currently this likely causes None to be encoded incorrectly. Also, floats which include the exponent in their textual representaiton may also be 'wrong.' """<line_sep>object_codecs={float:<lambda>v:unicode(repr(v/1.0)) }<line_sep>object_codecs.update(object_encoders)<line_sep># I don't know how TYPE_NONE (None) are meant to be encoded so we # just use unicode() until it's known. lines=[]<def_stmt>recurse_obj obj indent_level=0<block_start>ind=indent<times>indent_level<for_stmt>key,value obj.iteritems()<block_start><if_stmt>isinstance(value dict)<block_start>lines.append(u"{}\"{}\"".format(ind key))<line_sep>lines.append(u"{}{{".format(ind))<line_sep>recurse_obj(value indent_level+1)<line_sep>lines.append(u"{}}}".format(ind))<block_end><else_stmt><block_start>lines.append(u"{}\"{}\"{}\"{}\"".format(ind key indent object_codecs.get(type(value) unicode)(value) ))<block_end><block_end><block_end>recurse_obj(obj)<if_stmt>encoding<is><not><none><block_start><return>u"\n".join(lines).encode(encoding)<block_end><else_stmt><block_start><return>u"\n".join(lines)<block_end><block_end><def_stmt>dump obj fp encoding indent=u" " object_encoders={}<block_start>""" Same as dumps but takes a file-like object 'fp' which will be written to. """<line_sep><return>fp.write(dumps(obj encoding indent object_encoders))<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_from_stmt>typing NamedTuple Optional<class_stmt>Pass(NamedTuple)<block_start>"""The declaration of an LLVM pass."""<line_sep># The name of the pass, e.g. "AddDiscriminatorsPass". name:str<line_sep># The opt commandline flag which turns this pass on, e.g. "-add-discriminators". flag:str<line_sep># The docstring for this pass, as reported by `opt -help`. E.g. "Add DWARF path discriminators". description:str<line_sep># The path of the C++ file which defines this pass, relative to the LLVM source tree root. source:str<line_sep># The path of the C++ header which declares this pass, relative to the LLVM source tree root. # If the header path could not be inferred, this is None. header:Optional[str]<line_sep># Boolean flags set in INITIALIZE_PASS(). cfg:bool<line_sep>is_analysis:bool<block_end>
# Copyright 2019 ZTE corporation. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ The test of automatic test. """<import_stmt>unittest<import_stmt>subprocess<import_stmt>os<class_stmt>TestAutomaticTest(unittest.TestCase)<block_start>""" The test of automatic test """<line_sep>@staticmethod<def_stmt>test_automatic_test <block_start>""" The test of automatic test. """<line_sep>base_dir=os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))<line_sep>model_command=['python3' 'benchmark/tests/test_model/mnist_keras/mnist_keras.py']<line_sep>subprocess.run(args=model_command cwd=base_dir check=<true>)<line_sep>command=['python3' 'benchmark/src/automatic_test.py' '-d' 'benchmark/tests/docker_test/openvino.Dockerfile' '-s' 'openvino' '-b' '.' '-a' '.' '-m' 'mnist' '-c' 'benchmark/tests/client_script/client_script.sh' '-ss' 'benchmark/tests/serving_script/openvino_serving_script.sh' '-l' os.path.join(base_dir 'benchmark/log') '-tm' 'benchmark/tests/test_model/mnist_keras' '-cis' 'mnist_client.py' '-i' 'mnist.png' '-cs' 'benchmark/tests/compile_script/openvino_compile_script.sh']<with_stmt>subprocess.Popen(args=command cwd=base_dir)<as>process<block_start>print(process.stdout)<block_end><block_end><block_end>
var=5<line_sep>a="my string {:.2f}".format(var)<line_sep>
# coding: utf-8 <import_stmt>unittest<import_from_stmt>problems.two_sum_ii_input_array_is_sorted Solution<class_stmt>TestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.solution=Solution()<block_end><def_stmt>test self<block_start>test_data=[{'numbers':[2 7 11 15] 'target':9 'expected':[1 2]} {'numbers':[2 3 4] 'target':6 'expected':[1 3]} {'numbers':[-1 0] 'target':-1 'expected':[1 2]} {'numbers':[1 2 3 4 5 6 7 19 100 222 412] 'target':13 'expected':[6 7]} ]<for_stmt>data test_data<block_start>numbers=data['numbers']<line_sep>target=data['target']<line_sep>expected=data['expected']<with_stmt>self.subTest(numbers=numbers target=target)<block_start>self.assertEqual(self.solution.twoSum(numbers target) expected)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>argparse<import_stmt>itertools<import_stmt>ntpath<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>tqdm tqdm<import_from_stmt>data create_eval_dataloader<import_from_stmt>metric create_metric_models get_fid<import_from_stmt>models networks<import_from_stmt>models.base_model BaseModel<import_from_stmt>models.modules.loss GANLoss<import_from_stmt>utils util<class_stmt>MunitModel(BaseModel)<block_start>@staticmethod<def_stmt>modify_commandline_options parser is_train=<false><block_start><assert_stmt>is_train<line_sep>parser=super(MunitModel MunitModel).modify_commandline_options(parser is_train)<assert_stmt>isinstance(parser argparse.ArgumentParser)<line_sep>parser.add_argument('--restore_G_A_path' type=str default=<none> help='the path to restore the generator A')<line_sep>parser.add_argument('--restore_G_B_path' type=str default=<none> help='the path to restore the generator B')<line_sep>parser.add_argument('--restore_D_A_path' type=str default=<none> help='the path to restore the discriminator A')<line_sep>parser.add_argument('--restore_D_B_path' type=str default=<none> help='the path to restore the discriminator B')<line_sep>parser.add_argument('--style_dim' type=int default=8 help='the dimension of the style vector')<line_sep>parser.add_argument('--n_downsample' type=int default=2 help='the number of downsample layer in the generator')<line_sep>parser.add_argument('--n_res' type=int default=4 help='the number of the ResBlock in the generator')<line_sep>parser.add_argument('--activ' type=str default='relu' help='the activation type of the generator')<line_sep>parser.add_argument('--pad_type' type=str default='reflect' help='the padding type of the generator')<line_sep>parser.add_argument('--mlp_dim' type=int default=256 help='the dimension of the mlp layer in the generator')<line_sep>parser.add_argument('--no_style_encoder' action='store_true' help='whether to have the style encoder in the generator')<line_sep>parser.add_argument('--lambda_rec_x' type=float default=10 help='weight of image reconstruction loss')<line_sep>parser.add_argument('--lambda_rec_s' type=float default=1 help='weight of style reconstruction loss')<line_sep>parser.add_argument('--lambda_rec_c' type=float default=1 help='weight of content reconstruction loss')<line_sep>parser.add_argument('--lambda_gan' type=float default=1 help='weight of gan loss')<line_sep>parser.add_argument('--weight_decay' type=float default=1e-4 help='weight decay of the optimizer')<line_sep>parser.add_argument('--real_stat_A_path' type=str required=<true> help='the path to load the ground-truth A images information to compute FID.')<line_sep>parser.add_argument('--real_stat_B_path' type=str required=<true> help='the path to load the ground-truth B images information to compute FID.')<line_sep>parser.set_defaults(dataset_mode='unaligned' gan_mode='lsgan' load_size=256 netG='munit' netD='ms_image' ndf=64 n_layers_D=4 init_type='kaiming' lr_policy='step' lr=1e-4 scheduler_counter='iter' nepochs=21 nepochs_decay=0 niters=1000000 save_latest_freq=100000000 save_epoch_freq=1)<line_sep><return>parser<block_end><def_stmt>__init__ self opt<block_start><assert_stmt>opt.isTrain<assert_stmt>opt.direction<eq>'AtoB'<assert_stmt>opt.dataset_mode<eq>'unaligned'<line_sep>valid_netGs=['munit' 'mobile_munit']<assert_stmt>opt.netG<in>valid_netGs<line_sep>super(MunitModel self).__init__(opt)<line_sep>self.loss_names=['D_A' 'G_rec_xA' 'G_rec_sA' 'G_rec_cA' 'G_gan_A' 'D_B' 'G_rec_xB' 'G_rec_sB' 
'G_rec_cB' 'G_gan_B']<line_sep>self.visual_names=['real_A' 'fake_A' 'real_A' 'fake_B']<line_sep>self.model_names=['G_A' 'G_B' 'D_A' 'D_B']<line_sep>self.netG_A=networks.define_G(opt.netG init_type=opt.init_type init_gain=opt.init_gain gpu_ids=self.gpu_ids opt=opt)<line_sep>self.netG_B=networks.define_G(opt.netG init_type=opt.init_type init_gain=opt.init_gain gpu_ids=self.gpu_ids opt=opt)<line_sep>self.netD_A=networks.define_D(opt.netD input_nc=opt.input_nc init_type='normal' init_gain=opt.init_gain gpu_ids=self.gpu_ids opt=opt)<line_sep>self.netD_B=networks.define_D(opt.netD input_nc=opt.output_nc init_type='normal' init_gain=opt.init_gain gpu_ids=self.gpu_ids opt=opt)<line_sep>self.criterionGAN=GANLoss(opt.gan_mode).to(self.device)<line_sep>self.criterionRec=nn.L1Loss()<line_sep>self.optimizer_G=torch.optim.Adam(itertools.chain(self.netG_A.parameters() self.netG_B.parameters()) lr=opt.lr betas=(opt.beta1 0.999) weight_decay=opt.weight_decay)<line_sep>self.optimizer_D=torch.optim.Adam(itertools.chain(self.netD_A.parameters() self.netD_B.parameters()) lr=opt.lr betas=(opt.beta1 0.999) weight_decay=opt.weight_decay)<line_sep>self.optimizers=[self.optimizer_G self.optimizer_D]<line_sep>self.eval_dataloader_AtoB=create_eval_dataloader(self.opt direction='AtoB')<line_sep>self.eval_dataloader_BtoA=create_eval_dataloader(self.opt direction='BtoA')<line_sep>self.inception_model,_,_=create_metric_models(opt self.device)<line_sep>self.best_fid_A,self.best_fid_B=1e9 1e9<line_sep>self.fids_A,self.fids_B=[] []<line_sep>self.is_best=<false><line_sep>self.npz_A=np.load(opt.real_stat_A_path)<line_sep>self.npz_B=np.load(opt.real_stat_B_path)<block_end><def_stmt>set_input self input<block_start>self.real_A=input['A'].to(self.device)<line_sep>self.real_B=input['B'].to(self.device)<block_end><def_stmt>set_single_input self input<block_start>self.real_A=input['A'].to(self.device)<line_sep>self.image_paths=input['A_paths']<block_end><def_stmt>test_single_side self direction<block_start>G_A=getattr(self 'netG_%s'%direction[0])<line_sep>G_B=getattr(self 'netG_%s'%direction[-1])<line_sep>opt=self.opt<line_sep>batch_size=self.real_A.size(0)<line_sep>style_dim=opt.style_dim<with_stmt>torch.no_grad()<block_start>s=torch.randn(batch_size style_dim 1 1 device=self.device)<line_sep>c,_=G_A.encode(self.real_A need_style=<false>)<line_sep>self.fake_B=G_B.decode(c s)<block_end><block_end><def_stmt>forward self config=<none><block_start><raise>NotImplementedError<block_end><def_stmt>backward_G self<block_start>opt=self.opt<line_sep>batch_size=self.real_A.size(0)<line_sep>style_dim=opt.style_dim<line_sep>s_a=torch.randn(batch_size style_dim 1 1 device=self.device)<line_sep>s_b=torch.randn(batch_size style_dim 1 1 device=self.device)<line_sep># encode c_a,s_a_prime=self.netG_A.encode(self.real_A)<line_sep>c_b,s_b_prime=self.netG_B.encode(self.real_B)<line_sep># decode (within domain) rec_A=self.netG_A.decode(c_a s_a_prime)<line_sep>rec_B=self.netG_B.decode(c_b s_b_prime)<line_sep># decode (cross domain) fake_A=self.netG_A.decode(c_b s_a)<line_sep>fake_B=self.netG_B.decode(c_a s_b)<line_sep># encode again c_b_recon,s_a_recon=self.netG_A.encode(fake_A)<line_sep>c_a_recon,s_b_recon=self.netG_B.encode(fake_B)<line_sep># reconstruction loss self.loss_G_rec_xA=opt.lambda_rec_x<times>self.criterionRec(rec_A self.real_A)<line_sep>self.loss_G_rec_xB=opt.lambda_rec_x<times>self.criterionRec(rec_B self.real_B)<line_sep>self.loss_G_rec_sA=opt.lambda_rec_s<times>self.criterionRec(s_a_recon 
s_a)<line_sep>self.loss_G_rec_sB=opt.lambda_rec_s<times>self.criterionRec(s_b_recon s_b)<line_sep>self.loss_G_rec_cA=opt.lambda_rec_c<times>self.criterionRec(c_a_recon c_a)<line_sep>self.loss_G_rec_cB=opt.lambda_rec_c<times>self.criterionRec(c_b_recon c_b)<line_sep># gan loss self.loss_G_gan_A=opt.lambda_gan<times>self.criterionGAN(self.netD_A(fake_A) <true> for_discriminator=<false>)<line_sep>self.loss_G_gan_B=opt.lambda_gan<times>self.criterionGAN(self.netD_B(fake_B) <true> for_discriminator=<false>)<line_sep>self.loss_G=self.loss_G_rec_xA+self.loss_G_rec_xB+self.loss_G_rec_sA+self.loss_G_rec_sB+self.loss_G_rec_cA+self.loss_G_rec_cB+self.loss_G_gan_A+self.loss_G_gan_B<line_sep>self.loss_G.backward()<block_end><def_stmt>backward_D self<block_start>opt=self.opt<line_sep>batch_size=self.real_A.size(0)<line_sep>style_dim=opt.style_dim<line_sep>s_a=torch.randn(batch_size style_dim 1 1 device=self.device)<line_sep>s_b=torch.randn(batch_size style_dim 1 1 device=self.device)<line_sep># encode c_a,_=self.netG_A.encode(self.real_A need_style=<false>)<line_sep>c_b,_=self.netG_B.encode(self.real_B need_style=<false>)<line_sep># decode (cross domain) fake_A=self.netG_A.decode(c_b s_a)<line_sep>fake_B=self.netG_B.decode(c_a s_b)<line_sep># gan loss self.loss_D_A=opt.lambda_gan<times>(self.criterionGAN(self.netD_A(fake_A.detach()) <false>)+self.criterionGAN(self.netD_A(self.real_A) <true>))<line_sep>self.loss_D_B=opt.lambda_gan<times>(self.criterionGAN(self.netD_B(fake_B.detach()) <false>)+self.criterionGAN(self.netD_B(self.real_B) <true>))<line_sep>self.loss_D=self.loss_D_A+self.loss_D_B<line_sep>self.loss_D.backward()<block_end><def_stmt>optimize_parameters self steps<block_start>self.set_requires_grad([self.netD_A self.netD_B] <false>)# Ds require no gradients when optimizing Gs self.optimizer_G.zero_grad()<line_sep>self.backward_G()<line_sep>self.optimizer_G.step()<line_sep>self.set_requires_grad([self.netD_A self.netD_B] <true>)# Ds require no gradients when optimizing Gs self.optimizer_D.zero_grad()# set D_A and D_B's gradients to zero self.backward_D()# calculate gradients for D_A and D_B self.optimizer_D.step()<block_end># update D_A and D_B's weights <def_stmt>profile self config=<none> verbose=<true><block_start><raise>NotImplementedError<block_end><def_stmt>test self config=<none><block_start><with_stmt>torch.no_grad()<block_start>self.forward(config)<block_end><block_end><def_stmt>evaluate_model self step<block_start>ret={}<line_sep>self.is_best=<false><line_sep>save_dir=os.path.join(self.opt.log_dir 'eval' str(step))<line_sep>os.makedirs(save_dir exist_ok=<true>)<line_sep>self.netG_A.eval()<line_sep>self.netG_B.eval()<for_stmt>direction ['AtoB' 'BtoA']<block_start>eval_dataloader=getattr(self 'eval_dataloader_'+direction)<line_sep>fakes,names=[] []<line_sep>cnt=0<for_stmt>i,data_i enumerate(tqdm(eval_dataloader desc='Eval %s '%direction position=2 leave=<false>))<block_start>self.set_single_input(data_i)<line_sep>self.test_single_side(direction)<line_sep>fakes.append(self.fake_B.cpu())<for_stmt>j range(len(self.image_paths))<block_start>short_path=ntpath.basename(self.image_paths[j])<line_sep>name=os.path.splitext(short_path)[0]<line_sep>names.append(name)<if_stmt>cnt<l>10<block_start>input_im=util.tensor2im(self.real_A[j])<line_sep>fake_im=util.tensor2im(self.fake_B[j])<line_sep>util.save_image(input_im os.path.join(save_dir direction 'input' '%s.png'%name) create_dir=<true>)<line_sep>util.save_image(fake_im os.path.join(save_dir direction 'fake' '%s.png'%name) 
create_dir=<true>)<block_end>cnt<augadd>1<block_end><block_end>suffix=direction[-1]<line_sep>fid=get_fid(fakes self.inception_model getattr(self 'npz_%s'%direction[-1]) device=self.device batch_size=self.opt.eval_batch_size tqdm_position=2)<if_stmt>fid<l>getattr(self 'best_fid_%s'%suffix)<block_start>self.is_best=<true><line_sep>setattr(self 'best_fid_%s'%suffix fid)<block_end>fids=getattr(self 'fids_%s'%suffix)<line_sep>fids.append(fid)<if_stmt>len(fids)<g>3<block_start>fids.pop(0)<block_end>ret['metric/fid_%s'%suffix]=fid<line_sep>ret['metric/fid_%s-mean'%suffix]=sum(getattr(self 'fids_%s'%suffix))/len(getattr(self 'fids_%s'%suffix))<line_sep>ret['metric/fid_%s-best'%suffix]=getattr(self 'best_fid_%s'%suffix)<block_end>self.netG_A.train()<line_sep>self.netG_B.train()<line_sep><return>ret<block_end><block_end>
# Copyright (c) 2019 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>six<import_from_stmt>kmip.core enums<import_from_stmt>kmip.core exceptions<import_from_stmt>kmip.core objects<import_from_stmt>kmip.core primitives<import_from_stmt>kmip.core utils<import_from_stmt>kmip.core.messages.payloads base<class_stmt>DeleteAttributeRequestPayload(base.RequestPayload)<block_start>""" A request payload for the DeleteAttribute operation. Attributes: unique_identifier: The unique ID of the object on which attribute deletion should be performed. attribute_name: The name of the attribute to be deleted. Used in KMIP 1.0 - 1.4. attribute_index: The index of the attribute to be deleted. Used in KMIP 1.0 - 1.4. current_attribute: The attribute to be deleted. Used in KMIP 2.0+. attribute_reference: The reference to the attribute to be deleted. Used in KMIP 2.0+. """<def_stmt>__init__ self unique_identifier=<none> attribute_name=<none> attribute_index=<none> current_attribute=<none> attribute_reference=<none><block_start>""" Construct a DeleteAttribute request payload. Args: unique_identifier (string): The unique ID of the object on which attribute deletion should be performed. Optional, defaults to None. attribute_name (string): The name of the attribute to be deleted. Used in KMIP 1.0 - 1.4. Defaults to None. Required for read/write. attribute_index (int): The index of the attribute to be deleted. Used in KMIP 1.0 - 1.4. Optional, defaults to None. current_attribute (struct): A CurrentAttribute structure containing the attribute to be deleted. Used in KMIP 2.0+. Optional, defaults to None. Must be specified if the attribute reference is not provided. attribute_reference (struct): An AttributeReference structure containing a reference to the attribute to be deleted. Used in KMIP 2.0+. Optional, defaults to None. Must be specified if the current attribute is not specified. 
"""<line_sep>super(DeleteAttributeRequestPayload self).__init__()<line_sep>self._unique_identifier=<none><line_sep>self._attribute_name=<none><line_sep>self._attribute_index=<none><line_sep>self._current_attribute=<none><line_sep>self._attribute_reference=<none><line_sep>self.unique_identifier=unique_identifier<line_sep>self.attribute_name=attribute_name<line_sep>self.attribute_index=attribute_index<line_sep>self.current_attribute=current_attribute<line_sep>self.attribute_reference=attribute_reference<block_end>@property<def_stmt>unique_identifier self<block_start><if_stmt>self._unique_identifier<block_start><return>self._unique_identifier.value<block_end><return><none><block_end>@unique_identifier.setter<def_stmt>unique_identifier self value<block_start><if_stmt>value<is><none><block_start>self._unique_identifier=<none><block_end><elif_stmt>isinstance(value six.string_types)<block_start>self._unique_identifier=primitives.TextString(value=value tag=enums.Tags.UNIQUE_IDENTIFIER)<block_end><else_stmt><block_start><raise>TypeError("The unique identifier must be a string.")<block_end><block_end>@property<def_stmt>attribute_name self<block_start><if_stmt>self._attribute_name<block_start><return>self._attribute_name.value<block_end><return><none><block_end>@attribute_name.setter<def_stmt>attribute_name self value<block_start><if_stmt>value<is><none><block_start>self._attribute_name=<none><block_end><elif_stmt>isinstance(value six.string_types)<block_start>self._attribute_name=primitives.TextString(value=value tag=enums.Tags.ATTRIBUTE_NAME)<block_end><else_stmt><block_start><raise>TypeError("The attribute name must be a string.")<block_end><block_end>@property<def_stmt>attribute_index self<block_start><if_stmt>self._attribute_index<block_start><return>self._attribute_index.value<block_end><return><none><block_end>@attribute_index.setter<def_stmt>attribute_index self value<block_start><if_stmt>value<is><none><block_start>self._attribute_index=<none><block_end><elif_stmt>isinstance(value six.integer_types)<block_start>self._attribute_index=primitives.Integer(value=value tag=enums.Tags.ATTRIBUTE_INDEX)<block_end><else_stmt><block_start><raise>TypeError("The attribute index must be an integer.")<block_end><block_end>@property<def_stmt>current_attribute self<block_start><if_stmt>self._current_attribute<block_start><return>self._current_attribute<block_end><return><none><block_end>@current_attribute.setter<def_stmt>current_attribute self value<block_start><if_stmt>value<is><none><block_start>self._current_attribute=<none><block_end><elif_stmt>isinstance(value objects.CurrentAttribute)<block_start>self._current_attribute=value<block_end><else_stmt><block_start><raise>TypeError("The current attribute must be a CurrentAttribute object.")<block_end><block_end>@property<def_stmt>attribute_reference self<block_start><if_stmt>self._attribute_reference<block_start><return>self._attribute_reference<block_end><return><none><block_end>@attribute_reference.setter<def_stmt>attribute_reference self value<block_start><if_stmt>value<is><none><block_start>self._attribute_reference=<none><block_end><elif_stmt>isinstance(value objects.AttributeReference)<block_start>self._attribute_reference=value<block_end><else_stmt><block_start><raise>TypeError("The attribute reference must be an AttributeReference object.")<block_end><block_end><def_stmt>read self input_buffer kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>""" Read the data encoding the DeleteAttribute request payload and decode it into its constituent part. 
Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: InvalidKmipEncoding: Raised if fields are missing from the encoding. """<line_sep>super(DeleteAttributeRequestPayload self).read(input_buffer kmip_version=kmip_version)<line_sep>local_buffer=utils.BytearrayStream(input_buffer.read(self.length))<if_stmt>self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER local_buffer)<block_start>self._unique_identifier=primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)<line_sep>self._unique_identifier.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start>self._unique_identifier=<none><block_end><if_stmt>kmip_version<l>enums.KMIPVersion.KMIP_2_0<block_start><if_stmt>self.is_tag_next(enums.Tags.ATTRIBUTE_NAME local_buffer)<block_start>self._attribute_name=primitives.TextString(tag=enums.Tags.ATTRIBUTE_NAME)<line_sep>self._attribute_name.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The DeleteAttribute request payload encoding is missing "<concat>"the attribute name field.")<block_end><if_stmt>self.is_tag_next(enums.Tags.ATTRIBUTE_INDEX local_buffer)<block_start>self._attribute_index=primitives.Integer(tag=enums.Tags.ATTRIBUTE_INDEX)<line_sep>self._attribute_index.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start>self._attribute_index=<none><block_end><block_end><else_stmt><block_start><if_stmt>self.is_tag_next(enums.Tags.CURRENT_ATTRIBUTE local_buffer)<block_start>self._current_attribute=objects.CurrentAttribute()<line_sep>self._current_attribute.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start>self._current_attribute=<none><block_end><if_stmt>self.is_tag_next(enums.Tags.ATTRIBUTE_REFERENCE local_buffer)<block_start>self._attribute_reference=objects.AttributeReference()<line_sep>self._attribute_reference.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start>self._attribute_reference=<none><block_end><if_stmt>self._current_attribute<eq>self._attribute_reference<block_start><raise>exceptions.InvalidKmipEncoding("The DeleteAttribute encoding is missing either the "<concat>"current attribute or the attribute reference field.")<block_end><block_end>self.is_oversized(local_buffer)<block_end><def_stmt>write self output_buffer kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>""" Write the data encoding the DeleteAttribute request payload to a stream. Args: output_buffer (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. 
Raises: InvalidField """<line_sep>local_buffer=utils.BytearrayStream()<if_stmt>self._unique_identifier<block_start>self._unique_identifier.write(local_buffer kmip_version=kmip_version)<block_end><if_stmt>kmip_version<l>enums.KMIPVersion.KMIP_2_0<block_start><if_stmt>self._attribute_name<block_start>self._attribute_name.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The DeleteAttribute request payload is missing the "<concat>"attribute name field.")<block_end><if_stmt>self._attribute_index<block_start>self._attribute_index.write(local_buffer kmip_version=kmip_version)<block_end><block_end><else_stmt><block_start><if_stmt>self._current_attribute<eq>self._attribute_reference<block_start><raise>exceptions.InvalidField("The DeleteAttribute request payload is missing either "<concat>"the current attribute or the attribute reference field.")<block_end><if_stmt>self._current_attribute<block_start>self._current_attribute.write(local_buffer kmip_version=kmip_version)<block_end><if_stmt>self._attribute_reference<block_start>self._attribute_reference.write(local_buffer kmip_version=kmip_version)<block_end><block_end>self.length=local_buffer.length()<line_sep>super(DeleteAttributeRequestPayload self).write(output_buffer kmip_version=kmip_version)<line_sep>output_buffer.write(local_buffer.buffer)<block_end><def_stmt>__repr__ self<block_start>args=["unique_identifier='{}'".format(self.unique_identifier) "attribute_name='{}'".format(self.attribute_name) "attribute_index={}".format(self.attribute_index) "current_attribute={}".format(repr(self.current_attribute)<if>self.current_attribute<else><none>) "attribute_reference={}".format(repr(self.attribute_reference)<if>self.attribute_reference<else><none>)]<line_sep><return>"DeleteAttributeRequestPayload({})".format(", ".join(args))<block_end><def_stmt>__str__ self<block_start><return>str({"unique_identifier":self.unique_identifier "attribute_name":self.attribute_name "attribute_index":self.attribute_index "current_attribute":str(self.current_attribute)<if>self.current_attribute<else><none> "attribute_reference":str(self.attribute_reference)<if>self.attribute_reference<else><none>})<block_end><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other DeleteAttributeRequestPayload)<block_start><if_stmt>self.unique_identifier<ne>other.unique_identifier<block_start><return><false><block_end><elif_stmt>self.attribute_name<ne>other.attribute_name<block_start><return><false><block_end><elif_stmt>self.attribute_index<ne>other.attribute_index<block_start><return><false><block_end><elif_stmt>self.current_attribute<ne>other.current_attribute<block_start><return><false><block_end><elif_stmt>self.attribute_reference<ne>other.attribute_reference<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><def_stmt>__ne__ self other<block_start><if_stmt>isinstance(other DeleteAttributeRequestPayload)<block_start><return><not>self.__eq__(other)<block_end><else_stmt><block_start><return>NotImplemented<block_end><block_end><block_end><class_stmt>DeleteAttributeResponsePayload(base.ResponsePayload)<block_start>""" A response payload for the DeleteAttribute operation. Attributes: unique_identifier: The unique ID of the object on which attribute deletion was performed. Optional, defaults to None. attribute: The attribute object deleted from the managed object. Used in KMIP 1.0 - 1.4. 
"""<def_stmt>__init__ self unique_identifier=<none> attribute=<none><block_start>""" Construct a DeleteAttribute response payload. Args: unique_identifier (string): The unique ID of the object on which attribute deletion was performed. Defaults to None. Required for read/write. attribute (struct): An Attribute object containing the attribute that was deleted. Used in KMIP 1.0 - 1.4. Defaults to None. Required for read/write. """<line_sep>super(DeleteAttributeResponsePayload self).__init__()<line_sep>self._unique_identifier=<none><line_sep>self._attribute=<none><line_sep>self.unique_identifier=unique_identifier<line_sep>self.attribute=attribute<block_end>@property<def_stmt>unique_identifier self<block_start><if_stmt>self._unique_identifier<block_start><return>self._unique_identifier.value<block_end><return><none><block_end>@unique_identifier.setter<def_stmt>unique_identifier self value<block_start><if_stmt>value<is><none><block_start>self._unique_identifier=<none><block_end><elif_stmt>isinstance(value six.string_types)<block_start>self._unique_identifier=primitives.TextString(value=value tag=enums.Tags.UNIQUE_IDENTIFIER)<block_end><else_stmt><block_start><raise>TypeError("The unique identifier must be a string.")<block_end><block_end>@property<def_stmt>attribute self<block_start><if_stmt>self._attribute<block_start><return>self._attribute<block_end><return><none><block_end>@attribute.setter<def_stmt>attribute self value<block_start><if_stmt>value<is><none><block_start>self._attribute=<none><block_end><elif_stmt>isinstance(value objects.Attribute)<block_start>self._attribute=value<block_end><else_stmt><block_start><raise>TypeError("The attribute must be an Attribute object.")<block_end><block_end><def_stmt>read self input_buffer kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>""" Read the data encoding the DeleteAttribute response payload and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: InvalidKmipEncoding: Raised if any required fields are missing from the encoding. """<line_sep>super(DeleteAttributeResponsePayload self).read(input_buffer kmip_version=kmip_version)<line_sep>local_buffer=utils.BytearrayStream(input_buffer.read(self.length))<if_stmt>self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER local_buffer)<block_start>self._unique_identifier=primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)<line_sep>self._unique_identifier.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The DeleteAttribute response payload encoding is missing the "<concat>"unique identifier field.")<block_end><if_stmt>kmip_version<l>enums.KMIPVersion.KMIP_2_0<block_start><if_stmt>self.is_tag_next(enums.Tags.ATTRIBUTE local_buffer)<block_start>self._attribute=objects.Attribute()<line_sep>self._attribute.read(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidKmipEncoding("The DeleteAttribute response payload encoding is missing "<concat>"the attribute field.")<block_end><block_end>self.is_oversized(local_buffer)<block_end><def_stmt>write self output_buffer kmip_version=enums.KMIPVersion.KMIP_1_0<block_start>""" Write the data encoding the DeleteAttribute response payload to a buffer. 
Args: output_buffer (buffer): A data buffer in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: InvalidField: Raised if a required field is missing from the payload object. """<line_sep>local_buffer=utils.BytearrayStream()<if_stmt>self._unique_identifier<block_start>self._unique_identifier.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The DeleteAttribute response payload is missing the unique "<concat>"identifier field.")<block_end><if_stmt>kmip_version<l>enums.KMIPVersion.KMIP_2_0<block_start><if_stmt>self._attribute<block_start>self._attribute.write(local_buffer kmip_version=kmip_version)<block_end><else_stmt><block_start><raise>exceptions.InvalidField("The DeleteAttribute response payload is missing the "<concat>"attribute field.")<block_end><block_end>self.length=local_buffer.length()<line_sep>super(DeleteAttributeResponsePayload self).write(output_buffer kmip_version=kmip_version)<line_sep>output_buffer.write(local_buffer.buffer)<block_end><def_stmt>__repr__ self<block_start>args=["unique_identifier='{}'".format(self.unique_identifier) "attribute={}".format(repr(self.attribute))]<line_sep><return>"DeleteAttributeResponsePayload({})".format(", ".join(args))<block_end><def_stmt>__str__ self<block_start><return>str({"unique_identifier":self.unique_identifier "attribute":str(self.attribute)})<block_end><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other DeleteAttributeResponsePayload)<block_start><if_stmt>self.unique_identifier<ne>other.unique_identifier<block_start><return><false><block_end><elif_stmt>self.attribute<ne>other.attribute<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><return>NotImplemented<block_end><def_stmt>__ne__ self other<block_start><if_stmt>isinstance(other DeleteAttributeResponsePayload)<block_start><return><not>self.__eq__(other)<block_end><return>NotImplemented<block_end><block_end>
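A hedged round-trip sketch for the request payload defined above: encode a KMIP 1.x DeleteAttribute request into a BytearrayStream and decode it back. The unique identifier and attribute name values are illustrative only; `utils` and `enums` are the kmip.core modules imported at the top of this file.

# Illustrative values; a real request would use the server-assigned object ID.
request = DeleteAttributeRequestPayload(
    unique_identifier="b4faee10-aa2a-4446-8ad4-0881f3422959",
    attribute_name="x-custom-attribute",
    attribute_index=0,
)
encoding = utils.BytearrayStream()
request.write(encoding, kmip_version=enums.KMIPVersion.KMIP_1_2)

# Decode from the written bytes and confirm the fields survive the round trip.
decoded = DeleteAttributeRequestPayload()
decoded.read(
    utils.BytearrayStream(encoding.buffer),
    kmip_version=enums.KMIPVersion.KMIP_1_2,
)
assert decoded == request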
#! /usr/env/python <import_stmt>sys<import_stmt>os<line_sep>sys.path.insert(1 os.path.join("../../../h2o-py"))<import_from_stmt>tests pyunit_utils<import_stmt>h2o<def_stmt>gcs_import # Just test the import works - no class clashes, no exception <block_start>keys=h2o.import_file("gs://gcp-public-data-nexrad-l2/2018/01/01/KABR/NWS_NEXRAD_NXL2DPBL_KABR_20180101050000_20180101055959.tar" parse=<false>)<assert_stmt>len(keys)<eq>1<assert_stmt>keys<eq>['gs://gcp-public-data-nexrad-l2/2018/01/01/KABR/NWS_NEXRAD_NXL2DPBL_KABR_20180101050000_20180101055959.tar']<line_sep>expected_keys=['gs://gcp-public-data-nexrad-l2/1991/06/05/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910605160000_19910605235959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/06/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910606000000_19910606075959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/06/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910606080000_19910606155959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/06/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910606160000_19910606235959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/07/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910607160000_19910607235959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/08/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910608000000_19910608075959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/08/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910608080000_19910608155959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/09/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910609160000_19910609235959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/10/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910610000000_19910610075959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/10/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910610080000_19910610155959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/22/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910622160000_19910622235959.tar' 'gs://gcp-public-data-nexrad-l2/1991/06/23/KTLX/NWS_NEXRAD_NXL2LG_KTLX_19910623000000_19910623075959.tar']<line_sep># Import folder keys=h2o.import_file("gs://gcp-public-data-nexrad-l2/1991/06" parse=<false>)<assert_stmt>len(keys)<eq>12<assert_stmt>keys<eq>expected_keys<line_sep># Import folder - slash at the end of path keys=h2o.import_file("gs://gcp-public-data-nexrad-l2/1991/06/" parse=<false>)<assert_stmt>len(keys)<eq>12<assert_stmt>keys<eq>expected_keys<line_sep># Import folder - Invalid path keys=h2o.import_file("gs://gcp-public-data-nexrad-l2/1991/06/somethingNonExistent/" parse=<false>)<assert_stmt>len(keys)<eq>0<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(gcs_import)<block_end><else_stmt><block_start>gcs_import()<block_end>
# # Copyright 2021 Red Hat Inc. # SPDX-License-Identifier: Apache-2.0 # """Hashing utility."""<import_stmt>hashlib<import_from_stmt>masu.exceptions HasherError<class_stmt>Hasher<block_start>"""A utility class to create hashes."""<def_stmt>__init__ self hash_function length=<none> encoding="utf-8"<block_start>"""Initialize the Hasher. Args: hash_function (str): String representation of hash function Ex. 'md5' length (int): The digest length for SHAKE algorithms encoding (str): Encoding used to convert string to bytes Returns: (Hasher): the initialized Hasher """<line_sep>self.length=length<line_sep>self.hash_function=hash_function<line_sep>self.encoding=encoding<block_end><def_stmt>get_hash_function self<block_start>"""Get the hash function."""<line_sep><return>self._hash_function<block_end><def_stmt>set_hash_function self hash_function<block_start>"""Set the hash function used. Args: hash_function (str): String representation of hash function Ex. 'md5' Returns: (hashlib hash function) """<if_stmt>"shake"<in>hash_function<and><not>self.length<block_start>errmsg=f"{hash_function} requires length to be set"<line_sep><raise>HasherError(errmsg)<block_end>self._hash_function=getattr(hashlib hash_function <none>)<if_stmt><not>self._hash_function<block_start>errmsg=f"{hash_function} is not currently supported."<if_stmt>hash_function<in>hashlib.algorithms_guaranteed<block_start>errmsg=f"{hash_function} needs Hasher implementation."<block_end><raise>HasherError(errmsg)<block_end><block_end><def_stmt>hash_string_to_hex self string<block_start>"""Return a hex digest of the hashed string. Args: string (str): The string to be hashed Returns: (str): The hex string of the hash """<if_stmt>self.length<block_start><return>self.hash_function(string.encode(self.encoding)).hexdigest(self.length)<block_end><return>self.hash_function(string.encode(self.encoding)).hexdigest()<block_end>hash_function=property(get_hash_function set_hash_function)<block_end>
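A short usage sketch for the Hasher utility above; the input strings are illustrative.

# Fixed-length algorithms need no length argument.
sha_hasher = Hasher(hash_function="sha256")
print(sha_hasher.hash_string_to_hex("report-2021-07-01"))

# SHAKE algorithms are variable-length, so `length` must be supplied up front;
# otherwise set_hash_function raises HasherError.
shake_hasher = Hasher(hash_function="shake_128", length=16)
print(shake_hasher.hash_string_to_hex("report-2021-07-01"))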
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<import_stmt>unittest<import_from_stmt>programy.storage.stores.sql.dao.node PatternNode<import_from_stmt>programy.storage.stores.sql.dao.node TemplateNode<class_stmt>PatternNodeTests(unittest.TestCase)<block_start><def_stmt>test_init self<block_start>node1=PatternNode(name='name' node_class='class')<line_sep>self.assertIsNotNone(node1)<line_sep>self.assertEqual("<Pattern Node(id='n/a', name='name', node_class='class')>" str(node1))<line_sep>node2=PatternNode(id=1 name='name' node_class='class')<line_sep>self.assertIsNotNone(node2)<line_sep>self.assertEqual("<Pattern Node(id='1', name='name', node_class='class')>" str(node2))<block_end><block_end><class_stmt>TemplateNodeTests(unittest.TestCase)<block_start><def_stmt>test_init self<block_start>node1=TemplateNode(name='name' node_class='class')<line_sep>self.assertIsNotNone(node1)<line_sep>self.assertEqual("<Template Node(id='n/a', name='name', node_class='class')>" str(node1))<line_sep>node2=TemplateNode(id=1 name='name' node_class='class')<line_sep>self.assertIsNotNone(node2)<line_sep>self.assertEqual("<Template Node(id='1', name='name', node_class='class')>" str(node2))<block_end><block_end>
# Generated by Django 3.2.4 on 2021-07-08 14:14 <import_stmt>InvenTree.validators<import_stmt>build.models<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('build' '0029_auto_20210601_1525') ]<line_sep>operations=[migrations.AlterField(model_name='build' name='reference' field=models.CharField(default=build.models.get_next_build_number help_text='Build Order Reference' max_length=64 unique=<true> validators=[InvenTree.validators.validate_build_order_reference] verbose_name='Reference') ) ]<block_end>
<import_stmt>asyncio<import_stmt>unittest<import_from_stmt>typing Awaitable<import_stmt>hummingbot.connector.derivative.binance_perpetual.binance_perpetual_web_utils<as>web_utils<import_stmt>hummingbot.connector.derivative.binance_perpetual.constants<as>CONSTANTS<import_from_stmt>hummingbot.connector.derivative.binance_perpetual.binance_perpetual_web_utils BinancePerpetualRESTPreProcessor <import_from_stmt>hummingbot.connector.time_synchronizer TimeSynchronizer<import_from_stmt>hummingbot.core.web_assistant.connections.data_types RESTMethod RESTRequest<import_from_stmt>hummingbot.core.web_assistant.web_assistants_factory WebAssistantsFactory<class_stmt>BinancePerpetualWebUtilsUnitTests(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<arrow><none><block_start>super().setUpClass()<line_sep>cls.ev_loop=asyncio.get_event_loop()<line_sep>cls.pre_processor=BinancePerpetualRESTPreProcessor()<block_end><def_stmt>async_run_with_timeout self coroutine:Awaitable timeout:float=1<block_start>ret=self.ev_loop.run_until_complete(asyncio.wait_for(coroutine timeout))<line_sep><return>ret<block_end><def_stmt>test_binance_perpetual_rest_pre_processor_non_post_request self<block_start>request:RESTRequest=RESTRequest(method=RESTMethod.GET url="/TEST_URL" )<line_sep>result_request:RESTRequest=self.async_run_with_timeout(self.pre_processor.pre_process(request))<line_sep>self.assertIn("Content-Type" result_request.headers)<line_sep>self.assertEqual(result_request.headers["Content-Type"] "application/x-www-form-urlencoded")<block_end><def_stmt>test_binance_perpetual_rest_pre_processor_post_request self<block_start>request:RESTRequest=RESTRequest(method=RESTMethod.POST url="/TEST_URL" )<line_sep>result_request:RESTRequest=self.async_run_with_timeout(self.pre_processor.pre_process(request))<line_sep>self.assertIn("Content-Type" result_request.headers)<line_sep>self.assertEqual(result_request.headers["Content-Type"] "application/json")<block_end><def_stmt>test_rest_url_main_domain self<block_start>path_url="/TEST_PATH_URL"<line_sep>expected_url=f"{CONSTANTS.PERPETUAL_BASE_URL}{CONSTANTS.API_VERSION_V2}{path_url}"<line_sep>self.assertEqual(expected_url web_utils.rest_url(path_url api_version=CONSTANTS.API_VERSION_V2))<line_sep>self.assertEqual(expected_url web_utils.rest_url(path_url api_version=CONSTANTS.API_VERSION_V2))<block_end><def_stmt>test_rest_url_testnet_domain self<block_start>path_url="/TEST_PATH_URL"<line_sep>expected_url=f"{CONSTANTS.TESTNET_BASE_URL}{CONSTANTS.API_VERSION_V2}{path_url}"<line_sep>self.assertEqual(expected_url web_utils.rest_url(path_url=path_url domain="testnet" api_version=CONSTANTS.API_VERSION_V2))<block_end><def_stmt>test_wss_url_main_domain self<block_start>endpoint="TEST_SUBSCRIBE"<line_sep>expected_url=f"{CONSTANTS.PERPETUAL_WS_URL}{endpoint}"<line_sep>self.assertEqual(expected_url web_utils.wss_url(endpoint=endpoint))<block_end><def_stmt>test_wss_url_testnet_domain self<block_start>endpoint="TEST_SUBSCRIBE"<line_sep>expected_url=f"{CONSTANTS.TESTNET_WS_URL}{endpoint}"<line_sep>self.assertEqual(expected_url web_utils.wss_url(endpoint=endpoint domain="testnet"))<block_end><def_stmt>test_build_api_factory self<block_start>api_factory=web_utils.build_api_factory(time_synchronizer=TimeSynchronizer() time_provider=<lambda>:<none> )<line_sep>self.assertIsInstance(api_factory WebAssistantsFactory)<line_sep>self.assertIsNone(api_factory._auth)<line_sep>self.assertTrue(2 len(api_factory._rest_pre_processors))<block_end><block_end>
# pylint: disable=missing-function-docstring, missing-module-docstring/ <import_from_stmt>project.folder2.mod3 one_hundred_plus_sum_to_n_squared<if_stmt>__name__<eq>'__main__'<block_start>print(one_hundred_plus_sum_to_n_squared(4))<block_end>
""" Copyright (c) 2014, Samsung Electronics Co.,Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Samsung Electronics Co.,Ltd.. """<line_sep>""" jpeg4py - libjpeg-turbo cffi bindings and helper classes. URL: https://github.com/ajkxyz/jpeg4py Original author: <NAME> <<EMAIL>> """<line_sep>""" Helper classes for libjpeg-turbo cffi bindings. """<import_stmt>jpeg4py._cffi<as>jpeg<import_from_stmt>jpeg4py._cffi TJPF_RGB<import_stmt>numpy<import_stmt>os<class_stmt>JPEGRuntimeError(RuntimeError)<block_start><def_stmt>__init__ self msg code<block_start>super(JPEGRuntimeError self).__init__(msg)<line_sep>self.code=code<block_end><block_end><class_stmt>Base(object)<block_start>"""Base class. Attributes: lib_: cffi handle to loaded shared library. """<def_stmt>__init__ self lib_<block_start>"""Constructor. Parameters: lib_: cffi handle to loaded shared library. """<if_stmt>lib_<is><none><block_start>jpeg.initialize()<line_sep>lib_=jpeg.lib<block_end>self.lib_=lib_<block_end><def_stmt>get_last_error self<block_start>"""Returns last error string. """<line_sep><return>jpeg.ffi.string(self.lib_.tjGetErrorStr()).decode("utf-8")<block_end><block_end><class_stmt>Handle(Base)<block_start>"""Stores tjhandle pointer. Attributes: handle_: cffi tjhandle pointer. """<def_stmt>__init__ self handle_ lib_<block_start>"""Constructor. Parameters: handle_: cffi tjhandle pointer. """<line_sep>self.handle_=<none><line_sep>super(Handle self).__init__(lib_)<line_sep>self.handle_=handle_<block_end><def_stmt>release self<block_start><if_stmt>self.handle_<is><not><none><block_start>self.lib_.tjDestroy(self.handle_)<line_sep>self.handle_=<none><block_end><block_end><block_end><class_stmt>JPEG(Base)<block_start>"""Main class. Attributes: decompressor: Handle object for decompressor. source: numpy array with source data, either encoded raw jpeg which may be decoded/transformed or or source image for the later encode. width: image width. height: image height. subsampling: level of chrominance subsampling. Static attributes: decompressors: list of decompressors for caching purposes. 
"""<line_sep>decompressors=[]<line_sep>@staticmethod<def_stmt>clear <block_start>"""Clears internal caches. """<line_sep># Manually release cached JPEG decompressors <for_stmt>handle reversed(JPEG.decompressors)<block_start>handle.release()<block_end><del_stmt>JPEG.decompressors[:]<block_end><def_stmt>__init__ self source lib_=<none><block_start>"""Constructor. Parameters: source: source for JPEG operations (numpy array or file name). """<line_sep>super(JPEG self).__init__(lib_)<line_sep>self.decompressor=<none><line_sep>self.width=<none><line_sep>self.height=<none><line_sep>self.subsampling=<none><if_stmt>hasattr(source "__array_interface__")<block_start>self.source=source<block_end><elif_stmt>numpy.fromfile<is><not><none><block_start>self.source=numpy.fromfile(source dtype=numpy.uint8)<block_end><else_stmt><block_start>fin=open(source "rb")<line_sep>self.source=numpy.empty(os.path.getsize(source) dtype=numpy.uint8)<line_sep>fin.readinto(self.source)<line_sep>fin.close()<block_end><block_end><def_stmt>_get_decompressor self<block_start><if_stmt>self.decompressor<is><not><none><block_start><return><block_end><try_stmt><block_start>self.decompressor=JPEG.decompressors.pop(-1)<block_end><except_stmt>IndexError<block_start>d=self.lib_.tjInitDecompress()<if_stmt>d<eq>jpeg.ffi.NULL<block_start><raise>JPEGRuntimeError("tjInitDecompress() failed with error "<concat>"string %s"%self.get_last_error() 0)<block_end>self.decompressor=Handle(d self.lib_)<block_end><block_end><def_stmt>parse_header self<block_start>"""Parses JPEG header. Fills self.width, self.height, self.subsampling. """<line_sep>self._get_decompressor()<line_sep>whs=jpeg.ffi.new("int[]" 3)<line_sep>whs_base=int(jpeg.ffi.cast("size_t" whs))<line_sep>whs_itemsize=int(jpeg.ffi.sizeof("int"))<line_sep>n=self.lib_.tjDecompressHeader2(self.decompressor.handle_ jpeg.ffi.cast("unsigned char*" self.source.__array_interface__["data"][0]) self.source.nbytes jpeg.ffi.cast("int*" whs_base) jpeg.ffi.cast("int*" whs_base+whs_itemsize) jpeg.ffi.cast("int*" whs_base+whs_itemsize+whs_itemsize))<if_stmt>n<block_start><raise>JPEGRuntimeError("tjDecompressHeader2() failed with error "<concat>"%d and error string %s"%(n self.get_last_error()) n)<block_end>self.width=int(whs[0])<line_sep>self.height=int(whs[1])<line_sep>self.subsampling=int(whs[2])<block_end><def_stmt>decode self dst=<none> pixfmt=TJPF_RGB<block_start>bpp=jpeg.tjPixelSize[pixfmt]<if_stmt>dst<is><none><block_start><if_stmt>self.width<is><none><block_start>self.parse_header()<block_end>sh=[self.height self.width]<if_stmt>bpp<g>1<block_start>sh.append(bpp)<block_end>dst=numpy.zeros(sh dtype=numpy.uint8)<block_end><elif_stmt><not>hasattr(dst "__array_interface__")<block_start><raise>ValueError("dst should be numpy array or None")<block_end><if_stmt>len(dst.shape)<l>2<block_start><raise>ValueError("dst shape length should 2 or 3")<block_end><if_stmt>dst.nbytes<l>dst.shape[1]<times>dst.shape[0]<times>bpp<block_start><raise>ValueError("dst is too small to hold the requested pixel format")<block_end>self._get_decompressor()<line_sep>n=self.lib_.tjDecompress2(self.decompressor.handle_ jpeg.ffi.cast("unsigned char*" self.source.__array_interface__["data"][0]) self.source.nbytes jpeg.ffi.cast("unsigned char*" dst.__array_interface__["data"][0]) dst.shape[1] dst.strides[0] dst.shape[0] pixfmt 0)<if_stmt>n<block_start><raise>JPEGRuntimeError("tjDecompress2() failed with error "<concat>"%d and error string %s"%(n self.get_last_error()) n)<block_end><return>dst<block_end><def_stmt>__del__ self# Return 
decompressor to cache. <block_start><if_stmt>self.decompressor<is><not><none><block_start>JPEG.decompressors.append(self.decompressor)<block_end><block_end><block_end>
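A hedged usage sketch for the JPEG helper above. It assumes libjpeg-turbo is installed and that "image.jpg" exists; neither assumption comes from the source.

img = JPEG("image.jpg")   # the source may also be a numpy uint8 array of JPEG bytes
img.parse_header()        # fills img.width, img.height, img.subsampling
rgb = img.decode()        # numpy.uint8 array of shape (height, width, 3), RGB by default
JPEG.clear()              # release the cached decompressor handles when finished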
<import_from_stmt>invoke task run<import_from_stmt>os.path dirname abspath<line_sep># Create scripted tasks to run in command-line here # http://docs.pyinvoke.org/en/latest/ PROJECT_ROOT='%s/{{ project_name }}'%dirname(abspath(__file__))<line_sep>@task<def_stmt>clean <block_start>"""Clean up static, compiled, test, and log files"""<line_sep>print("Deleting *.pyc files...")<line_sep>run('find . -name *.pyc -delete')<line_sep>print("Deleting collected static files...")<line_sep>run('rm -rf %s/public'%PROJECT_ROOT)<line_sep>print("Deleting compiled stylesheets...")<line_sep>run('rm -rf %s/static/css/build'%PROJECT_ROOT)<line_sep>print("Deleting compiled scripts...")<line_sep>run('rm -rf %s/static/js/build'%PROJECT_ROOT)<line_sep>run('rm -rf %s/static/js/tests/build'%PROJECT_ROOT)<line_sep>print('Deleting compressed images...')<line_sep>run('rm -rf %s/static/img/compressed'%PROJECT_ROOT)<line_sep>print('Deleting test files...')<line_sep>run('rm -rf tests/*')<line_sep>run('rm -rf .coverage')<line_sep>run('rm -rf _SpecRunner.html')<line_sep>print('Deleting log files...')<line_sep>run('rm -rf logs/*')<block_end>
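Usage note (an inference from the imports above, not stated in the file): this tasks module targets the pre-1.0 Invoke API, where `run` is a module-level import and task functions take no context argument, so the task is driven from the shell as `invoke clean`. For contrast, a hedged sketch of the same task under the current Invoke API, which passes an explicit context:

from invoke import task

@task
def clean(c):
    """Clean up compiled files (abridged)."""
    print("Deleting *.pyc files...")
    c.run("find . -name '*.pyc' -delete")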
# Copyright 2012 Viewfinder Inc. All Rights Reserved. """Server log tests. """<line_sep>__author__='<EMAIL> (<NAME>)'<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>sys<import_stmt>tempfile<import_stmt>time<import_from_stmt>functools partial<import_from_stmt>tornado options testing<import_from_stmt>viewfinder.backend.storage.object_store ObjectStore InitObjectStore<import_from_stmt>viewfinder.backend.storage.file_object_store FileObjectStore<import_from_stmt>viewfinder.backend.storage.server_log BatchingLogHandler LogBatch LogBatchPersistor InitServerLog FinishServerLog<import_from_stmt>viewfinder.backend.base util counters<import_from_stmt>viewfinder.backend.base.testing async_test BaseTestCase<class_stmt>_BadObjectStore(FileObjectStore)<block_start>"""A file object store which simply does not return requests for puts. If 'fail_fast' is True, returns an error immediately. """<def_stmt>__init__ self bucket_name temporary=<false> fail_fast=<false><block_start>super(_BadObjectStore self).__init__(bucket_name temporary)<line_sep>self._fail_fast=fail_fast<line_sep>self._put_map=dict()<block_end><def_stmt>Put self key value callback<block_start>self._put_map[key]=value<if_stmt>self._fail_fast<block_start><raise>Exception('failed to put key %s'%key)<block_end><block_end><def_stmt>GetPutValue self key<block_start><return>self._put_map[key]<block_end><block_end><class_stmt>_FakePersistor(object)<block_start>"""Fake log persistor - simply accepts batches without accepting them."""<def_stmt>__init__ self<block_start>self.batches={}<line_sep>self._handlers=[]<block_end><def_stmt>PersistLogBatch self batch<block_start>self.batches[batch.Key()]=batch<block_end><def_stmt>AddHandler self handler<block_start>"""Add a handler to the list of handlers registered with this persistor."""<if_stmt><not>handler<in>self._handlers<block_start>self._handlers.append(handler)<block_end><block_end><def_stmt>RemoveHandler self handler<block_start>"""Remove a handler from the list of handlers registered with this persistor."""<if_stmt>handler<in>self._handlers<block_start>self._handlers.remove(handler)<block_end><block_end><def_stmt>close self callback=<none><block_start><for_stmt>h list(self._handlers)<block_start>h.close()<block_end><if_stmt>callback<block_start>callback()<block_end><block_end><block_end><class_stmt>_BasicLogHandler(BatchingLogHandler)<block_start>STORE_NAME='basic_store'<def_stmt>__init__ self *args **kwargs<block_start>super(_BasicLogHandler self).__init__(*args **kwargs)<line_sep>self.batch_no=0<block_end><def_stmt>MakeBatch self buffer<block_start>self.batch_no<augadd>1<line_sep><return>LogBatch(buffer self.STORE_NAME 'basic' self.batch_no)<block_end><block_end><class_stmt>LogBatchPersistorTestCase(BaseTestCase testing.LogTrapTestCase)<block_start><def_stmt>setUp self<block_start>options.options.fileobjstore=<true><line_sep>super(LogBatchPersistorTestCase self).setUp()<line_sep>InitObjectStore(temporary=<true>)<block_end><def_stmt>tearDown self<block_start>super(LogBatchPersistorTestCase self).tearDown()<block_end><def_stmt>testPersistor self<block_start>"""Basic test for a log persistor."""<line_sep>backup_dir=tempfile.mkdtemp()<line_sep>persistor=LogBatchPersistor(backup_dir=backup_dir)<line_sep>batches=[LogBatch('Log batch buffer 1A' ObjectStore.SERVER_LOG 'test1' 'keyA') LogBatch('Log batch buffer 2B' ObjectStore.SERVER_LOG 'test2' 'keyB') LogBatch('Log batch buffer 3C' ObjectStore.SERVER_LOG 'test3' 'keyC') LogBatch('Log batch buffer 4D' ObjectStore.USER_LOG 'test4' 'keyD') 
LogBatch('Log batch buffer 5E' ObjectStore.USER_LOG 'test5' 'keyE')]<for_stmt>batch batches<block_start>persistor.PersistLogBatch(batch)<block_end>self._RunAsync(persistor.Wait)<line_sep># No files should have been backed up. files=os.listdir(os.path.join(backup_dir os.path.basename(sys.argv[0])))<line_sep>self.assertEqual(0 len(files))<line_sep>self._RunAsync(self._VerifyObjStoreBatches batches)<block_end><def_stmt>testBadObjStore self<block_start>"""Tests backup storage in case the object store is down. Also verifies close() method."""<line_sep>backup_dir=tempfile.mkdtemp()<line_sep>persistor=LogBatchPersistor(backup_dir=backup_dir)<line_sep>batches=[LogBatch('Log batch buffer 1A' ObjectStore.SERVER_LOG 'test1' 'keyA') LogBatch('Log batch buffer 2B' ObjectStore.SERVER_LOG 'test2' 'keyB') LogBatch('Log batch buffer 3C' ObjectStore.SERVER_LOG 'test3' 'keyC') LogBatch('Log batch buffer 4D' ObjectStore.USER_LOG 'test4' 'keyD') LogBatch('Log batch buffer 5E' ObjectStore.USER_LOG 'test5' 'keyE')]<line_sep>oldStores=[ObjectStore.GetInstance(ObjectStore.SERVER_LOG) ObjectStore.GetInstance(ObjectStore.USER_LOG)]<line_sep>ObjectStore.SetInstance(ObjectStore.SERVER_LOG _BadObjectStore(ObjectStore.SERVER_LOG_BUCKET temporary=<true> fail_fast=<false>))<line_sep>ObjectStore.SetInstance(ObjectStore.USER_LOG _BadObjectStore(ObjectStore.USER_LOG_BUCKET temporary=<true> fail_fast=<false>))<line_sep># Cut the timeout allowed for flushing buffers on close to something small. persistor._CLOSE_TIMEOUT_SECS=0.100<for_stmt>batch batches<block_start>persistor.PersistLogBatch(batch)<block_end>self._RunAsync(persistor.close)<line_sep>self._VerifyBackupBatches(backup_dir batches)<line_sep># Set a functional file object store instance and verify that it # restores the pending server logs. ObjectStore.SetInstance(ObjectStore.SERVER_LOG oldStores[0])<line_sep>ObjectStore.SetInstance(ObjectStore.USER_LOG oldStores[1])<line_sep>persistor=LogBatchPersistor(backup_dir=backup_dir)<line_sep>self._RunAsync(persistor.Wait)<line_sep>self._RunAsync(self._VerifyObjStoreBatches batches)<block_end><def_stmt>testRestoreTimeout self<block_start>"""Verifies the persistor will reattempt failed object store writes after a timeout"""<line_sep>backup_dir=tempfile.mkdtemp()<line_sep>persistor=LogBatchPersistor(backup_dir=backup_dir)<line_sep>batches=[LogBatch('Log batch buffer 1A' ObjectStore.SERVER_LOG 'test1' 'keyA') LogBatch('Log batch buffer 2B' ObjectStore.SERVER_LOG 'test2' 'keyB') LogBatch('Log batch buffer 3C' ObjectStore.SERVER_LOG 'test3' 'keyC')]<line_sep>persistor._RESTORE_INTERVAL_SECS=0.100<line_sep># The "bad" object store which does nothing with puts. oldStore=ObjectStore.GetInstance(ObjectStore.SERVER_LOG)<line_sep>ObjectStore.SetInstance(ObjectStore.SERVER_LOG _BadObjectStore(ObjectStore.SERVER_LOG_BUCKET temporary=<true> fail_fast=<true>))<for_stmt>batch batches<block_start>persistor.PersistLogBatch(batch)<block_end>self.io_loop.add_callback(partial(self._VerifyBackupBatches backup_dir batches))<line_sep># Reinstate the "good" object store. 
ObjectStore.SetInstance(ObjectStore.SERVER_LOG oldStore)<line_sep>self._RunAsync(self.io_loop.add_timeout time.time()+0.200)<line_sep>self._RunAsync(self._VerifyObjStoreBatches batches)<block_end><def_stmt>_SortBatchesByStore self batches<block_start>batches_by_store={}<for_stmt>batch batches<block_start>key=batch.store_name<line_sep>store_batches=batches_by_store.setdefault(key [])<line_sep>store_batches.append(batch)<block_end><return>batches_by_store<block_end><def_stmt>_VerifyObjStoreBatches self exp_batches callback<block_start><def_stmt>_OnGetBatch exp_batch cb buffer<block_start>self.assertEqual(exp_batch.buffer buffer)<line_sep>cb()<block_end><def_stmt>_OnListKeys store batches cb keys<block_start>self.assertEqual(len(batches) len(keys))<with_stmt>util.Barrier(cb)<as>b2<block_start><for_stmt>batch batches<block_start>self.assertIn(batch.Key() keys)<line_sep>store.Get(batch.Key() partial(_OnGetBatch batch b2.Callback()))<block_end><block_end><block_end>batches_by_store=self._SortBatchesByStore(exp_batches)<with_stmt>util.Barrier(callback)<as>b<block_start><for_stmt>store batches_by_store.keys()<block_start>batches=batches_by_store[store]<line_sep>store=ObjectStore.GetInstance(store)<line_sep>store.ListKeys(partial(_OnListKeys store batches b.Callback()))<block_end><block_end><block_end><def_stmt>_VerifyBackupBatches self backup_dir exp_batches<block_start>batches_by_store=self._SortBatchesByStore(exp_batches)<line_sep>dir=os.path.join(backup_dir os.path.basename(sys.argv[0]))<line_sep>store_dirs=os.listdir(dir)<line_sep>self.assertEqual(len(batches_by_store.keys()) len(store_dirs))<for_stmt>store batches_by_store.keys()<block_start>self.assertIn(store store_dirs)<line_sep>store_dir=os.path.join(dir store)<line_sep>batches=batches_by_store[store]<line_sep>self.assertEqual(len(batches) len(os.listdir(store_dir)))<for_stmt>batch batches<block_start>file=os.path.join(store_dir batch.FileSystemKey())<line_sep>self.assertTrue(os.path.exists(file))<line_sep>self.assertTrue(os.path.isfile(file))<with_stmt>open(file 'r')<as>f<block_start>buffer=f.read()<line_sep>self.assertEqual(batch.buffer buffer)<block_end><block_end><block_end><block_end><block_end><class_stmt>BatchingLogHandlerTestCase(BaseTestCase testing.LogTrapTestCase)<block_start><def_stmt>setUp self<block_start>super(BatchingLogHandlerTestCase self).setUp()<line_sep>self._persistor=_FakePersistor()<line_sep>LogBatchPersistor.SetInstance(self._persistor)<block_end><def_stmt>testBatching self<block_start>"""Tests that the server log writes to object store."""<line_sep>basic_log=_BasicLogHandler(max_buffer_bytes=100)<line_sep>record=logging.makeLogRecord({'level':logging.INFO 'msg':'test'})<line_sep>basic_log.emit(record)<line_sep>basic_log.flush()<line_sep>self._RunAsync(self._VerifyLog ['test'])<block_end><def_stmt>testBadLogMessages self<block_start>"""Tests log messages with both 8-bit byte strings and unicode."""<line_sep>basic_log=_BasicLogHandler(max_buffer_bytes=100)<line_sep>record=logging.makeLogRecord({'level':logging.INFO 'msg':'\x80abc'})<line_sep>basic_log.emit(record)<line_sep>record=logging.makeLogRecord({'level':logging.INFO 'msg':u'\x80abc'})<line_sep>basic_log.emit(record)<line_sep>basic_log.flush()<block_end><def_stmt>testMultipleFlushes self<block_start>"""Tests multiple flushes."""<line_sep>basic_log=_BasicLogHandler(flush_interval_secs=0.100)<for_stmt>i xrange(8)<block_start>record=logging.makeLogRecord({'level':logging.INFO 
'msg':'test%d'%i})<line_sep>basic_log.emit(record)<line_sep>basic_log.flush()<block_end>self._RunAsync(self._VerifyLog ['test%d'%i<for>i range(8)])<block_end><def_stmt>testMaxBytesFlush self<block_start>"""Tests that the server log flushes based on maximum bytes written."""<line_sep>basic_log=_BasicLogHandler(max_buffer_bytes=100)<line_sep>msg='test'<times>100<line_sep>record=logging.makeLogRecord({'level':logging.INFO 'msg':msg})<line_sep>basic_log.emit(record)<line_sep>self._RunAsync(self._VerifyLog [msg])<block_end><def_stmt>testTimeoutFlush self<block_start>"""Tests that the server log flushes after maximum flush interval."""<line_sep>basic_log=_BasicLogHandler(flush_interval_secs=0.100)<line_sep>record=logging.makeLogRecord({'level':logging.INFO 'msg':'test'})<line_sep>basic_log.emit(record)<line_sep>self._RunAsync(self.io_loop.add_timeout time.time()+0.150)<line_sep>self._RunAsync(self._VerifyLog ['test'])<block_end><def_stmt>testFinishServerLog self<block_start>"""Verify that 'close()' is called on the server handler when the persistor is closed. """<line_sep>persistor=_FakePersistor()<line_sep>InitServerLog(persistor)<line_sep>self.assertEqual(2 len(persistor._handlers))<line_sep>basic_handler=_BasicLogHandler()<line_sep>basic_handler.setLevel(logging.INFO)<with_stmt>basic_handler.LoggingContext()<block_start>self.assertEqual(3 len(persistor._handlers))<line_sep>self.assertEqual(0 len(persistor.batches))<line_sep>logging.info('Test Message')<block_end>self.assertEqual(3 len(persistor._handlers))<line_sep>self._RunAsync(FinishServerLog)<line_sep>self.assertEqual(0 len(persistor._handlers))<line_sep>self.assertEqual(2 len(persistor.batches))<block_end><def_stmt>testFinishServerLogWithErrors self<block_start>"""Verify that the error log handler properly batches. """<line_sep>persistor=_FakePersistor()<line_sep>InitServerLog(persistor)<line_sep>self.assertEqual(2 len(persistor._handlers))<line_sep>basic_handler=_BasicLogHandler()<line_sep>basic_handler.setLevel(logging.INFO)<with_stmt>basic_handler.LoggingContext()<block_start>self.assertEqual(3 len(persistor._handlers))<line_sep>self.assertEqual(0 len(persistor.batches))<line_sep>logging.error('Test Error')<block_end>self.assertEqual(3 len(persistor._handlers))<line_sep>self._RunAsync(FinishServerLog)<line_sep>self.assertEqual(0 len(persistor._handlers))<line_sep>self.assertEqual(3 len(persistor.batches))<block_end><def_stmt>_VerifyLog self exp_msgs callback<block_start>"""Verifies that there are len('exp_msg') batches stored and that each contains the expected message as contents. """<def_stmt>_DoVerify <block_start>batches=self._persistor.batches<line_sep>self.assertEqual(len(batches) len(exp_msgs))<for_stmt>key,msg zip(sorted(batches.keys()) exp_msgs)<block_start>value=batches[key].buffer<line_sep>regexp=re.compile('\[pid:[0-9]+\] .*:[0-9]+: %s'%msg)<line_sep>self.assertTrue(regexp.search(value)<is><not><none> '%s not found in %s'%(msg value))<block_end>callback()<block_end>self.io_loop.add_callback(_DoVerify)<block_end><block_end><class_stmt>ServerLogHandlerTestCase(BaseTestCase testing.LogTrapTestCase)<block_start><def_stmt>testErrorCounters self<block_start>"""Verify that error-counting performance counters are working correctly. These performance counters are implemented as a log filter. 
"""<line_sep>meter=counters.Meter(counters.counters.viewfinder.errors)<line_sep>InitServerLog(_FakePersistor())<def_stmt>_CheckCounters expected_errors expected_warnings<block_start>sample=meter.sample()<line_sep>self.assertEqual(expected_errors sample.viewfinder.errors.error)<line_sep>self.assertEqual(expected_warnings sample.viewfinder.errors.warning)<block_end>_CheckCounters(0 0)<line_sep>old_level=logging.getLogger().level<line_sep>logging.getLogger().setLevel(logging.DEBUG)<line_sep>logging.critical('Critical')<line_sep>logging.error('Error1')<line_sep>logging.warning('Warning1')<line_sep>logging.warning('Warning2')<line_sep>logging.getLogger().setLevel(old_level)<line_sep>self._RunAsync(FinishServerLog)<line_sep>_CheckCounters(2 2)<line_sep>_CheckCounters(0 0)<block_end><block_end>
"""Actions."""<import_from_stmt>construct Array Byte Const CString Flag Float32l If Int16ul Int32sl Int32ul Padding Peek String Struct this Bytes Embedded IfThenElse <import_from_stmt>mgz.body.achievements achievements<import_from_stmt>mgz.enums DiplomacyStanceEnum FormationEnum GameActionModeEnum OrderTypeEnum ReleaseTypeEnum ResourceEnum ResourceLevelEnum RevealMapEnum StanceEnum AgeEnum VictoryEnum <import_from_stmt>mgz.util TimeSecAdapter check_flags<line_sep># pylint: disable=invalid-name, bad-continuation # Not all actions are defined, not all actions are complete. interact="interact"/Struct("player_id"/Byte Const(b"\x00\x00") "target_id"/Int32ul "selected"/Int32ul "x"/Float32l "y"/Float32l "next"/Peek(Bytes(8)) "flags"/If(<lambda>ctx:check_flags(ctx.next) Bytes(8)) "unit_ids"/If(<lambda>ctx:ctx.selected<l>0xff Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul)))<line_sep>give_attribute="give_attribute"/Struct("player_id"/Byte "target_id"/Byte "attribute"/Byte "amount"/Float32l)<line_sep>add_attribute="add_attribute"/Struct("player_id"/Byte "attribute"/Byte Padding(1) "amount"/Float32l)<line_sep>create="create"/Struct(Padding(1) "unit_type"/Int16ul "player_id"/Byte Padding(1) "x"/Float32l "y"/Float32l "z"/Float32l)<line_sep>ai_interact="ai_interact"/Struct(Padding(3) "target_id"/Int32ul "selected"/Byte Padding(3) "x"/Float32l "y"/Float32l If(<lambda>ctx:ctx.selected<l>0xff Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul)))<line_sep>move="move"/Struct("player_id"/Byte Const(b"\x00\x00") Padding(4) "selected"/Int32ul "x"/Float32l "y"/Float32l If(<lambda>ctx:ctx.selected<l>0xff Struct("next"/Peek(Bytes(8)) "flags"/If(<lambda>ctx:check_flags(ctx.next) Bytes(8)))) "unit_ids"/If(<lambda>ctx:ctx.selected<l>0xff Array(<lambda>ctx:ctx.selected Int32ul)))<line_sep>ai_move="ai_move"/Struct("selected"/Byte "player_id"/Byte "player_num"/Byte Padding(4) Padding(4) "target_id"/Int32ul Padding(1) Padding(3) "x"/Float32l "y"/Float32l Padding(4) Padding(4) Padding(4) If(<lambda>ctx:ctx.selected<g>0x01 Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul)))<line_sep>resign="resign"/Struct("player_id"/Byte "player_num"/Byte "disconnected"/Flag)<line_sep>spec="spec"/Struct(Padding(<lambda>ctx:ctx._._.length-1))<line_sep>queue="queue"/Struct(Padding(3) "building_id"/Int32ul "unit_type"/Int16ul "number"/Int16ul )<line_sep>multiqueue="multiqueue"/Struct(Padding(3) "unit_type"/Int16ul "num_buildings"/Byte "queue_amount"/Byte Array(<lambda>ctx:ctx.num_buildings "building_ids"/Int32ul))<line_sep>ai_queue="ai_queue"/Struct(Padding(3) "building_id"/Int32ul "player_id"/Int16ul "unit_type"/Int16ul Padding(4))<line_sep>research="research"/Struct(Padding(3) "building_id"/Int32ul "player_id"/Int16ul "next"/Peek(Struct(Padding(6) "check"/Int32sl)) IfThenElse(<lambda>ctx:ctx.next.check<eq>-1 Embedded(Struct("selected"/Int16ul "technology_type"/Int32ul Array(<lambda>ctx:ctx.selected "selected_ids"/Int32sl))) Embedded(Struct("technology_type"/Int16ul Array(1 "selected_ids"/Int32sl)))))<line_sep>sell="sell"/Struct("player_id"/Byte ResourceEnum("resource_type"/Byte) "amount"/Byte Padding(4))<line_sep>buy="buy"/Struct("player_id"/Byte ResourceEnum("resource_type"/Byte) "amount"/Byte Padding(4))<line_sep>stop="stop"/Struct("selected"/Byte Array(<lambda>ctx:ctx.selected "object_ids"/Int32ul))<line_sep>stance="stance"/Struct("selected"/Byte StanceEnum("stance_type"/Byte) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>guard="guard"/Struct("selected"/Byte Padding(2) "guarded_unit_id"/Int32ul 
Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>follow="follow"/Struct("selected"/Byte Padding(2) "followed_unit_id"/Int32ul Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>formation="formation"/Struct("selected"/Byte "player_id"/Int16ul FormationEnum("formation_type"/Int32ul) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>save="save"/Struct("exited"/Flag "player_id"/Byte "filename"/CString(encoding='latin1') Padding(<lambda>ctx:ctx._._.length-23) "checksum"/Int32ul)<line_sep>chapter="chapter"/Struct("player_id"/Byte)<line_sep>build="build"/Struct("selected"/Byte "player_id"/Int16ul "x"/Float32l "y"/Float32l "building_type"/Int32ul Padding(4) "sprite_id"/Int32ul Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>game="game"/Struct("mode"/GameActionModeEnum("mode_id"/Byte) "player_id"/Byte Padding(1) "diplomacy"/If(this.mode<eq>'diplomacy' Struct("target_player_id"/Byte Padding(3) "stance_float"/Float32l "stance"/DiplomacyStanceEnum("stance_id"/Byte) )) "speed"/If(this.mode<eq>'speed' Struct(Padding(4) "speed"/Float32l Padding(1))) "instant_build"/If(this.mode<eq>'instant_build' Struct(Padding(9))) "quick_build"/If(this.mode<eq>'quick_build' Struct("status"/Flag Padding(8) )) "allied_victory"/If(this.mode<eq>'allied_victory' Struct("player_id"/Byte "status"/Flag Padding(7))) "cheat"/If(this.mode<eq>'cheat' Struct("cheat_id"/Byte Padding(8))) "unk0"/If(this.mode<eq>'unk0' Struct(Padding(9))) "spy"/If(this.mode<eq>'spy' Struct(Padding(9))) "unk1"/If(this.mode<eq>'unk1' Struct(Padding(9))) "farm_queue"/If(this.mode<eq>'farm_queue' Struct("amount"/Byte # this seems to be a bit inconsistent between versions, needs more research Padding(8))) "farm_unqueue"/If(this.mode<eq>'farm_unqueue' Struct("amount"/Byte # this seems to be a bit inconsistent between versions, needs more research Padding(8))) # toggle farm auto seed queue "farm_autoqueue"/If(this.mode<eq>'farm_autoqueue' Struct(Padding(9))) "fishtrap_queue"/If(this.mode<eq>'fishtrap_queue' Struct("amount"/Byte Padding(8))) "fishtrap_unqueue"/If(this.mode<eq>'fishtrap_unqueue' Struct("amount"/Byte Padding(8))) # toggle fish trap auto place queue "fishtrap_autoqueue"/If(this.mode<eq>'fishtrap_autoqueue' Struct(Padding(9))) # toggles the default stance when units are created. 
All players start on aggressive by default, if the player # (initially) has defensive enabled it is called right before the first unit is queued, and again every time # the player toggles it in the game options menu "default_stance"/If(this.mode<eq>'default_stance' Struct(Padding(9))) Padding(3))<line_sep>droprelic="droprelic"/Struct(Const(b"\x00\x00\x00") 'unit_id'/Int32ul)<line_sep>wall="wall"/Struct("selected"/Byte "player_id"/Byte IfThenElse(<lambda>ctx:ctx._._.length-16-(ctx.selected<times>4)<eq>8 Embedded(Struct(Padding(1) "start_x"/Int16ul "start_y"/Int16ul "end_x"/Int16ul "end_y"/Int16ul "building_id"/Int32ul Padding(4) "flags"/Bytes(4))) Embedded(Struct("start_x"/Byte "start_y"/Byte "end_x"/Byte "end_y"/Byte Padding(1) "building_id"/Int32ul Padding(4) ))) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>delete="delete"/Struct(Padding(3) "object_id"/Int32ul "player_id"/Int32ul)<line_sep>attackground="attackground"/Struct("selected"/Byte Padding(2) "x"/Float32l "y"/Float32l "next"/Peek(Bytes(4)) "flags"/If(<lambda>ctx:check_flags(ctx.next) Bytes(4)) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>tribute="tribute"/Struct("player_id"/Byte "player_id_to"/Byte ResourceEnum("resource_type"/Byte) "amount"/Float32l "fee"/Float32l)<line_sep>repair="repair"/Struct("selected"/Byte Padding(2) "repaired_id"/Int32ul "next"/Peek(Bytes(4)) "flags"/If(<lambda>ctx:check_flags(ctx.next) Bytes(4)) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>release="release"/Struct("selected"/Int16ul Padding(1) "x"/Float32l # -1 if none "y"/Float32l # -1 if none ReleaseTypeEnum("release_type"/Byte) Padding(3) "release_id"/Int32ul Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>""" unload = "unload"/Struct( "selected"/Int16ul, Padding(1), "x"/Float32l, # -1 if none "y"/Float32l, # -1 if none Padding(4), Padding(4), # 0xffffffff Array(lambda ctx: ctx.selected, "unit_ids"/Int32ul) ) """<line_sep>togglegate="togglegate"/Struct(Padding(3) "gate_id"/Int32ul)<line_sep>flare="flare"/Struct(Padding(7) Array(9 "player_ids"/Byte) Padding(3) "x"/Float32l "y"/Float32l "player_id"/Byte "player_number"/Byte Padding(2))<line_sep>order="order"/Struct("selected"/Byte Padding(2) "building_id"/Int32sl # -1 cancels production queue OrderTypeEnum("order_type"/Byte) "cancel_order"/Byte # when cancelling production queue, this indicates which item in the queue is to be cancelled Padding(2) "x"/Float32l "y"/Float32l Padding(4) # const "next"/Peek(Bytes(4)) "flags"/If(<lambda>ctx:check_flags(ctx.next) Bytes(4)) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul) )<line_sep>gatherpoint="gatherpoint"/Struct("selected"/Byte Padding(2) "target_id"/Int32ul "target_type"/Int32ul "x"/Float32l "y"/Float32l Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>townbell="townbell"/Struct(Padding(3) "towncenter_id"/Int32ul "active"/Int32ul)<line_sep>"""Patrol 10 X-coordinates followed by 10 Y-coordinates First of each is popped off for consistency with other actions """<line_sep>patrol="patrol"/Struct("selected"/Byte "waypoints"/Int16ul "x"/Float32l Array(9 "x_more"/Float32l) "y"/Float32l Array(9 "y_more"/Float32l) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul) )<line_sep>waypoint="waypoint"/Struct(Padding(1) "selected"/Byte "x"/Byte "y"/Byte "building_ids"/If(<lambda>ctx:ctx.selected<ne>255 Array(<lambda>ctx:ctx.selected Int32ul)))<line_sep>ai_waypoint="ai_waypoint"/Struct("selected"/Byte "waypoint_count"/Byte Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul) 
Array(<lambda>ctx:ctx.waypoint_count "x_more"/Byte) Array(<lambda>ctx:ctx.waypoint_count "y_more"/Byte))<line_sep>backtowork="backtowork"/Struct(Padding(3) "towncenter_id"/Int32ul)<line_sep>ai_command="ai_command"/Struct(Padding(<lambda>ctx:ctx._._.length-1))<line_sep>"""DE Queue In DE queue and multi queue share the same command """<line_sep>de_queue="de_queue"/Struct("player_id"/Byte "building_type"/Int16ul "selected"/Byte Padding(1) "unit_type"/Int16ul "queue_amount"/Byte Padding(1) Array(<lambda>ctx:ctx.selected "building_ids"/Int32ul))<line_sep>"""DE Attack Move It's almost the same as Patrol. 10 X-coordinates followed by 10 Y-coordinates First of each is popped off for consistency with other actions """<line_sep>de_attackmove="de_attackmove"/Struct("selected"/Byte "waypoints"/Int16ul "x"/Float32l Array(9 "x_more"/Float32l) "y"/Float32l Array(9 "y_more"/Float32l) Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul) )<line_sep>postgame="achievements"/Struct(Padding(3) "scenario_filename"/String(32 padchar=b'\x00' trimdir='right' encoding='latin1') "player_num"/Byte "computer_num"/Byte Padding(2) Peek("duration_int"/Int32ul) TimeSecAdapter("duration"/Int32ul) "cheats"/Flag "complete"/Flag Padding(2) "db_checksum"/Int32ul "code_checksum"/Int32ul "version"/Float32l "map_size"/Byte "map_id"/Byte "population"/Int16ul Peek("victory_type_id"/Byte) VictoryEnum("victory_type"/Byte) Peek("starting_age_id"/Byte) AgeEnum("starting_age"/Byte) Peek("starting_resources_id"/Byte) ResourceLevelEnum("starting_resources"/Byte) "all_techs"/Flag "random_positions"/Flag RevealMapEnum("reveal_map"/Byte) "is_deathmatch"/Flag "is_regicide"/Flag "starting_units"/Byte "lock_teams"/Flag "lock_speed"/Flag Padding(1) Array(<lambda>ctx:ctx.player_num achievements) Padding(4) Array(<lambda>ctx:(8-ctx.player_num)<times>63 Padding(4)) )<line_sep>de_autoscout="de_autoscout"/Struct("selected"/Byte Array(<lambda>ctx:ctx.selected "unit_ids"/Int32ul))<line_sep>
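The patrol and de_attackmove actions above store their waypoints as one x/y pair plus nine x_more/y_more slots, and their docstrings note that the first of each is popped off for consistency with other actions, so only the first `waypoints` entries are meaningful and a consumer has to stitch the two halves back together. A minimal sketch of that reassembly, assuming a parsed action container with the fields defined above (the helper name is hypothetical):

def patrol_waypoints(action):
    # Rebuild (x, y) pairs from the split coordinate arrays and keep only
    # the entries the action actually declares as waypoints.
    xs = [action.x] + list(action.x_more)
    ys = [action.y] + list(action.y_more)
    return list(zip(xs, ys))[:action.waypoints]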
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>pytest<import_from_stmt>data_validation state_manager<line_sep>GCS_STATE_PATH="gs://pso-kokoro-resources/state/"<line_sep>TEST_CONN_NAME="example"<line_sep>TEST_CONN={"source_type":"BigQuery" "project_id":"my-project" }<def_stmt>test_get_gcs_file_path <block_start>manager=state_manager.StateManager(GCS_STATE_PATH)<line_sep>result_path=manager._get_gcs_file_path(GCS_STATE_PATH+"file/path/name.json")<assert_stmt>result_path<eq>"state/file/path/name.json"<block_end><def_stmt>test_gcs_create_and_get_connection_config <block_start>manager=state_manager.StateManager(GCS_STATE_PATH)<line_sep>manager.create_connection(TEST_CONN_NAME TEST_CONN)<line_sep>config=manager.get_connection_config(TEST_CONN_NAME)<assert_stmt>config<eq>TEST_CONN<block_end><def_stmt>test_list_connections <block_start>manager=state_manager.StateManager(GCS_STATE_PATH)<line_sep>expected=set(["example" "my_bq_conn"])<line_sep>connections=manager.list_connections()<assert_stmt>set(connections)<eq>expected<block_end><def_stmt>test_create_invalid_gcs_path_raises # Unknown file paths will be created by the state manager <block_start>files_directory="gs://!!bucket!!/this/path/"<with_stmt>pytest.raises(ValueError match=r"GCS Path Failure .*")<block_start>state_manager.StateManager(files_directory)<block_end><block_end>
<import_stmt>logging<import_stmt>logging.handlers<import_stmt>socket os threading<import_from_stmt>core.const.Do Do<import_from_stmt>core.const.GlobalConst ExecStatus<import_from_stmt>core.tools.DBTool DBTool<import_from_stmt>core.tools.TypeTool TypeTool<import_from_stmt>runfunc.initial init_logging<import_from_stmt>runfunc.ui_threadProcess *<import_from_stmt>core.config.InitConfig *<import_stmt>socket traceback os<import_from_stmt>core.tools.CommonFunc *<import_from_stmt>allmodels_ui.UITestTask UITestTask<import_from_stmt>threads.UITaskThread UITaskThread<import_from_stmt>copy deepcopy<line_sep>########################################################################################################## ########################################################################################################## ########################################################################################################## uiTaskQueue=[]<line_sep>uiTaskCancelQueue=[]<line_sep>maxUITaskThreadNums=7# maximum number of UI test tasks that can run in parallel maxWebThreadNums=5<line_sep>maxAndroidThreadNums=3<line_sep>maxIosThreadNums=4<line_sep>executingWebThreadNum=0# number of UI test tasks currently executing executingIosThreadNum=0# number of UI test tasks currently executing executingAndroidThreadNum=0# number of UI test tasks currently executing executingUITaskThreadNum=0# number of UI test tasks currently executing executintUITaskThreadDict={}# dict of currently alive threads executintWebThreadDict={}# dict of currently alive threads executintIosThreadDict={}# dict of currently alive threads executintAndroidThreadDict={}# dict of currently alive threads ########################################################################################################## ########################################################################################################## ########################################################################################################## <def_stmt>init_ui_task_queue <block_start>db=DBTool().initGlobalDBConf()<line_sep># DONE: initialize taskrun {'do': 3, 'TaskExecuteId': '1'} colsStr="id"<line_sep>tbName="tb_ui_test_execute"<line_sep>whereStr="execStatus = %d or execStatus = %d "%(ExecStatus.NOTRUN ExecStatus.RUNNING)<line_sep>orderBy="addTime asc"<line_sep>sql="select %s from %s where %s order by %s"%(colsStr tbName whereStr orderBy)<line_sep>res=db.execute_sql(sql)<line_sep># reset all emulators mobileServer=db.execute_sql("UPDATE tb_ui_mobile_server SET STATUS = 0")<line_sep>db.release()<line_sep>logging.debug("init_ui_task_queue: result of initializing tb_ui_test_execute: %s"%str(res))<if_stmt>res<eq><false><block_start>logging.error("init_ui_task_queue: failed to initialize the task execution queue!")<line_sep><return><false><block_end><if_stmt>mobileServer<eq><false><block_start>logging.error("init_ui_task_queue: failed to reset emulator status!")<line_sep><return><false><block_end><for_stmt>tRes res<block_start>tmpData={}<line_sep>tmpData[Do.KEY_DO]=Do.TYPE_UITASK_EXECUTE<line_sep>tmpData[Do.KEY_UITASK_EXEC_ID]=tRes['id']<line_sep>uiTaskQueue.append(tmpData)<line_sep>logging.info("init_ui_task_queue: new data appended to uiTaskQueue: %s. Source table: %s"%(tmpData tbName))<line_sep>logging.info("init_ui_task_queue: uiTaskQueue:%s"%uiTaskQueue)<block_end>logging.info("init_ui_task_queue: task execution table initialized, uiTaskQueue: %s"%uiTaskQueue)<block_end><def_stmt>init_cancel_ui_task_queue <block_start>db=DBTool().initGlobalDBConf()<line_sep># DONE: initialize taskrun {'do': 3, 'TaskExecuteId': '1'} colsStr="id"<line_sep>tbName="tb_ui_test_execute"<line_sep>whereStr="execStatus = %d "%(ExecStatus.CANCELING)<line_sep>orderBy="addTime asc"<line_sep>sql="select %s from %s where %s order by %s"%(colsStr tbName whereStr orderBy)<line_sep>res=db.execute_sql(sql)<line_sep>db.release()<line_sep>logging.debug("init_cancel_ui_task_queue: result of initializing tb_ui_test_execute: %s"%str(res))<if_stmt>res<eq><false><block_start>logging.error("init_cancel_ui_task_queue: failed to initialize the task execution queue!")<line_sep><return><false><block_end><for_stmt>tRes res<block_start>tmpData={}<line_sep>tmpData[Do.KEY_DO]=Do.TYPE_UITASK_CANCEL<line_sep>tmpData[Do.KEY_UITASK_EXEC_ID]=tRes['id']<line_sep>uiTaskCancelQueue.append(tmpData)<line_sep>logging.info("init_cancel_ui_task_queue: new data appended to uiTaskCancelQueue: %s. Source table: %s"%(tmpData tbName))<line_sep>logging.info("init_cancel_ui_task_queue: uiTaskCancelQueue:%s"%uiTaskCancelQueue)<block_end>logging.info("init_cancel_ui_task_queue: task execution table initialized, uiTaskCancelQueue: %s"%uiTaskCancelQueue)<block_end>
<import_from_stmt>.. usbhid<line_sep>profile={"name":"SteelSeries Rival 3 Wireless" "models":[{"name":"SteelSeries Rival 3 Wireless (2.4 GHz mode)" "vendor_id":0x1038 "product_id":0x1830 "endpoint":3 } ] "settings":{"sensitivity":{"label":"Sensibility presets" "description":"Set sensitivity preset (DPI)" "cli":["-s" "--sensitivity"] "report_type":usbhid.HID_REPORT_TYPE_OUTPUT "command":[0x20] "value_type":"multidpi_range" "input_range":[100 18000 100] "output_range":[0x00 0xD6 1.2] "dpi_length_byte":2 "first_preset":1 "count_mode":"number" "max_preset_count":5 "default":"400, 800, 1200, 2400, 3200" } "polling_rate":{"label":"Polling rate" "description":"Set polling rate (Hz)" "cli":["-p" "--polling-rate"] "report_type":usbhid.HID_REPORT_TYPE_OUTPUT "command":[0x17] "value_type":"choice" "choices":{125:0x03 250:0x02 500:0x01 1000:0x00 } "default":1000 } "buttons_mapping":{"label":"Buttons mapping" "description":"Set the mapping of the buttons" "cli":["-b" "--buttons"] "report_type":usbhid.HID_REPORT_TYPE_OUTPUT "command":[0x19] "value_type":"buttons" # fmt: off "buttons":{"Button1":{"id":0x01 "offset":0x00 "default":"button1"} "Button2":{"id":0x02 "offset":0x05 "default":"button2"} "Button3":{"id":0x03 "offset":0x0A "default":"button3"} "Button4":{"id":0x04 "offset":0x0F "default":"button4"} "Button5":{"id":0x05 "offset":0x14 "default":"button5"} "Button6":{"id":0x06 "offset":0x19 "default":"dpi"} } "button_field_length":5 "button_disable":0x00 "button_keyboard":0x51 "button_multimedia":0x61 "button_dpi_switch":0x30 "button_scroll_up":0x31 "button_scroll_down":0x32 # fmt: on "default":"buttons(button1=button1; button2=button2; button3=button3; button4=button4; button5=button5; button6=dpi)" } } "battery_level":{"report_type":usbhid.HID_REPORT_TYPE_OUTPUT "command":[0xAA 0x01] "response_length":3 "is_charging":<lambda>data:bool(data[2]) "level":<lambda>data:int(data[0]) } "save_command":{"report_type":usbhid.HID_REPORT_TYPE_OUTPUT "command":[0x09] } }<line_sep>
<import_from_future_stmt> print_function<import_stmt>re<import_stmt>os<line_sep>'''CamelCase -> CamelCase m_fieldOfView -> Field Of View '''<def_stmt>CamelCaseToReadable string# http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case <block_start><if_stmt>string.startswith('m_')<block_start>string=string[2:]<block_end><if_stmt>string[0].islower()<block_start>string=string[0].upper()+string[1:]<block_end><return>re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))' r' \1' string)<block_end><def_stmt>unittest_CamelCaseToReadable <block_start><for_stmt>s ("m_fieldOfView" "CamelCase" "Camel2Camel2Case" "getHTTPResponseCode" "get2HTTPResponse123Code" "HTTPResponseCodeXYZ")<block_start>print(s '==>' CamelCaseToReadable(s))<block_end><block_end><def_stmt>UpdateFile out_path content<block_start>need_update=<true><line_sep>directory=os.path.dirname(out_path)<if_stmt><not>os.path.exists(directory)<block_start>os.makedirs(directory)<block_end><if_stmt>os.path.exists(out_path)<block_start><with_stmt>open(out_path)<as>f<block_start>old_content=f.read()<line_sep>need_update=(content<ne>old_content)<block_end><block_end><if_stmt>need_update<block_start>print("update" out_path)<with_stmt>open(out_path 'w')<as>f<block_start>f.write(content)<block_end><block_end><else_stmt><block_start>print("no update" out_path)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest_CamelCaseToReadable()<block_end>
# coding: utf-8 """ This tests the use of a view coming from installed package. """<import_from_stmt>flask Flask jsonify<import_from_stmt>flasgger Swagger<import_from_stmt>flasgger_package package_view<line_sep>app=Flask(__name__)<line_sep>swag=Swagger(app)<line_sep>app.add_url_rule('/v1/decorated/<username>' view_func=package_view)<line_sep>@app.route('/v2/decorated/<username>')<def_stmt>package_view_2 username<block_start>""" This is the summary defined in yaml file First line is the summary All following lines until the hyphens is added to description the format of the first lines until 3 hyphens will be not yaml compliant but everything below the 3 hyphens should be. --- tags: - users import: "flasgger_package/parameters.yml" responses: 200: description: A single user item schema: id: rec_username properties: username: type: string description: The name of the user default: 'steve-harris 2' """<line_sep><return>jsonify({'username':username})<block_end><def_stmt>test_swag client specs_data<block_start>""" This test is runs automatically in Travis CI :param client: Flask app test client :param specs_data: {'url': {swag_specs}} for every spec in app """<for_stmt>url,spec specs_data.items()<block_start><assert_stmt>'rec_username'<in>spec['definitions']<assert_stmt>'users'<in>spec['paths']['/v1/decorated/{username}']['get']['tags']<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(debug=<true>)<block_end>
# Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP # See LICENSE.md # <import_from_stmt>.formula_interpreter get_dependencies_from_parse_tree get_python_formula_from_parse_tree <import_from_stmt>.parser FormulaError parser<class_stmt>Undefined(object)<block_start><def_stmt>__repr__ self<block_start><return>"<undefined>"<block_end><block_end>undefined=Undefined()<class_stmt>Cell(object)<block_start><def_stmt>__init__ self<block_start>self.clear()<block_end><def_stmt>_set_formula self value<block_start>self._python_formula=<none><if_stmt>value<is><none><block_start>self._formula=<none><block_end><elif_stmt>type(value)<eq>str<or>type(value)<eq>unicode<block_start>self._formula=value<if_stmt>value.startswith('=')<block_start><try_stmt><block_start>parsed_formula=parser.parse(value)<line_sep>self.dependencies=get_dependencies_from_parse_tree(parsed_formula)<line_sep>self._python_formula=get_python_formula_from_parse_tree(parsed_formula)<block_end><except_stmt>FormulaError e<block_start>self.dependencies=[]<line_sep>self._python_formula='_raise(FormulaError("{}"))'.format(e)<block_end><block_end><block_end><else_stmt><block_start><raise>TypeError('cell formula must be str or unicode')<block_end><block_end><def_stmt>_get_formula self<block_start><return>self._formula<block_end>formula=property(_get_formula _set_formula)<def_stmt>_set_python_formula self value<block_start><if_stmt>type(value)<eq>str<or>type(value)<eq>unicode<block_start>self._python_formula=value<block_end><else_stmt><block_start><raise>TypeError('cell python_formula must be str or unicode')<block_end><block_end><def_stmt>_get_python_formula self<block_start><return>self._python_formula<block_end>python_formula=property(_get_python_formula _set_python_formula)<def_stmt>_set_value self value<block_start>self._value=value<if_stmt>value<is>undefined<block_start>self._set_formatted_value(u'')<block_end><else_stmt><block_start>self._set_formatted_value(unicode(value))<block_end><block_end><def_stmt>_get_value self<block_start><return>self._value<block_end>value=property(_get_value _set_value)<def_stmt>clear_value self<block_start>self._value=undefined<block_end><def_stmt>_set_formatted_value self value<block_start><if_stmt>value<is><none><block_start>self._formatted_value=u''<block_end><elif_stmt>type(value)<eq>str<or>type(value)<eq>unicode<block_start>self._formatted_value=value<block_end><else_stmt><block_start><raise>TypeError('cell formatted_value must be str or unicode')<block_end><block_end><def_stmt>_get_formatted_value self<block_start><return>self._formatted_value<block_end>formatted_value=property(_get_formatted_value _set_formatted_value)<def_stmt>clear self<block_start>self._value=undefined<line_sep>self._formula=<none><line_sep>self._python_formula=<none><line_sep>self.dependencies=[]<line_sep>self._formatted_value=u''<line_sep>self.error=<none><block_end><def_stmt>__repr__ self<block_start>error=""<if_stmt>self.error<block_start>error=" error=%r"%(self.error )<block_end><return>'<Cell formula=%s value=%r formatted_value=%r%s>'%(self.formula self._value self.formatted_value error)<block_end><def_stmt>__eq__ self other<block_start><return>(isinstance(other Cell)<and>self._formula<eq>other.formula<and>self._value<eq>other.value<and>self._formatted_value<eq>other.formatted_value<and>self.error<eq>other.error)<block_end><def_stmt>__ne__ self other<block_start><return><not>self.__eq__(other)<block_end><block_end>
<import_stmt>sys<import_stmt>time<import_stmt>subprocess<import_stmt>math<import_from_stmt>threading Thread<import_from_stmt>collections OrderedDict deque<import_from_stmt>ds4drv.actions ActionRegistry<import_from_stmt>ds4drv.backends BluetoothBackend HidrawBackend<import_from_stmt>ds4drv.config load_options<import_from_stmt>ds4drv.daemon Daemon<import_from_stmt>ds4drv.eventloop EventLoop<import_from_stmt>ds4drv.exceptions BackendError<import_from_stmt>ds4drv.action ReportAction<import_from_stmt>ds4drv.__main__ create_controller_thread<class_stmt>ActionShim(ReportAction)<block_start>""" intercepts the joystick report"""<def_stmt>__init__ self *args **kwargs<block_start>super(ActionShim self).__init__(*args **kwargs)<line_sep>self.timer=self.create_timer(0.02 self.intercept)<line_sep>self.values=<none><line_sep>self.timestamps=deque(range(10) maxlen=10)<block_end><def_stmt>enable self<block_start>self.timer.start()<block_end><def_stmt>disable self<block_start>self.timer.stop()<line_sep>self.values=<none><block_end><def_stmt>load_options self options<block_start><pass><block_end><def_stmt>deadzones self values<block_start>deadzone=0.14<if_stmt>math.sqrt(values['left_analog_x']<power>2+values['left_analog_y']<power>2)<l>deadzone<block_start>values['left_analog_y']=0.0<line_sep>values['left_analog_x']=0.0<block_end><if_stmt>math.sqrt(values['right_analog_x']<power>2+values['right_analog_y']<power>2)<l>deadzone<block_start>values['right_analog_y']=0.0<line_sep>values['right_analog_x']=0.0<block_end><return>values<block_end><def_stmt>intercept self report<block_start>new_out=OrderedDict()<for_stmt>key report.__slots__<block_start>value=getattr(report key)<line_sep>new_out[key]=value<block_end><for_stmt>key ["left_analog_x" "left_analog_y" "right_analog_x" "right_analog_y" "l2_analog" "r2_analog"]<block_start>new_out[key]=2<times>(new_out[key]/255)-1<block_end>new_out=self.deadzones(new_out)<line_sep>self.timestamps.append(new_out['timestamp'])<if_stmt>len(set(self.timestamps))<le>1<block_start>self.values=<none><block_end><else_stmt><block_start>self.values=new_out<block_end><return><true><block_end><block_end><class_stmt>Joystick<block_start><def_stmt>__init__ self<block_start>self.thread=<none><line_sep>options=load_options()<if_stmt>options.hidraw<block_start><raise>ValueError("HID mode not supported")<line_sep>backend=HidrawBackend(Daemon.logger)<block_end><else_stmt><block_start>subprocess.run(["hciconfig" "hciX" "up"])<line_sep>backend=BluetoothBackend(Daemon.logger)<block_end>backend.setup()<line_sep>self.thread=create_controller_thread(1 options.controllers[0])<line_sep>self.thread.controller.setup_device(next(backend.devices))<line_sep>self.shim=ActionShim(self.thread.controller)<line_sep>self.thread.controller.actions.append(self.shim)<line_sep>self.shim.enable()<line_sep>self._color=(<none> <none> <none>)<line_sep>self._rumble=(<none> <none>)<line_sep>self._flash=(<none> <none>)<line_sep># ensure we get a value before returning <while_stmt>self.shim.values<is><none><block_start><pass><block_end><block_end><def_stmt>close self<block_start><if_stmt>self.thread<is><none><block_start><return><block_end>self.thread.controller.exit("Cleaning up...")<line_sep>self.thread.controller.loop.stop()<block_end><def_stmt>__del__ self<block_start>self.close()<block_end>@staticmethod<def_stmt>map val in_min in_max out_min out_max<block_start>""" helper static method that helps with rescaling 
"""<line_sep>in_span=in_max-in_min<line_sep>out_span=out_max-out_min<line_sep>value_scaled=float(val-in_min)/float(in_span)<line_sep>value_mapped=(value_scaled<times>out_span)+out_min<if_stmt>value_mapped<l>out_min<block_start>value_mapped=out_min<block_end><if_stmt>value_mapped<g>out_max<block_start>value_mapped=out_max<block_end><return>value_mapped<block_end><def_stmt>get_input self<block_start>""" returns ordered dict with state of all inputs """<if_stmt>self.thread.controller.error<block_start><raise>IOError("Encountered error with controller")<block_end><if_stmt>self.shim.values<is><none><block_start><raise>TimeoutError("Joystick hasn't updated values in last 200ms")<block_end><return>self.shim.values<block_end><def_stmt>led_color self red=0 green=0 blue=0<block_start>""" set RGB color in range 0-255"""<line_sep>color=(int(red) int(green) int(blue))<if_stmt>(self._color<eq>color)<block_start><return><block_end>self._color=color<line_sep>self.thread.controller.device.set_led(*self._color)<block_end><def_stmt>rumble self small=0 big=0<block_start>""" rumble in range 0-255 """<line_sep>rumble=(int(small) int(big))<if_stmt>(self._rumble<eq>rumble)<block_start><return><block_end>self._rumble=rumble<line_sep>self.thread.controller.device.rumble(*self._rumble)<block_end><def_stmt>led_flash self on=0 off=0<block_start>""" flash led: on and off times in range 0 - 255 """<line_sep>flash=(int(on) int(off))<if_stmt>(self._flash<eq>flash)<block_start><return><block_end>self._flash=flash<if_stmt>(self._flash<eq>(0 0))<block_start>self.thread.controller.device.stop_led_flash()<block_end><else_stmt><block_start>self.thread.controller.device.start_led_flash(*self._flash)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>j=Joystick()<while_stmt>1<block_start><for_stmt>key,value j.get_input().items()<block_start>print(key value)<block_end>print()<line_sep>time.sleep(0.1)<block_end><block_end>
<import_from_stmt>monitorrent.plugins.trackers TrackerSettings<class_stmt>TrackerSettingsMock(TrackerSettings)<block_start><def_stmt>get_requests_kwargs self<block_start>result=super(TrackerSettingsMock self).get_requests_kwargs()<line_sep>result.pop('timeout')<line_sep>result['verify']=<false><line_sep><return>result<block_end><block_end>
<import_from_stmt>django.conf settings<import_stmt>requests<import_stmt>json<import_from_stmt>mailchimp3 MailChimp<def_stmt>subscribe_user_to_mailer profile<block_start>status=<true><if_stmt><not>settings.MAILERLITE_API_KEY<block_start><return><block_end>content=json.dumps({"email":profile.user.email})<line_sep>headers={"content-type":"application/json" "x-mailerlite-apikey":settings.MAILERLITE_API_KEY }<try_stmt><block_start>req=requests.post("https://api.mailerlite.com/api/v2/subscribers" data=content headers=headers )<block_end><except_stmt># TODO specify which errors can be raised at this point <block_start>status=<false><block_end><return>status<block_end><def_stmt>subscribe_user_to_chimp profile<block_start>status=<true><line_sep>configs=(settings.MAILCHIMP_API_KEY settings.MAILCHIMP_USERNAME settings.MAILCHIMP_LIST_KEY )<if_stmt><not>all(configs)<block_start><return><false><block_end><try_stmt><block_start>client=MailChimp(settings.MAILCHIMP_API_KEY settings.MAILCHIMP_USERNAME)<line_sep>client.lists.members.create(settings.MAILCHIMP_LIST_KEY {"status":"subscribed" "email_address":profile.user.email} )<block_end><except_stmt># TODO specify which errors can be raised at this point <block_start>status=<false><block_end><return>status<block_end>
<import_from_stmt>click.testing CliRunner<import_from_stmt>hatch.cli hatch<import_from_stmt>hatch.settings SETTINGS_FILE copy_default_settings load_settings save_settings <import_from_stmt>hatch.utils temp_chdir temp_move_path<def_stmt>test_show_location <block_start><with_stmt>temp_chdir()<block_start>runner=CliRunner()<line_sep>result=runner.invoke(hatch ['config'])<assert_stmt>result.exit_code<eq>0<assert_stmt>'Settings location: '<in>result.output<assert_stmt>'settings.json'<in>result.output<block_end><block_end><def_stmt>test_restore <block_start><with_stmt>temp_chdir()<as>d<block_start>runner=CliRunner()<with_stmt>temp_move_path(SETTINGS_FILE d)<block_start>result=runner.invoke(hatch ['config' '--restore'])<assert_stmt>result.exit_code<eq>0<assert_stmt>'Settings were successfully restored.'<in>result.output<assert_stmt>load_settings()<eq>copy_default_settings()<block_end><block_end><block_end><def_stmt>test_update <block_start><with_stmt>temp_chdir()<as>d<block_start>runner=CliRunner()<with_stmt>temp_move_path(SETTINGS_FILE d)<block_start>new_settings=copy_default_settings()<line_sep>new_settings.pop('email')<line_sep>new_settings['new setting']=''<line_sep>save_settings(new_settings)<assert_stmt>load_settings()<eq>new_settings<line_sep>result=runner.invoke(hatch ['config' '-u'])<line_sep>updated_settings=load_settings()<assert_stmt>result.exit_code<eq>0<assert_stmt>'Settings were successfully updated.'<in>result.output<assert_stmt>'email'<in>updated_settings<assert_stmt>'new setting'<in>updated_settings<block_end><block_end><block_end><def_stmt>test_update_config_not_exist <block_start><with_stmt>temp_chdir()<as>d<block_start>runner=CliRunner()<with_stmt>temp_move_path(SETTINGS_FILE d)<block_start>result=runner.invoke(hatch ['config' '-u'])<assert_stmt>result.exit_code<eq>0<assert_stmt>'Settings were successfully restored.'<in>result.output<assert_stmt>load_settings()<eq>copy_default_settings()<block_end><block_end><block_end>
""" Test ants_image.py nptest.assert_allclose self.assertEqual self.assertTrue """<import_stmt>os<import_stmt>unittest<import_from_stmt>common run_tests<import_from_stmt>tempfile mktemp<import_stmt>numpy<as>np<import_stmt>nibabel<as>nib<import_stmt>numpy.testing<as>nptest<import_stmt>ants<class_stmt>TestModule_ants_image_io(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>img2d=ants.image_read(ants.get_ants_data('r16')).clone('float')<line_sep>img3d=ants.image_read(ants.get_ants_data('mni')).clone('float')<line_sep>arr2d=np.random.randn(69 70).astype('float32')<line_sep>arr3d=np.random.randn(69 70 71).astype('float32')<line_sep>vecimg2d=ants.from_numpy(np.random.randn(69 70 4) has_components=<true>)<line_sep>vecimg3d=ants.from_numpy(np.random.randn(69 70 71 2) has_components=<true>)<line_sep>self.imgs=[img2d img3d]<line_sep>self.arrs=[arr2d arr3d]<line_sep>self.vecimgs=[vecimg2d vecimg3d]<line_sep>self.pixeltypes=['unsigned char' 'unsigned int' 'float']<block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_from_numpy self<block_start>self.setUp()<line_sep># no physical space info <for_stmt>arr self.arrs<block_start>img=ants.from_numpy(arr)<line_sep>self.assertTrue(img.dimension arr.ndim)<line_sep>self.assertTrue(img.shape arr.shape)<line_sep>self.assertTrue(img.dtype arr.dtype.name)<line_sep>nptest.assert_allclose(img.numpy() arr)<line_sep>new_origin=tuple([6.9]<times>arr.ndim)<line_sep>new_spacing=tuple([3.6]<times>arr.ndim)<line_sep>new_direction=np.eye(arr.ndim)<times>9.6<line_sep>img2=ants.from_numpy(arr origin=new_origin spacing=new_spacing direction=new_direction)<line_sep>self.assertEqual(img2.origin new_origin)<line_sep>self.assertEqual(img2.spacing new_spacing)<line_sep>nptest.assert_allclose(img2.direction new_direction)<block_end># test with components arr2d_components=np.random.randn(69 70 4).astype('float32')<line_sep>img=ants.from_numpy(arr2d_components has_components=<true>)<line_sep>self.assertEqual(img.components arr2d_components.shape[-1])<line_sep>nptest.assert_allclose(arr2d_components img.numpy())<block_end><def_stmt>test_make_image self<block_start>self.setUp()<for_stmt>arr self.arrs<block_start>voxval=6.<line_sep>img=ants.make_image(arr.shape voxval=voxval)<line_sep>self.assertTrue(img.dimension arr.ndim)<line_sep>self.assertTrue(img.shape arr.shape)<line_sep>nptest.assert_allclose(img.mean() voxval)<line_sep>new_origin=tuple([6.9]<times>arr.ndim)<line_sep>new_spacing=tuple([3.6]<times>arr.ndim)<line_sep>new_direction=np.eye(arr.ndim)<times>9.6<line_sep>img2=ants.make_image(arr.shape voxval=voxval origin=new_origin spacing=new_spacing direction=new_direction)<line_sep>self.assertTrue(img2.dimension arr.ndim)<line_sep>self.assertTrue(img2.shape arr.shape)<line_sep>nptest.assert_allclose(img2.mean() voxval)<line_sep>self.assertEqual(img2.origin new_origin)<line_sep>self.assertEqual(img2.spacing new_spacing)<line_sep>nptest.assert_allclose(img2.direction new_direction)<for_stmt>ptype self.pixeltypes<block_start>img=ants.make_image(arr.shape voxval=1. 
pixeltype=ptype)<line_sep>self.assertEqual(img.pixeltype ptype)<block_end><block_end># test with components img=ants.make_image((69 70 4) has_components=<true>)<line_sep>self.assertEqual(img.components 4)<line_sep>self.assertEqual(img.dimension 2)<line_sep>nptest.assert_allclose(img.mean() 0.)<line_sep>img=ants.make_image((69 70 71 4) has_components=<true>)<line_sep>self.assertEqual(img.components 4)<line_sep>self.assertEqual(img.dimension 3)<line_sep>nptest.assert_allclose(img.mean() 0.)<line_sep># set from image <for_stmt>img self.imgs<block_start>mask=ants.image_clone(img<g>img.mean() pixeltype='float')<line_sep>arr=img[mask]<line_sep>img2=ants.make_image(mask voxval=arr)<line_sep>nptest.assert_allclose(img2.numpy() (img<times>mask).numpy())<line_sep>self.assertTrue(ants.image_physical_space_consistency(img2 mask))<line_sep># set with arr.ndim > 1 img2=ants.make_image(mask voxval=np.expand_dims(arr -1))<line_sep>nptest.assert_allclose(img2.numpy() (img<times>mask).numpy())<line_sep>self.assertTrue(ants.image_physical_space_consistency(img2 mask))<line_sep>#with self.assertRaises(Exception): # # wrong number of non-zero voxels # img3 = ants.make_image(img, voxval=arr) <block_end><block_end><def_stmt>test_matrix_to_images self# def matrix_to_images(data_matrix, mask): <block_start><for_stmt>img self.imgs<block_start>imgmask=ants.image_clone(img<g>img.mean() pixeltype='float')<line_sep>data=img[imgmask]<line_sep>dataflat=data.reshape(1 -1)<line_sep>mat=np.vstack([dataflat dataflat]).astype('float32')<line_sep>imglist=ants.matrix_to_images(mat imgmask)<line_sep>nptest.assert_allclose((img<times>imgmask).numpy() imglist[0].numpy())<line_sep>nptest.assert_allclose((img<times>imgmask).numpy() imglist[1].numpy())<line_sep>self.assertTrue(ants.image_physical_space_consistency(img imglist[0]))<line_sep>self.assertTrue(ants.image_physical_space_consistency(img imglist[1]))<line_sep># go back to matrix mat2=ants.images_to_matrix(imglist imgmask)<line_sep>nptest.assert_allclose(mat mat2)<line_sep># test with matrix.ndim > 2 img=img.clone()<line_sep>img.set_direction(img.direction<times>2)<line_sep>imgmask=ants.image_clone(img<g>img.mean() pixeltype='float')<line_sep>arr=(img<times>imgmask).numpy()<line_sep>arr=arr[arr<ge>0.5]<line_sep>arr2=arr.copy()<line_sep>mat=np.stack([arr arr2])<line_sep>imglist=ants.matrix_to_images(mat imgmask)<for_stmt>im imglist<block_start>self.assertTrue(ants.allclose(im imgmask<times>img))<line_sep>self.assertTrue(ants.image_physical_space_consistency(im imgmask))<block_end># test for wrong number of voxels #with self.assertRaises(Exception): # arr = (img*imgmask).numpy() # arr = arr[arr>0.5] # arr2 = arr.copy() # mat = np.stack([arr,arr2]) # imglist = ants.matrix_to_images(mat, img) <block_end><block_end><def_stmt>test_images_to_matrix self# def images_to_matrix(image_list, mask=None, sigma=None, epsilon=0): <block_start><for_stmt>img self.imgs<block_start>mask=ants.image_clone(img<g>img.mean() pixeltype='float')<line_sep>imglist=[img.clone() img.clone() img.clone()]<line_sep>imgmat=ants.images_to_matrix(imglist mask=mask)<line_sep>self.assertTrue(imgmat.shape[0]<eq>len(imglist))<line_sep>self.assertTrue(imgmat.shape[1]<eq>(mask<g>0).sum())<line_sep># go back to images imglist2=ants.matrix_to_images(imgmat mask)<for_stmt>i1,i2 zip(imglist imglist2)<block_start>self.assertTrue(ants.image_physical_space_consistency(i1 i2))<line_sep>nptest.assert_allclose(i1.numpy()<times>mask.numpy() i2.numpy())<block_end><if_stmt>img.dimension<eq>2# with sigma 
<block_start>mask=ants.image_clone(img<g>img.mean() pixeltype='float')<line_sep>imglist=[img.clone() img.clone() img.clone()]<line_sep>imgmat=ants.images_to_matrix(imglist mask=mask sigma=2.)<line_sep># with no mask mask=ants.image_clone(img<g>img.mean() pixeltype='float')<line_sep>imglist=[img.clone() img.clone() img.clone()]<line_sep>imgmat=ants.images_to_matrix(imglist)<line_sep># with mask of different shape s=[65]<times>img.dimension<line_sep>mask2=ants.from_numpy(np.random.randn(*s))<line_sep>mask2=mask2<g>mask2.mean()<line_sep>imgmat=ants.images_to_matrix(imglist mask=mask2)<block_end><block_end><block_end><def_stmt>test_image_header_info self# def image_header_info(filename): <block_start><for_stmt>img self.imgs<block_start>img.set_spacing([6.9]<times>img.dimension)<line_sep>img.set_origin([3.6]<times>img.dimension)<line_sep>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_write(img tmpfile)<line_sep>info=ants.image_header_info(tmpfile)<line_sep>self.assertEqual(info['dimensions'] img.shape)<line_sep>nptest.assert_allclose(info['direction'] img.direction)<line_sep>self.assertEqual(info['nComponents'] img.components)<line_sep>self.assertEqual(info['nDimensions'] img.dimension)<line_sep>self.assertEqual(info['origin'] img.origin)<line_sep>self.assertEqual(info['pixeltype'] img.pixeltype)<line_sep>self.assertEqual(info['pixelclass'] 'vector'<if>img.has_components<else>'scalar')<line_sep>self.assertEqual(info['spacing'] img.spacing)<try_stmt><block_start>os.remove(tmpfile)<block_end><except_stmt><block_start><pass><block_end><block_end># test on vector image img=ants.from_numpy(np.random.randn(69 60 4).astype('float32') has_components=<true>)<line_sep>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_write(img tmpfile)<line_sep>info=ants.image_header_info(tmpfile)<line_sep>self.assertEqual(info['dimensions'] img.shape)<line_sep>nptest.assert_allclose(info['direction'] img.direction)<line_sep>self.assertEqual(info['nComponents'] img.components)<line_sep>self.assertEqual(info['nDimensions'] img.dimension)<line_sep>self.assertEqual(info['origin'] img.origin)<line_sep>self.assertEqual(info['pixeltype'] img.pixeltype)<line_sep>self.assertEqual(info['pixelclass'] 'vector'<if>img.has_components<else>'scalar')<line_sep>self.assertEqual(info['spacing'] img.spacing)<line_sep>img=ants.from_numpy(np.random.randn(69 60 70 2).astype('float32') has_components=<true>)<line_sep>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_write(img tmpfile)<line_sep>info=ants.image_header_info(tmpfile)<line_sep>self.assertEqual(info['dimensions'] img.shape)<line_sep>nptest.assert_allclose(info['direction'] img.direction)<line_sep>self.assertEqual(info['nComponents'] img.components)<line_sep>self.assertEqual(info['nDimensions'] img.dimension)<line_sep>self.assertEqual(info['origin'] img.origin)<line_sep>self.assertEqual(info['pixeltype'] img.pixeltype)<line_sep>self.assertEqual(info['pixelclass'] 'vector'<if>img.has_components<else>'scalar')<line_sep>self.assertEqual(info['spacing'] img.spacing)<line_sep># non-existant file <with_stmt>self.assertRaises(Exception)<block_start>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_header_info(tmpfile)<block_end><block_end><def_stmt>test_image_clone self<block_start><for_stmt>img self.imgs<block_start>img=ants.image_clone(img 'unsigned char')<line_sep>orig_ptype=img.pixeltype<for_stmt>ptype self.pixeltypes<block_start>imgcloned=ants.image_clone(img ptype)<line_sep>self.assertTrue(ants.image_physical_space_consistency(img 
imgcloned))<line_sep>nptest.assert_allclose(img.numpy() imgcloned.numpy())<line_sep>self.assertEqual(imgcloned.pixeltype ptype)<line_sep>self.assertEqual(img.pixeltype orig_ptype)<block_end><block_end><for_stmt>img self.vecimgs<block_start>img=img.clone('unsigned char')<line_sep>orig_ptype=img.pixeltype<for_stmt>ptype self.pixeltypes<block_start>imgcloned=ants.image_clone(img ptype)<line_sep>self.assertTrue(ants.image_physical_space_consistency(img imgcloned))<line_sep>self.assertEqual(imgcloned.components img.components)<line_sep>nptest.assert_allclose(img.numpy() imgcloned.numpy())<line_sep>self.assertEqual(imgcloned.pixeltype ptype)<line_sep>self.assertEqual(img.pixeltype orig_ptype)<block_end><block_end><block_end><def_stmt>test_nibabel self<block_start>fn=ants.get_ants_data('mni')<line_sep>ants_img=ants.image_read(fn)<line_sep>nii_mni=nib.load(fn)<line_sep>ants_mni=ants_img.to_nibabel()<line_sep>self.assertTrue((ants_mni.get_qform()<eq>nii_mni.get_qform()).all())<line_sep>temp=ants.from_nibabel(nii_mni)<line_sep>self.assertTrue(ants.image_physical_space_consistency(ants_img temp))<block_end><def_stmt>test_image_read_write self# def image_read(filename, dimension=None, pixeltype='float'): # def image_write(image, filename): # test scalar images <block_start><for_stmt>img self.imgs<block_start>img=(img-img.min())/(img.max()-img.min())<line_sep>img=img<times>255.<line_sep>img=img.clone('unsigned char')<for_stmt>ptype self.pixeltypes<block_start>img=img.clone(ptype)<line_sep>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_write(img tmpfile)<line_sep>img2=ants.image_read(tmpfile)<line_sep>self.assertTrue(ants.image_physical_space_consistency(img img2))<line_sep>self.assertEqual(img2.components img.components)<line_sep>nptest.assert_allclose(img.numpy() img2.numpy())<block_end># unsupported ptype <with_stmt>self.assertRaises(Exception)<block_start>ants.image_read(tmpfile pixeltype='not-suppoted-ptype')<block_end><block_end># test vector images <for_stmt>img self.vecimgs<block_start>img=(img-img.min())/(img.max()-img.min())<line_sep>img=img<times>255.<line_sep>img=img.clone('unsigned char')<for_stmt>ptype self.pixeltypes<block_start>img=img.clone(ptype)<line_sep>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_write(img tmpfile)<line_sep>img2=ants.image_read(tmpfile)<line_sep>self.assertTrue(ants.image_physical_space_consistency(img img2))<line_sep>self.assertEqual(img2.components img.components)<line_sep>nptest.assert_allclose(img.numpy() img2.numpy())<block_end><block_end># test saving/loading as npy <for_stmt>img self.imgs<block_start>tmpfile=mktemp(suffix='.npy')<line_sep>ants.image_write(img tmpfile)<line_sep>img2=ants.image_read(tmpfile)<line_sep>self.assertTrue(ants.image_physical_space_consistency(img img2))<line_sep>self.assertEqual(img2.components img.components)<line_sep>nptest.assert_allclose(img.numpy() img2.numpy())<line_sep># with no json header arr=img.numpy()<line_sep>tmpfile=mktemp(suffix='.npy')<line_sep>np.save(tmpfile arr)<line_sep>img2=ants.image_read(tmpfile)<line_sep>nptest.assert_allclose(img.numpy() img2.numpy())<block_end># non-existant file <with_stmt>self.assertRaises(Exception)<block_start>tmpfile=mktemp(suffix='.nii.gz')<line_sep>ants.image_read(tmpfile)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>run_tests()<block_end>
<import_from_stmt>datetime datetime<line_sep>navigation_test_docs=[{'description':'Test User' 'extra':{} 'status_code':200 'user':'<EMAIL>' 'session_key':'14f8fb95aece47d8341dc561dfd108df' 'ip_address':'0.0.0.0' 'request_path':'/a/test-domain/reports/' 'view_kwargs':{'domain':'test-domain'} 'doc_type':'NavigationEventAudit' 'headers':{'REQUEST_METHOD':'GET' 'SERVER_PORT':'443' } 'base_type':'AuditEvent' 'user_agent':'Mozilla/5.0 (Windows NT 5.1)' 'event_date':'2021-06-01T00:13:01Z' 'view':'corehq.apps.reports.views.default'} {'description':'Test User' 'extra':{} 'status_code':200 'user':'<EMAIL>' 'session_key':'14f8fb95aece47d8341dc561dfd108df' 'ip_address':'0.0.0.0' 'request_path':'/a/test-domain/reports/' 'view_kwargs':{'domain':'test-domain'} 'doc_type':'NavigationEventAudit' 'headers':{'REQUEST_METHOD':'GET' 'SERVER_PORT':'443' } 'base_type':'AuditEvent' 'user_agent':'Mozilla/5.0 (Windows NT 5.1)' 'event_date':'2021-06-01T01:13:01Z' 'view':'corehq.apps.reports.views.default'} {'description':'Test User' 'extra':{} 'status_code':200 'user':'<EMAIL>' 'session_key':'14f8fb95aece47d8341dc561dfd108df' 'ip_address':'0.0.0.0' 'request_path':'/a/test-domain/reports/' 'view_kwargs':{'domain':'test-domain'} 'doc_type':'NavigationEventAudit' 'headers':{'SERVER_NAME':'www.commcarehq.org' 'HTTP_ACCEPT_LANGUAGE':'en-US,en;q=0.8' 'REQUEST_METHOD':'GET' 'HTTP_ACCEPT_ENCODING':'gzip,deflate,sdch'} 'base_type':'AuditEvent' 'user_agent':'Mozilla/5.0 (Windows NT 5.1)' 'event_date':'2021-06-01T00:01:00Z' 'view':'corehq.apps.reports.views.default'}]<line_sep>audit_test_docs=[{'http_accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' 'doc_type':'AccessAudit' 'description':'Login Success' 'get_data':[] 'access_type':'login' 'base_type':'AuditEvent' 'post_data':[] 'user_agent':'Mozilla/5.0 (Windows NT 6.1; WOW64)' 'failures_since_start':<none> 'event_date':'2021-06-15T04:23:32Z' 'path_info':'/accounts/login/' 'session_key':'sess_key' 'ip_address':'0.0.0.0' 'user':'<EMAIL>' 'headers':{'SERVER_NAME':'www.commcarehq.org' 'HTTP_ACCEPT_LANGUAGE':'en-US,en;q=0.8' 'REQUEST_METHOD':'GET' 'HTTP_ACCEPT_ENCODING':'gzip,deflate,sdch'} } {'access_type':'logout' 'ip_address':'0.0.0.0' 'session_key':'sess_key' 'user_agent':<none> 'get_data':[] 'post_data':[] 'http_accept':<none> 'path_info':<none> 'failures_since_start':<none> 'doc_type':'AccessAudit' 'user':'<EMAIL>' 'base_type':'AuditEvent' 'event_date':'2021-06-24T00:00:00.15Z' 'description':'Logout test' 'headers':{}}]<line_sep>failed_docs=[{'http_accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' 'doc_type':'AccessAudit' 'description':'Login Success' 'get_data':[] 'access_type':'login' 'base_type':'AuditEvent' 'post_data':[] 'user_agent':'Mozilla/5.0 (Windows NT 6.1; WOW64)' 'failures_since_start':<none> 'event_date':'2021-05-15T04:23:32Z' 'path_info':'/accounts/login/' 'session_key':'sess_key' 'ip_address':'0.0.0.0' 'user':'<EMAIL>' } {'description':'Test User' 'extra':{} 'status_code':200 'user':'<EMAIL>' 'session_key':'14f8fb95aece47d8341dc561dfd108df' 'ip_address':'0.0.0.0' 'request_path':'/a/test-domain/reports/' 'view_kwargs':{'domain':'test-domain'} 'doc_type':'NavigationEventAudit' 'headers':{'SERVER_NAME':'www.commcarehq.org' 'HTTP_ACCEPT_LANGUAGE':'en-US,en;q=0.8' 'REQUEST_METHOD':'GET' 'HTTP_ACCEPT_ENCODING':'gzip,deflate,sdch'} 'base_type':'AuditEvent' 'user_agent':'Mozilla/5.0 (Windows NT 5.1)' 'event_date':'2021-05-01T00:01:00Z' 
'view':'corehq.apps.reports.views.default'}]<line_sep>task_docs=[{'doc_type':'NavigationEventAudit' 'user':'<EMAIL>' 'event_date':datetime(2021 1 1).strftime("%Y-%m-%dT%H:%M:%SZ") 'description':'User Name' 'extra':{} 'headers':{'REQUEST_METHOD':'GET' } 'ip_address':'10.1.2.3' 'request_path':'/a/delmar/phone/restore/?version=2.0&since=...' 'session_key':'abc123' 'status_code':200 'view_kwargs':{'domain':'delmar'} 'view':'corehq.apps.ota.views.restore' } {'doc_type':'NavigationEventAudit' 'user':'<EMAIL>' 'event_date':datetime(2021 2 1 2).strftime("%Y-%m-%dT%H:%M:%SZ") 'description':'User Name' 'extra':{} 'headers':{'REQUEST_METHOD':'GET' } 'ip_address':'10.1.2.3' 'request_path':'/a/test-space/phone/restore/?version=2.0&since=...' 'session_key':'abc123' 'status_code':200 'view_kwargs':{'domain':'test-space'} 'view':'corehq.apps.ota.views.restore' } {'doc_type':'NavigationEventAudit' 'user':'<EMAIL>' 'event_date':datetime(2021 2 1 2 1).strftime("%Y-%m-%dT%H:%M:%SZ") 'description':'User Name' 'extra':{} 'headers':{'REQUEST_METHOD':'GET' } 'ip_address':'10.1.2.3' 'request_path':'/a/random/phone/restore/?version=2.0&since=...' 'session_key':'abc123' 'status_code':200 'view_kwargs':{'domain':'random'} 'view':'corehq.apps.ota.views.restore' } {'doc_type':"AccessAudit" 'user':'<EMAIL>' 'event_date':datetime(2021 2 1 3).strftime("%Y-%m-%dT%H:%M:%SZ") 'access_type':'login' 'description':'Login Success' 'failures_since_start':<none> 'get_data':[] 'http_accept':'text/html' 'ip_address':'10.1.3.2' 'path_info':'/a/delmar/login/' 'post_data':[] 'session_key':'abc123' 'user_agent':'Mozilla/5.0' } {'doc_type':'NavigationEventAudit' 'user':'<EMAIL>' 'event_date':datetime(2021 2 2).strftime("%Y-%m-%dT%H:%M:%SZ") 'description':'User Name' 'extra':{} 'headers':{'REQUEST_METHOD':'GET' } 'ip_address':'10.1.2.3' 'request_path':'/a/sandwich/phone/restore/?version=2.0&since=...&db=/etc/passwd\x00' 'session_key':'abc123' 'status_code':200 'view_kwargs':{'domain':'sandwich'} 'view':'corehq.apps.ota.views.restore' }]<line_sep>
<import_stmt>unittest<import_stmt>json<import_stmt>os<import_from_stmt>memcnn.experiment.factory load_experiment_config experiment_config_parser<import_from_stmt>memcnn.config Config<import_stmt>memcnn.config<class_stmt>ConfigTestCase(unittest.TestCase)<block_start><class_stmt>ConfigTest(Config)<block_start>@staticmethod<def_stmt>get_filename <block_start><return>os.path.join(Config.get_dir() "config.json.example")<block_end><block_end><def_stmt>setUp self<block_start>self.config=ConfigTestCase.ConfigTest()<line_sep>self.config_fname=os.path.join(os.path.dirname(__file__) ".." "config.json.example")<line_sep>self.experiments_fname=os.path.join(os.path.dirname(__file__) ".." "experiments.json")<def_stmt>load_json_file fname<block_start><with_stmt>open(fname 'r')<as>f<block_start>data=json.load(f)<block_end><return>data<block_end>self.load_json_file=load_json_file<block_end><def_stmt>test_loading_main_config self<block_start>self.assertTrue(os.path.exists(self.config.get_filename()))<line_sep>data=self.config<line_sep>self.assertTrue(isinstance(data dict))<line_sep>self.assertTrue("data_dir"<in>data)<line_sep>self.assertTrue("results_dir"<in>data)<block_end><def_stmt>test_loading_experiments_config self<block_start>self.assertTrue(os.path.exists(self.experiments_fname))<line_sep>data=self.load_json_file(self.experiments_fname)<line_sep>self.assertTrue(isinstance(data dict))<block_end><def_stmt>test_experiment_configs self<block_start>data=self.load_json_file(self.experiments_fname)<line_sep>config=self.config<line_sep>keys=data.keys()<for_stmt>key keys<block_start>result=load_experiment_config(self.experiments_fname [key])<line_sep>self.assertTrue(isinstance(result dict))<if_stmt>"dataset"<in>result<block_start>experiment_config_parser(result config['data_dir'])<block_end><block_end><block_end><def_stmt>test_config_get_filename self<block_start>self.assertEqual(Config.get_filename() os.path.join(os.path.dirname(memcnn.config.__file__) "config.json"))<block_end><def_stmt>test_config_get_dir self<block_start>self.assertEqual(Config.get_dir() os.path.dirname(memcnn.config.__file__))<block_end><def_stmt>test_verbose self<block_start>ConfigTestCase.ConfigTest(verbose=<true>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<import_stmt>unittest<import_from_stmt>programy.security.authorise.usergroups User<import_from_stmt>programy.security.authorise.usergroups Group<import_from_stmt>programy.security.authorise.usergroups Authorisable<class_stmt>UserGroupTests(unittest.TestCase)<block_start><def_stmt>test_users self<block_start>user=User("keith")<line_sep>self.assertEqual("keith" user.userid)<line_sep>user.roles.append("admin1")<line_sep>self.assertTrue(user.has_role("admin1"))<line_sep>self.assertFalse(user.has_role("adminx"))<line_sep>group=Group("sysadmin")<line_sep>self.assertFalse(group.has_user("keith"))<line_sep>self.assertEqual([] user.groups)<line_sep>user.add_to_group(group)<line_sep>self.assertTrue(group.has_user("keith"))<line_sep>self.assertEqual([group] user.groups)<line_sep>user.add_to_group(group)<line_sep>self.assertTrue(group.has_user("keith"))<line_sep>self.assertEqual([group] user.groups)<block_end><def_stmt>test_groups self<block_start>group=Group("sysadmin")<line_sep>self.assertEqual("sysadmin" group.groupid)<line_sep>self.assertFalse(group.has_role("admin2"))<line_sep>group.roles.append("admin2")<line_sep>self.assertTrue(group.has_role("admin2"))<line_sep>self.assertEqual([] group.users)<line_sep>self.assertFalse(group.has_user("keith"))<line_sep>self.assertFalse(group.has_user("fred"))<line_sep>user=User("keith")<line_sep>group.add_user(user)<line_sep>self.assertEqual([user] group.users)<line_sep>self.assertTrue(group.has_user("keith"))<line_sep>self.assertFalse(group.has_user("fred"))<line_sep>group.add_user(user)<line_sep>self.assertEqual([user] group.users)<block_end><def_stmt>test_users_and_groups self<block_start>user1=User("keith")<line_sep>user1.roles.append("admin1")<line_sep>self.assertTrue(user1.has_role("admin1"))<line_sep>self.assertFalse(user1.has_role("adminx"))<line_sep>group1=Group("sysadmin")<line_sep>group1.roles.append("admin2")<line_sep>self.assertTrue(group1.has_role("admin2"))<line_sep>group2=Group("operations")<line_sep>group2.roles.append("audit")<line_sep>group1.groups.append(group2)<line_sep>user2=User("fred")<line_sep>user2.groups.append(group1)<line_sep>user2.roles.append("admin3")<line_sep>self.assertTrue(user2.has_group("sysadmin"))<line_sep>self.assertTrue(user2.has_role("admin2"))<line_sep>self.assertTrue(user2.has_role("admin3"))<line_sep>self.assertFalse(user2.has_role("adminx"))<block_end><def_stmt>test_authorisable self<block_start>authorisable=Authorisable("testid")<line_sep>self.assertEqual("testid" 
authorisable._id)<line_sep>self.assertEqual([] authorisable.roles)<line_sep>self.assertEqual([] authorisable.groups)<line_sep>self.assertEqual([] authorisable.available_roles())<line_sep>self.assertFalse(authorisable.has_role("user"))<line_sep>self.assertFalse(authorisable.has_role("admin"))<line_sep>self.assertFalse(authorisable.has_group("sysadmin"))<line_sep>self.assertEqual([] authorisable.roles)<line_sep>authorisable.add_role("user")<line_sep>self.assertEqual(['user'] authorisable.roles)<line_sep>authorisable.add_role("user")<line_sep>self.assertEqual(['user'] authorisable.roles)<line_sep>self.assertTrue(authorisable.has_role("user"))<line_sep>group=Group("sysadmin")<line_sep>group.roles.append("admin")<line_sep>self.assertEqual([] authorisable.groups)<line_sep>authorisable.add_group(group)<line_sep>self.assertEqual([group] authorisable.groups)<line_sep>authorisable.add_group(group)<line_sep>self.assertEqual([group] authorisable.groups)<line_sep>self.assertTrue(authorisable.has_group("sysadmin"))<line_sep>self.assertTrue(authorisable.has_role("admin"))<line_sep>self.assertEqual(['user' 'admin'] authorisable.available_roles())<line_sep>group2=Group("root")<line_sep>self.assertFalse(authorisable.has_group("root"))<line_sep>group.add_group(group2)<line_sep>self.assertTrue(authorisable.has_group("root"))<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>morley path<block_start>"""Michelson Speed of Light Data A classical data of Michelson (but not this one with Morley) on measurements done in 1879 on the speed of light. The data consists of five experiments, each consisting of 20 consecutive ‘runs’. The response is the speed of light measurement, suitably coded (km/sec, with `299000` subtracted). A data frame with 100 observations on the following 3 variables. `Expt` The experiment number, from 1 to 5. `Run` The run number within each experiment. `Speed` Speed-of-light measurement. Details ~~~~~~~ The data is here viewed as a randomized block experiment with ‘experiment’ and ‘run’ as the factors. ‘run’ may also be considered a quantitative variate to account for linear (or polynomial) changes in the measurement over the course of a single experiment. <NAME> (1986) *A Genstat Primer*. London: <NAME>. <NAME> (1977) Do robust estimators work with real data? *Annals of Statistics* **5**, 1055–1098. (See Table 6.) <NAME> (1882) Experimental determination of the velocity of light made at the United States Naval Academy, Annapolis. *Astronomic Papers* **1** 135–8. U.S. Nautical Almanac Office. (See Table 24.) Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `morley.csv`. Returns: Tuple of np.ndarray `x_train` with 100 rows and 3 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='morley.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/datasets/morley.csv'<line_sep>maybe_download_and_extract(path url save_file_name='morley.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
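A minimal usage sketch for the loader above; the cache directory path is an assumption, and the CSV is downloaded there on first use:

# returns the 100x3 data matrix (Expt, Run, Speed) plus the column names
x_train, metadata = morley('~/data')
print(x_train.shape)          # (100, 3)
print(metadata['columns'])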
<import_from_stmt>django.test override_settings<import_from_stmt>..events event_document_type_changed event_document_viewed<import_from_stmt>..permissions permission_document_properties_edit permission_document_view <import_from_stmt>.base GenericDocumentViewTestCase<import_from_stmt>.mixins.document_mixins DocumentViewTestMixin<class_stmt>DocumentViewTestCase(DocumentViewTestMixin GenericDocumentViewTestCase)<block_start>auto_upload_test_document=<false><def_stmt>setUp self<block_start>super().setUp()<line_sep>self._create_test_document_stub()<block_end><def_stmt>test_document_properties_view_no_permission self<block_start>self._clear_events()<line_sep>response=self._request_test_document_properties_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_properties_view_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_properties_view()<line_sep>self.assertContains(response=response status_code=200 text=self.test_document.label)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_trashed_document_properties_view_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self.test_document.delete()<line_sep>self._clear_events()<line_sep>response=self._request_test_document_properties_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_properties_edit_get_view_no_permission self<block_start>self._clear_events()<line_sep>response=self._request_test_document_properties_edit_get_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_properties_edit_get_view_with_access self<block_start>self.grant_access(permission=permission_document_properties_edit obj=self.test_document_type)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_properties_edit_get_view()<line_sep>self.assertEqual(response.status_code 200)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_trashed_document_properties_edit_get_view_with_access self<block_start>self.grant_access(permission=permission_document_properties_edit obj=self.test_document_type)<line_sep>self.test_document.delete()<line_sep>self._clear_events()<line_sep>response=self._request_test_document_properties_edit_get_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end>@override_settings(DOCUMENTS_LANGUAGE='fra')<def_stmt>test_document_properties_view_setting_non_us_language_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_properties_view()<line_sep>self.assertContains(response=response status_code=200 text=self.test_document.label)<line_sep>self.assertContains(response=response status_code=200 text='Language:</label>\n \n \n English')<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 
0)<block_end>@override_settings(DOCUMENTS_LANGUAGE='fra')<def_stmt>test_document_properties_edit_get_view_setting_non_us_language_with_access self<block_start>self.grant_access(permission=permission_document_properties_edit obj=self.test_document_type)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_properties_edit_get_view()<line_sep>self.assertContains(response=response status_code=200 text='<option value="eng" selected>English</option>' )<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_list_view_no_permission self<block_start>self._clear_events()<line_sep>response=self._request_test_document_list_view()<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.context['object_list'].count() 0)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_list_view_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_list_view()<line_sep>self.assertContains(response=response status_code=200 text=self.test_document.label)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_trashed_document_list_view_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self.test_document.delete()<line_sep>self._clear_events()<line_sep>response=self._request_test_document_list_view()<line_sep>self.assertNotContains(response=response status_code=200 text=self.test_document.label)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_type_change_post_view_no_permission self<block_start>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self._clear_events()<line_sep>response=self._request_test_document_type_change_post_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_type_change_post_view_with_access self<block_start>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self.grant_access(obj=self.test_document permission=permission_document_properties_edit)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_type_change_post_view()<line_sep>self.assertEqual(response.status_code 302)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertNotEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 1)<line_sep>self.assertEqual(events[0].action_object self.test_document_types[1])<line_sep>self.assertEqual(events[0].actor self._test_case_user)<line_sep>self.assertEqual(events[0].target self.test_document)<line_sep>self.assertEqual(events[0].verb event_document_type_changed.id)<block_end><def_stmt>test_trashed_document_document_type_change_post_view_with_access self<block_start>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self.grant_access(obj=self.test_document 
permission=permission_document_properties_edit)<line_sep>self.test_document.delete()<line_sep>self._clear_events()<line_sep>response=self._request_test_document_type_change_post_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_type_change_view_get_no_permission self<block_start>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self._clear_events()<line_sep>response=self._request_test_document_type_change_get_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_type_change_view_get_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_properties_edit)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_type_change_get_view()<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertEqual(self.test_document.document_type self.test_document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_trashed_document_type_change_view_get_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_properties_edit)<line_sep>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self.test_document.delete()<line_sep>self._clear_events()<line_sep>response=self._request_test_document_type_change_get_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_multiple_document_type_change_view_no_permission self<block_start>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self._clear_events()<line_sep>response=self._request_test_document_multiple_type_change()<line_sep>self.assertEqual(response.status_code 404)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_multiple_document_type_change_view_with_permission self<block_start>self.grant_access(obj=self.test_document permission=permission_document_properties_edit)<line_sep>self._create_test_document_type()<line_sep>document_type=self.test_document.document_type<line_sep>self._clear_events()<line_sep>response=self._request_test_document_multiple_type_change()<line_sep>self.assertEqual(response.status_code 302)<line_sep>self.test_document.refresh_from_db()<line_sep>self.assertNotEqual(self.test_document.document_type document_type)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 1)<line_sep>self.assertEqual(events[0].action_object self.test_document_types[1])<line_sep>self.assertEqual(events[0].actor 
self._test_case_user)<line_sep>self.assertEqual(events[0].target self.test_document)<line_sep>self.assertEqual(events[0].verb event_document_type_changed.id)<block_end><def_stmt>test_document_preview_view_no_permission self<block_start>self._clear_events()<line_sep>response=self._request_test_document_preview_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><def_stmt>test_document_preview_view_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self._clear_events()<line_sep>response=self._request_test_document_preview_view()<line_sep>self.assertContains(response=response status_code=200 text=self.test_document.label)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 1)<line_sep>self.assertEqual(events[0].action_object <none>)<line_sep>self.assertEqual(events[0].actor self._test_case_user)<line_sep>self.assertEqual(events[0].target self.test_document)<line_sep>self.assertEqual(events[0].verb event_document_viewed.id)<block_end><def_stmt>test_trashed_document_preview_view_with_access self<block_start>self.grant_access(obj=self.test_document permission=permission_document_view)<line_sep>self.test_document.delete()<line_sep>self._clear_events()<line_sep>response=self._request_test_document_preview_view()<line_sep>self.assertEqual(response.status_code 404)<line_sep>events=self._get_test_events()<line_sep>self.assertEqual(events.count() 0)<block_end><block_end>
<import_stmt>pytest<import_from_stmt>torch rand<import_from_stmt>torchtyping TensorType<import_from_stmt>typeguard typechecked<line_sep>x=y=<none><def_stmt>test_single <block_start>@typechecked<def_stmt>func1 x:TensorType["x"] y:TensorType["x"]<block_start><pass><block_end>@typechecked<def_stmt>func2 x:TensorType["x"] y:TensorType["x"]<arrow>TensorType["x"]<block_start><return>x+y<block_end>@typechecked<def_stmt>func3 x:TensorType["x"] y:TensorType["x"]<arrow>TensorType["x" "x"]<block_start><return>x+y<block_end>@typechecked<def_stmt>func4 x:TensorType["x"] y:TensorType["x"]<arrow>TensorType["x" "x"]<block_start><return>x.unsqueeze(0)+y.unsqueeze(1)<block_end>@typechecked<def_stmt>func5 x:TensorType["x"] y:TensorType["x"]<arrow>TensorType["x" "y"]<block_start><return>x<block_end>@typechecked<def_stmt>func6 x:TensorType["x"] y:TensorType["x"]<arrow>TensorType["y" "x"]<block_start><return>x<block_end>@typechecked<def_stmt>func7 x:TensorType["x"]<arrow>TensorType["x"]<block_start><assert_stmt>x.shape<ne>(1 )<line_sep><return>rand((1 ))<block_end>func1(rand(2) rand(2))<line_sep>func2(rand(2) rand(2))<with_stmt>pytest.raises(TypeError)<block_start>func3(rand(2) rand(2))<block_end>func4(rand(2) rand(2))<with_stmt>pytest.raises(TypeError)<block_start>func5(rand(2) rand(2))<block_end><with_stmt>pytest.raises(TypeError)<block_start>func6(rand(2) rand(2))<block_end><with_stmt>pytest.raises(TypeError)<block_start>func7(rand(3))<block_end><block_end><def_stmt>test_multiple # Fun fact, this "wrong" func0 is actually a mistype of func1, that torchtyping # caught for me when I ran the tests! <block_start>@typechecked<def_stmt>func0 x:TensorType["x"] y:TensorType["y"]<arrow>TensorType["x" "y"]<block_start><return>x.unsqueeze(0)+y.unsqueeze(1)<block_end>@typechecked<def_stmt>func1 x:TensorType["x"] y:TensorType["y"]<arrow>TensorType["x" "y"]<block_start><return>x.unsqueeze(1)+y.unsqueeze(0)<block_end>@typechecked<def_stmt>func2 x:TensorType["x" "x"]<block_start><pass><block_end>@typechecked<def_stmt>func3 x:TensorType["x" "x" "x"]<block_start><pass><block_end>@typechecked<def_stmt>func4 x:TensorType["x"] y:TensorType["x" "y"]<block_start><pass><block_end>@typechecked<def_stmt>func5 x:TensorType["x" "y"] y:TensorType["y" "x"]<block_start><pass><block_end>@typechecked<def_stmt>func6 x:TensorType["x"] y:TensorType["y"]<arrow>TensorType["x" "y"]<block_start><assert_stmt><not>(x.shape<eq>(2 )<and>y.shape<eq>(3 ))<line_sep><return>rand(2 3)<block_end>func0(rand(2) rand(2))# can't catch this <with_stmt>pytest.raises(TypeError)<block_start>func0(rand(2) rand(3))<block_end><with_stmt>pytest.raises(TypeError)<block_start>func0(rand(10) rand(0))<block_end>func1(rand(2) rand(2))<line_sep>func1(rand(2) rand(3))<line_sep>func1(rand(10) rand(0))<line_sep>func2(rand(0 0))<line_sep>func2(rand(2 2))<line_sep>func2(rand(9 9))<with_stmt>pytest.raises(TypeError)<block_start>func2(rand(0 4))<line_sep>func2(rand(1 4))<line_sep>func2(rand(3 4))<block_end>func3(rand(0 0 0))<line_sep>func3(rand(2 2 2))<line_sep>func3(rand(9 9 9))<with_stmt>pytest.raises(TypeError)<block_start>func3(rand(0 4 4))<line_sep>func3(rand(1 4 4))<line_sep>func3(rand(3 3 4))<block_end>func4(rand(3) rand(3 4))<with_stmt>pytest.raises(TypeError)<block_start>func4(rand(3) rand(4 3))<block_end>func5(rand(2 3) rand(3 2))<line_sep>func5(rand(0 5) rand(5 0))<line_sep>func5(rand(2 2) rand(2 2))<with_stmt>pytest.raises(TypeError)<block_start>func5(rand(2 3) rand(2 3))<line_sep>func5(rand(2 3) rand(2 
2))<block_end><with_stmt>pytest.raises(TypeError)<block_start>func6(rand(5) rand(3))<block_end><block_end>
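# Hedged, minimal illustration of the behaviour the tests above exercise: once
# typeguard is patched (the suite above presumably does this in its conftest),
# dimensions that share a name must agree across annotated tensors, and
# @typechecked raises TypeError when they do not. The function name is illustrative.
import pytest
from torch import rand
from torchtyping import TensorType, patch_typeguard
from typeguard import typechecked

patch_typeguard()

@typechecked
def scale(v: TensorType["x"], w: TensorType["x"]) -> TensorType["x"]:
    return v * w

scale(rand(4), rand(4))        # matching "x" dimensions pass
with pytest.raises(TypeError):
    scale(rand(4), rand(5))    # mismatched "x" dimensions are rejected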
# Requires scikit-learn; `clusters` is assumed to be defined earlier as the number of centers. <import_from_stmt>sklearn.datasets make_blobs<line_sep>X,_=make_blobs(n_samples=800 centers=clusters n_features=3)<line_sep>
<import_stmt>torch<import_from_stmt>collections namedtuple<import_stmt>monet.lm_ops<as>lm_ops<import_stmt>numpy<as>np<import_from_stmt>monet.graph *<import_from_stmt>checkmate.checkmate_solver *<import_from_stmt>monet.solver_info *<import_from_stmt>monet.pipelined_solver_info *<import_from_stmt>models.unet UNet<line_sep>ScheduleType=namedtuple('ScheduleType' 'recompute store_output delete_nodes store_intermediate')<line_sep>KEEP_FWDOP=<false><class_stmt>Schedule(Graph)<block_start><def_stmt>__init__ self graph:Graph info:SolverInfo<block_start>self.si=info<line_sep>self._nodes=graph.nodes<line_sep>self.lennodes=len(self.nodes)<line_sep>self._outputs=graph._outputs<line_sep>self._op=[]# List of operations self.bs=-1<line_sep># Stored tensors self._stored=[<none><for>i range(self.lennodes)]<line_sep>self._stored_intermediate=[<none><for>i range(self.lennodes)]<line_sep>self._bwd_stored=[<none><for>i range(self.lennodes)]<line_sep># Parameters list self._args=[]<line_sep>self.args_updated=[]<line_sep># Preprocessing self.computeInstance=[]<for_stmt>k,n enumerate(self.nodes)<block_start><if_stmt>isinstance(n ComputeNode)<and><not>n.op<eq>"aten::t"<block_start>self.computeInstance.append(k)<block_end><block_end><block_end><def_stmt>init_schedule self solution:CheckmateSolution mode<block_start>T=len(self.si.nodes)<line_sep># Create main structures self._op=[<none><for>i range(self.lennodes)]# List of operations self._fwd_schedule=[[]<for>i range(T)]# Forward schedule self._bwd_schedule=[[]<for>i range(T)]# Backward schedule self.fwdargs=[<none><for>i range(self.lennodes)]# Index to forward node input tensor self.bwdargs=[<none><for>i range(self.lennodes)]# Index to backward node input tensors # Initialize forward pass structures <for_stmt>t range(T)<block_start><for_stmt>i,n enumerate(self.nodes)<block_start><if_stmt>isinstance(n ComputeNode)<and>n.op<ne>"aten::t"<block_start>j=self.si.graph_to_solver[i]<line_sep>ops_list=lm_ops.list_ops(self.si.mode n.op)<if_stmt>isinstance(self.si PipelinedSolverInfo)<and>self.si.nodes[j].has_intermediates<block_start>op=ops_list[-1]()# Select intermediate-computing and intermediate-activated operator implementation <block_end><else_stmt><block_start>op=ops_list[0]()# Select the default operator implementations <block_end><if_stmt>n.is_depthwise<block_start>op.is_depthwise=<true><block_end>s=solution.s[t+1][j]<if>t<l>T-1<else><false><line_sep>r=solution.r[t][j]<line_sep>f=solution.f[t][j]<line_sep>schedule_intermediate=<false><line_sep>storage=op.backward_storage<if_stmt><not>isinstance(storage list)<block_start>storage=[storage]<block_end><for_stmt>store storage<block_start><if_stmt>isinstance(store lm_ops.IntermediateStorage)<block_start>schedule_intermediate=<true><block_end><block_end><if_stmt>r<or>len(f)<or>s<block_start>self._fwd_schedule[t].append((i ScheduleType(r s f schedule_intermediate) n.op))<line_sep>self._op[i]=op<line_sep>self.fwdargs[i]=[(a.value <none>)<if>isinstance(a ComputeNode.V)<else>(a.index a.requires_grad)<for>a n.args]<block_end><block_end><elif_stmt>isinstance(n ComputeNode)<and>n.op<eq>"aten::t"<block_start><pass><block_end><else_stmt># Node represents a parameter <block_start>self._fwd_schedule[t].append((i <none> <none>))<line_sep>self._op[i]=<none><block_end><block_end><block_end># Initialize backward pass structures <for_stmt>k,m reversed(list(enumerate(self.nodes)))# Create backward dependencies <block_start><if_stmt>isinstance(m 
ComputeNode)<and>m.op<ne>"aten::t"<block_start>j=self.si.fwd_to_bwd[self.si.graph_to_solver[k]]<line_sep>n=self.si.nodes[j]<assert_stmt>isinstance(n BwdNode)<line_sep>self.bwdargs[k]={'param':[] 'ip':[]}<line_sep>storage_list=self._op[k].backward_storage<if_stmt><not>isinstance(storage_list list)<block_start>storage_list=[storage_list]<block_end><for_stmt>storage storage_list<block_start><if_stmt>isinstance(storage lm_ops.InputStorage)<block_start><for_stmt>posi,i enumerate(storage.ids)<block_start>idx=m.args[i].index<if_stmt>(((m.op<eq>"aten::_convolution"<and><not>m.is_depthwise)<or>m.op<eq>"aten::addmm")<and>n.bwd_op<eq>"ip_grad")<block_start>self.bwdargs[k]['param'].append((idx <true> <false>))<if_stmt>posi<eq>0<block_start>self.bwdargs[k]['ip'].append((idx <false> <false>))# Input tensor for conv/addmm ip grad need not be stored <block_end><else_stmt><block_start>self.bwdargs[k]['ip'].append((idx <true> <false>))<block_end><block_end><else_stmt><block_start>self.bwdargs[k]['ip'].append((idx <true> <false>))<block_end><block_end><block_end><elif_stmt>isinstance(storage lm_ops.OutputStorage)<block_start>self.bwdargs[k]['ip'].append((k <true> <false>))<block_end><elif_stmt>isinstance(storage lm_ops.IntermediateStorage)<block_start>self.bwdargs[k]['ip'].append((k <true> <true>))<block_end><block_end><block_end># Create backward schedule <for_stmt>t range(T)<block_start><if_stmt>isinstance(m ComputeNode)<and>m.op<ne>"aten::t"<block_start>j=self.si.fwd_to_bwd[self.si.graph_to_solver[k]]<line_sep>n=self.si.nodes[j]<assert_stmt>isinstance(n BwdNode)<line_sep>s=solution.s[t+1][j]<if>t<l>T-1<else><false><line_sep>r=solution.r[t][j]<line_sep>f=solution.f[t][j]<if_stmt>(((m.op<eq>"aten::_convolution"<and><not>m.is_depthwise)<or>m.op<eq>"aten::addmm")<and>n.bwd_op<eq>"ip_grad")<block_start>s1=solution.s[t+1][j-1]<if>t<l>T-1<else><false><if_stmt>solution.r[t][j-1]<or>len(solution.f[t][j-1])<or>s1<block_start>self._bwd_schedule[t].append((k ScheduleType(solution.r[t][j-1] s1 solution.f[t][j-1] <false>) "param"))<block_end><block_end><if_stmt>r<or>len(f)<or>s<block_start>self._bwd_schedule[t].append((k ScheduleType(r s f <false>) "ip"))<block_end><block_end><elif_stmt>isinstance(m ComputeNode)<and>m.op<eq>"aten::t"<block_start><pass><block_end><else_stmt><block_start>self._bwd_schedule[t].append((k <none> "grad"))<block_end><block_end><block_end>self.opshapes=defaultdict()<for_stmt>k self._outputs<block_start>self.opshapes[k]=[self.bs<if>dim<eq>-1<else>dim<for>dim self._nodes[k].shape]<block_end><block_end><def_stmt>_forward self t<block_start>tensors=self._stored<line_sep>bw_tensors=self._bwd_stored<line_sep>self._stored=[<none>]<times>self.lennodes<line_sep>self._bwd_stored=[<none>]<times>self.lennodes<if_stmt>len(self._fwd_schedule[t])<block_start><for_stmt>(k schedule op_name) self._fwd_schedule[t]<block_start><if_stmt>schedule<eq><none><block_start>tensors[k]=self._args[k]<block_end><else_stmt><block_start>recompute,s,f,si=schedule<if_stmt>recompute<block_start>args=[a<if>b<eq><none><else>tensors[a].requires_grad_(b)<for>(a b) self.fwdargs[k]]<line_sep># Checkmate does not reuse params for BN <if_stmt>op_name<eq>"aten::batch_norm"<block_start>self._op[k].params=<none><block_end><if_stmt>si<block_start>tensors[k],self._stored_intermediates[k]=self._op[k].forward(*args)<block_end><else_stmt><block_start>tensors[k]=self._op[k].forward(*args)<block_end><assert_stmt>tensors[k]<is><not><none><del_stmt>args<block_end><for_stmt>u 
f<block_start><assert_stmt>u<l>self.si.loss<line_sep>graphu=self.si.solver_to_graph[u]<line_sep>tensors[graphu]=<none><block_end><if_stmt>s<block_start>self._stored[k]=tensors[k]<block_end><block_end><block_end><block_end><if_stmt>len(self._bwd_schedule[t])<block_start><for_stmt>(k schedule optype) self._bwd_schedule[t]<block_start><if_stmt>schedule<eq><none><block_start><if_stmt>bw_tensors[k]<is><not><none><and>k<not><in>self.args_updated<and>self._args[k].requires_grad<block_start><assert_stmt>len(bw_tensors[k])<eq>1<for_stmt>u bw_tensors[k]<block_start>self._args[k].backward(bw_tensors[k][u])<block_end>bw_tensors[k]=<none><line_sep># self._bwd_stored[k] = None self.args_updated.append(k)<block_end><block_end><else_stmt><block_start>recompute,s,f,si=schedule<if_stmt>recompute<block_start><for_stmt>(idx checkNone intmd) self.bwdargs[k][optype]<block_start><if_stmt>checkNone<block_start><if_stmt>intmd<block_start><assert_stmt>self._stored_intermediate[idx]<is><not><none><block_end><else_stmt><block_start><assert_stmt>tensors[idx]<is><not><none><block_end><block_end><block_end>stored=[tensors[idx]<if><not>intmd<else>self._stored_intermediates[idx]<for>(idx _ intmd) self.bwdargs[k][optype]]<line_sep>grad_nd=self._op[k]<line_sep>m=self.nodes[k]<if_stmt>((m.op<eq>"aten::_convolution"<and><not>m.is_depthwise)<or>m.op<eq>"aten::addmm")<block_start><if_stmt>optype<eq>"param"<block_start>grad_nd.algorithm=0<block_end><elif_stmt>optype<eq>"ip"<block_start>grad_nd.algorithm=10<block_end><block_end># Call backward bwd_in=<none><if_stmt>k<in>self._outputs<block_start>s=[val<if>val<g>0<else>self.bs<for>val list(self.opshapes[k])]<line_sep>bw_tensors[k]={-1:torch.ones(s device=self._args[0].device)}<block_end><with_stmt>torch.no_grad()<block_start><assert_stmt>bw_tensors[k]<is><not><none> "k, t: %d %d "%(k t)<for_stmt>u bw_tensors[k]<block_start><assert_stmt>bw_tensors[k][u]<is><not><none> "k, u, t: %d %d %s %d "%(k u self.si.nodes[self.si.graph_to_solver[k]] t)<if_stmt>bwd_in<eq><none><block_start>bwd_in=bw_tensors[k][u]<block_end><else_stmt><block_start>bwd_in<augadd>bw_tensors[k][u]<block_end><block_end><assert_stmt>bwd_in<is><not><none><block_end>bw_outs=grad_nd.backward(bwd_in stored)<del_stmt>bwd_in<if_stmt><not>isinstance(bw_outs (list tuple))<block_start>bw_outs=(bw_outs )<block_end><assert_stmt>len(bw_outs)<eq>len(self.nodes[k].dependencies) "Require the same number of grad outputs as forward inputs"<concat>" %s (%d) , %s (%d)"%(repr(bw_outs) len(bw_outs) repr(self.nodes[k].dependencies) len(self.nodes[k].dependencies))<line_sep># Accumulate the backward gradient <for_stmt>(i r),o zip(self.nodes[k].dependencies bw_outs)<block_start><if_stmt>r<block_start><if_stmt>o<is><not><none><block_start><if_stmt>bw_tensors[i]<is><none><block_start>bw_tensors[i]={k:o}<block_end><else_stmt><block_start>bw_tensors[i][k]=o<block_end><block_end><block_end><block_end><del_stmt>grad_nd bw_outs o<block_end><for_stmt>u f<block_start><if_stmt>u<l>self.si.loss<block_start>graphu=self.si.solver_to_graph[u]<line_sep>tensors[graphu]=<none><block_end><elif_stmt>u<eq>self.si.loss<block_start><pass><line_sep># Do not delete loss nodes <block_end><else_stmt><block_start>graphu=self.si.solver_to_graph[self.si.bwd_to_fwd[u]]<line_sep>unode=self.si.nodes[self.si.bwd_to_fwd[u]].gnode<for_stmt>(i r) unode.dependencies<block_start><if_stmt>isinstance(self.nodes[i] ComputeNode)<block_start>bw_tensors[i][graphu]=<none><block_end><block_end><block_end><block_end><if_stmt>s<block_start><for_stmt>(i r) 
self.nodes[k].dependencies<block_start><if_stmt>r<block_start><if_stmt>isinstance(self._nodes[i] ComputeNode)<block_start><if_stmt>optype<ne>"param"<block_start><assert_stmt>bw_tensors[i]<is><not><none><assert_stmt>bw_tensors[i][k]<is><not><none> "%d (%s) should have bwd input from %d (%s)"%(self.si.graph_to_solver[i] self.si.nodes[self.si.graph_to_solver[i]] self.si.graph_to_solver[k] self.si.nodes[self.si.graph_to_solver[k]])<if_stmt>self._bwd_stored[i]<is><none><block_start>self._bwd_stored[i]={k:bw_tensors[i][k]}<block_end><else_stmt><block_start>self._bwd_stored[i][k]=bw_tensors[i][k]<block_end><block_end><block_end><block_end><block_end><block_end><block_end><block_end><block_end><del_stmt>tensors bw_tensors<block_end><def_stmt>forward self *args<block_start>self._args=args<line_sep>T=self.si.size<line_sep>fwd_output=<none><for_stmt>t range(T)<block_start>self._forward(t)<block_end><for_stmt>k,n enumerate(self._nodes)<block_start><if_stmt>k<in>self.computeInstance<and>n.op<eq>"aten::batch_norm"<block_start>self._op[k].params=<none><block_end><block_end>self.args_updated=[]<line_sep><return>fwd_output<block_end><block_end><def_stmt>disable_dropout model<block_start><for_stmt>m model.modules()<block_start><if_stmt>isinstance(m torch.nn.Dropout)<block_start>m.p=0.0<block_end><block_end><block_end><def_stmt>load_solution filename<block_start><import_stmt>pickle<line_sep>print(f'Loading solver_info, solution from {filename}')<with_stmt>open(filename 'rb')<as>f<block_start>si,solution=pickle.load(f)<block_end><return>si solution<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<import_stmt>torchvision<import_from_stmt>time time<import_from_stmt>pathlib Path<import_from_stmt>monet.cvxpy_solver Solution<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('model')<line_sep>parser.add_argument('bs')<line_sep>parser.add_argument('budget')<line_sep>parser.add_argument('mode')<line_sep>parser.add_argument('solver')<line_sep>parser.add_argument("--solution_file" type=str default="" help="If specified, load stored solution file.")<line_sep>parser.add_argument("--check_diff" action="store_true" help="Compute the output (gradient) difference between ours and normal model.")<line_sep>parser.add_argument("--check_runtime" action="store_true" help="Compute the runtime difference between ours and normal model.")<line_sep>parser.add_argument("--run_bs" action="store_true" help="Run the given batch size.")<line_sep>parser.add_argument("--pipeline" action="store_true" help="Pipeline the operator optimization followed by checkpointing")<line_sep>parser.add_argument("--ablation" action="store_true" help="Do ablation?.")<line_sep>args=parser.parse_args()<line_sep>budget=float(args.budget)<import_stmt>config<line_sep>config.budget=budget<line_sep>bs=int(args.bs)<line_sep>model_name=args.model.split(".")[-1][:-2]<line_sep>mode=args.mode<line_sep>print("Memory budget " budget " GB")<line_sep>print("Batch size " bs)<line_sep>print("Model" model_name)<line_sep>print("Mode" mode)<if_stmt>args.model<eq>'unet'<block_start>height,width=416 608<line_sep>model=UNet(n_channels=3 n_classes=1 height=height width=width)<block_end><else_stmt><block_start>height,width=224 224<line_sep>model=eval(args.model {'torch':torch 'torchvision':torchvision})<block_end><if_stmt>'mobilenet_v2'<in>args.model<block_start>model=torch.nn.Sequential(model.features torch.nn.AdaptiveAvgPool2d((1 1)) torch.nn.Flatten(start_dim=1) model.classifier[0] 
model.classifier[1])<block_end><if_stmt>args.check_diff<block_start>disable_dropout(model)<block_end>graph=Graph.create(model input_shape=(3 height width))<line_sep>model.cuda()<line_sep>solvert=-1<if_stmt>args.check_diff<block_start>input_=torch.randn((bs 3 height width)).cuda()<if_stmt>len(args.solution_file)<g>0<block_start>solver_info,solution=load_solution(args.solution_file)<block_end><else_stmt><block_start>solver_info=SolverInfo(bs=bs model_name=model_name mode=mode)<line_sep>solver_info.extract(graph input_ *list(model.state_dict(keep_vars=<true>).values()))<line_sep>solution=solve_ilp_gurobi(solver_info budget approx=<false> time_limit=86400)<block_end>schedule=Schedule(graph solver_info)<line_sep>schedule.init_schedule(solution mode)<line_sep>x0=model(input_)<if_stmt>'googlenet'<in>args.model<block_start>(-(x0[0]+x0[1]+x0[2])).sum().backward()<block_end><else_stmt><block_start>(-x0).sum().backward()<block_end>KEEP_FWDOP=<true><line_sep>x1=schedule.forward(input_ *list(model.state_dict(keep_vars=<true>).values()))<line_sep>print('Forward mean absolute difference' abs(x0[0]-x1).mean()<if>'googlenet'<in>args.model<else>abs(x0-x1).mean())<line_sep>print('original output' x0)<line_sep>print('ours output' x1)<line_sep>print('Gradient of normal model' ['{:.5f} {}'.format(float(v.grad.mean()) v.shape)<for>v model.parameters()<if>v.grad<is><not><none>])<block_end><if_stmt>args.check_runtime<block_start>FORWARD_EMPTY_CACHE=<false><if_stmt>len(args.solution_file)<g>0<block_start>solver_info,solution=load_solution(args.solution_file)<block_end><else_stmt><block_start>input_=torch.randn((bs 3 height width)).cuda()<if_stmt>args.pipeline<block_start>solver_info=PipelinedSolverInfo(bs=bs model_name=model_name mode=mode)<block_end><else_stmt><block_start>solver_info=SolverInfo(bs=bs model_name=model_name mode=mode)<block_end>solver_info.extract(graph input_ *list(model.state_dict(keep_vars=<true>).values()))<line_sep>solution=solve_ilp_gurobi(solver_info budget approx=<false> time_limit=86400)<line_sep># t0 = time() # solution = solver_model.solve() # solvert = time() - t0 <del_stmt>input_<block_end>input_=torch.randn((bs 3 height width)).cuda()<line_sep>torch.cuda.reset_max_memory_allocated()<line_sep>torch.cuda.synchronize()<line_sep>torch.cuda.empty_cache()<line_sep>torch.cuda.reset_max_memory_allocated()<line_sep>schedule=Schedule(graph solver_info)<line_sep>schedule.bs=bs<line_sep>schedule.init_schedule(solution mode)<line_sep>torch.cuda.synchronize()<line_sep>start_event_monet=torch.cuda.Event(enable_timing=<true>)<line_sep>end_event_monet=torch.cuda.Event(enable_timing=<true>)<for_stmt>iterid range(120)<block_start><if_stmt>iterid<eq>100<block_start>start_event_monet.record()<block_end>x1=schedule.forward(input_ *list(model.state_dict(keep_vars=<true>).values()))<for_stmt>v model.parameters()<block_start>v.grad=<none><block_end><block_end>end_event_monet.record()<line_sep>torch.cuda.synchronize()<del_stmt>x1<line_sep>autosave_maxmem=torch.cuda.max_memory_allocated()/2<power>20<line_sep>print("checkmate: %f ms avg, %8.2f MB"%(start_event_monet.elapsed_time(end_event_monet)/20 autosave_maxmem))<line_sep>exit()<block_end>solvert=-1<if_stmt>args.run_bs<block_start>bs=int(args.bs)<line_sep>print("Solver trying batch size %d"%bs)<if_stmt>len(args.solution_file)<g>0<block_start>solver_info,solution=load_solution(args.solution_file)<block_end><else_stmt><block_start>input_=torch.randn((bs 3 height width)).cuda()<if_stmt>args.pipeline<block_start>solver_info=PipelinedSolverInfo(bs=bs 
model_name=model_name mode=mode)<block_end><else_stmt><block_start>solver_info=SolverInfo(bs=bs model_name=model_name mode=mode)<block_end>solver_info.extract(graph input_ *list(model.state_dict(keep_vars=<true>).values()))<line_sep>solution=solve_ilp_gurobi(solver_info budget approx=<false> time_limit=86400)<del_stmt>input_<block_end>print("Batch size %d feasible"%bs)<line_sep>print("Solved in %fs with actual opt taking %fs"%(solvert solution.solve_time))<line_sep>print("Running schedule for batch size %d"%bs)<line_sep>torch.cuda.empty_cache()<line_sep>torch.cuda.reset_max_memory_allocated()<line_sep>input_=torch.randn((bs 3 height width)).cuda()<line_sep>schedule=Schedule(graph solver_info)<line_sep>schedule.bs=bs<line_sep>schedule.init_schedule(solution mode)<line_sep>t0=time()<line_sep>x1=schedule.forward(input_ *list(model.state_dict(keep_vars=<true>).values()))<del_stmt>input_<del_stmt>x1<line_sep>torch.cuda.synchronize()<line_sep>t1=time()-t0<line_sep>print("Ran schedule for batch %d "%bs)<line_sep>torch.cuda.empty_cache()<line_sep>mem=torch.cuda.max_memory_allocated()/2<power>20<line_sep>print("Ran batch %d with peak memory %8.2fM, %fs"%(bs mem t1))<block_end><block_end>
<import_stmt>odrive<import_from_stmt>odrive.enums *<import_from_stmt>odrive.utils *<line_sep>print("finding an odrive...")<line_sep>odrv0=odrive.find_any()<line_sep>print('Odrive found')<line_sep>odrv0.axis1.controller.config.vel_limit=50000<line_sep>odrv0.axis1.controller.config.control_mode=CONTROL_MODE_POSITION_CONTROL<line_sep>odrv0.axis1.controller.config.input_mode=INPUT_MODE_PASSTHROUGH<line_sep>odrv0.axis1.encoder.config.cpr=2400<line_sep>odrv0.axis1.encoder.config.bandwidth=1000<line_sep>odrv0.axis1.motor.config.calibration_current=5<line_sep>odrv0.axis1.motor.config.current_lim=5<line_sep>odrv0.axis1.controller.config.homing_speed=5000<line_sep>odrv0.config.brake_resistance=0<line_sep>odrv0.axis0.min_endstop.config.gpio_num=6<line_sep>odrv0.axis0.min_endstop.config.enabled=<true><line_sep>odrv0.axis0.min_endstop.config.offset=-1000<line_sep>odrv0.axis0.max_endstop.config.gpio_num=5<line_sep>odrv0.axis0.max_endstop.config.enabled=<true><line_sep>odrv0.axis1.min_endstop.config.gpio_num=8<line_sep>odrv0.axis1.min_endstop.config.enabled=<true><line_sep>odrv0.axis1.min_endstop.config.offset=-1000<line_sep>odrv0.axis1.max_endstop.config.gpio_num=7<line_sep>odrv0.axis1.max_endstop.config.enabled=<true><line_sep>odrv0.axis1.config.startup_encoder_offset_calibration=<true><line_sep>odrv0.axis1.config.startup_motor_calibration=<true><line_sep>odrv0.axis1.config.startup_homing=<true><line_sep>odrv0.axis1.config.startup_closed_loop_control=<true><line_sep>
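# Hedged follow-up to the configuration above: startup_* and endstop settings
# generally only take effect once they are persisted and the board restarts.
# save_configuration() and reboot() are standard odrive API calls; the try/except
# reflects the USB link dropping while the board reboots.
odrv0.save_configuration()
try:
    odrv0.reboot()
except Exception:
    pass  # the device disconnects during reboot, so this call usually raises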
''' Z80 architecture base... '''<line_sep>
# Before executing the script, we need urllib3. Run `pip install urllib3` # A simple script for automatically download the ihme-covid19.zip file and extract it <import_stmt>requests<import_stmt>os<import_stmt>sys<import_stmt>calendar<import_stmt>datetime<def_stmt>download_file_by_date path date# metadata <block_start>prefix="https://raw.githubusercontent.com/shaman-lab/COVID-19Projection/master/Projection_"<line_sep>suffix="/cdc_hosp/state_cdchosp_"<line_sep>raw_list=["60contact.csv" "70contact.csv" "80contact.csv" "nointerv.csv" "80contact_1x.csv" "80contactw.csv"]<line_sep># Check all urls if there's new data on specific date working_urls=[]<for_stmt>raw_file raw_list<block_start>url=prefix+date+suffix+raw_file<line_sep>response=requests.get(url)<line_sep>working_urls.append(response.status_code)<block_end># Download data savepath=os.path.join(path "Projection_"+date+"/cdc_hosp/")<if_stmt>200<in>working_urls<block_start><if_stmt><not>os.path.exists(savepath)<block_start>os.makedirs(savepath)<block_end><for_stmt>raw_file raw_list<block_start>url=prefix+date+suffix+raw_file<line_sep>response=requests.get(url)<if_stmt>response.status_code<eq>200<block_start><with_stmt>open(os.path.join(savepath "state_cdchosp_"+raw_file) "wb")<as>writer<block_start><for_stmt>chunk response<block_start>writer.write(chunk)<block_end>writer.close()<block_end><block_end><block_end><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>download_recent_CU_data path# Because the script is run at 1pm everyday, it may miss forecasts that # are uploaded after 1pm if the script only look at the current day. We # set it up to also look at the day before <block_start>today=datetime.datetime.today()-datetime.timedelta(days=2)<line_sep>today_date_v1=calendar.month_name[today.month]+today.strftime('%d')<line_sep>today_date_v2=calendar.month_name[today.month]+today.strftime('%d').strip('0')<line_sep>yesterday=datetime.datetime.today()-datetime.timedelta(days=1)<line_sep>yesterday_date_v1=calendar.month_name[yesterday.month]+yesterday.strftime('%d')<line_sep>yesterday_date_v2=calendar.month_name[yesterday.month]+yesterday.strftime('%d').strip('0')<line_sep># Check for different combination of new data from yesterday (Example: May3 vs May03) download_with_yesterday_v1_is_successful=download_file_by_date(path yesterday_date_v1)<line_sep>download_with_yesterday_v2_is_successful=download_file_by_date(path yesterday_date_v2)<if_stmt>(download_with_yesterday_v1_is_successful<or>download_with_yesterday_v2_is_successful)<block_start>print('There is new data from CU on '+yesterday_date_v1)<block_end><else_stmt><block_start>print('There is no new data from CU on '+yesterday_date_v1)<block_end># Check for different combination of new data from today (Example: May4 vs May04) download_with_today_v1_is_successful=download_file_by_date(path today_date_v1)<line_sep>download_with_today_v2_is_successful=download_file_by_date(path today_date_v2)<if_stmt>(download_with_today_v1_is_successful<or>download_with_today_v2_is_successful)<block_start>print('There is new data from CU on '+today_date_v1)<block_end><else_stmt><block_start>print('There is no new data from CU on '+today_date_v1)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>path=sys.argv[1]<line_sep>download_recent_CU_data(path)<block_end>
# -*- coding: utf-8 -*- """ Vincent Stacked Area Examples """<line_sep>#Build a Stacked Area Chart from scratch <import_from_stmt>vincent *<import_stmt>pandas<as>pd<import_stmt>pandas.io.data<as>web<line_sep>all_data={}<for_stmt>ticker ['AAPL' 'GOOG' 'IBM' 'YHOO' 'MSFT']<block_start>all_data[ticker]=web.get_data_yahoo(ticker '1/1/2010' '1/1/2013')<block_end>price=pd.DataFrame({tic:data['Adj Close']<for>tic,data all_data.items()})<line_sep>vis=Visualization(width=500 height=300)<line_sep>vis.padding={'top':10 'left':50 'bottom':50 'right':100}<line_sep>data=Data.from_pandas(price)<line_sep>vis.data['table']=data<line_sep>facets=Transform(type='facet' keys=['data.idx'])<line_sep>stats=Transform(type='stats' value='data.val')<line_sep>stat_dat=Data(name='stats' source='table' transform=[facets stats])<line_sep>vis.data['stats']=stat_dat<line_sep>vis.scales['x']=Scale(name='x' type='time' range='width' domain=DataRef(data='table' field="data.idx"))<line_sep>vis.scales['y']=Scale(name='y' range='height' type='linear' nice=<true> domain=DataRef(data='stats' field="sum"))<line_sep>vis.scales['color']=Scale(name='color' type='ordinal' domain=DataRef(data='table' field='data.col') range='category20')<line_sep>vis.axes.extend([Axis(type='x' scale='x') Axis(type='y' scale='y')])<line_sep>facet=Transform(type='facet' keys=['data.col'])<line_sep>stack=Transform(type='stack' point='data.idx' height='data.val')<line_sep>transform=MarkRef(data='table' transform=[facet stack])<line_sep>enter_props=PropertySet(x=ValueRef(scale='x' field="data.idx") y=ValueRef(scale='y' field="y") interpolate=ValueRef(value='monotone') y2=ValueRef(field='y2' scale='y') fill=ValueRef(scale='color' field='data.col'))<line_sep>mark=Mark(type='group' from_=transform marks=[Mark(type='area' properties=MarkProperties(enter=enter_props))])<line_sep>vis.marks.append(mark)<line_sep>vis.axis_titles(x='Date' y='Price')<line_sep>vis.legend(title='Tech Stocks')<line_sep>vis.to_json('vega.json')<line_sep>#Convenience method vis=StackedArea(price)<line_sep>vis.axis_titles(x='Date' y='Price')<line_sep>vis.legend(title='Tech Stocks')<line_sep>vis.colors(brew='Paired')<line_sep>vis.to_json('vega.json')<line_sep>
"""parameter_search.py Search for optimal parameters for RIDDLE and various ML classifiers. Requires: Keras, NumPy, scikit-learn, RIDDLE (and their dependencies) Author: <NAME>, Rzhetsky Lab Copyright: 2018, all rights reserved """<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>os<import_stmt>pickle<import_stmt>time<import_stmt>warnings<import_stmt>numpy<as>np<import_from_stmt>sklearn.metrics log_loss<import_from_stmt>sklearn.model_selection RandomizedSearchCV<import_from_stmt>riddle emr<import_from_stmt>riddle tuning<import_from_stmt>riddle.models MLP<import_from_stmt>utils get_param_path<import_from_stmt>utils get_preprocessed_data<import_from_stmt>utils recursive_mkdir<import_from_stmt>utils select_features<import_from_stmt>utils subset_reencode_features<import_from_stmt>utils vectorize_features<line_sep>SEED=109971161161043253%8085<line_sep>TUNING_K=3# number of partitions to use to evaluate a parameter config parser=argparse.ArgumentParser(description='Perform parameter search for various classification methods.')<line_sep>parser.add_argument('--method' type=str default='riddle' help='Classification method to use.')<line_sep>parser.add_argument('--data_fn' type=str default='dummy.txt' help='Filename of text data file.')<line_sep>parser.add_argument('--prop_missing' type=float default=0.0 help='Proportion of feature observations to simulate as missing.')<line_sep>parser.add_argument('--max_num_feature' type=int default=-1 help='Maximum number of features to use; with the default of -1, use all'<concat>'available features')<line_sep>parser.add_argument('--feature_selection' type=str default='random' help='Method to use for feature selection.')<line_sep>parser.add_argument('--force_run' type=bool default=<false> help='Whether to force parameter search to run even if it has been already'<concat>'performed.')<line_sep>parser.add_argument('--max_num_sample' type=int default=10000 help='Maximum number of samples to use during parameter tuning.')<line_sep>parser.add_argument('--num_search' type=int default=5 help='Number of parameter settings (searches) to try.')<line_sep>parser.add_argument('--data_dir' type=str default='_data' help='Directory of data files.')<line_sep>parser.add_argument('--cache_dir' type=str default='_cache' help='Directory where to cache files and outputs.')<def_stmt>loss_scorer estimator x y<block_start>"""Negative log loss scoring function for scikit-learn model selection."""<line_sep>loss=log_loss(y estimator.predict_proba(x))<assert_stmt>loss<ge>0<line_sep># we want to minimize loss; since scikit-learn model selection tries to # maximize a given score, return the negative of the loss <return>-1<times>loss<block_end><def_stmt>run method x_unvec y idx_feat_dict num_feature max_num_feature num_class max_num_sample feature_selection k_idx k num_search perm_indices<block_start>"""Run a parameter search for a single k-fold partitions Arguments: method: string name of classification method; values = {'logit', 'random_forest', 'linear_svm', 'poly_svm', 'rbf_svm', 'gbdt', 'riddle'} x_unvec: [[int]] feature indices that have not been vectorized; each inner list collects the indices of features that are present (binary on) for a sample y: [int] list of class labels as integer indices idx_feat_dict: {int: string} dictionary mapping feature indices to features num_feature: int number of features present in the dataset max_num_feature: int maximum number of features to use num_class: int number of classes present feature_selection: string feature 
selection method; values = {'random', 'frequency', 'chi2'} k_idx: int index of the k-fold partition to use k: int number of partitions for k-fold cross-validation num_search: int number of searches (parameter configurations) to try perm_indices: np.ndarray, int array of indices representing a permutation of the samples with shape (num_sample, ) Returns: best_param: {string: ?} dictionary mapping parameter names to the best values found """<line_sep>print('-'<times>72)<line_sep>print('Partition k = {}'.format(k_idx))<line_sep>x_train_unvec,y_train,x_val_unvec,y_val,_,_=(emr.get_k_fold_partition(x_unvec y k_idx=k_idx k=k perm_indices=perm_indices))<if_stmt>max_num_feature<g>0# select features and re-encode <block_start>feat_encoding_dict,_=select_features(x_train_unvec y_train idx_feat_dict method=feature_selection num_feature=num_feature max_num_feature=max_num_feature)<line_sep>x_val_unvec=subset_reencode_features(x_val_unvec feat_encoding_dict)<line_sep>num_feature=max_num_feature<block_end># cap number of validation samples <if_stmt>max_num_sample<ne><none><and>len(x_val_unvec)<g>max_num_sample<block_start>x_val_unvec=x_val_unvec[0:max_num_sample]<line_sep>y_val=y_val[0:max_num_sample]<block_end>start=time.time()<if_stmt>method<eq>'riddle'<block_start>model_class=MLP<line_sep>init_args={'num_feature':num_feature 'num_class':num_class}<line_sep>param_dist={'num_hidden_layer':2 # [1, 2] 'num_hidden_node':512 # [128, 256, 512] 'activation':['prelu' 'relu'] 'dropout':tuning.Uniform(lo=0.2 hi=0.8) 'learning_rate':tuning.UniformLogSpace(10 lo=-6 hi=-1) }<line_sep>best_param=tuning.random_search(model_class init_args param_dist x_val_unvec y_val num_class=num_class k=TUNING_K num_search=num_search)<block_end><else_stmt># scikit-learn methods <block_start>x_val=vectorize_features(x_val_unvec num_feature)<if_stmt>method<eq>'logit'# logistic regression <block_start><import_from_stmt>sklearn.linear_model LogisticRegression<line_sep>estimator=LogisticRegression(multi_class='multinomial' solver='lbfgs')<line_sep>param_dist={'C':tuning.UniformLogSpace(base=10 lo=-3 hi=3)}<block_end><elif_stmt>method<eq>'random_forest'<block_start><import_from_stmt>sklearn.ensemble RandomForestClassifier<line_sep>estimator=RandomForestClassifier()<line_sep>param_dist={'max_features':['sqrt' 'log2' <none>] 'max_depth':tuning.UniformIntegerLogSpace(base=2 lo=0 hi=7) 'n_estimators':tuning.UniformIntegerLogSpace(base=2 lo=4 hi=8)}<block_end><elif_stmt>method<eq>'linear_svm'<block_start><import_from_stmt>sklearn.svm SVC<line_sep># remark: due to a bug in scikit-learn / libsvm, the sparse 'linear' # kernel is much slower than the sparse 'poly' kernel, so we use # the 'poly' kernel with degree=1 over the 'linear' kernel estimator=SVC(kernel='poly' degree=1 coef0=0. gamma=1. 
probability=<true> cache_size=1000)<line_sep>param_dist={'C':tuning.UniformLogSpace(base=10 lo=-2 hi=1)}<block_end><elif_stmt>method<eq>'poly_svm'<block_start><import_from_stmt>sklearn.svm SVC<line_sep>estimator=SVC(kernel='poly' probability=<true> cache_size=1000)<line_sep>param_dist={'C':tuning.UniformLogSpace(base=10 lo=-2 hi=1) 'degree':[2 3 4] 'gamma':tuning.UniformLogSpace(base=10 lo=-5 hi=1)}<block_end><elif_stmt>method<eq>'rbf_svm'<block_start><import_from_stmt>sklearn.svm SVC<line_sep>estimator=SVC(kernel='rbf' probability=<true> cache_size=1000)<line_sep>param_dist={'C':tuning.UniformLogSpace(base=10 lo=-2 hi=1) 'gamma':tuning.UniformLogSpace(base=10 lo=-5 hi=1)}<block_end><elif_stmt>method<eq>'gbdt'<block_start><import_from_stmt>xgboost XGBClassifier<line_sep>estimator=XGBClassifier(objective='multi:softprob')<line_sep>param_dist={'max_depth':tuning.UniformIntegerLogSpace(base=2 lo=0 hi=5) 'n_estimators':tuning.UniformIntegerLogSpace(base=2 lo=4 hi=8) 'learning_rate':tuning.UniformLogSpace(base=10 lo=-3 hi=0)}<block_end><else_stmt><block_start><raise>ValueError('unknown method: {}'.format(method))<block_end>param_search=RandomizedSearchCV(estimator param_dist refit=<false> n_iter=num_search scoring=loss_scorer)<line_sep>param_search.fit(x_val y_val)<line_sep>best_param=param_search.best_params_<block_end>print('Best parameters for {} for k_idx={}: {} found in {:.3f} s'.format(method k_idx best_param time.time()-start))<line_sep><return>best_param<block_end><def_stmt>run_kfold data_fn method='logit' prop_missing=0. max_num_feature=-1 feature_selection='random' k=10 max_num_sample=10000 num_search=30 data_dir='_data' cache_dir='_cache' force_run=<false><block_start>"""Run several parameter searches a la k-fold cross-validation. Arguments: data_fn: string data file filename method: string name of classification method; values = {'logit', 'random_forest', 'linear_svm', 'poly_svm', 'rbf_svm', 'gbdt', 'riddle'} prop_missing: float proportion of feature observations which should be randomly masked; values in [0, 1) max_num_feature: int maximum number of features to use feature_selection: string feature selection method; values = {'random', 'frequency', 'chi2'} k: int number of partitions for k-fold cross-validation max_num_sample: int maximum number of samples to use num_search: int number of searches (parameter configurations) to try for each partition data_dir: string directory where data files are located cache_dir: string directory where cached files (e.g., saved parameters) are located out_dir: string directory where outputs (e.g., results) should be saved """<if_stmt>'debug'<in>data_fn<block_start>num_search=3<block_end># check if already did param search, if so, skip param_path=get_param_path(cache_dir method data_fn prop_missing max_num_feature feature_selection)<if_stmt><not>force_run<and>os.path.isfile(param_path)<block_start>warnings.warn('Already did search for {}, skipping the search'.format(method))<line_sep><return><block_end>x_unvec,y,idx_feat_dict,idx_class_dict,_,perm_indices=(get_preprocessed_data(data_dir data_fn prop_missing=prop_missing))<line_sep>num_feature=len(idx_feat_dict)<line_sep>num_class=len(idx_class_dict)<line_sep>params={}<for_stmt>k_idx range(0 k)<block_start>params[k_idx]=run(method x_unvec y idx_feat_dict num_feature=num_feature max_num_feature=max_num_feature num_class=num_class max_num_sample=max_num_sample feature_selection=feature_selection k_idx=k_idx k=k num_search=num_search 
perm_indices=perm_indices)<block_end>recursive_mkdir(FLAGS.cache_dir)<with_stmt>open(param_path 'wb')<as>f# save <block_start>pickle.dump(params f)<block_end>print('Finished parameter search for method: {}'.format(method))<block_end><def_stmt>main <block_start>"""Main method."""<line_sep>np.random.seed(SEED)# for reproducibility, must be before Keras imports! run_kfold(data_fn=FLAGS.data_fn method=FLAGS.method prop_missing=FLAGS.prop_missing max_num_feature=FLAGS.max_num_feature feature_selection=FLAGS.feature_selection max_num_sample=FLAGS.max_num_sample num_search=FLAGS.num_search data_dir=FLAGS.data_dir cache_dir=FLAGS.cache_dir force_run=FLAGS.force_run)<block_end># if run as script, execute main <if_stmt>__name__<eq>'__main__'<block_start>FLAGS,_=parser.parse_known_args()<line_sep>main()<block_end>
"""add new netloc tracking table Revision ID: 669e9df34ea7 Revises: <KEY> Create Date: 2020-01-20 01:36:51.862767 """<line_sep># revision identifiers, used by Alembic. revision='669e9df34ea7'<line_sep>down_revision='<KEY>'<line_sep>branch_labels=<none><line_sep>depends_on=<none><import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy_utils.types TSVectorType<import_from_stmt>sqlalchemy_searchable make_searchable<import_stmt>sqlalchemy_utils<line_sep># Patch in knowledge of the citext type, so it reflects properly. <import_from_stmt>sqlalchemy.dialects.postgresql.base ischema_names<import_stmt>citext<import_stmt>queue<import_stmt>datetime<import_from_stmt>sqlalchemy.dialects.postgresql ENUM<import_from_stmt>sqlalchemy.dialects.postgresql JSON<import_from_stmt>sqlalchemy.dialects.postgresql JSONB<import_from_stmt>sqlalchemy.dialects.postgresql TSVECTOR<line_sep>ischema_names['citext']=citext.CIText<import_from_stmt>sqlalchemy.dialects postgresql<def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.create_table('seen_netloc_tracker' sa.Column('id' sa.BigInteger() nullable=<false>) sa.Column('netloc' citext.CIText() nullable=<false>) sa.Column('ignore' sa.Boolean() nullable=<true>) sa.Column('have' sa.Boolean() nullable=<true>) sa.Column('extra' postgresql.JSONB(astext_type=sa.Text()) nullable=<true>) sa.PrimaryKeyConstraint('id'))<line_sep>op.create_index(op.f('ix_seen_netloc_tracker_netloc') 'seen_netloc_tracker' ['netloc'] unique=<true>)<line_sep># ### end Alembic commands ### <block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.drop_index(op.f('ix_seen_netloc_tracker_netloc') table_name='seen_netloc_tracker')<line_sep>op.drop_table('seen_netloc_tracker')<line_sep># ### end Alembic commands ### <block_end>
# coding=utf-8 # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- <import_from_stmt>knack.help_files helps# pylint: disable=unused-import helps['functionapp devops-pipeline']=""" type: group short-summary: Azure Function specific integration with Azure DevOps. Please visit https://aka.ms/functions-azure-devops for more information. """<line_sep>helps['functionapp devops-pipeline create']=""" type: command short-summary: Create an Azure DevOps pipeline for a function app. examples: - name: create an Azure Pipeline to a function app. text: > az functionapp devops-pipeline create --functionapp-name FunctionApp - name: create an Azure Pipeline from a Github function app repository. text: > az functionapp devops-pipeline create --github-repository GithubOrganization/GithubRepository --github-pat GithubPersonalAccessToken - name: create an Azure Pipeline with specific Azure DevOps organization and project text: > az functionapp devops-pipeline create --organization-name AzureDevOpsOrganization --project-name AzureDevOpsProject """<line_sep>
<import_stmt>os<import_stmt>aiofiles<import_stmt>yaml<import_stmt>click<import_stmt>asyncio<import_from_stmt>datetime datetime<import_from_stmt>models.user create_user<import_from_stmt>models Post User<def_stmt>run_async coro<block_start>asyncio.run(coro)<block_end>@click.group()<def_stmt>cli <block_start><ellipsis><block_end><async_keyword><def_stmt>_adduser **kwargs<block_start><try_stmt><block_start>user=<await>create_user(**kwargs)<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>click.echo(str(e))<block_end><else_stmt><block_start>click.echo(f'User {user.name} created!!! ID: {user.id}')<block_end><block_end><async_keyword><def_stmt>extract_meta file_path:str<block_start>data=''<line_sep>data_exist=<false><line_sep>content=''<async_keyword><with_stmt>aiofiles.open(file_path)<as>fp<block_start><async_keyword><for_stmt>line fp<block_start><if_stmt>line.strip()<eq>'---'<and>data_exist<block_start>data_exist=<false><line_sep><continue><block_end><if_stmt>line.strip()<eq>'---'<block_start>data_exist=<true><line_sep><continue><block_end><if_stmt>data_exist<block_start>data<augadd>line<block_end><else_stmt><block_start>content<augadd>line<block_end><block_end><block_end><return>data content<block_end><async_keyword><def_stmt>add_post dct content user_id=<none><block_start>title=dct.get('title' '')<line_sep>tags=dct.get('tags' [])<line_sep>author_id=user_id<line_sep>date=dct.get('date' <none>)<if_stmt><not>title<block_start><return><block_end>post=<await>Post.async_first(title=title)<if_stmt>post<block_start><return><block_end><if_stmt>date<is><none><block_start>date=datetime.now()<block_end><await>Post.acreate(title=title content=content author_id=author_id slug=title summary='' status=Post.STATUS_ONLINE can_comment=<true> type=Post.TYPE_ARTICLE created_at=date)<line_sep>print(f'{title} save...')<block_end><async_keyword><def_stmt>_hexo_export dir uname<block_start>user=<await>User.async_first(name=uname)<line_sep>id=user.get('id' '')<if_stmt><not>id<block_start><return><block_end><for_stmt>article os.listdir(dir)<block_start><if_stmt><not>article.endswith('.md')<block_start><continue><block_end><else_stmt><block_start>file=f'{dir}/{article}'<line_sep>metdata,content=<await>extract_meta(file)<line_sep>dct=yaml.load(metdata)<if_stmt>'title'<not><in>dct<block_start>title=' '.join(file.split('-')[3:])<line_sep>title=title.replace('.md' '')<line_sep>dct.update(title=title)<block_end>asyncio.create_task(add_post(dct content user_id=id))<block_end><block_end><block_end>@cli.command()@click.option('--name' required=<true> prompt=<true>)@click.option('--email' required=<false> default=<none> prompt=<true>)@click.option('--password' required=<true> prompt=<true> hide_input=<true> confirmation_prompt=<true>)<def_stmt>adduser name email password<block_start>run_async(_adduser(name=name password=password email=email))<block_end>@cli.command()@click.option('--dir' required=<true>)@click.option('--uname' required=<true>)<def_stmt>hexo_export dir uname<block_start>run_async(_hexo_export(dir=dir uname=uname))<line_sep>click.echo('Export Hexo Finished!')<block_end><if_stmt>__name__<eq>'__main__'<block_start>cli()<block_end>
""" 请完成一个函数,输入一个二叉树,该函数输出它的镜像。 https://leetcode.com/problems/invert-binary-tree/ """<line_sep># Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None <class_stmt>_Solution<block_start><def_stmt>invertTree self root<block_start>""" :type root: TreeNode :rtype: TreeNode """<if_stmt>root<block_start>root.left,root.right=root.right root.left<line_sep>self.invertTree(root.left)<line_sep>self.invertTree(root.right)<block_end><return>root<block_end><block_end># Definition for a binary tree node. <class_stmt>Node<block_start><def_stmt>__init__ self x left=<none> right=<none><block_start>self.val=x<line_sep>self.left=left<line_sep>self.right=right<block_end><def_stmt>__str__ self<block_start><return>'{}'.format(self.val)<block_end>__repr__=__str__<block_end><import_from_stmt>collections deque<class_stmt>Stack<block_start><def_stmt>__init__ self<block_start>self.items=deque()<block_end><def_stmt>push self val<block_start><return>self.items.append(val)<block_end><def_stmt>pop self<block_start><return>self.items.pop()<block_end><def_stmt>empty self<block_start><return>len(self.items)<eq>0<block_end><block_end><class_stmt>Solution# https://leetcode.com/problems/symmetric-tree/ <block_start><def_stmt>isSymmetric self root<block_start>""" use stack """<if_stmt><not>root<block_start><return><true><block_end>s=Stack()<line_sep>s.push((root.left root.right))# push a tuple <while_stmt><not>s.empty()<block_start>top_vals=s.pop()<line_sep>left_node,right_node=top_vals[0] top_vals[1]<if_stmt>left_node<and>right_node<block_start><if_stmt>left_node.val<eq>right_node.val<block_start>s.push((left_node.left right_node.right))<line_sep>s.push((left_node.right right_node.left))<block_end><else_stmt><block_start><return><false><block_end><block_end><else_stmt><block_start><if_stmt>left_node<ne>right_node<block_start><return><false><block_end><block_end><block_end><return><true><block_end><def_stmt>isSymmetric_recursive self root<block_start>""" 判断是否是镜像,使用递归的方式 :type root: TreeNode :rtype: bool """<def_stmt>_check left right<block_start><if_stmt>left<and>right<block_start><if_stmt>left.val<eq>right.val<block_start>flag1=_check(left.left right.right)<line_sep>flag2=_check(left.right right.left)<line_sep><return>flag1<and>flag2<block_end><else_stmt><block_start><return><false><block_end><block_end><else_stmt><block_start><return>left<eq>right<block_end><block_end># 这种情况下 left 和 right 要么一个为 None,或者都是 None <if_stmt>root<block_start><return>_check(root.left root.right)<block_end><return><true><block_end><def_stmt>isSymmetric_layer self root<block_start>""" 判断是否是镜像,使用层序遍历 :type root: TreeNode :rtype: bool """<if_stmt><not>root<block_start><return><true><block_end>curnodes=[root]<line_sep>next_nodes=[]<while_stmt>curnodes<or>next_nodes<block_start>lefts=[]<line_sep>rights=[]<for_stmt>node curnodes<block_start>lefts.append(node.left.val<if>node.left<else><none>)# NOTE: append val not node rights.append(node.right.val<if>node.right<else><none>)<if_stmt>node.left<block_start>next_nodes.append(node.left)<block_end><if_stmt>node.right<block_start>next_nodes.append(node.right)<block_end><block_end><if_stmt>lefts<ne>rights[::-1]<block_start><return><false><block_end>curnodes=next_nodes<line_sep>next_nodes=[]<block_end><return><true><block_end><block_end><def_stmt>test <block_start>t=Node(1 Node(2 Node(3) Node(4)) Node(2 Node(4) Node(3)))<line_sep>s=Solution()<assert_stmt>s.isSymmetric(t)<is><true><block_end>test()<line_sep>
""" Terminal client for telegram """<line_sep>__version__="0.17.0"<line_sep>
<import_from_stmt>typing Dict<import_from_stmt>botocore.paginate Paginator<class_stmt>GetEntitlements(Paginator)<block_start><def_stmt>paginate self ProductCode:str Filter:Dict=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`MarketplaceEntitlementService.Client.get_entitlements`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/entitlement.marketplace-2017-01-11/GetEntitlements>`_ **Request Syntax** :: response_iterator = paginator.paginate( ProductCode='string', Filter={ 'string': [ 'string', ] }, PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Entitlements': [ { 'ProductCode': 'string', 'Dimension': 'string', 'CustomerIdentifier': 'string', 'Value': { 'IntegerValue': 123, 'DoubleValue': 123.0, 'BooleanValue': True|False, 'StringValue': 'string' }, 'ExpirationDate': datetime(2015, 1, 1) }, ], } **Response Structure** - *(dict) --* The GetEntitlementsRequest contains results from the GetEntitlements operation. - **Entitlements** *(list) --* The set of entitlements found through the GetEntitlements operation. If the result contains an empty set of entitlements, NextToken might still be present and should be used. - *(dict) --* An entitlement represents capacity in a product owned by the customer. For example, a customer might own some number of users or seats in an SaaS application or some amount of data capacity in a multi-tenant database. - **ProductCode** *(string) --* The product code for which the given entitlement applies. Product codes are provided by AWS Marketplace when the product listing is created. - **Dimension** *(string) --* The dimension for which the given entitlement applies. Dimensions represent categories of capacity in a product and are specified when the product is listed in AWS Marketplace. - **CustomerIdentifier** *(string) --* The customer identifier is a handle to each unique customer in an application. Customer identifiers are obtained through the ResolveCustomer operation in AWS Marketplace Metering Service. - **Value** *(dict) --* The EntitlementValue represents the amount of capacity that the customer is entitled to for the product. - **IntegerValue** *(integer) --* The IntegerValue field will be populated with an integer value when the entitlement is an integer type. Otherwise, the field will not be set. - **DoubleValue** *(float) --* The DoubleValue field will be populated with a double value when the entitlement is a double type. Otherwise, the field will not be set. - **BooleanValue** *(boolean) --* The BooleanValue field will be populated with a boolean value when the entitlement is a boolean type. Otherwise, the field will not be set. - **StringValue** *(string) --* The StringValue field will be populated with a string value when the entitlement is a string type. Otherwise, the field will not be set. - **ExpirationDate** *(datetime) --* The expiration date represents the minimum date through which this entitlement is expected to remain valid. For contractual products listed on AWS Marketplace, the expiration date is the date at which the customer will renew or cancel their contract. Customers who are opting to renew their contract will still have entitlements with an expiration date. :type ProductCode: string :param ProductCode: **[REQUIRED]** Product code is used to uniquely identify a product in AWS Marketplace. 
The product code will be provided by AWS Marketplace when the product listing is created. :type Filter: dict :param Filter: Filter is used to return entitlements for a specific customer or for a specific dimension. Filters are described as keys mapped to lists of values. Filtered requests are *unioned* for each value in the value list, and then *intersected* for each filter key. - *(string) --* - *(list) --* - *(string) --* :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items, then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end>
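# --- Illustrative sketch, not part of the original stub ---
# The stub above only documents the call shape; in practice the same operation
# is reached through a real boto3 client. The product code and region below
# are made-up placeholders.
import boto3

client = boto3.client("marketplace-entitlement", region_name="us-east-1")
paginator = client.get_paginator("get_entitlements")
for page in paginator.paginate(ProductCode="prod-1234567890"):
    # Each page mirrors the response structure documented above.
    for entitlement in page.get("Entitlements", []):
        print(entitlement["CustomerIdentifier"], entitlement["Value"])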
<import_stmt>logging<import_stmt>django<import_from_stmt>contact_form.forms ContactForm<import_from_stmt>django forms<import_from_stmt>django.conf settings<import_from_stmt>django.contrib.sites.models Site<import_from_stmt>django.utils.encoding force_bytes<import_from_stmt>pykismet3 Akismet AkismetServerError<line_sep>logger=logging.getLogger(__name__)<class_stmt>BaseContactForm(ContactForm)<block_start>message_subject=forms.CharField(max_length=100 widget=forms.TextInput(attrs={'class':'required' 'placeholder':'Message subject'}) label='Message subject' )<line_sep>email=forms.EmailField(widget=forms.TextInput(attrs={'class':'required' 'placeholder':'E-mail'}))<line_sep>name=forms.CharField(widget=forms.TextInput(attrs={'class':'required' 'placeholder':'Name'}))<line_sep>body=forms.CharField(widget=forms.Textarea(attrs={'class':'required' 'placeholder':'Your message'}))<def_stmt>subject self# Strip all linebreaks from the subject string. <block_start>subject=''.join(self.cleaned_data["message_subject"].splitlines())<line_sep><return>"[Contact form] "+subject<block_end><def_stmt>message self<block_start><return>"From: {name} <{email}>\n\n{body}".format(**self.cleaned_data)<block_end><def_stmt>clean_body self<block_start>""" Check spam against Akismet. Backported from django-contact-form pre-1.0; 1.0 dropped built-in Akismet support. """<if_stmt>'body'<in>self.cleaned_data<and>getattr(settings 'AKISMET_API_KEY' <none>)<block_start><try_stmt><block_start>akismet_api=Akismet(api_key=settings.AKISMET_API_KEY blog_url='http://%s/'%Site.objects.get_current().domain user_agent='Django {}.{}.{}'.format(*django.VERSION))<line_sep>akismet_data={'user_ip':self.request.META.get('REMOTE_ADDR' '') 'user_agent':self.request.META.get('HTTP_USER_AGENT' '') 'referrer':self.request.META.get('HTTP_REFERER' '') 'comment_content':force_bytes(self.cleaned_data['body']) 'comment_author':self.cleaned_data.get('name' '') }<if_stmt>getattr(settings 'AKISMET_TESTING' <none>)# Adding the test argument to the request tells Akismet to ignore it, # so that test runs don't affect the heuristics <block_start>akismet_data['test']=1<block_end><if_stmt>akismet_api.check(akismet_data)<block_start><raise>forms.ValidationError("Akismet thinks this message is spam")<block_end><block_end><except_stmt>AkismetServerError<block_start>logger.error('Akismet server error')<block_end><block_end><return>self.cleaned_data['body']<block_end><block_end><class_stmt>FoundationContactForm(BaseContactForm)<block_start>recipient_list=["<EMAIL>"]<block_end>
<import_from_stmt>tests.conftest JiraTestCase<class_stmt>CommentTests(JiraTestCase)<block_start><def_stmt>setUp self<block_start>JiraTestCase.setUp(self)<line_sep>self.issue_1=self.test_manager.project_b_issue1<line_sep>self.issue_2=self.test_manager.project_b_issue2<line_sep>self.issue_3=self.test_manager.project_b_issue3<block_end><def_stmt>test_comments self<block_start><for_stmt>issue [self.issue_1 self.jira.issue(self.issue_2)]<block_start>self.jira.issue(issue)<line_sep>comment1=self.jira.add_comment(issue "First comment")<line_sep>comment2=self.jira.add_comment(issue "Second comment")<line_sep>comments=self.jira.comments(issue)<assert_stmt>comments[0].body<eq>"First comment"<assert_stmt>comments[1].body<eq>"Second comment"<line_sep>comment1.delete()<line_sep>comment2.delete()<line_sep>comments=self.jira.comments(issue)<assert_stmt>len(comments)<eq>0<block_end><block_end><def_stmt>test_expanded_comments self<block_start>comment1=self.jira.add_comment(self.issue_1 "First comment")<line_sep>comment2=self.jira.add_comment(self.issue_1 "Second comment")<line_sep>comments=self.jira.comments(self.issue_1 expand="renderedBody")<line_sep>self.assertTrue(hasattr(comments[0] "renderedBody"))<line_sep>ret_comment1=self.jira.comment(self.issue_1 comment1.id expand="renderedBody")<line_sep>ret_comment2=self.jira.comment(self.issue_1 comment2.id)<line_sep>comment1.delete()<line_sep>comment2.delete()<line_sep>self.assertTrue(hasattr(ret_comment1 "renderedBody"))<line_sep>self.assertFalse(hasattr(ret_comment2 "renderedBody"))<line_sep>comments=self.jira.comments(self.issue_1)<assert_stmt>len(comments)<eq>0<block_end><def_stmt>test_add_comment self<block_start>comment=self.jira.add_comment(self.issue_3 "a test comment!" visibility={"type":"role" "value":"Administrators"} )<line_sep>self.assertEqual(comment.body "a test comment!")<line_sep>self.assertEqual(comment.visibility.type "role")<line_sep>self.assertEqual(comment.visibility.value "Administrators")<line_sep>comment.delete()<block_end><def_stmt>test_add_comment_with_issue_obj self<block_start>issue=self.jira.issue(self.issue_3)<line_sep>comment=self.jira.add_comment(issue "a new test comment!" visibility={"type":"role" "value":"Administrators"} )<line_sep>self.assertEqual(comment.body "a new test comment!")<line_sep>self.assertEqual(comment.visibility.type "role")<line_sep>self.assertEqual(comment.visibility.value "Administrators")<line_sep>comment.delete()<block_end><def_stmt>test_update_comment self<block_start>comment=self.jira.add_comment(self.issue_3 "updating soon!")<line_sep>comment.update(body="updated!")<line_sep>self.assertEqual(comment.body "updated!")<line_sep># self.assertEqual(comment.visibility.type, 'role') # self.assertEqual(comment.visibility.value, 'Administrators') comment.delete()<block_end><block_end>
<import_from_stmt>parameterized parameterized<import_from_stmt>aim Repo<import_from_stmt>performance_tests.base StorageTestBase<import_from_stmt>performance_tests.utils get_baseline write_baseline<import_from_stmt>performance_tests.storage.utils random_access_metric_values<class_stmt>TestRandomAccess(StorageTestBase)<block_start>@parameterized.expand({0:50 1:250 2:500}.items())<def_stmt>test_random_access self test_key density<block_start>test_name=f'test_random_access_{test_key}'<line_sep>repo=Repo.default_repo()<line_sep>query='metric.name == "metric 0"'<line_sep>execution_time=random_access_metric_values(repo query density)<line_sep>baseline=get_baseline(test_name)<if_stmt>baseline<block_start>self.assertInRange(execution_time baseline)<block_end><else_stmt><block_start>write_baseline(test_name execution_time)<block_end><block_end><block_end>
# # Copyright (c) 2013-2018 Quarkslab. # This file is part of IRMA project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the top-level directory # of this distribution and at: # # http://www.apache.org/licenses/LICENSE-2.0 # # No part of the project, including this file, may be copied, # modified, propagated, or distributed except according to the # terms contained in the LICENSE file. <import_stmt>logging<import_stmt>unittest<import_from_stmt>unittest.mock Mock patch<import_from_stmt>irma.common.utils.utils UUID bytes_to_utf8 save_to_file<import_from_stmt>irma.common.base.utils IrmaFrontendReturn IrmaTaskReturn IrmaReturnCode IrmaScanStatus IrmaValueError common_celery_options IrmaScanRequest IrmaProbeType<line_sep># ================= # Logging options # ================= <def_stmt>enable_logging level=logging.INFO handler=<none> formatter=<none><block_start><global>log<line_sep>log=logging.getLogger()<if_stmt>formatter<is><none><block_start>formatter=logging.Formatter("%(asctime)s [%(name)s] "+"%(levelname)s: %(message)s")<block_end><if_stmt>handler<is><none><block_start>handler=logging.StreamHandler()<block_end>handler.setFormatter(formatter)<line_sep>log.addHandler(handler)<line_sep>log.setLevel(level)<block_end># ============ # Test Cases # ============ <class_stmt>TestCommonUtils(unittest.TestCase)<block_start><def_stmt>test_uuid self<block_start>uuid=UUID.generate()<line_sep>self.assertTrue(UUID.validate(uuid))<line_sep>self.assertEqual(len(uuid) 36)<line_sep>self.assertEqual(uuid.count("-") 4)<block_end><def_stmt>test_uuid_generate self<block_start>uuid=UUID.normalize("01234567-abcd-ef01-2345-deadbeaff00d")<line_sep>self.assertTrue(UUID.validate(uuid))<line_sep>self.assertEqual(uuid "01234567-abcd-ef01-2345-deadbeaff00d")<block_end><def_stmt>test_uuid_validate self<block_start>self.assertFalse(UUID.validate("not a uuid"))<block_end><def_stmt>test_bytes_to_utf8_0 self<block_start>result=bytes_to_utf8(b"something")<line_sep>self.assertEqual(result "something")<block_end><def_stmt>test_bytes_to_utf8_1 self<block_start>result=bytes_to_utf8("something")<line_sep>self.assertIs(result "something")<block_end><def_stmt>test_bytes_to_utf8_2 self<block_start>result=bytes_to_utf8(["foo" b"bar" (b"baz" )])<line_sep>self.assertEqual(result ["foo" "bar" ("baz" )])<block_end><def_stmt>test_bytes_to_utf8_3 self<block_start>result=bytes_to_utf8({"foo":b"bar" b"baz":<none>})<line_sep>self.assertDictEqual(result {"foo":"bar" "baz":<none>})<block_end>@patch("builtins.open")<def_stmt>test_save_to_file0 self m_open<block_start>Mockfile=type("Mockfile" (Mock ) {"seek":<lambda>self pos:setattr(self "pos" pos)})<line_sep>fileobj=Mockfile()<line_sep>fileobj.read.return_value=""<line_sep>fileobj.pos=0<line_sep>dstobj=Mockfile()<line_sep>m_open.return_value.__enter__.return_value=dstobj<line_sep>size=save_to_file(fileobj "/foo")<line_sep>m_open.assert_called_once_with("/foo" "wb")<line_sep>self.assertEqual(fileobj.pos 0)<line_sep>self.assertEqual(size 0)<line_sep>dstobj.write.assert_not_called()<block_end>@patch("builtins.open")<def_stmt>test_save_to_file1 self m_open<block_start>Mockfile=type("Mockfile" (Mock ) {"seek":<lambda>self pos:setattr(self "pos" pos) "write":<lambda>self buf:setattr(self "written" self.written+buf)})<line_sep>fileobj=Mockfile()<line_sep>fileobj.read.side_effect=["foo" "bar" "baz" 
""]<line_sep>fileobj.pos=0<line_sep>dstobj=Mockfile()<line_sep>dstobj.written=""<line_sep>m_open.return_value.__enter__.return_value=dstobj<line_sep>size=save_to_file(fileobj "/foo")<line_sep>m_open.assert_called_once_with("/foo" "wb")<line_sep>self.assertEqual(dstobj.written "foobarbaz")<line_sep>self.assertEqual(fileobj.pos 0)<line_sep>self.assertEqual(size 9)<block_end><def_stmt>test_irma_taskreturn_success self<block_start>ret=IrmaTaskReturn.success("success")<line_sep>self.assertEqual(ret[0] IrmaReturnCode.success)<line_sep>self.assertEqual(ret[1] "success")<line_sep>self.assertEqual(type(ret) tuple)<line_sep>self.assertEqual(type(ret[0]) int)<line_sep>self.assertEqual(type(ret[1]) str)<block_end><def_stmt>test_irma_taskreturn_warning self<block_start>ret=IrmaTaskReturn.warning("warning")<line_sep>self.assertEqual(ret[0] IrmaReturnCode.warning)<line_sep>self.assertEqual(ret[1] "warning")<line_sep>self.assertEqual(type(ret) tuple)<line_sep>self.assertEqual(type(ret[0]) int)<line_sep>self.assertEqual(type(ret[1]) str)<block_end><def_stmt>test_irma_taskreturn_error self<block_start>ret=IrmaTaskReturn.error("error")<line_sep>self.assertEqual(ret[0] IrmaReturnCode.error)<line_sep>self.assertEqual(ret[1] "error")<line_sep>self.assertEqual(type(ret) tuple)<line_sep>self.assertEqual(type(ret[0]) int)<line_sep>self.assertEqual(type(ret[1]) str)<block_end><def_stmt>test_irma_frontendreturn_success self<block_start>f_success=IrmaFrontendReturn.success<line_sep>ret=f_success(optional={'key':'value'})<line_sep>self.assertEqual(ret['code'] IrmaReturnCode.success)<line_sep>self.assertEqual(ret['msg'] "success")<line_sep>self.assertEqual(type(ret) dict)<line_sep>self.assertEqual(type(ret['code']) int)<line_sep>self.assertEqual(type(ret['msg']) str)<line_sep>self.assertEqual(type(ret['optional']) dict)<line_sep>self.assertEqual(ret['optional']['key'] 'value')<block_end><def_stmt>test_irma_frontendreturn_warning self<block_start>f_warning=IrmaFrontendReturn.warning<line_sep>ret=f_warning("warning" optional={'key':'value'})<line_sep>self.assertEqual(ret['code'] IrmaReturnCode.warning)<line_sep>self.assertEqual(ret['msg'] "warning")<line_sep>self.assertEqual(type(ret) dict)<line_sep>self.assertEqual(type(ret['code']) int)<line_sep>self.assertEqual(type(ret['msg']) str)<line_sep>self.assertEqual(type(ret['optional']) dict)<line_sep>self.assertEqual(ret['optional']['key'] 'value')<block_end><def_stmt>test_irma_frontendreturn_error self<block_start>f_error=IrmaFrontendReturn.error<line_sep>ret=f_error("error" optional={'key':'value'})<line_sep>self.assertEqual(ret['code'] IrmaReturnCode.error)<line_sep>self.assertEqual(ret['msg'] "error")<line_sep>self.assertEqual(type(ret) dict)<line_sep>self.assertEqual(type(ret['code']) int)<line_sep>self.assertEqual(type(ret['msg']) str)<line_sep>self.assertEqual(type(ret['optional']) dict)<line_sep>self.assertEqual(ret['optional']['key'] 'value')<block_end><def_stmt>test_irmascanstatus_is_error0 self<block_start>self.assertFalse(IrmaScanStatus.is_error(IrmaScanStatus.finished))<block_end><def_stmt>test_irmascanstatus_is_error1 self<block_start>self.assertTrue(IrmaScanStatus.is_error(IrmaScanStatus.error))<block_end><def_stmt>test_irmascanstatus_is_error2 self<block_start>self.assertTrue(IrmaScanStatus.is_error(IrmaScanStatus.error_probe_na))<block_end><def_stmt>test_irmascanstatus_filter_status0 self<block_start>mini,maxi=IrmaScanStatus.launched IrmaScanStatus.flushed<line_sep>self.assertIs(IrmaScanStatus.filter_status(IrmaScanStatus.processed mini maxi) 
<none>)<block_end><def_stmt>test_irmascanstatus_filter_status1 self<block_start>mini,maxi=IrmaScanStatus.launched IrmaScanStatus.flushed<with_stmt>self.assertRaises(IrmaValueError)<block_start>IrmaScanStatus.filter_status(IrmaScanStatus.ready mini maxi)<block_end><block_end><def_stmt>test_irmascanstatus_filter_status2 self<block_start>mini,maxi=IrmaScanStatus.launched IrmaScanStatus.flushed<with_stmt>self.assertRaises(IrmaValueError)<block_start>IrmaScanStatus.filter_status(IrmaScanStatus.cancelled mini maxi)<block_end><block_end><def_stmt>test_irmascanstatus_filter_status3 self<block_start>mini,maxi=IrmaScanStatus.launched IrmaScanStatus.flushed<with_stmt>self.assertRaises(IrmaValueError)<block_start>IrmaScanStatus.filter_status(25 mini maxi)<block_end><block_end><def_stmt>test_irmascanstatus_code_ot_label0 self<block_start>self.assertEqual(IrmaScanStatus.code_to_label(IrmaScanStatus.finished) "finished")<block_end><def_stmt>test_irmascanstatus_code_ot_label1 self<block_start>self.assertEqual(IrmaScanStatus.code_to_label(25) "Unknown status code")<block_end><def_stmt>test_irmaprobetype_normalize0 self<block_start>self.assertEqual(IrmaProbeType.normalize("external") IrmaProbeType.external)<block_end><def_stmt>test_irmaprobetype_normalize1 self<block_start>self.assertEqual(IrmaProbeType.normalize("foo") IrmaProbeType.unknown)<block_end>@patch("irma.common.base.utils.UUID.generate")<def_stmt>test_common_celery_options0 self m_generate<block_start>m_generate.return_value="a-random-uuid"<line_sep>result=common_celery_options("foo" "bar" 0 50 100)<line_sep>self.assertEqual(result ["--app=foo" "--loglevel=info" "--without-gossip" "--without-mingle" "--without-heartbeat" "--soft-time-limit=50" "--time-limit=100" "--hostname=bar-a-random-uuid"])<block_end>@patch("irma.common.base.utils.UUID.generate")<def_stmt>test_common_celery_options1 self m_generate<block_start>m_generate.return_value="a-random-uuid"<line_sep>result=common_celery_options("foo" "bar" 3 50 100)<line_sep>self.assertEqual(result ["--app=foo" "--loglevel=info" "--without-gossip" "--without-mingle" "--without-heartbeat" "--concurrency=3" "--soft-time-limit=50" "--time-limit=100" "--hostname=bar-a-random-uuid"])<block_end><block_end><class_stmt>TestIrmaScanRequest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.isr=IrmaScanRequest()<block_end><def_stmt>test_init self<block_start>isr=IrmaScanRequest({"foo":Mock() "bar":Mock()})<line_sep>self.assertEqual(isr.nb_files 2)<block_end><def_stmt>test_add_file self<block_start>self.isr.add_file("foo" "probelist" "mimetype")<line_sep>self.assertDictEqual(self.isr.request["foo"] {"probe_list":"probelist" "mimetype":"mimetype"})<line_sep>self.assertEqual(self.isr.nb_files 1)<block_end><def_stmt>test_del_file0 self<block_start>self.isr.add_file("foo" "probelist" "mimetype")<line_sep>self.isr.del_file("foo")<line_sep>self.assertNotIn("foo" self.isr.request)<line_sep>self.assertEqual(self.isr.nb_files 0)<block_end><def_stmt>test_del_file1 self<block_start>self.isr.del_file("foo")<line_sep>self.assertNotIn("foo" self.isr.request)<line_sep>self.assertEqual(self.isr.nb_files 0)<block_end><def_stmt>test_get_probelist self<block_start>self.isr.add_file("foo" "bar" "mimetype")<line_sep>result=self.isr.get_probelist("foo")<line_sep>self.assertEqual(result "bar")<line_sep>self.assertEqual(self.isr.nb_files 1)<block_end><def_stmt>test_set_probelist self<block_start>self.isr.add_file("foo" "bar" "mimetype")<line_sep>self.isr.set_probelist("foo" 
"baz")<line_sep>self.assertEqual(self.isr.get_probelist("foo") "baz")<line_sep>self.assertEqual(self.isr.nb_files 1)<block_end><def_stmt>test_get_mimetype self<block_start>self.isr.add_file("foo" "probelist" "bar")<line_sep>result=self.isr.get_mimetype("foo")<line_sep>self.assertEqual(result "bar")<line_sep>self.assertEqual(self.isr.nb_files 1)<block_end><def_stmt>test_to_dict self<block_start>self.assertIs(self.isr.request self.isr.to_dict())<block_end><def_stmt>test_files self<block_start>self.assertEqual(self.isr.request.keys() self.isr.files())<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>enable_logging()<line_sep>unittest.main()<block_end>
<class_stmt>BeaxyIOError(IOError)<block_start><def_stmt>__init__ self msg response result *args **kwargs<block_start>self.response=response<line_sep>self.result=result<line_sep>super(BeaxyIOError self).__init__(msg *args **kwargs)<block_end><block_end>
# -*- coding: utf-8 -*- # Generated by Django 1.9.13 on 2018-01-15 15:43 <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations<def_stmt>populate_m2m apps schema_editor<block_start>ScriptParser=apps.get_model('wooey' 'ScriptParser')<line_sep>ScriptParameterGroup=apps.get_model('wooey' 'ScriptParameterGroup')<for_stmt>obj ScriptParser.objects.all()<block_start>obj.new_script_version.add(obj.script_version)<block_end><for_stmt>obj ScriptParameterGroup.objects.all()<block_start>obj.new_script_version.add(obj.script_version)<block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('wooey' '0029_add-m2m-sv') ]<line_sep>operations=[migrations.RunPython(populate_m2m)]<block_end>
<import_from_stmt>unittest TestCase<import_from_stmt>scrapy.downloadermiddlewares.stats DownloaderStats<import_from_stmt>scrapy.http Request Response<import_from_stmt>scrapy.spiders Spider<import_from_stmt>scrapy.utils.test get_crawler<class_stmt>MyException(Exception)<block_start><pass><block_end><class_stmt>TestDownloaderStats(TestCase)<block_start><def_stmt>setUp self<block_start>self.crawler=get_crawler(Spider)<line_sep>self.spider=self.crawler._create_spider('scrapytest.org')<line_sep>self.mw=DownloaderStats(self.crawler.stats)<line_sep>self.crawler.stats.open_spider(self.spider)<line_sep>self.req=Request('http://scrapytest.org')<line_sep>self.res=Response('scrapytest.org' status=400)<block_end><def_stmt>assertStatsEqual self key value<block_start>self.assertEqual(self.crawler.stats.get_value(key spider=self.spider) value str(self.crawler.stats.get_stats(self.spider)))<block_end><def_stmt>test_process_request self<block_start>self.mw.process_request(self.req self.spider)<line_sep>self.assertStatsEqual('downloader/request_count' 1)<block_end><def_stmt>test_process_response self<block_start>self.mw.process_response(self.req self.res self.spider)<line_sep>self.assertStatsEqual('downloader/response_count' 1)<block_end><def_stmt>test_process_exception self<block_start>self.mw.process_exception(self.req MyException() self.spider)<line_sep>self.assertStatsEqual('downloader/exception_count' 1)<line_sep>self.assertStatsEqual('downloader/exception_type_count/tests.test_downloadermiddleware_stats.MyException' 1)<block_end><def_stmt>tearDown self<block_start>self.crawler.stats.close_spider(self.spider '')<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>pytest<import_from_stmt>verta.code Git Notebook<line_sep>pytestmark=pytest.mark.not_oss# skip if run in oss setup. Applied to entire module <class_stmt>TestLogCode<block_start><def_stmt>test_log_code self model_version<block_start>key1,key2,key3="version1" "version2" "version3"<line_sep>version1=Git(repo_url="<EMAIL>:VertaAI/models.git" commit_hash="52f3d22" autocapture=<false> )<line_sep>version2=Git(repo_url="<EMAIL>:VertaAI/data-processing.git" commit_hash="26f9787" autocapture=<false> )<line_sep>version3=Notebook("conftest.py" # not a notebook, but fine for testing _autocapture=<false> )<line_sep>model_version.log_code_version(key1 version1)<line_sep>model_version.log_code_versions({key2:version2 key3:version3 } )<assert_stmt>model_version.get_code_version(key1)<eq>version1<assert_stmt>model_version.get_code_version(key2)<eq>version2<assert_stmt>model_version.get_code_version(key3)<eq>version3<assert_stmt>model_version.get_code_versions()<eq>{key1:version1 key2:version2 key3:version3 }<block_end><block_end>
# Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. <import_from_stmt>unittest TestCase<import_from_stmt>ipywidgets.widgets.docutils doc_subst<class_stmt>TestDocSubst(TestCase)<block_start><def_stmt>test_substitution self<block_start>snippets={'key':'62'}<line_sep>@doc_subst(snippets)<def_stmt>f <block_start>""" Docstring with value {key} """<block_end><assert_stmt>f.__doc__<eq>" Docstring with value 62 "<block_end><def_stmt>test_unused_keys self<block_start>snippets={'key':'62' 'other-key':'unused'}<line_sep>@doc_subst(snippets)<def_stmt>f <block_start>""" Docstring with value {key} """<block_end><assert_stmt>f.__doc__<eq>" Docstring with value 62 "<block_end><block_end>
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for scann_utils.py."""<import_stmt>os<import_from_stmt>language.orqa.utils scann_utils<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<class_stmt>ScannUtilsTest(tf.test.TestCase)<block_start><def_stmt>test_scann_searcher self<block_start>temp_dir=self.create_tempdir().full_path<line_sep>checkpoint_path=os.path.join(temp_dir "dummy_db.ckpt")<line_sep>dummy_db=np.random.uniform(size=[1024 32]).astype(np.float32)<line_sep>scann_utils.write_array_to_checkpoint("dummy_db" dummy_db checkpoint_path)<line_sep>dummy_queries=np.random.uniform(size=[4 32]).astype(np.float32)<line_sep>_,searcher=scann_utils.load_scann_searcher(var_name="dummy_db" checkpoint_path=checkpoint_path num_neighbors=10)<line_sep>distance,index=searcher.search_batched(dummy_queries)<line_sep>self.assertAllEqual(distance.numpy().shape [4 10])<line_sep>self.assertAllEqual(index.numpy().shape [4 10])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
<import_stmt>numpy<as>np<def_stmt>cartpole_analytical_derivatives model data x u=<none><block_start><if_stmt>u<is><none><block_start>u=model.unone<block_end># Getting the state and control variables y,th,ydot,thdot=x[0].item() x[1].item() x[2].item() x[3].item()<line_sep>f=u[0].item()<line_sep># Short names for the system parameters m1,m2,l,g=model.m1 model.m2 model.l model.g<line_sep>s,c=np.sin(th) np.cos(th)<line_sep>m=m1+m2<line_sep>mu=m1+m2<times>s<power>2<line_sep>w=model.costWeights<line_sep># derivative of xddot by x, theta, xdot, thetadot # derivative of thddot by x, theta, xdot, thetadot data.Fx[: :]=np.array([[0.0 (m2<times>g<times>c<times>c-m2<times>g<times>s<times>s-m2<times>l<times>c<times>thdot)/mu 0.0 -m2<times>l<times>s/mu] [0.0 ((-s<times>f/l)+(m<times>g<times>c/l)-(m2<times>c<times>c<times>thdot<power>2)+(m2<times>s<times>s<times>thdot<power>2))/mu 0.0 -2<times>m2<times>c<times>s<times>thdot]])<line_sep># derivative of xddot and thddot by f data.Fu[:]=np.array([1/mu c/(l<times>mu)])<line_sep># first derivative of data.cost by x, theta, xdot, thetadot data.Lx[:]=np.array([y<times>w[2]<power>2 s<times>((w[0]<power>2-w[1]<power>2)<times>c+w[1]<power>2) ydot<times>w[3]<power>2 thdot<times>w[4]<power>2])<line_sep># first derivative of data.cost by f data.Lu[:]=np.array([f<times>w[5]<power>2])<line_sep># second derivative of data.cost by x, theta, xdot, thetadot data.Lxx[:]=np.array([w[2]<power>2 w[0]<power>2<times>(c<power>2-s<power>2)+w[1]<power>2<times>(s<power>2-c<power>2+c) w[3]<power>2 w[4]<power>2])<line_sep># second derivative of data.cost by f data.Luu[:]=np.array([w[5]<power>2])<block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>nsw74psid_a path<block_start>"""A Subset of the nsw74psid1 Data Set The `nsw74psidA` data frame has 252 rows and 10 columns. See `nsw74psid1` for more information. This data frame contains the following columns: trt a numeric vector age a numeric vector educ a numeric vector black a numeric vector hisp a numeric vector marr a numeric vector nodeg a numeric vector re74 a numeric vector re75 a numeric vector re78 a numeric vector Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `nsw74psid_a.csv`. Returns: Tuple of np.ndarray `x_train` with 252 rows and 10 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='nsw74psid_a.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/DAAG/nsw74psidA.csv'<line_sep>maybe_download_and_extract(path url save_file_name='nsw74psid_a.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
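# --- Illustrative usage sketch, not part of the original module ---
# Per the docstring above: the CSV is downloaded into the given directory on
# first use and read from disk afterwards. The path is arbitrary.
x_train, metadata = nsw74psid_a('~/data')
print(x_train.shape)        # (252, 10)
print(metadata['columns'])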