from ..expr import BVS, BVV
from ..memory.sym_file import SymFile
from ..os_models.os_file import OsFileHandler


def test_1():
    # read unconstrained
    f = SymFile("a")
    res = f.read(1)
    assert len(res) == 1
    assert isinstance(res[0], BVS)
    assert res[0].name == "unconstrained_a_0"


def test_2():
    # read concrete
    f = SymFile("a")
    f.write([BVV(0xff, 8)])
    f.seek(0)
    res = f.read(1)
    assert len(res) == 1
    assert isinstance(res[0], BVV)
    assert res[0].value == 255


def test_3():
    os = OsFileHandler()
    fd = os.open("stdin", "r--")
    assert fd == 0


def test_4():
    os = OsFileHandler()
    fd = os.open("stdin", "r--")
    assert os.is_open(fd)
    os.close(fd)
    assert not os.is_open(fd)


def test_5():
    os = OsFileHandler()
    fd1 = os.open("A", "-w-")
    fd2 = os.open("A", "r--")
    os.write(fd1, [BVV(0xff, 8)])
    res = os.read(fd2, 1)
    assert len(res) == 1
    assert isinstance(res[0], BVV)
    assert res[0].value == 255


def test_6():
    os1 = OsFileHandler()
    os2 = OsFileHandler()
    fd1 = os1.open("A", "-w-")
    os1.write(fd1, [BVV(0xff, 8)])
    fd2 = os1.open("A", "r--")
    os1.copy_to(os2)
    res = os2.read(fd2, 1)
    assert len(res) == 1
    assert isinstance(res[0], BVV)
    assert res[0].value == 255
""" Supernet for differentiable rollouts. """<import_stmt>contextlib<import_stmt>torch<import_from_stmt>torch.nn functional<as>F<import_from_stmt>aw_nas assert_rollout_type utils<import_from_stmt>aw_nas.rollout.base DartsArch DifferentiableRollout BaseRollout<import_from_stmt>aw_nas.utils data_parallel use_params<import_from_stmt>aw_nas.weights_manager.base CandidateNet<import_from_stmt>aw_nas.weights_manager.shared SharedNet SharedCell SharedOp<line_sep>__all__=["DiffSubCandidateNet" "DiffSuperNet"]<class_stmt>DiffSubCandidateNet(CandidateNet)<block_start><def_stmt>__init__ self super_net rollout:DifferentiableRollout gpus=tuple() virtual_parameter_only=<true> eval_no_grad=<true><block_start>super(DiffSubCandidateNet self).__init__(eval_no_grad=eval_no_grad)<line_sep>self.super_net=super_net<line_sep>self._device=super_net.device<line_sep>self.gpus=gpus<line_sep>self.arch=rollout.arch<line_sep>self.virtual_parameter_only=virtual_parameter_only<block_end><def_stmt>get_device self<block_start><return>self._device<block_end>@contextlib.contextmanager<def_stmt>begin_virtual self<block_start>w_clone={k:v.clone()<for>k,v self.named_parameters()}<if_stmt><not>self.virtual_parameter_only<block_start>buffer_clone={k:v.clone()<for>k,v self.named_buffers()}<block_end><yield><for_stmt>n,v self.named_parameters()<block_start>v.data.copy_(w_clone[n])<block_end><del_stmt>w_clone<if_stmt><not>self.virtual_parameter_only<block_start><for_stmt>n,v self.named_buffers()<block_start>v.data.copy_(buffer_clone[n])<block_end><del_stmt>buffer_clone<block_end><block_end><def_stmt>forward self inputs detach_arch=<true>#pylint: disable=arguments-differ <block_start><if_stmt>detach_arch<block_start>arch=[DartsArch(op_weights=op_weights.detach() edge_norms=edge_norms.detach()<if>edge_norms<is><not><none><else><none>)<for>op_weights,edge_norms self.arch]<block_end><else_stmt><block_start>arch=self.arch<block_end><if_stmt><not>self.gpus<or>len(self.gpus)<eq>1<block_start><return>self.super_net.forward(inputs arch detach_arch=detach_arch)<block_end><if_stmt>arch[0].op_weights.ndimension()<eq>2<block_start>arch=[DartsArch(op_weights=a.op_weights.repeat(len(self.gpus) 1) edge_norms=(a.edge_norms.repeat(len(self.gpus))<if>a.edge_norms<is><not><none><else><none>))<for>a arch]<block_end><else_stmt># Ugly fix for rollout_size > 1 # call scatter here and stack... 
# split along dimension 1, # then concatenate along dimension 0 for `data_parallel` to scatter it again <block_start>num_split=len(self.gpus)<line_sep>rollout_batch_size=arch[0].op_weights.shape[1]<assert_stmt>rollout_batch_size%num_split<eq>0<line_sep>split_size=rollout_batch_size<floordiv>num_split<line_sep># arch = [torch.cat(torch.split(a, split_size, dim=1), dim=0) for a in arch] # Note: edge_norms (1-dim) do not support batch_size, just repeat arch=[DartsArch(op_weights=torch.cat(torch.split(a.op_weights split_size dim=1) dim=0) edge_norms=(a.edge_norms.repeat(len(self.gpus))<if>a.edge_norms<is><not><none><else><none>))<for>a arch]<block_end><return>data_parallel(self.super_net (inputs arch) self.gpus module_kwargs={"detach_arch":detach_arch})<block_end><def_stmt>_forward_with_params self inputs params **kwargs#pylint: disable=arguments-differ <block_start><with_stmt>use_params(self.super_net params)<block_start><return>self.forward(inputs **kwargs)<block_end><block_end><def_stmt>named_parameters self *args **kwargs#pylint: disable=arguments-differ <block_start><return>self.super_net.named_parameters(*args **kwargs)<block_end><def_stmt>named_buffers self *args **kwargs#pylint: disable=arguments-differ <block_start><return>self.super_net.named_buffers(*args **kwargs)<block_end><def_stmt>eval_data self data criterions mode="eval" **kwargs#pylint: disable=arguments-differ <block_start>""" Override eval_data, to enable gradient. Returns: results (list of results return by criterions) """<line_sep>self._set_mode(mode)<line_sep>outputs=self.forward_data(data[0] **kwargs)<line_sep><return>utils.flatten_list([c(data[0] outputs data[1])<for>c criterions])<block_end><block_end><class_stmt>DiffSuperNet(SharedNet)<block_start>NAME="diff_supernet"<def_stmt>__init__ self search_space device rollout_type="differentiable" gpus=tuple() num_classes=10 init_channels=16 stem_multiplier=3 max_grad_norm=5.0 dropout_rate=0.1 use_stem="conv_bn_3x3" stem_stride=1 stem_affine=<true> preprocess_op_type=<none> cell_use_preprocess=<true> cell_use_shortcut=<false> cell_shortcut_op_type="skip_connect" cell_group_kwargs=<none> candidate_virtual_parameter_only=<false> candidate_eval_no_grad=<true><block_start>super(DiffSuperNet self).__init__(search_space device rollout_type cell_cls=DiffSharedCell op_cls=DiffSharedOp gpus=gpus num_classes=num_classes init_channels=init_channels stem_multiplier=stem_multiplier max_grad_norm=max_grad_norm dropout_rate=dropout_rate use_stem=use_stem stem_stride=stem_stride stem_affine=stem_affine preprocess_op_type=preprocess_op_type cell_use_preprocess=cell_use_preprocess cell_group_kwargs=cell_group_kwargs cell_use_shortcut=cell_use_shortcut cell_shortcut_op_type=cell_shortcut_op_type)<line_sep>self.candidate_virtual_parameter_only=candidate_virtual_parameter_only<line_sep>self.candidate_eval_no_grad=candidate_eval_no_grad<block_end># ---- APIs ---- <def_stmt>extract_features self inputs rollout_or_arch **kwargs<block_start><if_stmt>isinstance(rollout_or_arch BaseRollout)# from extract_features (wrapper wm) <block_start>arch=rollout_or_arch.arch<block_end><else_stmt># from candidate net <block_start>arch=rollout_or_arch<block_end><return>super().extract_features(inputs arch **kwargs)<block_end><def_stmt>assemble_candidate self rollout<block_start><return>DiffSubCandidateNet(self rollout gpus=self.gpus virtual_parameter_only=self.candidate_virtual_parameter_only eval_no_grad=self.candidate_eval_no_grad)<block_end>@classmethod<def_stmt>supported_rollout_types 
cls<block_start><return>[assert_rollout_type("differentiable")]<block_end><block_end><class_stmt>DiffSharedCell(SharedCell)<block_start><def_stmt>num_out_channel self<block_start><return>self.num_out_channels<times>self._steps<block_end><def_stmt>forward self inputs arch detach_arch=<true># pylint: disable=arguments-differ <block_start><assert_stmt>self._num_init<eq>len(inputs)<line_sep>states=[op(_input)<for>op,_input zip(self.preprocess_ops inputs)]<line_sep>offset=0<line_sep># in parallel forward, after scatter, a namedtuple will be come a normal tuple arch=DartsArch(*arch)<line_sep>use_edge_normalization=arch.edge_norms<is><not><none><for_stmt>i_step range(self._steps)<block_start>to_=i_step+self._num_init<if_stmt>use_edge_normalization<block_start>act_lst=[arch.edge_norms[offset+from_]<times># edge norm factor scalar on this edge self.edges[from_][to_](state arch.op_weights[offset+from_] # op weights vector on this edge detach_arch=detach_arch)<for>from_,state enumerate(states)]<block_end><else_stmt><block_start>act_lst=[self.edges[from_][to_](state arch.op_weights[offset+from_] detach_arch=detach_arch)<for>from_,state enumerate(states)]<block_end>new_state=sum(act_lst)<line_sep>offset<augadd>len(states)<line_sep>states.append(new_state)<block_end>out=torch.cat(states[-self._steps:] dim=1)<if_stmt>self.use_shortcut<and>self.layer_index<ne>0<block_start>out=out+self.shortcut_reduction_op(inputs[-1])<block_end><return>out<block_end><block_end><class_stmt>DiffSharedOp(SharedOp)<block_start><def_stmt>forward self x weights detach_arch=<true># pylint: disable=arguments-differ <block_start><if_stmt>weights.ndimension()<eq>2# weights: (batch_size, num_op) <block_start><if_stmt><not>weights.shape[0]<eq>x.shape[0]# every `x.shape[0] % weights.shape[0]` data use the same sampled arch weights <block_start><assert_stmt>x.shape[0]%weights.shape[0]<eq>0<line_sep>weights=weights.repeat(x.shape[0]<floordiv>weights.shape[0] 1)<block_end><return>sum([weights[: i].reshape(-1 1 1 1)<times>op(x)<for>i,op enumerate(self.p_ops)])<block_end>out_act:torch.Tensor=0.0<line_sep># weights: (num_op) <if_stmt>self.partial_channel_proportion<is><none><block_start><for_stmt>w,op zip(weights self.p_ops)<block_start><if_stmt>detach_arch<and>w.item()<eq>0<block_start><continue><block_end>act=op(x).detach_()<if>w.item()<eq>0<else>op(x)<line_sep>out_act<augadd>w<times>act<block_end><block_end><else_stmt><block_start>op_channels=x.shape[1]<floordiv>self.partial_channel_proportion<line_sep>x_1=x[: :op_channels : :]# these channels goes through op x_2=x[: op_channels: : :]# these channels skips op # apply pooling if the ops have stride=2 <if_stmt>self.stride<eq>2<block_start>x_2=F.max_pool2d(x_2 2 2)<block_end><for_stmt>w,op zip(weights self.p_ops)# if detach_arch and w.item() == 0: # continue # not really sure about this <block_start>act=op(x_1)<line_sep># if w.item() == 0: # act.detach_() # not really sure about this either out_act<augadd>w<times>act<block_end>out_act=torch.cat((out_act x_2) dim=1)<line_sep># PC-DARTS implements a deterministic channel_shuffle() (not what they said in the paper) # ref: https://github.com/yuhuixu1993/PC-DARTS/blob/b74702f86c70e330ce0db35762cfade9df026bb7/model_search.py#L9 out_act=self._channel_shuffle(out_act self.partial_channel_proportion)<line_sep># this is the random channel shuffle # channel_perm = torch.randperm(out_act.shape[1]) # out_act = out_act[:, channel_perm, :, :] <block_end><return>out_act<block_end>@staticmethod<def_stmt>_channel_shuffle x:torch.Tensor 
groups:int<block_start>"""channel shuffle for PC-DARTS"""<line_sep>n,c,h,w=x.shape<line_sep>x=x.view(n groups -1 h w).transpose(1 2).contiguous()<line_sep>x=x.view(n c h w).contiguous()<line_sep><return>x<block_end><block_end>
""" Salem package """<import_from_future_stmt> division<import_from_stmt>os path<import_from_stmt>os makedirs<import_stmt>sys<import_from_stmt>functools wraps<import_stmt>pyproj<try_stmt><block_start><import_from_stmt>.version version<as>__version__<block_end><except_stmt>ImportError# pragma: no cover <block_start><raise>ImportError('Salem is not properly installed. If you are running '<concat>'from the source directory, please instead create a '<concat>'new virtual environment (using conda or virtualenv) '<concat>'and then install it in-place by running: '<concat>'pip install -e .')<block_end><def_stmt>lazy_property fn<block_start>"""Decorator that makes a property lazy-evaluated."""<line_sep>attr_name='_lazy_'+fn.__name__<line_sep>@property@wraps(fn)<def_stmt>_lazy_property self<block_start><if_stmt><not>hasattr(self attr_name)<block_start>setattr(self attr_name fn(self))<block_end><return>getattr(self attr_name)<block_end><return>_lazy_property<block_end># Default proj wgs84=pyproj.Proj(proj='latlong' datum='WGS84')<line_sep># Path to the cache directory cache_dir=path.join(path.expanduser('~') '.salem_cache')<if_stmt><not>path.exists(cache_dir)<block_start>makedirs(cache_dir)<block_end>download_dir=path.join(cache_dir 'downloads')<if_stmt><not>path.exists(download_dir)<block_start>makedirs(download_dir)<block_end>sample_data_gh_commit='758f7ddd0fa6b5b1bd4c63b6dcfe8d5eec0f4c59'<line_sep>sample_data_dir=path.join(cache_dir 'salem-sample-data-'+sample_data_gh_commit)<line_sep># python version python_version='py3'<if_stmt>sys.version_info.major<eq>2<block_start>python_version='py2'<block_end># API <import_from_stmt>salem.gis *<import_from_stmt>salem.datasets *<import_from_stmt>salem.sio read_shapefile read_shapefile_to_grid grid_from_dataset<import_from_stmt>salem.sio open_xr_dataset open_metum_dataset open_wrf_dataset open_mf_wrf_dataset <import_from_stmt>salem.sio DataArrayAccessor DatasetAccessor<import_from_stmt>salem.utils get_demo_file reduce<try_stmt><block_start><import_from_stmt>salem.graphics get_cmap DataLevels Map<block_end><except_stmt>ImportError<as>err<block_start><if_stmt>'matplotlib'<not><in>str(err)<block_start><raise><block_end><def_stmt>get_cmap <block_start><raise>ImportError('requires matplotlib')<block_end><def_stmt>DataLevels <block_start><raise>ImportError('requires matplotlib')<block_end><def_stmt>Map <block_start><raise>ImportError('requires matplotlib')<block_end><block_end><import_from_stmt>salem.wrftools geogrid_simulator<line_sep>
import requests

message = input('Enter a Message: ')
number = input('Enter the phone number: ')

payload = {'number': number, 'message': message}
r = requests.post("http://textbelt.com/text", data=payload)

if r.json()['success']:
    print('Success!')
else:
    print('Error!')
# SPDX-License-Identifier: Apache-2.0

import tensorflow as tf
from ..proto import keras, is_tf_keras, is_keras_older_than
from ..proto.tfcompat import is_tf2

_layer = keras.layers
_adv_activations = keras.layers.advanced_activations


def _default_layer_name_extractor(fstr_list, node_name):
    for fstr in fstr_list:
        idx = fstr.rfind('{}/')
        if node_name.endswith(fstr[idx + 3:]):
            klen = len(fstr) + idx - 2  # 2 = len('{}')
            return node_name[:len(node_name) - klen]

    return None


def _simple_layer_name_extractor(fstr_list, node_name):
    ri = node_name.rindex('/')
    return node_name[:ri]


def _conv_layer_spec_outputs(layer, node):
    if type(layer) == _layer.Conv1D:
        return node.name + '/Squeeze'

    activation_map = {
        keras.activations.linear: '',
        tf.nn.sigmoid: 'Sigmoid',
        tf.nn.softmax: 'Softmax',
        tf.nn.relu: 'Relu',
        tf.nn.elu: 'Elu',
        tf.nn.tanh: 'Tanh',
        tf.nn.swish: 'mul'}

    node_act = activation_map.get(layer.activation, None)
    if node_act is None:
        actname_map = {a_.__name__: a_ for a_ in activation_map if hasattr(a_, "__name__")}
        act_trans = actname_map.get(layer.activation.__name__, None)
        if act_trans is not None:
            node_act = activation_map.get(act_trans)

    assert node_act is not None, "Unsupported activation in the layer({})".format(layer.activation)

    if node_act:
        ri = node.name.rindex('/')
        return node.name[:ri + 1] + node_act
    else:
        if not layer.use_bias:
            if node.inputs[0].op.type == 'SpaceToBatchND':
                return node.name + '/BatchToSpaceND'
            else:
                return node.name
        else:
            ri = node.name.rindex('/')
            return node.name[:ri + 1] + 'BiasAdd'


def _relu_like_spec_outputs(layer, node):
    if isinstance(layer, _adv_activations.PReLU):
        ri = node.name.rindex('/')
        return node.name[:ri + 1] + 'add'
    return node.name


_keras_layer_spec = {
    # layer-type: ([pattern-list], [extract-layer-name, output-name-generator(optional)])
    _layer.AveragePooling1D: (["{}/AvgPool"], [_default_layer_name_extractor]),
    _layer.AveragePooling2D: (["{}/AvgPool"], [_default_layer_name_extractor]),
    _layer.AveragePooling3D: (["{}/AvgPool"], [_default_layer_name_extractor]),
    _layer.MaxPooling1D: (["{}/MaxPool"], [_default_layer_name_extractor]),
    _layer.MaxPooling2D: (["{}/MaxPool"], [_default_layer_name_extractor]),
    _layer.MaxPooling3D: (["{}/MaxPool"], [_default_layer_name_extractor]),
    _layer.Conv1D: (["{}/conv1d"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
    _layer.Conv2D: (["{}/Conv2D"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
    _layer.Conv2DTranspose: (["{}/conv2d_transpose"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
    _layer.DepthwiseConv2D: (["{}/depthwise"], [_simple_layer_name_extractor, _conv_layer_spec_outputs]),
    _layer.LeakyReLU: (["{}/LeakyRelu"], [_default_layer_name_extractor]),
    _adv_activations.PReLU: (["{}/Relu"], [_simple_layer_name_extractor, _relu_like_spec_outputs]),
    _layer.Reshape: (["{}/Reshape"], [_default_layer_name_extractor])
}

if not is_keras_older_than('2.2.0'):
    _keras_layer_spec.update({
        _adv_activations.ReLU: (["{}/Relu"], [_simple_layer_name_extractor, _relu_like_spec_outputs]),
    })

if is_tf_keras and is_tf2 and hasattr(_layer, 'normalization_v2'):
    _keras_layer_spec.update({
        _layer.normalization_v2.BatchNormalization:
            (["{}/FusedBatchNormV3", "{}/batchnorm/add_1"], [_default_layer_name_extractor])
    })


def keras_layer_spec(layer_type):
    return _keras_layer_spec.get(layer_type, (None, []))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""move_pivot_table_v2_legacy_order_by_to_timeseries_limit_metric

Revision ID: <KEY>
Revises: fe23025b9441
Create Date: 2021-12-17 16:56:55.186285

"""

# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "fe23025b9441"

import json
import logging

from alembic import op
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base

from superset import db

Base = declarative_base()

logger = logging.getLogger("alembic")


class Slice(Base):
    __tablename__ = "slices"

    id = Column(Integer, primary_key=True)
    params = Column(Text)
    viz_type = Column(String(250))


def upgrade():
    bind = op.get_bind()
    session = db.Session(bind=bind)

    slices = session.query(Slice).filter(Slice.viz_type == "pivot_table_v2").all()
    for slc in slices:
        try:
            params = json.loads(slc.params)
            legacy_order_by = params.pop("legacy_order_by", None)
            if legacy_order_by:
                params["series_limit_metric"] = legacy_order_by
            slc.params = json.dumps(params, sort_keys=True)
        except Exception as e:
            logger.exception(
                f"An error occurred: parsing params for slice {slc.id} failed. "
                "You need to fix it before upgrading your DB."
            )
            raise e

    session.commit()
    session.close()


def downgrade():
    bind = op.get_bind()
    session = db.Session(bind=bind)

    slices = session.query(Slice).filter(Slice.viz_type == "pivot_table_v2").all()
    for slc in slices:
        try:
            params = json.loads(slc.params)
            series_limit_metric = params.pop("series_limit_metric", None)
            if series_limit_metric:
                params["legacy_order_by"] = series_limit_metric
            slc.params = json.dumps(params, sort_keys=True)
        except Exception as e:
            logger.exception(
                f"An error occurred: parsing params for slice {slc.id} failed. "
                "You need to fix it before downgrading your DB."
            )
            raise e

    session.commit()
    session.close()
from flask_restplus import Namespace, Resource, fields
from flask_login import current_user

api = Namespace("checkout", description="Checkout related operations")

cart = api.model(
    "CartLine",
    {
        "id": fields.Integer(required=True, description="The checkout cartline id"),
        "quantity": fields.Integer(required=True, description="The cart item num"),
        "title": fields.String(description="The cart item title", attribute="variant.product.title"),
        "variant": fields.String(description="The cart item variant", attribute="variant.title"),
        "product_id": fields.Integer(description="The cart item product", attribute="variant.product.id"),
        "price": fields.Float(description="The cart item price", attribute="variant.price"),
        "first_img": fields.String(description="The cart item image", attribute="variant.product.first_img"),
    },
)


@api.route("/cart")
class CartIndex(Resource):
    @api.doc("list_products")
    @api.marshal_list_with(cart)
    def get(self):
        """List current user cart items"""
        cartitems = current_user.cart.lines
        return cartitems
""" More or less uniformly run something as a new daemon thread or process. """<import_stmt>multiprocessing threading queue<def_stmt>_run_locally input output function args **kwds<block_start>function(input output *args **kwds)<block_end><def_stmt>run function *args use_subprocess=<false> daemon=<true> **kwds<block_start>""" Create input, output queues, call `function` in a subprocess or a thread. ``function`` is called like this: ``function(input, output, *args, **kwds)`` :param use_subprocess: if true, create a new multiprocess; if false, create a new thread :param function: the function to call :param daemon: is the thread or subprocess run as a daemon or not? :param args: positional arguments to the function :param kwds: keyword arguments to the function :returns: a tuple with three elements: the subprocess or thread, an input queue, and an output queue. """<if_stmt>use_subprocess<block_start>Creator,Queue=multiprocessing.Process multiprocessing.Queue<block_end><else_stmt><block_start>Creator,Queue=threading.Thread queue.Queue<block_end>input,output=Queue() Queue()<line_sep>args=input output function args<line_sep>sub=Creator(target=_run_locally args=args kwargs=kwds daemon=daemon)<line_sep>sub.start()<line_sep><return>sub input output<block_end>
# importing Required Modules
import qrcode

# QR Code Generator
query = input("Enter Content: ")   # Enter Content
code = qrcode.make(str(query))     # Making the QR code
code.save("qrcode.png")            # Saving the QR code file
#
# Copyright (c) 2016 <NAME>
# All rights reserved.
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

from . import ui


def add_options(vars):
    ui.add_option('--python', help='the python executable')


def check(context):
    python_source_file = r"""
// If defined, enforces linking against PythonXXd.lib, which
// is usually not included in Python environments.
#undef _DEBUG
#include "Python.h"
int main()
{
  Py_Initialize();
  Py_Finalize();
  return 0;
}
"""

    import platform
    import subprocess
    import re, os

    def check_python(cmd):
        return subprocess.check_output([python, '-c', cmd]).strip()

    def check_sysconfig(cmd):
        r = check_python('import distutils.sysconfig as c; print(c.%s)' % cmd)
        return r if r != 'None' else ''

    context.Message('Checking for Python...')
    python = context.env.GetOption('python') or 'python'
    context.env['PYTHON'] = python
    incpath = check_sysconfig('get_python_inc()')
    context.env.AppendUnique(CPPPATH=[incpath])

    if platform.system() == 'Windows':
        version = check_python('import sys; print("%d%d"%sys.version_info[0:2])')
        prefix = check_python('import sys; print(sys.prefix)')
        libfile = os.path.join(prefix, 'libs', 'python%s.lib' % version)
        libpath = os.path.join(prefix, 'libs')
        lib = 'python%s' % version
        context.env.AppendUnique(LIBS=[lib])
    else:
        libpath = check_sysconfig('get_config_var("LIBDIR")')
        libfile = check_sysconfig('get_config_var("LIBRARY")')
        match = re.search(r'(python.*)\.(a|so|dylib)', libfile)
        lib = None
        if match:
            lib = match.group(1)
            context.env.AppendUnique(PYTHONLIBS=[lib])
            if match.group(2) == 'a':
                flags = check_sysconfig('get_config_var("LINKFORSHARED")')
                if flags is not None:
                    context.env.AppendUnique(LINKFLAGS=flags.split())
    context.env.AppendUnique(LIBPATH=[libpath])

    oldlibs = context.AppendLIBS([lib])
    flags = check_sysconfig('get_config_var("MODLIBS")')
    flags += ' ' + check_sysconfig('get_config_var("SHLIBS")')
    flags = [f[2:] for f in flags.strip().split() if f.startswith('-l')]
    if flags:
        context.AppendLIBS([flags])

    result = context.TryLink(python_source_file, '.cpp')
    if not result and context.env['PLATFORM'] == 'darwin':
        # Sometimes we need some extra stuff on Mac OS
        frameworkDir = libpath  # search up the libDir tree for the proper home for frameworks
        while frameworkDir and frameworkDir != "/":
            frameworkDir, d2 = os.path.split(frameworkDir)
            if d2 == "Python.framework":
                if "Python" not in os.listdir(os.path.join(frameworkDir, d2)):
                    context.Result(0)
                    print("Expected to find Python in framework directory %s, but it isn't there" % frameworkDir)
                    return False
                break
        context.env.AppendUnique(LINKFLAGS="-F%s" % frameworkDir)
        result = context.TryLink(python_source_file, '.cpp')

    if not result:
        context.Result(0)
        print("Cannot link program with Python.")
        return False

    if context.env['PLATFORM'] == 'darwin':
        context.env['LDMODULESUFFIX'] = '.so'

    context.Result(1)
    context.SetLIBS(oldlibs)
    context.env.AppendUnique(PYTHONLIBS=[lib] + flags)
    return True
""" Copyright (c) Jupyter Development Team. Distributed under the terms of the Modified BSD License. """<import_stmt>os<import_stmt>json<import_stmt>os.path<as>osp<import_from_stmt>jupyter_server.base.handlers JupyterHandler FileFindHandler<import_from_stmt>jupyter_server.extension.handler ExtensionHandlerMixin ExtensionHandlerJinjaMixin<import_from_stmt>jupyterlab_server LabServerApp LabConfig<import_from_stmt>jupyter_server.utils url_path_join<as>ujoin<import_from_stmt>traitlets Unicode<line_sep>HERE=osp.dirname(__file__)<with_stmt>open(os.path.join(HERE 'package.json'))<as>fid<block_start>version=json.load(fid)['version']<block_end><def_stmt>_jupyter_server_extension_points <block_start><return>[{'module':__name__ 'app':ExampleApp}]<block_end><class_stmt>ExampleHandler(ExtensionHandlerJinjaMixin ExtensionHandlerMixin JupyterHandler)<block_start>"""Handle requests between the main app page and notebook server."""<def_stmt>get self<block_start>"""Get the main page for the application's interface."""<line_sep>config_data={# Use camelCase here, since that's what the lab components expect "appVersion":version 'baseUrl':self.base_url 'token':self.settings['token'] 'fullStaticUrl':ujoin(self.base_url 'static' self.name) 'frontendUrl':ujoin(self.base_url 'example/')}<line_sep><return>self.write(self.render_template('index.html' static=self.static_url base_url=self.base_url token=self.settings['token'] page_config=config_data))<block_end><block_end><class_stmt>ExampleApp(LabServerApp)<block_start>extension_url='/example'<line_sep>default_url='/example'<line_sep>app_url="/example"<line_sep>name=__name__<line_sep>load_other_extensions=<false><line_sep>app_name='JupyterLab Example Service'<line_sep>app_settings_dir=os.path.join(HERE 'build' 'application_settings')<line_sep>app_version=version<line_sep>schemas_dir=os.path.join(HERE 'build' 'schemas')<line_sep>static_dir=os.path.join(HERE 'build')<line_sep>templates_dir=os.path.join(HERE 'templates')<line_sep>themes_dir=os.path.join(HERE 'build' 'themes')<line_sep>user_settings_dir=os.path.join(HERE 'build' 'user_settings')<line_sep>workspaces_dir=os.path.join(HERE 'build' 'workspaces')<def_stmt>initialize_handlers self<block_start>"""Add example handler to Lab Server's handler list. """<line_sep>self.handlers.append(('/example' ExampleHandler))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>ExampleApp.launch_instance()<block_end>
# Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ tests of the Parameters class """<line_sep># pylint: disable=missing-function-docstring,missing-class-docstring,abstract-method,protected-access <import_stmt>unittest<import_stmt>traceback<import_stmt>armi<import_from_stmt>armi.reactor parameters<import_from_stmt>armi.reactor composites<class_stmt>MockComposite<block_start><def_stmt>__init__ self name<block_start>self.name=name<line_sep>self.p={}<block_end><block_end><class_stmt>MockCompositeGrandParent(MockComposite)<block_start><pass><block_end><class_stmt>MockCompositeParent(MockCompositeGrandParent)<block_start><pass><block_end><class_stmt>MockCompositeChild(MockCompositeParent)<block_start><pass><block_end><class_stmt>ParameterTests(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.defs=parameters.ALL_DEFINITIONS._paramDefs<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>parameters.ALL_DEFINITIONS._paramDefs=cls.defs<block_end><def_stmt>setUp self<block_start>parameters.ALL_DEFINITIONS._paramDefs=[]<block_end><def_stmt>test_mutableDefaultsNotSupported self<block_start><class_stmt>Mock(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start><with_stmt>self.assertRaises(AssertionError)<block_start>pb.defParam("units" "description" "location" default=[])<block_end><with_stmt>self.assertRaises(AssertionError)<block_start>pb.defParam("units" "description" "location" default={})<block_end><block_end><with_stmt>self.assertRaises(AssertionError)<block_start>fail=pDefs.createBuilder(default=[])<block_end><with_stmt>self.assertRaises(AssertionError)<block_start>fail=pDefs.createBuilder(default={})<block_end><block_end><block_end><def_stmt>test_paramPropertyDoesNotConflict self<block_start><class_stmt>Mock(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("doodle" "units" "description" "location" default=42)<block_end><with_stmt>pDefs.createBuilder(MockComposite default=0.0)<as>pb<block_start>pb.defParam("cheese" "kg" "pressed curds of milk" "avg")<line_sep>pb.defParam("fudge" "kg" "saturated chocolate" "avg" default=19)<line_sep>pb.defParam("noodles" "kg" "strip, ring, or tube of pasta" "avg" default=<none> )<block_end><block_end>mock1=Mock()<line_sep>mock2=Mock()<line_sep>self.assertEqual(42 mock1.doodle)<line_sep>self.assertEqual(42 mock2.doodle)<line_sep>self.assertEqual(0.0 mock1.cheese)# make sure factory default is applied self.assertEqual(19 mock2.fudge)<line_sep># make sure we can override the factory default self.assertEqual(<none> mock2.noodles)<line_sep># make sure we can override the factory default mock1.doodle=17<line_sep>self.assertEqual(17 mock1.doodle)<line_sep>self.assertEqual(42 mock2.doodle)<block_end><def_stmt>test_paramPropertyDoesNotConflictWithNoneDefault 
self<block_start><class_stmt>Mock(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("noneDefault" "units" "description" "location" default=<none>)<block_end><block_end>mock1=Mock()<line_sep>mock2=Mock()<line_sep>self.assertIsNone(mock1.noneDefault)<line_sep>self.assertIsNone(mock2.noneDefault)<line_sep>mock1.noneDefault=1.234<line_sep>self.assertEqual(1.234 mock1.noneDefault)<line_sep>self.assertEqual(<none> mock2.noneDefault)<block_end><def_stmt>test_getWithoutDefaultRaisesParameterError self<block_start><class_stmt>Mock(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("noDefault" "units" "description" "location")<block_end><block_end>mock=Mock()<with_stmt>self.assertRaises(parameters.ParameterError)<block_start>print(mock.noDefault)<block_end><block_end><def_stmt>test_attemptingToSetParamWithoutSetterFails self<block_start><class_stmt>Mock(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("noSetter" "noSetter" "units" "description" "location" default="encapsulated" setter=<none> )<block_end><block_end>mock=Mock()<line_sep>self.assertEqual("encapsulated" mock.noSetter)<with_stmt>self.assertRaises(parameters.ParameterError)<block_start>mock.noSetter=<false><block_end>self.assertEqual("encapsulated" mock.noSetter)<block_end><def_stmt>test_setter self<block_start><class_stmt>Mock(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start><def_stmt>n self value<block_start>self._p_n=value<line_sep>self._p_nPlus1=value+1<block_end>pb.defParam("n" "units" "description" "location" setter=n)<def_stmt>nPlus1 self value<block_start>self._p_nPlus1=value<line_sep>self._p_n=value-1<block_end>pb.defParam("nPlus1" "units" "description" "location" setter=nPlus1)<block_end><block_end>mock=Mock()<line_sep>self.assertTrue(all(pd.assigned<eq>parameters.NEVER<for>pd mock.paramDefs<if>pd.name<ne>"serialNum"))<with_stmt>self.assertRaises(parameters.ParameterError)<block_start>print(mock.n)<block_end><with_stmt>self.assertRaises(parameters.ParameterError)<block_start>print(mock.nPlus1)<block_end>mock.n=15<line_sep>self.assertEqual(15 mock.n)<line_sep>self.assertEqual(16 mock.nPlus1)<line_sep>mock.nPlus1=22<line_sep>self.assertEqual(21 mock.n)<line_sep>self.assertEqual(22 mock.nPlus1)<line_sep>self.assertTrue(all(pd.assigned<for>pd mock.paramDefs))<block_end><def_stmt>test_cannotDefineParameterWithSameName self<block_start><with_stmt>self.assertRaises(parameters.ParameterDefinitionError)<block_start><class_stmt>MockParamCollection(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("sameName" "units" "description 1" "location")<line_sep>pb.defParam("sameName" "units" "description 2" "location")<block_end><block_end>_=MockParamCollection()<block_end><block_end><def_stmt>test_paramDefinitionsCompose self<block_start><class_stmt>MockBaseParamCollection(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("base1" "units" "a param on the base collection" "avg")<line_sep>pb.defParam("base2" "units" "another param on the base 
collection" "avg")<block_end><block_end><class_stmt>MockDerivedACollection(MockBaseParamCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("derAp1" "units" "derived a p 1" "centroid")<line_sep>pb.defParam("derAp2" "units" "derived a p 2" "centroid")<block_end><block_end><class_stmt>MockDerivedBCollection(MockDerivedACollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("derBp" "units" "derived b param" "centroid")<block_end><block_end>base=MockBaseParamCollection()<line_sep>derA=MockDerivedACollection()<line_sep>derB=MockDerivedBCollection()<line_sep>self.assertTrue(set(base.paramDefs._paramDefs).issubset(set(derA.paramDefs._paramDefs)))<line_sep>self.assertTrue(set(base.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs)))<line_sep>self.assertTrue(set(derA.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs)))<block_end><def_stmt>test_cannotDefineParameterWithSameNameForCollectionSubclass self<block_start><class_stmt>MockPCParent(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("sameName" "units" "description 3" "location")<block_end><block_end><with_stmt>self.assertRaises(parameters.ParameterDefinitionError)<block_start><class_stmt>MockPCChild(MockPCParent)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("sameName" "units" "description 4" "location")<block_end><block_end>_=MockPCChild()<block_end># same name along a different branch from the base ParameterCollection should be fine <class_stmt>MockPCUncle(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("sameName" "units" "description 5" "location")<block_end><block_end><block_end><def_stmt>test_cannotCreateAttrbuteOnParameterCollectionSubclass self<block_start><class_stmt>MockPC(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("someParam" "units" "description" "location")<block_end><block_end>_=MockPC()<block_end><def_stmt>test_cannotCreateInstanceOf_NoDefault self<block_start><with_stmt>self.assertRaises(NotImplementedError)<block_start>_=parameters.NoDefault()<block_end><block_end><def_stmt>test_cannotCreateInstanceOf_Undefined self<block_start><with_stmt>self.assertRaises(NotImplementedError)<block_start>_=parameters.parameterDefinitions._Undefined()<block_end><block_end><def_stmt>test_defaultLocation self<block_start><class_stmt>MockPC(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder(location=parameters.ParamLocation.AVERAGE)<as>pb<block_start>pb.defParam("p1" "units" "p1 description")<line_sep>pb.defParam("p2" "units" "p2 description" parameters.ParamLocation.TOP)<block_end><block_end>pc=MockPC()<line_sep>self.assertEqual(pc.paramDefs["p1"].location parameters.ParamLocation.AVERAGE)<line_sep>self.assertEqual(pc.paramDefs["p2"].location parameters.ParamLocation.TOP)<block_end><def_stmt>test_categories self<block_start><class_stmt>MockPC0(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("p0" 
"units" "p0 description" "location")<block_end><block_end>pc=MockPC0()<line_sep>self.assertEqual(pc.paramDefs.categories set())<class_stmt>MockPC(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder(categories=["awesome" "stuff"])<as>pb<block_start>pb.defParam("p1" "units" "p1 description" "location")<line_sep>pb.defParam("p2" "units" "p2 description" "location" categories=["bacon"])<block_end><with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("p3" "units" "p3 description" "location" categories=["bacon"])<block_end><block_end>pc=MockPC()<line_sep>self.assertEqual(pc.paramDefs.categories set(["awesome" "stuff" "bacon"]))<line_sep>p1=pc.paramDefs["p1"]<line_sep>p2=pc.paramDefs["p2"]<line_sep>p3=pc.paramDefs["p3"]<line_sep>self.assertEqual(p1.categories set(["awesome" "stuff"]))<line_sep>self.assertEqual(p2.categories set(["awesome" "stuff" "bacon"]))<line_sep>self.assertEqual(p3.categories set(["bacon"]))<line_sep>self.assertEqual(set(pc.paramDefs.inCategory("awesome")) set([p1 p2]))<line_sep>self.assertEqual(set(pc.paramDefs.inCategory("stuff")) set([p1 p2]))<line_sep>self.assertEqual(set(pc.paramDefs.inCategory("bacon")) set([p2 p3]))<block_end><def_stmt>test_parameterCollectionsHave__slots__ self<block_start>"""Make sure something is implemented to prevent accidental creation of attributes"""<line_sep>self.assertEqual(set(["_hist" "_backup" "assigned" "_p_serialNum" "serialNum"]) set(parameters.ParameterCollection._slots) )<class_stmt>MockPC(parameters.ParameterCollection)<block_start><pass><block_end>pc=MockPC()<line_sep># No longer protecting against __dict__ access. If someone REALLY wants to # staple something to a parameter collection with no guarantees of anything, # that's on them # with self.assertRaises(AttributeError): # pc.__dict__["foo"] = 5 <with_stmt>self.assertRaises(AssertionError)<block_start>pc.whatever=22<block_end># try again after using a ParameterBuilder <class_stmt>MockPC(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<line_sep># use of the ParameterBuilder creates an empty __slots__ <with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("p0" "units" "p0 description" "location")<block_end><block_end>pc=MockPC()<line_sep>self.assertTrue("_p_p0"<in>MockPC._slots)<line_sep># Make sure we aren't making any weird copies of anything self.assertTrue(pc._slots<is>MockPC._slots)<with_stmt>self.assertRaises(AssertionError)<block_start>pc.whatever=33<block_end>self.assertEqual(["serialNum"] pc.keys())<line_sep>pc.p0="hi"<line_sep>self.assertEqual({"p0" "serialNum"} set(pc.keys()))<line_sep># Also make sure that subclasses of ParameterCollection subclasses use __slots__ <class_stmt>MockPCChild(MockPC)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder()<as>pb<block_start>pb.defParam("p2" "foo" "bar")<block_end><block_end>pcc=MockPCChild()<with_stmt>self.assertRaises(AssertionError)<block_start>pcc.whatever=33<block_end><block_end><block_end><class_stmt>MockSyncPC(parameters.ParameterCollection)<block_start>pDefs=parameters.ParameterDefinitionCollection()<with_stmt>pDefs.createBuilder(default=0.0 location=parameters.ParamLocation.AVERAGE)<as>pb<block_start>pb.defParam("param1" "units" "p1 description" categories=["cat1"])<line_sep>pb.defParam("param2" "units" "p2 description" categories=["cat2"])<line_sep>pb.defParam("param3" "units" "p3 description" 
categories=["cat3"])<block_end><block_end><def_stmt>makeComp name<block_start>c=composites.Composite(name)<line_sep>c.p=MockSyncPC()<line_sep><return>c<block_end><class_stmt>SynchronizationTests<block_start>"""Some unit tests that must be run with mpirun instead of the standard unittest system."""<def_stmt>setUp self<block_start>self.r=makeComp("reactor")<line_sep>self.r.core=makeComp("core")<line_sep>self.r.add(self.r.core)<for_stmt>ai range(armi.MPI_SIZE<times>4)<block_start>a=makeComp("assembly{}".format(ai))<line_sep>self.r.core.add(a)<for_stmt>bi range(10)<block_start>a.add(makeComp("block{}-{}".format(ai bi)))<block_end><block_end>self.comps=[self.r.core]+self.r.core.getChildren(deep=<true>)<for_stmt>pd MockSyncPC().paramDefs<block_start>pd.assigned=parameters.NEVER<block_end><block_end><def_stmt>tearDown self<block_start><del_stmt>self.r<block_end><def_stmt>run self testNamePrefix="mpitest_"<block_start><with_stmt>open("mpitest{}.temp".format(armi.MPI_RANK) "w")<as>self.l<block_start><for_stmt>methodName sorted(dir(self))<block_start><if_stmt>methodName.startswith(testNamePrefix)<block_start>self.write("{}.{}".format(self.__class__.__name__ methodName))<try_stmt><block_start>self.setUp()<line_sep>getattr(self methodName)()<block_end><except_stmt>Exception<block_start>self.write("failed, big time")<line_sep>traceback.print_exc(file=self.l)<line_sep>self.write("*** printed exception")<try_stmt><block_start>self.tearDown()<block_end><except_stmt><block_start><pass><block_end><block_end><block_end><block_end>self.l.write("done.")<block_end><block_end><def_stmt>write self msg<block_start>self.l.write("{}\n".format(msg))<line_sep>self.l.flush()<block_end><def_stmt>assertRaises self exceptionType<block_start><class_stmt>ExceptionCatcher<block_start><def_stmt>__enter__ self<block_start><pass><block_end><def_stmt>__exit__ self exc_type exc_value traceback<block_start><if_stmt>exc_type<is>exceptionType<block_start><return><true><block_end><raise>AssertionError("Expected {}, but got {}".format(exceptionType exc_type))<block_end><block_end><return>ExceptionCatcher()<block_end><def_stmt>assertEqual self expected actual<block_start><if_stmt>expected<ne>actual<block_start><raise>AssertionError("(expected) {} != {} (actual)".format(expected actual))<block_end><block_end><def_stmt>assertNotEqual self expected actual<block_start><if_stmt>expected<eq>actual<block_start><raise>AssertionError("(expected) {} == {} (actual)".format(expected actual))<block_end><block_end><def_stmt>mpitest_noConflicts self<block_start><for_stmt>ci,comp enumerate(self.comps)<block_start><if_stmt>ci%armi.MPI_SIZE<eq>armi.MPI_RANK<block_start>comp.p.param1=(armi.MPI_RANK+1)<times>30.0<block_end><else_stmt><block_start>self.assertNotEqual((armi.MPI_RANK+1)<times>30.0 comp.p.param1)<block_end><block_end># numUpdates = len(self.comps) // armi.MPI_SIZE + (len(self.comps) % armi.MPI_SIZE > armi.MPI_RANK) self.assertEqual(len(self.comps) self.r.syncMpiState())<for_stmt>ci,comp enumerate(self.comps)<block_start>self.assertEqual((ci%armi.MPI_SIZE+1)<times>30.0 comp.p.param1)<block_end><block_end><def_stmt>mpitest_noConflicts_setByString self<block_start>"""Make sure params set by string also work with sync."""<for_stmt>ci,comp enumerate(self.comps)<block_start><if_stmt>ci%armi.MPI_SIZE<eq>armi.MPI_RANK<block_start>comp.p.param2=(armi.MPI_RANK+1)<times>30.0<block_end><else_stmt><block_start>self.assertNotEqual((armi.MPI_RANK+1)<times>30.0 comp.p.param2)<block_end><block_end># numUpdates = len(self.comps) // armi.MPI_SIZE + 
(len(self.comps) % armi.MPI_SIZE > armi.MPI_RANK) self.assertEqual(len(self.comps) self.r.syncMpiState())<for_stmt>ci,comp enumerate(self.comps)<block_start>self.assertEqual((ci%armi.MPI_SIZE+1)<times>30.0 comp.p.param2)<block_end><block_end><def_stmt>mpitest_withConflicts self<block_start>self.r.core.p.param1=(armi.MPI_RANK+1)<times>99.0<with_stmt>self.assertRaises(ValueError)<block_start>self.r.syncMpiState()<block_end><block_end><def_stmt>mpitest_withConflictsButSameValue self<block_start>self.r.core.p.param1=(armi.MPI_SIZE+1)<times>99.0<line_sep>self.r.syncMpiState()<line_sep>self.assertEqual((armi.MPI_SIZE+1)<times>99.0 self.r.core.p.param1)<block_end><def_stmt>mpitest_noConflictsMaintainWithStateRetainer self<block_start>assigned=[]<with_stmt>self.r.retainState(parameters.inCategory("cat1"))<block_start><for_stmt>ci,comp enumerate(self.comps)<block_start>comp.p.param2=99<times>ci<if_stmt>ci%armi.MPI_SIZE<eq>armi.MPI_RANK<block_start>comp.p.param1=(armi.MPI_RANK+1)<times>30.0<line_sep>assigned.append(parameters.SINCE_ANYTHING)<block_end><else_stmt><block_start>self.assertNotEqual((armi.MPI_RANK+1)<times>30.0 comp.p.param1)<line_sep>assigned.append(parameters.NEVER)<block_end><block_end># 1st inside state retainer self.assertEqual(<true> all(c.p.assigned<eq>parameters.SINCE_ANYTHING<for>c self.comps))<block_end># confirm outside state retainer self.assertEqual(assigned [c.p.assigned<for>ci,c enumerate(self.comps)])<line_sep># this rank's "assigned" components are not assigned on the workers, and so will be updated self.assertEqual(len(self.comps) self.r.syncMpiState())<for_stmt>ci,comp enumerate(self.comps)<block_start>self.assertEqual((ci%armi.MPI_SIZE+1)<times>30.0 comp.p.param1)<block_end><block_end><def_stmt>mpitest_conflictsMaintainWithStateRetainer self<block_start><with_stmt>self.r.retainState(parameters.inCategory("cat2"))<block_start><for_stmt>_,comp enumerate(self.comps)<block_start>comp.p.param2=99<times>armi.MPI_RANK<block_end><block_end><with_stmt>self.assertRaises(ValueError)<block_start>self.r.syncMpiState()<block_end><block_end><def_stmt>mpitest_rxCoeffsProcess self<block_start>"""This test mimics the process for rxCoeffs when doing distributed doppler"""<def_stmt>do # we will do this over 4 passes (there are 4 * MPI_SIZE assemblies) <block_start><for_stmt>passNum range(4)<block_start><with_stmt>self.r.retainState(parameters.inCategory("cat2"))<block_start>self.r.p.param3="hi"<for_stmt>c self.comps<block_start>c.p.param1=(99<times>armi.MPI_RANK)<line_sep># this will get reset after state retainer <block_end>a=self.r.core[passNum<times>armi.MPI_SIZE+armi.MPI_RANK]<line_sep>a.p.param2=armi.MPI_RANK<times>20.0<for_stmt>b a<block_start>b.p.param2=armi.MPI_RANK<times>10.0<block_end><for_stmt>ai,a2 enumerate(self.r)<block_start><if_stmt>ai%armi.MPI_SIZE<ne>armi.MPI_RANK<block_start><assert_stmt>"param2"<not><in>a2.p<block_end><block_end>self.assertEqual(parameters.SINCE_ANYTHING param1.assigned)<line_sep>self.assertEqual(parameters.SINCE_ANYTHING param2.assigned)<line_sep>self.assertEqual(parameters.SINCE_ANYTHING param3.assigned)<line_sep>self.assertEqual(parameters.SINCE_ANYTHING a.p.assigned)<line_sep>self.r.syncMpiState()<line_sep>self.assertEqual(parameters.SINCE_ANYTHING&~parameters.SINCE_LAST_DISTRIBUTE_STATE param1.assigned )<line_sep>self.assertEqual(parameters.SINCE_ANYTHING&~parameters.SINCE_LAST_DISTRIBUTE_STATE param2.assigned )<line_sep>self.assertEqual(parameters.SINCE_ANYTHING&~parameters.SINCE_LAST_DISTRIBUTE_STATE param3.assigned 
)<line_sep>self.assertEqual(parameters.SINCE_ANYTHING&~parameters.SINCE_LAST_DISTRIBUTE_STATE a.p.assigned )<block_end>self.assertEqual(parameters.NEVER param1.assigned)<line_sep>self.assertEqual(parameters.SINCE_ANYTHING param2.assigned)<line_sep>self.assertEqual(parameters.NEVER param3.assigned)<line_sep>self.assertEqual(parameters.SINCE_ANYTHING a.p.assigned)<line_sep>do_assert(passNum)<block_end><block_end>param1=self.r.p.paramDefs["param1"]<line_sep>param2=self.r.p.paramDefs["param2"]<line_sep>param3=self.r.p.paramDefs["param3"]<def_stmt>do_assert passNum# ensure all assemblies and blocks set values for param2, but param1 is empty <block_start><for_stmt>rank range(armi.MPI_SIZE)<block_start>a=self.r.core[passNum<times>armi.MPI_SIZE+rank]<assert_stmt>"param1"<not><in>a.p<assert_stmt>"param3"<not><in>a.p<line_sep>self.assertEqual(rank<times>20 a.p.param2)<for_stmt>b a<block_start>self.assertEqual(rank<times>10 b.p.param2)<assert_stmt>"param1"<not><in>b.p<assert_stmt>"param3"<not><in>b.p<block_end><block_end><block_end><if_stmt>armi.MPI_RANK<eq>0<block_start><with_stmt>self.r.retainState(parameters.inCategory("cat2"))<block_start>armi.MPI_COMM.bcast(self.r)<line_sep>do()<line_sep>[do_assert(passNum)<for>passNum range(4)]<block_end>[do_assert(passNum)<for>passNum range(4)]<block_end><else_stmt><block_start><del_stmt>self.r<line_sep>self.r=armi.MPI_COMM.bcast(<none>)<line_sep>do()<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><if_stmt>armi.MPI_SIZE<eq>1<block_start>unittest.main()<block_end><else_stmt><block_start>SynchronizationTests().run()<block_end><block_end>
import calendar
import time
import unittest
import random

import pytest

from selenium.test.selenium.webdriver.common import utils


class CookieTest(unittest.TestCase):

    def setUp(self):
        self._loadPage("simpleTest")
        # Set the cookie to expire in 30 minutes
        timestamp = calendar.timegm(time.gmtime()) + (30 * 60)
        self.COOKIE_A = {"name": "foo",
                         "value": "bar",
                         "path": "/",
                         "secure": False}

    def tearDown(self):
        self.driver.delete_all_cookies()

    def testAddCookie(self):
        self.driver.execute_script("return document.cookie")
        self.driver.add_cookie(self.COOKIE_A)
        cookie_returned = str(self.driver.execute_script("return document.cookie"))
        self.assertTrue(self.COOKIE_A["name"] in cookie_returned)

    def testAddingACookieThatExpiredInThePast(self):
        if self.driver.name == 'internet explorer':
            pytest.skip("Issue needs investigating")
        cookie = self.COOKIE_A.copy()
        cookie["expiry"] = calendar.timegm(time.gmtime()) - 1
        self.driver.add_cookie(cookie)
        cookies = self.driver.get_cookies()
        self.assertEquals(0, len(cookies))

    def testDeleteAllCookie(self):
        self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A))
        self.driver.delete_all_cookies()
        self.assertFalse(self.driver.get_cookies())

    def testDeleteCookie(self):
        self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A))
        self.driver.delete_cookie("foo")
        self.assertFalse(self.driver.get_cookies())

    def testShouldGetCookieByName(self):
        key = "key_%d" % int(random.random() * 10000000)
        self.driver.execute_script("document.cookie = arguments[0] + '=set';", key)
        cookie = self.driver.get_cookie(key)
        self.assertEquals("set", cookie["value"])

    def testGetAllCookies(self):
        key1 = "key_%d" % int(random.random() * 10000000)
        key2 = "key_%d" % int(random.random() * 10000000)
        cookies = self.driver.get_cookies()
        count = len(cookies)

        one = {"name": key1,
               "value": "value"}
        two = {"name": key2,
               "value": "value"}

        self.driver.add_cookie(one)
        self.driver.add_cookie(two)

        self._loadPage("simpleTest")
        cookies = self.driver.get_cookies()
        self.assertEquals(count + 2, len(cookies))

    def testShouldNotDeleteCookiesWithASimilarName(self):
        cookieOneName = "fish"
        cookie1 = {"name": cookieOneName,
                   "value": "cod"}
        cookie2 = {"name": cookieOneName + "x",
                   "value": "earth"}
        self.driver.add_cookie(cookie1)
        self.driver.add_cookie(cookie2)
        self.driver.delete_cookie(cookieOneName)
        cookies = self.driver.get_cookies()
        self.assertFalse(cookie1["name"] == cookies[0]["name"], msg=str(cookies))
        self.assertEquals(cookie2["name"], cookies[0]["name"], msg=str(cookies))

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))

    def _pageURL(self, name):
        return "http://localhost:%d/%s.html" % (self.webserver.port, name)
""" Jupyter <-> Vim See: <http://jupyter-client.readthedocs.io/en/stable/api/client.html> """<line_sep># Standard <import_stmt>re<import_from_stmt>textwrap dedent<import_from_stmt>threading Thread Lock<import_from_stmt>time sleep<line_sep># Py module <import_from_stmt>jupyter_client KernelManager<import_stmt>vim<line_sep># Local <import_from_stmt>jupyter_util echom unquote_string match_kernel_id get_vim<try_stmt><block_start><import_from_stmt>queue Queue Empty<block_end><except_stmt>ImportError<block_start><import_from_stmt>Queue Queue Empty<block_end># Local <import_from_stmt>language list_languages<class_stmt>VimMessenger()<block_start>"""Handle message to/from Vim Attributes ---------- sync : :obj:`Sync` Object to support asynchronous operations. message_queue : :obj:`Queue` Asynchronous queue of messages. pid : int PID of the current vim session. verbose : bool If True, receive message id from sending function and report back to vim with output, silent otherwise. monitor_console : bool If True, create a new buffer in vim to display output from the kernel. cell_separators : list of str User-defined list of strings that separate code cells. """<def_stmt>__init__ self sync<block_start>self.sync=sync<line_sep>self.message_queue=Queue()# for async echom self.pid=get_vim('getpid()' -1)# pid of current vim session # Define members python <- vim self.set_monitor_bools()<line_sep>self.set_cell_separators()<block_end><def_stmt>set_monitor_bools self<block_start>"""Set booleans to define if jupyter_vim monitors messages."""<line_sep># NOTE this function is called by the @monitor_decorator in jupyter_vim # to ensure user options are up-to-date. self.verbose=bool(int(vim.vars.get('jupyter_verbose' 0)))<line_sep>self.monitor_console=bool(int(vim.vars.get('jupyter_monitor_console' 0)))<block_end><def_stmt>set_cell_separators self<block_start>"""Set cell separators (list of str)."""<line_sep># NOTE this function is called from jupyter_vim.run_cell self.cell_separators=get_vim('g:jupyter_cell_separators' '')<line_sep>self.cell_separators=[unquote_string(x)<for>x self.cell_separators]<block_end>@staticmethod<def_stmt>get_timer_intervals <block_start>"""Return list of user-defined timers [ms]. Returns ------- list of int List of timers [ms]. """<line_sep>timer_list=get_vim('g:jupyter_timer_intervals' [0.1 0.5 1 3])<line_sep><return>[int(x)<for>x timer_list]<block_end>@staticmethod<def_stmt>get_meta_messages <block_start>"""Return list of user-defined list of meta messages. Returns ------- list of str List of user-defined meta messages to send before/after code. 
"""<line_sep><return>(get_vim('b:jupyter_exec_before' '') get_vim('b:jupyter_exec_pre' '') get_vim('b:jupyter_exex_post' '') get_vim('b:jupyter_exec_after' ''))<block_end><def_stmt>is_cell_separator self line<block_start>"""Return True if given `line` is a cell separator."""<line_sep><return>any([bool(re.match(separation line.strip()))<for>separation self.cell_separators])<block_end><def_stmt>thread_echom self arg **args<block_start>"""Wrap echo async: put message to be echoed in a queue."""<line_sep>self.message_queue.put((arg args))<block_end><def_stmt>timer_echom self<block_start>"""Call echom sync on all messages in queue."""<line_sep># Check in <if_stmt>self.message_queue.empty()<block_start><return><block_end># Show user the force <while_stmt><not>self.message_queue.empty()<block_start>(arg args)=self.message_queue.get_nowait()<line_sep>echom(arg **args)<block_end># Restore peace in the galaxy vim.command('redraw')<block_end># TODO add verbose flag <def_stmt>string_hi self<block_start>"""Return Hi from vim string."""<line_sep><return>('\\n\\nReceived connection from vim client with pid {}'<concat>'\\n'+'-'<times>60+'\\n').format(self.pid)<block_end><def_stmt>thread_echom_kernel_info self kernel_info<block_start>"""Echo kernel info (async). Parameters ---------- kernel_info : str Information about the kernel to print in vim messages. """<line_sep>kernel_string='\n '.join([str(key)+': '+str(kernel_info[key])<for>key kernel_info])<line_sep># Send command so that user knows vim is connected at bottom, more readable self.thread_echom('Connected: {}'.format(kernel_info['id']) style='Question')<line_sep># FIXME messages does not actually display in vim, # only appears in `:messages` command. self.thread_echom('To:' style='Question')<line_sep>self.thread_echom(kernel_string)<block_end><block_end><class_stmt>JupyterMessenger()<block_start>"""Handle primitive messages to/from jupyter kernel. Attributes ---------- km_client : :obj:`KernelManager` client Object to handle connections with the kernel. See: <http://jupyter-client.readthedocs.io/en/stable/api/client.html> kernel_info : dict Information about the kernel itself. dict with keys: 'kernel_type' : str, the type of kernel, i.e. `python`. 'pid' : int, the pid of the kernel process. 'cwd' : str, the current working directory of the kernel. 'hostname' : str, the hostname of the kernel. cfile : str Filename of the connection file, i.e. `kernel-123.json`. sync : :obj:`Sync` Object to support asynchronous operations. meta_messages : list of str User-defined meta messages to send before/after code. """<def_stmt>__init__ self sync<block_start>self.km_client=<none># KernelManager client self.kernel_info=dict()# Kernel information self.cfile=''# Connection file self.sync=sync# Sync object self.meta_messages=VimMessenger.get_meta_messages()<block_end><def_stmt>create_kernel_manager self<block_start>"""Create the kernel manager and connect a client. Returns ------- bool True if client connects successfully, False on failure. 
"""<line_sep># Get client kernel_manager=KernelManager(connection_file=self.cfile)<line_sep># The json may be badly encoding especially if autoconnecting <try_stmt><block_start>kernel_manager.load_connection_file()<block_end><except_stmt>Exception<block_start><return><false><block_end>self.km_client=kernel_manager.client()<line_sep># Open channel self.km_client.start_channels()<line_sep># Ping the kernel self.km_client.kernel_info()<try_stmt><block_start>self.km_client.get_shell_msg(timeout=1)<line_sep><return><true><block_end><except_stmt>Empty<block_start><return><false><block_end><block_end><def_stmt>disconnnect self<block_start>"""Disconnect silently from kernel and close channels."""<if_stmt>self.km_client<is><none><block_start><return><block_end>self.km_client.stop_channels()<line_sep>self.km_client=<none><block_end><def_stmt>update_meta_messages self<block_start>"""Sync: reread vim meta vars."""<line_sep>self.meta_messages=VimMessenger.get_meta_messages()<block_end><def_stmt>check_connection self<block_start>"""Check that we have a client connected to the kernel. Returns ------- bool True if client is connected, False if not. """<line_sep><return>self.km_client.hb_channel.is_beating()<if>self.km_client<else><false><block_end><def_stmt>check_connection_or_warn self<block_start>"""Echo warning if not connected. Returns ------- bool True if client is connected, False if not. """<if_stmt>self.check_connection()<block_start><return><true><block_end>echom('WARNING: Not connected to Jupyter!'<concat>'\nRun :JupyterConnect to find the kernel' style='WarningMsg')<line_sep><return><false><block_end><def_stmt>get_pending_msgs self<block_start>"""Get pending message pool. Returns ------- list of :obj:`msg` List of messages waiting on the `iopub_channel`. """<line_sep>msgs=list()<try_stmt><block_start>self.sync.msg_lock.acquire()<line_sep>msgs=self.km_client.iopub_channel.get_msgs()<block_end><except_stmt>(Empty TypeError KeyError IndexError ValueError)<block_start><pass><block_end><finally_stmt><block_start>self.sync.msg_lock.release()<block_end><return>msgs<block_end><def_stmt>get_reply_msg self msg_id<block_start>"""Get kernel reply from sent client message with msg_id (async). This function can block 3 sec, so call in a thread. Returns ------- dict Message response. """<line_sep># TODO handle 'is_complete' requests? # <http://jupyter-client.readthedocs.io/en/stable/messaging.html#code-completeness> # Declare default reply=dict()<for_stmt>_ range(3)# Check <block_start><if_stmt>self.sync.stop<block_start><return>dict()<block_end># Get self.sync.msg_lock.acquire()<try_stmt><block_start>reply=self.km_client.get_shell_msg(block=<true> timeout=1)<or>{}<block_end><except_stmt>(Empty TypeError KeyError IndexError ValueError)<block_start><pass><block_end><finally_stmt><block_start>self.sync.msg_lock.release()<block_end># Stop <if_stmt>reply.get('parent_header' {}).get('msg_id' -1)<eq>msg_id<block_start><break><block_end><block_end><return>reply<block_end><def_stmt>send self msg ismeta=<false> **kwargs<block_start>"""Send a message to the kernel client. .. note:: Async: crossroad <- run_command Global: -> cmd, cmd_id Returns ------- int Command id. 
"""<if_stmt><not>self.check_connection_or_warn()<block_start><return>-1<block_end># Pre <if_stmt><not>ismeta<block_start>bef,pre,post,aft=self.meta_messages<line_sep># Send before unless it is blank <if_stmt>bef<block_start>self.send(bef ismeta=<true>)<block_end># Craft new message msg=pre+msg+post<block_end># Include dedent of msg so we don't get odd indentation errors. cmd=dedent(msg)<line_sep># Actually send execute_request cmd_id=self.km_client.execute(cmd **kwargs)<line_sep># Send after unless it is blank <if_stmt><not>ismeta<and>aft<block_start>self.send(aft ismeta=<true>)<block_end><return>cmd_id<block_end><def_stmt>get_kernel_info self language<block_start>"""Explicitly ask the jupyter kernel for its pid .. note:: Thread: <- cfile <- vim_pid -> lang -> kernel_pid Returns ------- dict dict with keys: {'kernel_type', 'pid', 'cwd', 'hostname'} """<line_sep># Check in <if_stmt>self.kernel_info['kernel_type']<not><in>list_languages()<block_start>echom('I don'<concat>'t know how to get infos for a Jupyter kernel of type "{}"'.format(self.kernel_info['kernel_type']) 'WarningMsg')<block_end># Fill kernel_info self.kernel_info.update({'connection_file':self.cfile 'id':match_kernel_id(self.cfile) # int id of cfile # Get from kernel info 'pid':self.send_code_and_get_reply(language.pid) # PID of kernel 'cwd':self.send_code_and_get_reply(language.cwd) 'hostname':self.send_code_and_get_reply(language.hostname) })<line_sep># Return <return>self.kernel_info<block_end><def_stmt>send_code_and_get_reply self code<block_start>"""Get variable _res from code string. .. note:: Only used by get_kernel_info (internal) => send with ismeta. Returns ------- str Unquoted string of the message reply. """<line_sep># Send message msg_id=self.send(code ismeta=<true> silent=<true> user_expressions={'_res':'_res'})<line_sep># Wait to get message back from kernel (1 sec) reply=self.get_reply_msg(msg_id)<line_sep># Get _res from user expression res=reply.get('content' {}).get('user_expressions' {}).get('_res' {}).get('data' {}).get('text/plain' -1)<line_sep># Try again parse messages <if_stmt>res<eq>-1<block_start>line_number=reply.get('content' {}).get('execution_count' -1)<line_sep>msgs=self.get_pending_msgs()<line_sep>res=parse_iopub_for_reply(msgs line_number)<block_end># Rest in peace <return>unquote_string(res)<block_end><block_end><class_stmt>Sync()<block_start>"""Synchronization (not so) primitives, for safe thread support. Attributes ---------- thread : :obj:`Thread` or None The running thread. stop : bool True if thread should not be stopped, False otherwise. line_queue : :obj:`Queue` Queue of lines of code to echo to the kernel. msg_lock : :obj:`Lock` lock to retrieve messages one thread at a time. """<def_stmt>__init__ self<block_start>self.thread=<none><line_sep>self.stop=<false><line_sep>self.line_queue=Queue()<line_sep>self.msg_lock=Lock()<block_end><def_stmt>check_stop self<block_start>"""Check and reset stop value. Returns ------- bool Last value of `self.stop`. 
"""<line_sep>last=self.stop<if_stmt>self.stop<block_start>self.stop=<false><block_end><return>last<block_end><def_stmt>stop_thread self<block_start>"""Stop current thread."""<if_stmt>self.thread<is><none><block_start><return><block_end><if_stmt><not>self.thread.is_alive()<block_start>self.thread=<none><line_sep><return><block_end># Wait 1 sec max self.stop=<true><for_stmt>_ range(100)<block_start><if_stmt><not>self.stop<block_start>sleep(0.010)<block_end><block_end>self.thread=<none><line_sep><return><block_end><def_stmt>start_thread self target=<none> args=<none><block_start>"""Stop last / Create new / Start thread. Parameters ---------- target : callable, optional, default=None Callable object to which `args` will be passed. args : list, optional, default=None """<if_stmt>args<is><none><block_start>args=list()<block_end>self.stop_thread()<line_sep>self.thread=Thread(target=target args=args daemon=<true>)<line_sep>self.thread.start()<block_end><block_end># ----------------------------------------------------------------------------- # Parsers # ----------------------------------------------------------------------------- <def_stmt>parse_iopub_for_reply msgs line_number<block_start>"""Get kernel response from message pool (Async). .. note:: some kernel (iperl) do not discriminate when client asks for `user_expressions`. But still they give a printable output. Parameters ---------- msgs : list List of messages to parse. line_number : int The message number of the corresponding code. Returns ------- str The kernel response to the messages. """<line_sep>res=-1<line_sep># Parse all execute <for_stmt>msg msgs# Get the result of execution <block_start>content=msg.get('content' <false>)<if_stmt><not>content<block_start><continue><block_end>ec=int(content.get('execution_count' 0))<if_stmt><not>ec<block_start><continue><block_end><if_stmt>line_number<not><in>(-1 ec)<block_start><continue><block_end>msg_type=msg.get('header' {}).get('msg_type' '')<if_stmt>msg_type<not><in>('execute_result' 'stream')<block_start><continue><block_end>res=content.get('data' {}).get('text/plain' -1)<line_sep>res=res<if>res<ne>-1<else>content.get('text' -1)<line_sep><break><block_end><return>res<block_end>
# python DDP_moco_ccrop.py path/to/this/config # model dim=128<line_sep>model=dict(type='ResNet' depth=18 num_classes=dim maxpool=<false>)<line_sep>moco=dict(dim=dim K=65536 m=0.999 T=0.20 mlp=<true>)<line_sep>loss=dict(type='CrossEntropyLoss')<line_sep># data root='/path/to/your/dataset'<line_sep>mean=(0.4914 0.4822 0.4465)<line_sep>std=(0.2023 0.1994 0.2010)<line_sep>batch_size=512<line_sep>num_workers=4<line_sep>data=dict(train=dict(ds_dict=dict(type='CIFAR10_boxes' root=root train=<true> ) rcrop_dict=dict(type='cifar_train_rcrop' mean=mean std=std) ccrop_dict=dict(type='cifar_train_ccrop' alpha=0.1 mean=mean std=std) ) eval_train=dict(ds_dict=dict(type='CIFAR10' root=root train=<true> ) trans_dict=dict(type='cifar_test' mean=mean std=std) ) )<line_sep># boxes warmup_epochs=100<line_sep>loc_interval=100<line_sep>box_thresh=0.10<line_sep># training optimizer & scheduler epochs=500<line_sep>lr=0.5<line_sep>optimizer=dict(type='SGD' lr=lr momentum=0.9 weight_decay=1e-4)<line_sep>lr_cfg=dict(# passed to adjust_learning_rate(cfg=lr_cfg) type='Cosine' steps=epochs lr=lr decay_rate=0.1 # decay_steps=[100, 150] warmup_steps=0 # warmup_from=0.01 )<line_sep># log & save log_interval=20<line_sep>save_interval=250<line_sep>work_dir=<none># rewritten by args resume=<none><line_sep>load=<none><line_sep>port=10001<line_sep>
# coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Reddit dataset using tldr as summaries."""<import_stmt>json<import_stmt>os<import_stmt>datasets<line_sep>_CITATION=""" @inproceedings{volske-etal-2017-tl, title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization}, author = {<NAME> and <NAME> and <NAME> and <NAME>}, booktitle = {Proceedings of the Workshop on New Frontiers in Summarization}, month = {sep}, year = {2017}, address = {Copenhagen, Denmark}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W17-4508}, doi = {10.18653/v1/W17-4508}, pages = {59--63}, abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.}, } """<line_sep>_DESCRIPTION=""" This corpus contains preprocessed posts from the Reddit dataset. The dataset consists of 3,848,330 posts with an average length of 270 words for content, and 28 words for the summary. Features includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id. Content is used as document and summary is used as summary. 
"""<line_sep>_URL="https://zenodo.org/record/1043504/files/corpus-webis-tldr-17.zip?download=1"<line_sep>_DOCUMENT="content"<line_sep>_SUMMARY="summary"<line_sep>_ADDITIONAL_FEATURES=["author" "body" "normalizedBody" "subreddit" "subreddit_id" "id"]<class_stmt>Reddit(datasets.GeneratorBasedBuilder)<block_start>"""Reddit Dataset."""<line_sep>VERSION=datasets.Version("1.0.0")<def_stmt>_info self<block_start><return>datasets.DatasetInfo(description=_DESCRIPTION features=datasets.Features({k:datasets.Value("string")<for>k _ADDITIONAL_FEATURES+[_DOCUMENT _SUMMARY]}) supervised_keys=<none> homepage="https://github.com/webis-de/webis-tldr-17-corpus" citation=_CITATION )<block_end><def_stmt>_split_generators self dl_manager<block_start>"""Returns SplitGenerators."""<line_sep>dl_path=dl_manager.download_and_extract(_URL)<line_sep><return>[datasets.SplitGenerator(name=datasets.Split.TRAIN gen_kwargs={"path":os.path.join(dl_path "corpus-webis-tldr-17.json")} )]<block_end><def_stmt>_generate_examples self path=<none><block_start>"""Yields examples."""<with_stmt>open(path "rb")<as>f<block_start><for_stmt>i,line enumerate(f)# possible keys are: # author: string (nullable = true) # body: string (nullable = true) # normalizedBody: string (nullable = true) # content: string (nullable = true) # content_len: long (nullable = true) # summary: string (nullable = true) # summary_len: long (nullable = true) # id: string (nullable = true) # subreddit: string (nullable = true) # subreddit_id: string (nullable = true) # title: string (nullable = true) <block_start>d=json.loads(line)<if_stmt>_SUMMARY<in>d<and>_DOCUMENT<in>d<block_start><yield>i {k:d.get(k "")<for>k _ADDITIONAL_FEATURES+[_DOCUMENT _SUMMARY]}<block_end><block_end><block_end><block_end><block_end>
<import_stmt>re<def_stmt>audit_link linkText uri<block_start>"""Generate link "markdown" from URI."""<line_sep><return>'{{{}|{}}}'.format(linkText uri)<block_end><def_stmt>path_to_text path<block_start>"""Convert object path to the text portion."""<line_sep>accession=re.match(r'\/.*\/(.*)\/' path)<line_sep><return>accession.group(1)<if>accession<else><none><block_end><def_stmt>space_in_words objects_string<block_start>"""Insert a space before each interior capital letter, e.g. AntibodyChar --> Antibody Char."""<line_sep>add_space=re.sub(r"(\w)([A-Z])" r"\1 \2" objects_string)<line_sep><return>add_space<block_end>
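# Added usage sketch (hedged, not part of the original module; the accession path below is hypothetical): expected outputs follow directly from the helpers above <assert_stmt>audit_link('report' 'http://example.org')<eq>'{report|http://example.org}'<line_sep><assert_stmt>path_to_text('/files/ENCFF000ABC/')<eq>'ENCFF000ABC'<line_sep><assert_stmt>space_in_words('AntibodyChar')<eq>'Antibody Char'<line_sep>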
<import_stmt>pytest<import_from_stmt>django.utils timezone<import_from_stmt>dynamic_models cache<line_sep>TEST_MODEL_NAME="test"<line_sep>now=timezone.now()<line_sep>@pytest.fixture<def_stmt>mock_now monkeypatch<block_start>monkeypatch.setattr(timezone "now" <lambda>:now)<block_end><def_stmt>test_get_and_update_last_modified mock_now<block_start><assert_stmt>cache.get_last_modified(TEST_MODEL_NAME)<is><none><line_sep>cache.update_last_modified(TEST_MODEL_NAME)<assert_stmt>cache.get_last_modified(TEST_MODEL_NAME)<eq>now<block_end><def_stmt>test_delete_last_modified mock_now<block_start>cache.update_last_modified(TEST_MODEL_NAME)<assert_stmt>cache.get_last_modified(TEST_MODEL_NAME)<eq>now<line_sep>cache.clear_last_modified(TEST_MODEL_NAME)<assert_stmt>cache.get_last_modified(TEST_MODEL_NAME)<is><none><block_end>
# Copyright 2021 The Kubeflow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Deprecated. See kfp.v2.components.types.type_utils instead. This module will be removed in KFP v2.0. """<import_stmt>warnings<import_from_stmt>kfp.v2.components.types type_utils<line_sep>warnings.warn('Module kfp.dsl.type_utils is deprecated and will be removed'<concat>' in KFP v2.0. Please use from kfp.v2.components.types.type_utils instead.' category=FutureWarning)<line_sep>is_parameter_type=type_utils.is_parameter_type<line_sep>get_artifact_type_schema=type_utils.get_artifact_type_schema<line_sep>get_parameter_type=type_utils.get_parameter_type<line_sep>get_input_artifact_type_schema=type_utils.get_input_artifact_type_schema<line_sep>
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library of common learning rate schedules."""<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<def_stmt>exponential_decay_with_burnin global_step learning_rate_base learning_rate_decay_steps learning_rate_decay_factor burnin_learning_rate=0.0 burnin_steps=0<block_start>"""Exponential decay schedule with burn-in period. In this schedule, learning rate is fixed at burnin_learning_rate for a fixed period, before transitioning to a regular exponential decay schedule. Args: global_step: int tensor representing global step. learning_rate_base: base learning rate. learning_rate_decay_steps: steps to take between decaying the learning rate. Note that this includes the number of burn-in steps. learning_rate_decay_factor: multiplicative factor by which to decay learning rate. burnin_learning_rate: initial learning rate during burn-in period. If 0.0 (which is the default), then the burn-in learning rate is simply set to learning_rate_base. burnin_steps: number of steps to use burnin learning rate. Returns: a (scalar) float tensor representing learning rate """<if_stmt>burnin_learning_rate<eq>0<block_start>burnin_learning_rate=learning_rate_base<block_end>post_burnin_learning_rate=tf.train.exponential_decay(learning_rate_base global_step learning_rate_decay_steps learning_rate_decay_factor staircase=<true>)<line_sep><return>tf.where(tf.less(tf.cast(global_step tf.int32) tf.constant(burnin_steps)) tf.constant(burnin_learning_rate) post_burnin_learning_rate)<block_end><def_stmt>cosine_decay_with_warmup global_step learning_rate_base total_steps warmup_learning_rate=0.0 warmup_steps=0<block_start>"""Cosine decay schedule with warm up period. Cosine annealing learning rate as described in: Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts. ICLR 2017. https://arxiv.org/abs/1608.03983 In this schedule, the learning rate grows linearly from warmup_learning_rate to learning_rate_base for warmup_steps, then transitions to a cosine decay schedule. Args: global_step: int64 (scalar) tensor representing global step. learning_rate_base: base learning rate. total_steps: total number of training steps. warmup_learning_rate: initial learning rate for warm up. warmup_steps: number of warmup steps. Returns: a (scalar) float tensor representing learning rate. Raises: ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps. 
"""<if_stmt>learning_rate_base<l>warmup_learning_rate<block_start><raise>ValueError('learning_rate_base must be larger '<concat>'or equal to warmup_learning_rate.')<block_end><if_stmt>total_steps<l>warmup_steps<block_start><raise>ValueError('total_steps must be larger or equal to '<concat>'warmup_steps.')<block_end>learning_rate=0.5<times>learning_rate_base<times>(1+tf.cos(np.pi<times>(tf.cast(global_step tf.float32)-warmup_steps)/float(total_steps-warmup_steps)))<if_stmt>warmup_steps<g>0<block_start>slope=(learning_rate_base-warmup_learning_rate)/warmup_steps<line_sep>pre_cosine_learning_rate=slope<times>tf.cast(global_step tf.float32)+warmup_learning_rate<line_sep>learning_rate=tf.where(tf.less(tf.cast(global_step tf.int32) warmup_steps) pre_cosine_learning_rate learning_rate)<block_end><return>learning_rate<block_end><def_stmt>manual_stepping global_step boundaries rates<block_start>"""Manually stepped learning rate schedule. This function provides fine grained control over learning rates. One must specify a sequence of learning rates as well as a set of integer steps at which the current learning rate must transition to the next. For example, if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning rate returned by this function is .1 for global_step=0,...,4, .01 for global_step=5...9, and .001 for global_step=10 and onward. Args: global_step: int64 (scalar) tensor representing global step. boundaries: a list of global steps at which to switch learning rates. This list is assumed to consist of increasing positive integers. rates: a list of (float) learning rates corresponding to intervals between the boundaries. The length of this list must be exactly len(boundaries) + 1. Returns: a (scalar) float tensor representing learning rate Raises: ValueError: if one of the following checks fails: 1. boundaries is a strictly increasing list of positive integers 2. len(rates) == len(boundaries) + 1 """<if_stmt>any([b<l>0<for>b boundaries])<or>any([<not>isinstance(b int)<for>b boundaries])<block_start><raise>ValueError('boundaries must be a list of positive integers')<block_end><if_stmt>any([bnext<le>b<for>bnext,b zip(boundaries[1:] boundaries[:-1])])<block_start><raise>ValueError('Entries in boundaries must be strictly increasing.')<block_end><if_stmt>any([<not>isinstance(r float)<for>r rates])<block_start><raise>ValueError('Learning rates must be floats')<block_end><if_stmt>len(rates)<ne>len(boundaries)+1<block_start><raise>ValueError('Number of provided learning rates must exceed '<concat>'number of boundary points by exactly 1.')<block_end><if_stmt><not>boundaries<block_start><return>tf.constant(rates[0])<block_end>step_boundaries=tf.constant(boundaries tf.int32)<line_sep>num_boundaries=len(boundaries)<line_sep>learning_rates=tf.constant(rates tf.float32)<line_sep>index=tf.reduce_min(tf.where(# Casting global step to tf.int32 is dangerous, but necessary to be # compatible with TPU. tf.greater(step_boundaries tf.cast(global_step tf.int32)) tf.constant(range(num_boundaries) dtype=tf.int32) tf.constant([num_boundaries]<times>num_boundaries dtype=tf.int32)))<line_sep><return>tf.reduce_sum(learning_rates<times>tf.one_hot(index len(rates) dtype=tf.float32))<block_end>
# Copyright IBM Corp. 2016 All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>os<import_stmt>time<import_stmt>unittest<import_stmt>random<import_stmt>string<import_from_stmt>hfc.fabric_ca.caservice CAService<import_from_stmt>test.integration.utils cli_call<with_stmt>open(os.path.join(os.path.dirname(__file__) "../fixtures/ca/enroll-csr.pem"))<as>f<block_start>test_pem=f.read()<block_end>ENROLLMENT_ID="admin"<line_sep>ENROLLMENT_SECRET="adminpw"<def_stmt>get_random_username <block_start><return>''.join([random.choice(string.ascii_letters+string.digits)<for>n range(9)])<block_end><class_stmt>IdentityServiceTest(unittest.TestCase)<block_start>"""Test for ca module. """<def_stmt>setUp self<block_start>self._enrollment_id=ENROLLMENT_ID<line_sep>self._enrollment_secret=ENROLLMENT_SECRET<if_stmt>os.getenv("CA_ADDR")<block_start>self._ca_server_address=os.getenv("CA_ADDR")<block_end><else_stmt><block_start>self._ca_server_address="localhost:7054"<block_end>self.compose_file_path=os.path.normpath(os.path.join(os.path.dirname(__file__) "../fixtures/ca/docker-compose.yml"))<line_sep>self.start_test_env()<line_sep>self._ca_service=CAService("http://"+self._ca_server_address)<line_sep>id=self._enrollment_id<line_sep>secret=self._enrollment_secret<line_sep>self._adminEnrollment=self._ca_service.enroll(id secret)<line_sep>self._identityService=self._ca_service.newIdentityService()<block_end><def_stmt>tearDown self<block_start>self.shutdown_test_env()<block_end><def_stmt>start_test_env self<block_start>cli_call(["docker-compose" "-f" self.compose_file_path "up" "-d"])<line_sep>time.sleep(5)<block_end><def_stmt>shutdown_test_env self<block_start>cli_call(["docker-compose" "-f" self.compose_file_path "down"])<block_end><def_stmt>test_create_success self<block_start>"""Test create success. """<line_sep>username=get_random_username()<line_sep>secret=self._identityService.create(self._adminEnrollment username enrollmentSecret='pass')<line_sep>self.assertTrue(secret<eq>'pass')<block_end><def_stmt>test_getOne_success self<block_start>"""Test getOne success. """<line_sep>username=get_random_username()<line_sep>self._identityService.create(self._adminEnrollment username)<line_sep>res=self._identityService.getOne(username self._adminEnrollment)<line_sep>self.assertTrue(res['result']['id']<eq>username)<line_sep>self.assertTrue(res['success']<is><true>)<block_end><def_stmt>test_getAll_success self<block_start>"""Test getAll success. """<line_sep>username=get_random_username()<line_sep>self._identityService.create(self._adminEnrollment username)<line_sep>res=self._identityService.getAll(self._adminEnrollment)<line_sep>self.assertTrue(len(res['result']['identities'])<g>0)<line_sep>self.assertTrue(res['success']<is><true>)<block_end><def_stmt>test_delete_success self<block_start>"""Test delete success. 
"""<line_sep>username=get_random_username()<line_sep>self._identityService.create(self._adminEnrollment username)<line_sep>res=self._identityService.delete(username self._adminEnrollment)<line_sep>self.assertTrue(res['success']<is><true>)<block_end><def_stmt>test_update_success self<block_start>"""Test update success. """<line_sep>username=get_random_username()<line_sep>self._identityService.create(self._adminEnrollment username)<line_sep>res=self._identityService.update(username self._adminEnrollment maxEnrollments=3)<line_sep>self.assertTrue(res['result']['id']<eq>username)<line_sep>self.assertTrue(res['result']['max_enrollments']<eq>3)<line_sep>self.assertTrue(res['success']<is><true>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# -*- coding: utf-8 -*- # This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and # is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012) <import_from_stmt>django.contrib.auth.decorators login_required<import_from_stmt>django.conf.urls url<import_from_stmt>.views feedbackcreate thankyou FeedbackListView<import_from_stmt>.models Feedback<line_sep>urlpatterns=[url(r'^create/?$' feedbackcreate name='feedback-create') url(r'^view/?$' FeedbackListView.as_view(template_name='feedback_list.html' queryset=Feedback.objects.all()) name='feedback-list') url(r'^thankyou/?$' thankyou name='thanks') ]<line_sep>
<import_from_stmt>datetime datetime<import_stmt>json<import_stmt>logging<import_stmt>random<import_stmt>time<import_from_stmt>urllib.parse urlencode urljoin<import_from_stmt>django.utils.functional cached_property<import_from_stmt>django.utils.text slugify<import_stmt>requests<import_from_stmt>zentral.core.stores.backends.base BaseEventStore<line_sep>logger=logging.getLogger('zentral.core.stores.backends.splunk')<class_stmt>EventStore(BaseEventStore)<block_start>max_batch_size=100<line_sep>max_retries=3<def_stmt>__init__ self config_d<block_start>super().__init__(config_d)<line_sep>self.collector_url=urljoin(config_d["hec_url"] "/services/collector/event")<line_sep>self.hec_token=config_d["hec_token"]<line_sep>self.search_app_url=config_d.get("search_app_url")<line_sep># If set, the computer name of the machine snapshots of these sources will be used # as host field value. First source with a non-empty value will be picked. self.computer_name_as_host_sources=[slugify(src)<for>src config_d.get("computer_name_as_host_sources" [])]<line_sep>self.serial_number_field=config_d.get("serial_number_field" "machine_serial_number")<if_stmt>self.search_app_url<block_start>self.machine_events_url=<true><line_sep>self.probe_events_url=<true><block_end>self.verify_tls=config_d.get('verify_tls' <true>)<line_sep>self.index=config_d.get("index")<line_sep>self.source=config_d.get("source")<line_sep>self._collector_session=<none><block_end>@cached_property<def_stmt>collector_session self<block_start>session=requests.Session()<line_sep>session.verify=self.verify_tls<line_sep>session.headers.update({'Authorization':f'Splunk {self.hec_token}' 'Content-Type':'application/json'})<line_sep><return>session<block_end>@staticmethod<def_stmt>_convert_datetime dt<block_start><if_stmt>isinstance(dt str)<block_start>dt=dt.replace("+00:00" "").replace("Z" "").strip()<if_stmt>"."<in>dt<block_start>fmt="%Y-%m-%dT%H:%M:%S.%f"<block_end><else_stmt><block_start>fmt="%Y-%m-%dT%H:%M:%S"<block_end>dt=datetime.strptime(dt fmt)<block_end>ts=time.mktime(dt.timetuple())+dt.microsecond/1e6<line_sep><return>"{:.3f}".format(ts)<block_end><def_stmt>_serialize_event self event<block_start><if_stmt><not>isinstance(event dict)<block_start>event=event.serialize()<block_end>payload_event=event.pop("_zentral")<line_sep>created_at=payload_event.pop("created_at")<line_sep>event_type=payload_event.pop("type")<line_sep>namespace=payload_event.get("namespace" event_type)<line_sep>payload_event[namespace]=event<line_sep># host / serial number host="Zentral"<line_sep>machine_serial_number=payload_event.pop("machine_serial_number" <none>)<if_stmt>machine_serial_number<block_start>payload_event[self.serial_number_field]=machine_serial_number<line_sep>host=machine_serial_number<for_stmt>ms_src_slug self.computer_name_as_host_sources<block_start>machine_name=payload_event.get("machine" {}).get(ms_src_slug {}).get("name")<if_stmt>machine_name<block_start>host=machine_name<line_sep><break><block_end><block_end><block_end><else_stmt><block_start>observer=payload_event.get("observer" {}).get("hostname")<if_stmt>observer<block_start>host=observer<block_end><block_end>payload={"host":host "sourcetype":event_type "time":self._convert_datetime(created_at) "event":payload_event }<if_stmt>self.index<block_start>payload["index"]=self.index<block_end><if_stmt>self.source<block_start>payload["source"]=self.source<block_end><return>payload<block_end><def_stmt>store self event<block_start>payload=self._serialize_event(event)<for_stmt>i 
range(self.max_retries)<block_start>r=self.collector_session.post(self.collector_url json=payload)<if_stmt>r.ok<block_start><return><block_end><if_stmt>r.status_code<g>500<block_start>logger.error("Temporary server error")<if_stmt>i+1<l>self.max_retries<block_start>seconds=random.uniform(3 4)<times>(i+1)<line_sep>logger.error("Retry in %.1fs" seconds)<line_sep>time.sleep(seconds)<line_sep><continue><block_end><block_end>r.raise_for_status()<block_end><block_end><def_stmt>bulk_store self events<block_start><if_stmt>self.batch_size<l>2<block_start><raise>RuntimeError("bulk_store is not available when batch_size < 2")<block_end>event_keys=[]<line_sep>data=b""<for_stmt>event events<block_start>payload=self._serialize_event(event)<line_sep>event_keys.append((payload["event"]["id"] payload["event"]["index"]))<if_stmt>data<block_start>data<augadd>b"\n"<block_end>data<augadd>json.dumps(payload).encode("utf-8")<block_end><for_stmt>i range(self.max_retries)<block_start>r=self.collector_session.post(self.collector_url data=data)<if_stmt>r.ok<block_start><return>event_keys<block_end><if_stmt>r.status_code<g>500<block_start>logger.error("Temporary server error")<if_stmt>i+1<l>self.max_retries<block_start>seconds=random.uniform(3 4)<times>(i+1)<line_sep>logger.error("Retry in %.1fs" seconds)<line_sep>time.sleep(seconds)<line_sep><continue><block_end><block_end>r.raise_for_status()<block_end><block_end><def_stmt>_get_search_url self query from_dt to_dt<block_start>kwargs={"q":f"search {query}" "earliest":self._convert_datetime(from_dt) "latest":self._convert_datetime(to_dt)<if>to_dt<else>"now"}<line_sep><return>"{}?{}".format(self.search_app_url urlencode(kwargs))<block_end># machine events <def_stmt>_get_machine_events_query self serial_number event_type=<none><block_start>query_chunks=[("host" serial_number)]<if_stmt>self.index<block_start>query_chunks.append(("index" self.index))<block_end><if_stmt>event_type<block_start>query_chunks.append(("sourcetype" event_type))<block_end><return>" ".join('{}="{}"'.format(k v.replace('"' '\\"'))<for>k,v query_chunks)<block_end><def_stmt>get_machine_events_url self serial_number from_dt to_dt=<none> event_type=<none><block_start><return>self._get_search_url(self._get_machine_events_query(serial_number event_type) from_dt to_dt)<block_end># probe events <def_stmt>_get_probe_events_query self probe event_type=<none><block_start>filter_chunks=[]<if_stmt>self.index<block_start>filter_chunks.append(("index" self.index))<block_end><if_stmt>event_type<block_start>filter_chunks.append(("sourcetype" event_type))<block_end>filter_str=" ".join('{}="{}"'.format(k v.replace('"' '\\"'))<for>k,v filter_chunks)<line_sep><return>f'{filter_str} | spath "probes{{}}.pk" | search "probes{{}}.pk"={probe.pk}'<block_end><def_stmt>get_probe_events_url self probe from_dt to_dt=<none> event_type=<none><block_start><return>self._get_search_url(self._get_probe_events_query(probe event_type) from_dt to_dt)<block_end><block_end>
<import_from_stmt>.base VoxelGrid<line_sep>__all__=['VoxelGrid' ]<line_sep>
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<line_sep>""" Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
"""<import_from_stmt>programy.utils.logging.ylogger YLogger<import_from_stmt>programy.parser.pattern.nodes.base PatternNode<import_from_stmt>programy.parser.pattern.matcher EqualsMatch<import_from_stmt>programy.parser.exceptions ParserException<import_from_stmt>programy.utils.language.japanese JapaneseLanguage<import_stmt>re<class_stmt>PatternISetNode(PatternNode)<block_start>iset_count=1<def_stmt>__init__ self attribs text userid='*' element=<none> brain=<none><block_start>PatternNode.__init__(self userid)<line_sep>self._words={}<line_sep>self._values={}<if_stmt>'words'<in>attribs<block_start>words=attribs['words']<block_end><elif_stmt>text<block_start>words=text<block_end><else_stmt><block_start><raise>ParserException("No words specified as attribute or text" xml_element=element nodename='iset')<block_end>check_words=JapaneseLanguage.zenhan_normalize(words)<line_sep>self._is_CJK=JapaneseLanguage.is_CJKword(check_words)<if_stmt>self._parse_words(words)<is><false><block_start><raise>ParserException("empty element in words" xml_element=element nodename='iset')<block_end>self._iset_name="iset_%d"%(PatternISetNode.iset_count)<line_sep>PatternISetNode.iset_count<augadd>1<block_end><def_stmt>_parse_words self words<block_start>is_success=<true><line_sep>splits=words.split(",")<for_stmt>word splits<block_start>word=word.strip()<if_stmt>word<eq>''<block_start>is_success=<false><block_end><else_stmt><block_start>self.add_set_values(word)<block_end><block_end><return>is_success<block_end><def_stmt>add_set_values self value<block_start>checkwords=JapaneseLanguage.zenhan_normalize(value)<line_sep>checkwords=checkwords.upper()<if_stmt>self._is_CJK<is><true><block_start>checkwords=checkwords.replace(' ' '')<block_end><else_stmt><block_start>checkwords=re.sub(' +' ' ' checkwords)<block_end><if_stmt>checkwords<in>self._values<block_start><return><block_end>self._values[checkwords]=value<if_stmt>self._is_CJK<is><true><block_start>splits=checkwords<line_sep>key=splits[0].upper()<block_end><else_stmt><block_start>splits=checkwords.split()<line_sep>key=splits[0].upper()<block_end><if_stmt>key<not><in>self._words<block_start>self._words[key]=[]<block_end>self._words[key].append(splits)<block_end>@property<def_stmt>words self<block_start><return>self._words<block_end>@property<def_stmt>iset_name self<block_start><return>self._iset_name<block_end><def_stmt>is_iset self<block_start><return><true><block_end><def_stmt>to_xml self client_context include_user=<false><block_start>string=""<if_stmt>include_user<is><true><block_start>string<augadd>'<iset userid="%s" words="'%self.userid<block_end><else_stmt><block_start>string<augadd>'<iset words="'<block_end><if_stmt>self._is_CJK<is><false><block_start>string<augadd>", ".join(self._words)<block_end><else_stmt><block_start>join_char=""<for_stmt>key self.words<block_start><for_stmt>value self.words[key]<block_start>string<augadd>'%s%s'%(join_char value)<line_sep>join_char=", "<block_end><block_end><block_end>string<augadd>'">'<line_sep>string<augadd>super(PatternISetNode self).to_xml(client_context)<line_sep>string<augadd>"</iset>\n"<line_sep><return>string<block_end><def_stmt>to_string self verbose=<true><block_start><if_stmt>self._is_CJK<is><false><block_start>words_str=",".join(self._words)<block_end><else_stmt><block_start>words_str=""<line_sep>join_char=""<for_stmt>key self.words<block_start><for_stmt>value self.words[key]<block_start>words_str<augadd>'%s%s'%(join_char 
value)<line_sep>join_char=","<block_end><block_end><block_end><if_stmt>verbose<is><true><block_start><return>"ISET [%s] [%s] words=[%s]"%(self.userid self._child_count(verbose) words_str)<block_end><return>"ISET words=[%s]"%words_str<block_end><def_stmt>equivalent self other<block_start><if_stmt>self.userid<ne>other.userid<block_start><return><false><block_end><if_stmt>len(self.words)<ne>len(other.words)<block_start><return><false><block_end><if_stmt>self._is_CJK<is><false><block_start><for_stmt>word self.words<block_start><if_stmt>word<not><in>other.words<block_start><return><false><block_end><block_end><block_end><else_stmt><block_start><for_stmt>key self.words<block_start><if_stmt>key<not><in>other.words<block_start><return><false><block_end><if_stmt>len(self.words[key])<ne>len(other.words[key])<block_start><return><false><block_end><for_stmt>value self.words[key]<block_start><if_stmt>value<not><in>other.words[key]<block_start><return><false><block_end><block_end><block_end><block_end><return><true><block_end><def_stmt>equals self client_context words word_no<block_start><if_stmt>client_context.match_nlu<is><true><block_start><return>EqualsMatch(<false> word_no)<block_end><if_stmt>self.userid<ne>'*'<block_start><if_stmt>self.userid<ne>client_context.userid<block_start><return>EqualsMatch(<false> word_no)<block_end><block_end>word=words.word(word_no)<if_stmt>word<is><not><none><block_start>match=self.words_in_set(client_context words word_no)<if_stmt>match.matched<is><true><block_start>YLogger.debug(client_context "Found word [%s] in iset" word)<line_sep><return>match<block_end><block_end>YLogger.debug(client_context "No word [%s] found in iset" word)<line_sep><return>EqualsMatch(<false> word_no)<block_end><def_stmt>words_in_set self client_context words word_no<block_start>word=words.word(word_no)<line_sep>check_word=JapaneseLanguage.zenhan_normalize(word)<line_sep>word=check_word.upper()<if_stmt>self._is_CJK<is><true><block_start>keyword=word[0]<block_end><else_stmt><block_start>keyword=word<block_end><if_stmt>keyword<in>self._words<block_start>phrases=self._words[keyword]<line_sep>phrases=sorted(phrases key=len reverse=<true>)<for_stmt>phrase phrases<block_start><if_stmt>self._is_CJK<is><true><block_start>phrase_words=client_context.brain.tokenizer.texts_to_words(phrase)<line_sep>phrase="".join(phrase_words)<line_sep>phrase_text=phrase<block_end><else_stmt><block_start>phrase_text=" ".join(phrase)<block_end>phrase_word_no=0<line_sep>words_word_no=word_no<while_stmt>phrase_word_no<l>len(phrase)<and>words_word_no<l>words.num_words()<block_start>word=words.word(words_word_no)<line_sep>check_word=JapaneseLanguage.zenhan_normalize(word)<line_sep>word=check_word.upper()<if_stmt>self._is_CJK<is><true><block_start>phrase_word=phrase[phrase_word_no:(phrase_word_no+len(word))]<if_stmt>phrase_word<eq>word<block_start><if_stmt>(phrase_word_no+len(word))<eq>len(phrase)<block_start><return>EqualsMatch(<true> words_word_no self._values[phrase_text])<block_end><block_end><else_stmt><block_start><break><block_end>phrase_word_no<augadd>len(word)<block_end><else_stmt><block_start>phrase_word=phrase[phrase_word_no]<if_stmt>phrase_word<eq>word<block_start><if_stmt>phrase_word_no+1<eq>len(phrase)<block_start><return>EqualsMatch(<true> words_word_no self._values[phrase_text])<block_end><block_end><else_stmt><block_start><break><block_end>phrase_word_no<augadd>1<block_end>words_word_no<augadd>1<block_end><block_end><block_end><return>EqualsMatch(<false> word_no)<block_end><block_end>
ACCOUNT_SETTING_SLACK_URL_KEY="slackWebhookUrl"<line_sep>NOTIFY_ON_SUCCESS_KEY="notifyOnSuccess"<line_sep>NOTIFY_ON_FAILURE_KEY="notifyOnFailure"<line_sep>
"""Unit tests for code in urllib.response."""<import_stmt>socket<import_stmt>tempfile<import_stmt>urllib.response<import_stmt>unittest<class_stmt>TestResponse(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.sock=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>self.fp=self.sock.makefile('rb')<line_sep>self.test_headers={"Host":"www.python.org" "Connection":"close"}<block_end><def_stmt>test_with self<block_start>addbase=urllib.response.addbase(self.fp)<line_sep>self.assertIsInstance(addbase tempfile._TemporaryFileWrapper)<def_stmt>f <block_start><with_stmt>addbase<as>spam<block_start><pass><block_end><block_end>self.assertFalse(self.fp.closed)<line_sep>f()<line_sep>self.assertTrue(self.fp.closed)<line_sep>self.assertRaises(ValueError f)<block_end><def_stmt>test_addclosehook self<block_start>closehook_called=<false><def_stmt>closehook <block_start><nonlocal>closehook_called<line_sep>closehook_called=<true><block_end>closehook=urllib.response.addclosehook(self.fp closehook)<line_sep>closehook.close()<line_sep>self.assertTrue(self.fp.closed)<line_sep>self.assertTrue(closehook_called)<block_end><def_stmt>test_addinfo self<block_start>info=urllib.response.addinfo(self.fp self.test_headers)<line_sep>self.assertEqual(info.info() self.test_headers)<block_end><def_stmt>test_addinfourl self<block_start>url="http://www.python.org"<line_sep>code=200<line_sep>infourl=urllib.response.addinfourl(self.fp self.test_headers url code)<line_sep>self.assertEqual(infourl.info() self.test_headers)<line_sep>self.assertEqual(infourl.geturl() url)<line_sep>self.assertEqual(infourl.getcode() code)<block_end><def_stmt>tearDown self<block_start>self.sock.close()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Import standard library's logging <import_stmt>logging<line_sep># Create function that converts dollars to cents <def_stmt>convert_dollars_to_cents dollars# Convert dollars to cents (as an integer, rounded to the nearest cent) <block_start>cents=round(dollars<times>100)<line_sep>logging.debug("debug")<line_sep>logging.info("info")<line_sep>logging.warning("warning")<line_sep>logging.error("error")<line_sep>logging.critical("critical")<line_sep># Return cents <return>cents<block_end># Create dollar amount dollars=12.40<line_sep># Run dollars to cents convert function convert_dollars_to_cents(dollars)<line_sep>
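# Added sketch (hedged, not part of the original snippet): the root logger defaults to WARNING, so only the warning/error/critical calls above produce output; reconfiguring with force=True (Python 3.8 and later) and re-running the function emits all five levels logging.basicConfig(level=logging.DEBUG force=<true>)<line_sep>convert_dollars_to_cents(0.99)<line_sep>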
# Copyright 2022 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines the ImageVAE model."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_from_stmt>magenta.models.image_stylization ops<import_from_stmt>tensor2tensor.layers common_hparams<import_from_stmt>tensor2tensor.layers common_layers<import_from_stmt>tensor2tensor.utils registry<import_from_stmt>tensor2tensor.utils t2t_model<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>tensorflow.compat.v1 estimator<as>tf_estimator<import_stmt>tensorflow_probability<as>tfp<line_sep>tfd=tfp.distributions<def_stmt>_softplus_inverse x<block_start>"""Helper which computes the function inverse of `tf.nn.softplus`."""<line_sep><return>tf.log(tf.math.expm1(x))<block_end>@registry.register_model<class_stmt>ImageVAE(t2t_model.T2TModel)<block_start>"""Defines the ImageVAE model."""<def_stmt>bottom self features# inputs and targets should all be images, no preprocessing needed. # but we do need to resize them to 64x64. <block_start>transformed_features=collections.OrderedDict()<line_sep>transformed_features['targets']=features['targets']<line_sep>transformed_features['inputs']=features['inputs']<line_sep>transformed_features['cls']=features['targets_cls']<if_stmt>'bottleneck'<in>features<block_start>transformed_features['bottleneck']=features['bottleneck']<block_end><return>transformed_features<block_end><def_stmt>body self features<block_start>train=self._hparams.mode<eq>tf_estimator.ModeKeys.TRAIN<line_sep><return>self.vae_internal(features self._hparams train)<block_end><def_stmt>top self body_output features# body_output should be a dict with 'outputs', which will be an image. # no postprocessing needed. <block_start><return>body_output<block_end><def_stmt>loss self logits features# logits should be dict with 'outputs', which is image. 
<block_start>targets=tf.reshape(features['targets'] [-1 64 64 1])<line_sep>weights=common_layers.weights_all(targets)<line_sep>loss_num=tf.pow(logits-targets 2)<line_sep><return>tf.reduce_sum(loss_num<times>weights) tf.reduce_sum(weights)<block_end><def_stmt>vae_internal self features hparams train# inputs and targets should both be images with dims [batch, 64, 64, 1] <block_start>inputs,targets=features['inputs'] features['targets']<line_sep>inputs=tf.reshape(inputs [-1 64 64 1])<line_sep>targets=tf.reshape(targets [-1 64 64 1])<line_sep>clss=features['cls']<with_stmt>tf.variable_scope('vae_internal' reuse=tf.AUTO_REUSE)# encoder <block_start>enc_out=self.visual_encoder(inputs clss hparams train)<line_sep>enc_out=tf.reshape(enc_out [-1 2<times>hparams.bottleneck_bits])<line_sep># bottleneck sampled_bottleneck,b_loss=self.bottleneck(enc_out)<line_sep>losses={'bottleneck_kl':tf.reduce_mean(b_loss)}<if_stmt>'bottleneck'<in>features<block_start><if_stmt>common_layers.shape_list(features['bottleneck'])[0]<eq>0# return bottleneck for interpolation # set losses['training'] = 0 so top() isn't called on it # potential todo: use losses dict so we have kl_loss here for non stop # gradient models <block_start><return>sampled_bottleneck {'training':0.0}<block_end><else_stmt># we want to use the given bottleneck <block_start>sampled_bottleneck=features['bottleneck']<block_end><block_end># finalize bottleneck unbottleneck=sampled_bottleneck<line_sep># decoder. dec_out=self.visual_decoder(unbottleneck clss hparams)<line_sep># calculate training loss here lol rec_loss=-dec_out.log_prob(inputs)<line_sep>elbo=tf.reduce_mean(-(b_loss+rec_loss))<line_sep>losses['rec_loss']=tf.reduce_mean(rec_loss)<line_sep>losses['training']=-elbo<if_stmt>(<not>hasattr(self 'summarized_imgs')<and>self._hparams.mode<ne>tf_estimator.ModeKeys.PREDICT)<block_start>self.summarized_imgs=<true><with_stmt>tf.name_scope(<none>) tf.name_scope('train'<if>train<else>'test')<block_start>tf.summary.image('rendered_out' dec_out.mean())<line_sep>tf.summary.image('rendered_og' inputs)<block_end><block_end><block_end><return>dec_out.mean() losses<block_end><def_stmt>bottleneck self x<block_start>z_size=self.hparams.bottleneck_bits<line_sep>x_shape=common_layers.shape_list(x)<with_stmt>tf.variable_scope('bottleneck' reuse=tf.AUTO_REUSE)<block_start>mu=x[<ellipsis> :self.hparams.bottleneck_bits]<if_stmt>self.hparams.mode<ne>tf_estimator.ModeKeys.TRAIN<block_start><return>mu 0.0# No sampling or kl loss on eval. <block_end>log_sigma=x[<ellipsis> self.hparams.bottleneck_bits:]<line_sep>epsilon=tf.random_normal(x_shape[:-1]+[z_size])<line_sep>z=mu+tf.exp(log_sigma/2)<times>epsilon<line_sep>kl=0.5<times>tf.reduce_mean(tf.exp(log_sigma)+tf.square(mu)-1.-log_sigma axis=-1)<line_sep># This is the 'free bits' trick mentioned in Kingma et al. 
(2016) free_bits=self.hparams.free_bits<line_sep>kl_loss=tf.reduce_mean(tf.maximum(kl-free_bits 0.0))<block_end><return>z kl_loss<times>self.hparams.kl_beta<block_end><def_stmt>visual_encoder self inputs clss hparams train<block_start><del_stmt>train<line_sep># goes from [batch, 64, 64, 1] to [batch, hidden_size] <with_stmt>tf.variable_scope('visual_encoder' reuse=tf.AUTO_REUSE)<block_start>ret=inputs<line_sep>clss=tf.reshape(clss [-1])<line_sep># conv layer, followed by instance norm + FiLM ret=tf.layers.Conv2D(hparams.base_depth 5 1 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2D(hparams.base_depth 5 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2D(2<times>hparams.base_depth 5 1 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2D(2<times>hparams.base_depth 5 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep># new conv layer, to bring shape down ret=tf.layers.Conv2D(2<times>hparams.bottleneck_bits 4 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep># new conv layer, to bring shape down ret=tf.layers.Conv2D(2<times>hparams.bottleneck_bits 4 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep># ret has 1024 ret=tf.layers.flatten(ret)<line_sep>ret=tf.layers.dense(ret 2<times>hparams.bottleneck_bits activation=<none>)<block_end><return>ret<block_end><def_stmt>visual_decoder self bottleneck clss hparams# goes from [batch, bottleneck_bits] to [batch, 64, 64, 1] <block_start><with_stmt>tf.variable_scope('visual_decoder' reuse=tf.AUTO_REUSE)# unbottleneck <block_start>ret=tf.layers.dense(bottleneck 1024 activation=<none>)<line_sep>ret=tf.reshape(ret [-1 4 4 64])<line_sep>clss=tf.reshape(clss [-1])<line_sep># new deconv to bring shape up ret=tf.layers.Conv2DTranspose(2<times>hparams.base_depth 4 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep># new deconv to bring shape up ret=tf.layers.Conv2DTranspose(2<times>hparams.base_depth 4 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2DTranspose(2<times>hparams.base_depth 5 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2DTranspose(2<times>hparams.base_depth 5 2 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2DTranspose(hparams.base_depth 5 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2DTranspose(hparams.base_depth 5 2 padding='SAME' 
activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2DTranspose(hparams.base_depth 5 padding='SAME' activation=<none>)(ret)<line_sep>ret=ops.conditional_instance_norm(ret clss hparams.num_categories)<line_sep>ret=tf.nn.relu(ret)<line_sep>ret=tf.layers.Conv2D(1 5 padding='SAME' activation=<none>)(ret)<line_sep>ret=tfd.Independent(tfd.Bernoulli(logits=ret) reinterpreted_batch_ndims=3 name='image')<block_end><return>ret<block_end><block_end>@registry.register_hparams<def_stmt>image_vae <block_start>"""Basic Image VAE model hparams."""<line_sep>hparams=common_hparams.basic_params1()<line_sep>hparams.daisy_chain_variables=<false><line_sep>hparams.batch_size=64<line_sep>hparams.hidden_size=32<line_sep>hparams.initializer='uniform_unit_scaling'<line_sep>hparams.initializer_gain=1.0<line_sep>hparams.weight_decay=0.0<line_sep># VAE hparams hparams.add_hparam('base_depth' 32)<line_sep>hparams.add_hparam('bottleneck_bits' 32)<line_sep># loss hparams hparams.add_hparam('kl_beta' 300)<line_sep>hparams.add_hparam('free_bits_div' 4)<line_sep>hparams.add_hparam('free_bits' 0.15)<line_sep># data format hparams hparams.add_hparam('num_categories' 62)<line_sep># problem hparams (required, don't modify) hparams.add_hparam('absolute' <false>)<line_sep>hparams.add_hparam('just_render' <true>)<line_sep>hparams.add_hparam('plus_render' <false>)<line_sep><return>hparams<block_end>
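# Illustrative sketch: the "free bits" KL term used by bottleneck() above,
# rewritten with plain NumPy so the math is easy to check by hand. The names
# mu, log_sigma, free_bits and kl_beta mirror the hparams defined in image_vae().
import numpy as np


def free_bits_kl(mu, log_sigma, free_bits=0.15, kl_beta=300.0):
    # KL(N(mu, sigma^2) || N(0, 1)) averaged over the latent dimension, matching
    # 0.5 * reduce_mean(exp(log_sigma) + mu^2 - 1 - log_sigma, axis=-1).
    kl = 0.5 * np.mean(np.exp(log_sigma) + np.square(mu) - 1.0 - log_sigma, axis=-1)
    # Per-example KL below `free_bits` nats is not penalised, which keeps the
    # posterior from collapsing onto the prior early in training.
    return np.mean(np.maximum(kl - free_bits, 0.0)) * kl_beta


# A posterior identical to the prior pays no KL cost at all.
print(free_bits_kl(np.zeros((4, 32)), np.zeros((4, 32))))  # 0.0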
""" Overview ======== Find where patterns are found, this plugin uses silver searcher to search for word patterns. It is useful to find where functions/methods are used over multiple files. Key-Commands ============ Namespace: fstmt Mode: NORMAL Event: <Control-z> Description: Same as <Key-bar> but matches insensitively. Mode: NORMAL Event: <Key-z> Description: Open the previous found pattern occurrences. Mode: NORMAL Event: <Key-Z> Description: Get the string under the cursor and perform a case sensitive and resursive search in the current project file directory. It grabs the string under the cursor only if there is no selected text. The search is performed in the current project folder, if fstmt cant find a .git, svn nor .hg it performs the search in the vy HOME directory. """<import_from_stmt>subprocess Popen STDOUT PIPE<import_from_stmt>vyapp.widgets LinePicker<import_from_stmt>vyapp.areavi AreaVi<import_from_stmt>re findall escape<import_from_stmt>vyapp.stderr printd<import_from_stmt>vyapp.app root<class_stmt>Fstmt<block_start>options=LinePicker()<line_sep>path='ag'<def_stmt>__init__ self area<block_start>self.area=area<line_sep>area.install('fstmt' ('NORMAL' '<Key-z>' <lambda>event:self.options.display()) ('NORMAL' '<Control-z>' <lambda>event:self.picker('-i')) ('NORMAL' '<Key-Z>' <lambda>event:self.picker('-s')))<block_end>@classmethod<def_stmt>c_path cls path='ag'<block_start>cls.path=path<line_sep>printd('Fstmt - Setting ag path = ' path)<block_end><def_stmt>catch_pattern self<block_start>pattern=self.area.join_ranges('sel')<line_sep>pattern=pattern<if>pattern<else>self.area.get(*self.area.get_word_range())<line_sep>pattern=escape(pattern)<line_sep><return>pattern<block_end><def_stmt>make_cmd self pattern dir *args<block_start>cmd=[Fstmt.path '--nocolor' '--nogroup' '--vimgrep' '--noheading']<line_sep>cmd.extend(args)<line_sep>cmd.extend([pattern dir])<line_sep><return>cmd<block_end><def_stmt>run_cmd self pattern *args<block_start>dir=self.area.project<line_sep>dir=dir<if>dir<else>AreaVi.HOME<line_sep>dir=dir<if>dir<else>self.area.filename<line_sep>child=Popen(self.make_cmd(pattern dir *args) stdout=PIPE stderr=STDOUT encoding=self.area.charset)<line_sep>regex='(.+):([0-9]+):[0-9]+:(.+)'<line_sep>ranges=findall(regex child.communicate()[0])<if_stmt>ranges<block_start>self.options(ranges)<block_end><else_stmt><block_start>root.status.set_msg('No pattern found!')<block_end><block_end><def_stmt>picker self *args<block_start>pattern=self.catch_pattern()<if_stmt><not>pattern<block_start>root.status.set_msg('No pattern set!')<block_end><else_stmt><block_start>self.run_cmd(pattern *args)<block_end><block_end><block_end>
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Execution Callbacks for Eager Mode."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_stmt>numpy<as>np<import_from_stmt>tensorflow.python pywrap_tensorflow<import_from_stmt>tensorflow.python.eager context<import_from_stmt>tensorflow.python.eager core<import_from_stmt>tensorflow.python.platform tf_logging<as>logging<line_sep>_DEFAULT_CALLBACK_ACTION="raise"<line_sep>_VALID_CALLBACK_ACTIONS=(<none> "ignore" "print" "raise" "warn")<line_sep># TODO(cais): Consider moving this exception class to errors_impl.py. <class_stmt>InfOrNanError(Exception)<block_start>"""Exception for inf and/or nan being present in tensor."""<def_stmt>__init__ self op_type op_name output_index num_outputs value<block_start>"""Constructor of InfOrNanError. Args: op_type: Type name of the op that generated the tensor that generated the `inf`(s) or `nan`(s) (e.g., `Div`). op_name: Name of the op that generated the tensor with `inf`(s) or `nan`(s). This name is set by client and can be `None` if it is unset. output_index: The 0-based output index of the tensor that contains `inf`(s) or `nan`(s). num_outputs: Total number of outputs of the operation. value: The tensor value that contains `inf`(s) or `nan`(s). """<line_sep>self._op_type=op_type<line_sep>self._op_name=op_name<line_sep>self._output_index=output_index<line_sep>self._num_outputs=num_outputs<line_sep>self._value=value<line_sep>self._total_count=np.size(value)<line_sep>self._inf_count=np.count_nonzero(np.isinf(value))<line_sep>self._nan_count=np.count_nonzero(np.isnan(value))<line_sep>super(InfOrNanError self).__init__(self._get_error_message())<block_end><def_stmt>_get_error_message self<block_start>"""Get the error message describing this InfOrNanError object."""<line_sep>name_str=(("'%s'"%self._op_name)<if>self._op_name<is><not><none><else>str(self._op_name))<line_sep>msg="Output %d of %d of TFE operation %s (name: %s) contains "%(self._output_index+1 self._num_outputs self._op_type name_str)<if_stmt>self._inf_count<and>self._nan_count<block_start>msg<augadd>"%d inf(s) and %d nan(s) "%(self._inf_count self._nan_count)<block_end><elif_stmt>self._inf_count<block_start>msg<augadd>"%d inf(s) "%self._inf_count<block_end><else_stmt><block_start>msg<augadd>"%d nan(s) "%self._nan_count<block_end>msg<augadd>"out of a total of %d element(s). 
Tensor value: %s"%(self._total_count self._value)<line_sep><return>msg<block_end>@property<def_stmt>op_type self<block_start><return>self._op_type<block_end>@property<def_stmt>op_name self<block_start><return>self._op_name<block_end>@property<def_stmt>output_index self<block_start><return>self._output_index<block_end>@property<def_stmt>num_outputs self<block_start><return>self._num_outputs<block_end>@property<def_stmt>value self<block_start><return>self._value<block_end><block_end><def_stmt>inf_nan_callback op_type op_name attrs inputs outputs check_inf=<true> check_nan=<true> action=_DEFAULT_CALLBACK_ACTION<block_start>"""An execution callback that checks for `inf`s and `nan`s in output tensors. This callback can be used with `tfe.add_execute_callback` to check for invalid numeric values. E.g., ```python tfe.add_execute_callback(tfe.inf_nan_callback) ``` Args: op_type: Name of the TFE operation type (e.g., `MatMul`). op_name: Name of the TFE operation. This name is set by client and can be `None` if it unset. attrs: Attributes of the TFE operation, as a tuple of alternating attribute names and attribute values. inputs: The `list` of input tensors to the operation, currently unused by this callback. outputs: The `list` of output tensors from the operation, checked by this callback for `inf` and `nan` values. check_inf: (`bool`) Whether this callback should check for `inf` values in the output tensor values. check_nan: (`bool`) Whether this callback should check for `nan` values in the output tensor values. action: (`str`) Action to be taken by the callback when `inf` or `nan` values are detected. Possible values {"raise", "warn", "print"} `"raise"`: Raise a `InfOrNanError`. `"warn"`: Log a warning using `tf.logging.warn`. `"print"`: Print a message to `sys.stdout`. Raises: InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and `action` is `"raise"`. ValueError: iff the value of `action` is invalid. """<del_stmt>attrs inputs# Not used. ctx=context.get_default_context()<for_stmt>index,output enumerate(outputs)<block_start><if_stmt><not>output.dtype.is_numpy_compatible<block_start><continue><block_end>numpy_dtype=output.dtype.as_numpy_dtype<if_stmt>(np.issubdtype(numpy_dtype np.float)<or>np.issubdtype(numpy_dtype np.complex)<or>np.issubdtype(numpy_dtype np.integer))<block_start><try_stmt><block_start>check_numerics_op_attrs=("message" "Eager-mode inf/nan check" "T" outputs[0].dtype.as_datatype_enum)<line_sep># TODO(cais): Consider moving this into execute.py. # pylint: disable=protected-access pywrap_tensorflow.TFE_Py_Execute(ctx._handle output.device "CheckNumerics" [output] check_numerics_op_attrs 1)<line_sep># pylint: enable=protected-access <block_end><except_stmt>core._NotOkStatusException# pylint: disable=protected-access <block_start>value=output.numpy()<line_sep>inf_detected=np.any(np.isinf(value))<and>check_inf<line_sep>nan_detected=np.any(np.isnan(value))<and>check_nan<if_stmt><not>inf_detected<and><not>nan_detected<block_start><continue><block_end>error=InfOrNanError(op_type op_name index len(outputs) value)<if_stmt>action<eq>"print"<block_start>print("Warning: %s"%str(error))<block_end><elif_stmt>action<eq>"warn"<block_start>logging.warn(str(error))<block_end><elif_stmt>action<eq>"raise"<block_start><raise>error<block_end><else_stmt><block_start><raise>ValueError("Invalid action for inf_nan_callback: %s. 
Valid actions are: "<concat>"{print | warn | raise}"%action)<block_end><block_end><block_end><block_end><block_end><def_stmt>inf_callback op_type op_name attrs inputs outputs action=_DEFAULT_CALLBACK_ACTION<block_start>"""A specialization of `inf_nan_callback` that checks for `inf`s only."""<line_sep>inf_nan_callback(op_type op_name attrs inputs outputs check_inf=<true> check_nan=<false> action=action)<block_end><def_stmt>nan_callback op_type op_name attrs inputs outputs action=_DEFAULT_CALLBACK_ACTION<block_start>"""A specialization of `inf_nan_callback` that checks for `nan`s only."""<line_sep>inf_nan_callback(op_type op_name attrs inputs outputs check_inf=<false> check_nan=<true> action=action)<block_end><def_stmt>add_execution_callback callback<block_start>"""Add an execution callback to the default eager context. An execution callback is invoked immediately after an eager operation or function has finished execution, providing access to the op's type, name input and output tensors. Multiple execution callbacks can be added, in which case the callbacks will be invoked in the order in which they are added. To clear all execution callbacks that have been added, use `clear_execution_callbacks()`. Example: ```python def print_even_callback(op_type, op_name, attrs, inputs, outputs): # A callback that prints only the even output values. if outputs[0].numpy() % 2 == 0: print("Even output from %s: %s" % (op_name or op_type, outputs)) tfe.add_execution_callback(print_even_callback) x = tf.pow(2.0, 3.0) - 3.0 y = tf.multiply(x, tf.add(1.0, 5.0)) # When the line above is run, you will see all intermediate outputs that are # even numbers printed to the console. tfe.clear_execution_callbacks() ``` Args: callback: a callable of the signature `f(op_type, op_name, attrs, inputs, outputs)`. `op_type` is the type of the operation that was just executed (e.g., `MatMul`). `op_name` is the name of the operation that has was just executed. This name is set by the client who created the operation and can be `None` if it is unset. `attrs` contains the attributes of the operation as a `tuple` of alternating attribute name and attribute value. `inputs` is the `list` of input `Tensor`(s) to the op. `outputs` is the `list` of output `Tensor`(s) from the op. Return value(s) from the callback are ignored. """<line_sep>context.get_default_context().add_post_execution_callback(callback)<block_end><def_stmt>clear_execution_callbacks <block_start>"""Clear all execution callbacks from the default eager context."""<line_sep>context.get_default_context().clear_post_execution_callbacks()<block_end><def_stmt>seterr inf_or_nan=<none><block_start>"""Set how abnormal conditions are handled by the default eager context. Example: ```python tfe.seterr(inf_or_nan="raise") a = tf.constant(10.0) b = tf.constant(0.0) try: c = a / b # <-- Raises InfOrNanError. except Exception as e: print("Caught Exception: %s" % e) tfe.seterr(inf_or_nan="ignore") c = a / b # <-- Does NOT raise exception anymore. ``` Args: inf_or_nan: Set action for infinity (`inf`) and NaN (`nan`) values. Possible values: `{"ignore", "print", "raise", "warn"}`. `"ignore"`: take no action when `inf` values appear. `"print"`: print a warning to `stdout`. `"raise"`: raise an `InfOrNanError`. `"warn"`: print a warning using `tf.logging.warn`. A value of `None` leads to no change in the action of the condition. Returns: A dictionary of old actions. Raises: ValueError: If the value of any keyword arguments is invalid. 
"""<if_stmt>inf_or_nan<not><in>_VALID_CALLBACK_ACTIONS<block_start><raise>ValueError("Invalid action value for inf_or_nan: %s. "<concat>"Valid actions are %s."%(inf_or_nan _VALID_CALLBACK_ACTIONS))<block_end>old_settings={"inf_or_nan":"ignore"}<line_sep>default_context=context.get_default_context()<line_sep>carryover_callbacks=[]<for_stmt>callback default_context.post_execution_callbacks# Check whether the callback is inf_nan_callback or a partial object of # inf_nan_callback. <block_start><if_stmt>(callback<eq>inf_nan_callback<or>isinstance(callback functools.partial)<and>callback.func<eq>inf_nan_callback)<block_start><if_stmt>callback<eq>inf_nan_callback<block_start>old_settings["inf_or_nan"]=_DEFAULT_CALLBACK_ACTION<block_end><else_stmt><block_start>old_settings["inf_or_nan"]=callback.keywords.get("action" _DEFAULT_CALLBACK_ACTION)<block_end><block_end><elif_stmt>inf_or_nan<is><not><none><block_start>carryover_callbacks.append(callback)<block_end><block_end><if_stmt>inf_or_nan<is><not><none><block_start>default_context.clear_post_execution_callbacks()<for_stmt>callback carryover_callbacks<block_start>default_context.add_post_execution_callback(callback)<block_end><if_stmt>inf_or_nan<ne>"ignore"<block_start>default_context.add_post_execution_callback(functools.partial(inf_nan_callback action=inf_or_nan))<block_end><block_end><return>old_settings<block_end>
# coding=utf-8 # Copyright 2021 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for common image attention utilities."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>absl.testing parameterized<import_from_stmt>tensor2tensor.layers common_hparams<import_from_stmt>tensor2tensor.layers common_image_attention<import_from_stmt>tensor2tensor.utils hparam<import_stmt>tensorflow.compat.v1<as>tf<class_stmt>CommonImageAttentionTest(parameterized.TestCase tf.test.TestCase)<block_start>@parameterized.parameters((common_image_attention.DistributionType.DMOL 5 50) (common_image_attention.DistributionType.CAT <none> 256) )<def_stmt>testPostProcessImageTrainMode self likelihood num_mixtures depth<block_start>batch=1<line_sep>rows=8<line_sep>cols=24<line_sep>hparams=hparam.HParams(hidden_size=2 likelihood=likelihood mode=tf.estimator.ModeKeys.TRAIN num_mixtures=num_mixtures )<line_sep>inputs=tf.random_uniform([batch rows cols hparams.hidden_size] minval=-1. maxval=1.)<line_sep>outputs=common_image_attention.postprocess_image(inputs rows cols hparams)<line_sep>self.assertEqual(outputs.shape (batch rows cols depth))<block_end>@parameterized.parameters((common_image_attention.DistributionType.DMOL 5 50) (common_image_attention.DistributionType.CAT <none> 256) )<def_stmt>testPostProcessImageInferMode self likelihood num_mixtures depth<block_start>batch=1<line_sep>rows=8<line_sep>cols=24<line_sep>block_length=4<line_sep>block_width=2<line_sep>hparams=hparam.HParams(block_raster_scan=<true> hidden_size=2 likelihood=likelihood mode=tf.estimator.ModeKeys.PREDICT num_mixtures=num_mixtures query_shape=[block_length block_width] )<line_sep>inputs=tf.random_uniform([batch rows cols hparams.hidden_size] minval=-1. maxval=1.)<line_sep>outputs=common_image_attention.postprocess_image(inputs rows cols hparams)<line_sep>num_blocks_rows=rows<floordiv>block_length<line_sep>num_blocks_cols=cols<floordiv>block_width<line_sep>self.assertEqual(outputs.shape (batch num_blocks_rows num_blocks_cols block_length block_width depth))<block_end>@parameterized.parameters((common_image_attention.DistributionType.DMOL 5 50) (common_image_attention.DistributionType.CAT <none> 256) )<def_stmt>testCreateOutputTrainMode self likelihood num_mixtures depth<block_start>batch=1<line_sep>height=8<line_sep>width=8<line_sep>channels=3<line_sep>rows=height<if_stmt>likelihood<eq>common_image_attention.DistributionType.CAT<block_start>cols=channels<times>width<block_end><else_stmt><block_start>cols=width<block_end>hparams=hparam.HParams(hidden_size=2 likelihood=likelihood num_channels=channels mode=tf.estimator.ModeKeys.TRAIN num_mixtures=num_mixtures )<line_sep>decoder_output=tf.random_normal([batch rows cols hparams.hidden_size])<line_sep>targets=tf.random_uniform([batch height width channels] minval=-1. 
maxval=1.)<line_sep>output=common_image_attention.create_output(decoder_output rows cols targets hparams)<if_stmt>hparams.likelihood<eq>common_image_attention.DistributionType.CAT<block_start>self.assertEqual(output.shape (batch height width channels depth))<block_end><else_stmt><block_start>self.assertEqual(output.shape (batch height width depth))<block_end><block_end><def_stmt>testTransformerDecoderLayersGlobal self<block_start>one_hot_data=tf.constant([[[0. 1.] [1. 0.]] [[0. 1.] [1. 0.]] [[1. 0.] [1. 0.]]])<line_sep>hparams=common_hparams.basic_params1()<line_sep>hparams.hidden_size=4<line_sep>hparams.num_layers=1<line_sep>hparams.layer_prepostprocess_dropout=0.<line_sep>hparams.add_hparam("attention_key_channels" <none>)<line_sep>hparams.add_hparam("attention_value_channels" <none>)<line_sep>hparams.add_hparam("num_heads" 1)<line_sep>hparams.add_hparam("attention_dropout" 0.)<line_sep>hparams.add_hparam("shared_rel" <false>)<line_sep>hparams.add_hparam("block_width" 1)<line_sep>hparams.add_hparam("block_length" 1)<line_sep>hparams.add_hparam("q_filter_width" 1)<line_sep>hparams.add_hparam("kv_filter_width" 1)<line_sep>hparams.add_hparam("filter_size" 16)<line_sep>hparams.add_hparam("ffn_layer" "conv_hidden_relu")<line_sep>hparams.add_hparam("relu_dropout" 0.)<line_sep>conv_1d=tf.keras.layers.Conv1D(filters=hparams.hidden_size kernel_size=1 use_bias=<false>)<line_sep>shifted_data=tf.pad(one_hot_data [[0 0] [1 0] [0 0]])[<ellipsis> :-1 :]<line_sep>net=conv_1d(shifted_data)<line_sep>output=common_image_attention.transformer_decoder_layers(inputs=net encoder_output=<none> num_layers=hparams.num_layers hparams=hparams self_attention_bias=common_image_attention.get_self_attention_bias(net) attention_type=common_image_attention.AttentionType.GLOBAL)<line_sep>self.evaluate(tf.global_variables_initializer())<line_sep>output_val=self.evaluate(output)<line_sep># The outputs for the padded dimension should be equal across all data. self.assertAllEqual(output_val[0 0] output_val[1 0])<line_sep>self.assertAllEqual(output_val[1 0] output_val[2 0])<line_sep># The first and second elements of the batch are identical, so they should # have the same outputs for the second latent dimension as well. self.assertAllEqual(output_val[0 1] output_val[1 1])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
''' The `graph` module contains tools for constructing and executing graphs of pliers Transformers. '''<import_from_stmt>itertools chain<import_from_stmt>collections OrderedDict<import_stmt>json<import_from_stmt>pliers.extractors.base merge_results<import_from_stmt>pliers.stimuli __all__<as>stim_list<import_from_stmt>pliers.transformers get_transformer<import_from_stmt>pliers.utils listify flatten isgenerator attempt_to_import verify_dependencies <line_sep>pgv=attempt_to_import('pygraphviz' 'pgv')<line_sep>stim_list.insert(0 'ExtractorResult')<class_stmt>Node<block_start>''' A graph node/vertex. Represents a single transformer, optionally with references to children. Args: name (str): Name of the node transformer (Transformer): the Transformer instance at this node parameters (kwargs): parameters for initializing the Transformer '''<def_stmt>__init__ self transformer name=<none> **parameters<block_start>self.name=name<line_sep>self.children=[]<if_stmt>isinstance(transformer str)<block_start>transformer=get_transformer(transformer **parameters)<block_end>self.transformer=transformer<line_sep>self.parameters=parameters<if_stmt>name<is><not><none><block_start>self.transformer.name=name<block_end>self.id=id(transformer)<block_end><def_stmt>add_child self node<block_start>''' Append a child to the list of children. '''<line_sep>self.children.append(node)<block_end><def_stmt>is_leaf self<block_start><return>len(self.children)<eq>0<block_end><def_stmt>to_json self<block_start>spec={'transformer':self.transformer.__class__.__name__}<if_stmt>self.name<block_start>spec['name']=self.name<block_end><if_stmt>self.children<block_start>children=[]<for_stmt>c self.children<block_start>children.append(c.to_json())<block_end>spec['children']=children<block_end><if_stmt>self.parameters<block_start>spec['parameters']=self.parameters<block_end><return>spec<block_end><block_end><class_stmt>Graph<block_start>''' Graph-like structure that represents an entire pliers workflow. Args: nodes (list, dict): Optional nodes to add to the Graph at construction. If a dict, must have a 'roots' key. If a list, each element must be in one of the forms accepted by add_nodes(). spec (str): An optional path to a .json file containing the graph specification. '''<def_stmt>__init__ self nodes=<none> spec=<none><block_start>self.nodes=OrderedDict()<line_sep>self.roots=[]<if_stmt>nodes<is><not><none><block_start><if_stmt>isinstance(nodes dict)<block_start>nodes=nodes['roots']<block_end>self.add_nodes(nodes)<block_end><elif_stmt>spec<is><not><none><block_start><with_stmt>open(spec)<as>spec_file<block_start>self.add_nodes(json.load(spec_file)['roots'])<block_end><block_end><block_end>@staticmethod<def_stmt>_parse_node_args node<block_start><if_stmt>isinstance(node dict)<block_start><return>node<block_end>kwargs={}<if_stmt>isinstance(node (list tuple))<block_start>kwargs['transformer']=node[0]<if_stmt>len(node)<g>1<block_start>kwargs['children']=node[1]<block_end><if_stmt>len(node)<g>2<block_start>kwargs['name']=node[2]<block_end><block_end><elif_stmt>isinstance(node Node)<block_start>kwargs['transformer']=node.transformer<line_sep>kwargs['children']=node.children<line_sep>kwargs['name']=node.name<block_end><else_stmt><block_start>kwargs['transformer']=node<block_end><return>kwargs<block_end><def_stmt>add_nodes self nodes parent=<none> mode='horizontal'<block_start>''' Adds one or more nodes to the current graph. Args: nodes (list): A list of nodes to add. 
Each element must be one of the following: * A dict containing keyword args to pass onto to the Node init. * An iterable containing 1 - 3 elements. The first element is mandatory, and specifies the Transformer at that node. The second element (optional) is an iterable of child nodes (specified in the same format). The third element (optional) is a string giving the (unique) name of the node. * A Node instance. * A Transformer instance. parent (Node): Optional parent node (i.e., the node containing the pliers Transformer from which the to-be-created nodes receive their inputs). mode (str): Indicates the direction with which to add the new nodes * horizontal: the nodes should each be added as a child of the 'parent' argument (or a Graph root by default). * vertical: the nodes should each be added in sequence with the first node being the child of the 'parnet' argument (a Graph root by default) and each subsequent node being the child of the previous node in the list. '''<for_stmt>n nodes<block_start>node_args=self._parse_node_args(n)<if_stmt>mode<eq>'horizontal'<block_start>self.add_node(parent=parent **node_args)<block_end><elif_stmt>mode<eq>'vertical'<block_start>parent=self.add_node(parent=parent return_node=<true> **node_args)<block_end><else_stmt><block_start><raise>ValueError("Invalid mode for adding nodes to a graph:"<concat>"%s"%mode)<block_end><block_end><block_end><def_stmt>add_chain self nodes parent=<none><block_start>''' An alias for add_nodes with the mode preset to 'vertical'. '''<line_sep>self.add_nodes(nodes parent 'vertical')<block_end><def_stmt>add_children self nodes parent=<none><block_start>''' An alias for add_nodes with the mode preset to 'horizontal'. '''<line_sep>self.add_nodes(nodes parent 'horizontal')<block_end><def_stmt>add_node self transformer name=<none> children=<none> parent=<none> parameters={} return_node=<false><block_start>''' Adds a node to the current graph. Args: transformer (str, Transformer): The pliers Transformer to use at the to-be-added node. Either a case-insensitive string giving the name of a Transformer class, or an initialized Transformer instance. name (str): Optional name to give this Node. children (list): Optional list of child nodes (i.e., nodes to pass the to-be-added node's Transformer output to). parent (Node): Optional node from which the to-be-added Node receives its input. parameters (dict): Optional keyword arguments to pass onto the Transformer initialized at this Node if a string is passed to the 'transformer' argument. Ignored if an already-initialized Transformer is passed. return_node (bool): If True, returns the initialized Node instance. Returns: The initialized Node instance if return_node is True, None otherwise. '''<line_sep>node=Node(transformer name **parameters)<line_sep>self.nodes[node.id]=node<if_stmt>parent<is><none><block_start>self.roots.append(node)<block_end><else_stmt><block_start>parent=self.nodes[parent.id]<line_sep>parent.add_child(node)<block_end><if_stmt>children<is><not><none><block_start>self.add_nodes(children parent=node)<block_end><if_stmt>return_node<block_start><return>node<block_end><block_end><def_stmt>run self stim merge=<true> **merge_kwargs<block_start>''' Executes the graph by calling all Transformers in sequence. Args: stim (str, Stim, list): One or more valid inputs to any Transformer's 'transform' call. merge (bool): If True, all results are merged into a single pandas DataFrame before being returned. 
If False, a list of ExtractorResult objects is returned (one per Extractor/Stim combination). merge_kwargs: Optional keyword arguments to pass onto the merge_results() call. '''<line_sep>results=list(chain(*[self.run_node(n stim)<for>n self.roots]))<line_sep>results=list(flatten(results))<line_sep>self._results=results# For use in plotting <return>merge_results(results **merge_kwargs)<if>merge<else>results<block_end>transform=run<def_stmt>run_node self node stim<block_start>''' Executes the Transformer at a specific node. Args: node (str, Node): If a string, the name of the Node in the current Graph. Otherwise the Node instance to execute. stim (str, stim, list): Any valid input to the Transformer stored at the target node. '''<if_stmt>isinstance(node str)<block_start>node=self.nodes[node]<block_end>result=node.transformer.transform(stim)<if_stmt>node.is_leaf()<block_start><return>listify(result)<block_end>stim=result<line_sep># If result is a generator, the first child will destroy the # iterable, so cache via list conversion <if_stmt>len(node.children)<g>1<and>isgenerator(stim)<block_start>stim=list(stim)<block_end><return>list(chain(*[self.run_node(c stim)<for>c node.children]))<block_end><def_stmt>draw self filename color=<true><block_start>''' Render a plot of the graph via pygraphviz. Args: filename (str): Path to save the generated image to. color (bool): If True, will color graph nodes based on their type, otherwise will draw a black-and-white graph. '''<line_sep>verify_dependencies(['pgv'])<if_stmt><not>hasattr(self '_results')<block_start><raise>RuntimeError("Graph cannot be drawn before it is executed. "<concat>"Try calling run() first.")<block_end>g=pgv.AGraph(directed=<true>)<line_sep>g.node_attr['colorscheme']='set312'<for_stmt>elem self._results<block_start><if_stmt><not>hasattr(elem 'history')<block_start><continue><block_end>log=elem.history<while_stmt>log# Configure nodes <block_start>source_from=log.parent[6]<if>log.parent<else>''<line_sep>s_node=hash((source_from log[2]))<line_sep>s_color=stim_list.index(log[2])<line_sep>s_color=s_color%12+1<line_sep>t_node=hash((log[6] log[7]))<line_sep>t_style='filled,'<if>color<else>''<line_sep>t_style<augadd>'dotted'<if>log.implicit<else>''<if_stmt>log[6].endswith('Extractor')<block_start>t_color='#0082c8'<block_end><elif_stmt>log[6].endswith('Filter')<block_start>t_color='#e6194b'<block_end><else_stmt><block_start>t_color='#3cb44b'<block_end>r_node=hash((log[6] log[5]))<line_sep>r_color=stim_list.index(log[5])<line_sep>r_color=r_color%12+1<line_sep># Add nodes <if_stmt>color<block_start>g.add_node(s_node label=log[2] shape='ellipse' style='filled' fillcolor=s_color)<line_sep>g.add_node(t_node label=log[6] shape='box' style=t_style fillcolor=t_color)<line_sep>g.add_node(r_node label=log[5] shape='ellipse' style='filled' fillcolor=r_color)<block_end><else_stmt><block_start>g.add_node(s_node label=log[2] shape='ellipse')<line_sep>g.add_node(t_node label=log[6] shape='box' style=t_style)<line_sep>g.add_node(r_node label=log[5] shape='ellipse')<block_end># Add edges g.add_edge(s_node t_node style=t_style)<line_sep>g.add_edge(t_node r_node style=t_style)<line_sep>log=log.parent<block_end><block_end>g.draw(filename prog='dot')<block_end><def_stmt>to_json self<block_start>''' Returns the JSON representation of this graph. 
'''<line_sep>roots=[]<for_stmt>r self.roots<block_start>roots.append(r.to_json())<block_end><return>{'roots':roots}<block_end><def_stmt>save self filename<block_start>''' Writes the JSON representation of this graph to the provided filename, such that the graph can be easily reconstructed using Graph(spec=filename). Args: filename (str): Path at which to write out the json file. '''<with_stmt>open(filename 'w')<as>outfile<block_start>json.dump(self.to_json() outfile)<block_end><block_end><block_end>
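# Illustrative sketch: the three node formats accepted by Graph.add_nodes(), as
# parsed by _parse_node_args() above. The transformer names and the 'hertz'
# parameter are placeholders -- substitute Transformers available in your
# pliers installation before running, and supply a real media file to run().
from pliers.graph import Graph

nodes = [
    'VideoToAudioConverter',                                      # bare transformer name
    ('VideoToAudioConverter', ['STFTAudioExtractor'], 'audio'),   # (transformer, children, name)
    {'transformer': 'FrameSamplingFilter', 'parameters': {'hertz': 1}},  # dict of Node kwargs
]
g = Graph(nodes=nodes)
print(g.to_json())
# result = g.run('some_video.mp4')  # merged pandas DataFrame when merge=True (the default)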
<def_stmt>main <block_start><def_stmt>findMin x<block_start>minNum=x[0]<for_stmt>i x<block_start><if_stmt>minNum<g>i<block_start>minNum=i<block_end><block_end><return>minNum<block_end>print(findMin([0 1 2 3 4 5 -3 24 -56]))<block_end># expected output: -56 <if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# -*- coding: utf-8 -*- """RegressionTorchModel Base class for model with no cell specific parameters"""<import_stmt>matplotlib.pyplot<as>plt<line_sep># + <import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>cell2location.models.base.torch_model TorchModel<class_stmt>RegressionTorchModel(TorchModel)<block_start>r"""Base class for regression models with no cell-specific parameters (enable minibatch training). :param sample_id: str with column name in cell2covar that denotes sample :param cell2covar: pd.DataFrame with covariates in columns and cells in rows, rows should be named. :param X_data: Numpy array of gene expression (cols) in cells (rows) :param n_iter: number of iterations, when using minibatch, the number of epochs (passes through all data), supersedes self.n_iter :param (data_type, learning_rate, total_grad_norm_constraint, verbose, var_names, var_names_read, obs_names, fact_names): arguments for parent class :func:`~cell2location.models.BaseModel` :param minibatch_size: if None all data is used for training, if not None - the number of cells / observations per batch. For best results use 1024 cells per batch. :param minibatch_seed: order of cells in minibatch is chose randomly, so a seed for each traning restart should be provided :param prior_eps: numerical stability constant added to initial values :param nb_param_conversion_eps: NB distribution numerical stability constant, see :func:`~cell2location.models.TorchModel.nb_log_prob` :param use_cuda: boolean, telling pytorch to use the GPU (if true). :param use_average_as_initial_value: boolean, use average gene expression for each categorical covariate as initial value? :param stratify_cv: when using cross-validation on cells (selected in the training method), this is a pd.Series that tells :func:`~sklearn.model_selection.train_test_split` how to stratify when creating a split. 
"""<def_stmt>__init__ self sample_id cell2covar:pd.DataFrame X_data:np.ndarray data_type="float32" n_iter=200000 learning_rate=0.001 total_grad_norm_constraint=200 verbose=<true> var_names=<none> var_names_read=<none> obs_names=<none> fact_names=<none> minibatch_size=<none> minibatch_seed=[41 56 345] prior_eps=1e-8 nb_param_conversion_eps=1e-8 use_cuda=<false> use_average_as_initial_value=<true> stratify_cv=<none> ############# Initialise parameters ################ # convert covariates to binary matrix # test for column types, get dummies for categorical / character, and just copy over continous <block_start>cell2covar_df=pd.get_dummies(cell2covar.loc[: ~cell2covar.columns.isin([sample_id])])<line_sep>cell2sample_df=pd.get_dummies(cell2covar[[sample_id]])<line_sep>cell2sample_covar_df=pd.concat([cell2sample_df cell2covar_df] axis=1)<line_sep>fact_names=cell2sample_covar_df.columns<line_sep>n_fact=cell2sample_covar_df.shape[1]<line_sep># extract obs names and sample id obs_names=cell2covar.index<line_sep>sample_id=cell2covar[sample_id]<line_sep>super().__init__(X_data n_fact data_type n_iter learning_rate total_grad_norm_constraint verbose var_names var_names_read obs_names fact_names sample_id use_cuda )<line_sep>self.nb_param_conversion_eps=nb_param_conversion_eps<line_sep>self.cell_factors_df=<none><line_sep>self.minibatch_size=minibatch_size<line_sep>self.minibatch_seed=minibatch_seed<line_sep>self.n_cells_total=self.n_obs<line_sep>self.which_sample=self.fact_names.isin(cell2sample_df.columns)<line_sep>self.n_samples=np.sum(self.which_sample)<line_sep>self.n_covar=self.n_fact-self.n_samples<line_sep>self.prior_eps=prior_eps<line_sep>self.cell2sample_df=cell2sample_df<line_sep>self.cell2sample_covar_df=cell2sample_covar_df<line_sep># convert to np.ndarray self.cell2sample_mat=cell2sample_df.values<line_sep>self.cell2sample_covar_mat=cell2sample_covar_df.values<line_sep># find mean and variance for each gene self.gene_mean=(self.X_data+self.prior_eps).mean(0).astype(self.data_type).reshape((1 self.n_var))<line_sep>self.noise_gene_mean=(self.gene_mean/10).astype(self.data_type).reshape((1 self.n_var))<line_sep>self.prior_gene_mean=np.concatenate([self.noise_gene_mean self.gene_mean] axis=0)<line_sep>self.stratify_cv=stratify_cv<line_sep>self.extra_data["cell2sample_covar"]=self.cell2sample_covar_mat<if_stmt>use_average_as_initial_value# compute initial value for parameters: cluster averages <block_start>self.cell2sample_covar_sig_mat=self.cell2sample_covar_mat/self.cell2sample_covar_mat.sum(0)<line_sep>self.clust_average_mat=np.dot(self.cell2sample_covar_sig_mat.T self.X_data)+self.prior_eps<line_sep>self.clust_average_mat[self.which_sample :]=self.clust_average_mat[self.which_sample :]/10<line_sep># aver = get_cluster_averages(adata_snrna_raw, 'annotation_1') + self.prior_eps # variances = get_cluster_variances(adata_snrna_raw, 'annotation_1') + self.prior_eps # shape = aver ** 2 / variances # shape = shape.mean(1).values # overdisp_mean = shape.reshape((1, adata_snrna_raw.shape[1])) self.gene_E_mat=<none># np.sqrt(1 / overdisp_mean) # get gene_E ~ Exponential() <block_end><else_stmt><block_start>self.clust_average_mat=<none><line_sep>self.gene_E_mat=<none><block_end><block_end># =====================Other functions======================= # <def_stmt>plot_gene_budget self<block_start>plt.hist(np.log10(self.samples["post_sample_means"]["gene_level"][: 0]) bins=50)<line_sep>plt.xlabel("Gene expression level (hierarchical)")<line_sep>plt.title("Gene expression level 
(hierarchical)")<line_sep>plt.tight_layout()<block_end><def_stmt>sample2df self gene_node_name="gene_factors" sample_type="means"<block_start>r"""Export cell factors as Pandas data frames. :param node_name: name of the cell factor model parameter to be exported :param gene_node_name: name of the gene factor model parameter to be exported :param sample_type: type of posterior sample (means, q05, q95, sds) :return: 8 Pandas dataframes added to model object: .covariate_effects, .covariate_effects_sd, .covariate_effects_q05, .covariate_effects_q95 .sample_effects, .sample_effects_sd, .sample_effects_q05, .sample_effects_q95 """<line_sep># export parameters for covariate effects cov_ind=~self.which_sample<line_sep>self.covariate_effects=pd.DataFrame.from_records(self.samples["post_sample_"+sample_type][gene_node_name][cov_ind :].T index=self.var_names columns=[sample_type+"_cov_effect_"+i<for>i self.fact_names[cov_ind]] )<line_sep># export parameters for sample effects sample_ind=self.which_sample<line_sep>self.sample_effects=pd.DataFrame.from_records(self.samples["post_sample_"+sample_type][gene_node_name][sample_ind :].T index=self.var_names columns=[sample_type+"_sample_effect"+i<for>i self.fact_names[sample_ind]] )<block_end><def_stmt>annotate_cell_adata self adata use_raw=<true><block_start>r"""Add covariate and sample coefficients to anndata.var :param adata: anndata object to annotate :return: updated anndata object """<if_stmt>self.cell_factors_df<is><none><block_start>self.sample2df()<block_end><if_stmt>use_raw<is><true><block_start>var_index=adata.raw.var.index<line_sep>### Covariate effect # add gene factors to adata adata.raw.var[self.covariate_effects.columns]=self.covariate_effects.loc[var_index :]<line_sep>### Sample effects # add gene factors to adata adata.raw.var[self.sample_effects.columns]=self.sample_effects.loc[var_index :]<block_end><else_stmt><block_start>var_index=adata.var.index<line_sep>### Covariate effect # add gene factors to adata adata.var[self.covariate_effects.columns]=self.covariate_effects.loc[var_index :]<line_sep>### Sample effects # add gene factors to adata adata.var[self.sample_effects.columns]=self.sample_effects.loc[var_index :]<block_end><return>adata<block_end><block_end>
<import_stmt>cv2<import_stmt>torch<import_from_stmt>torchvision transforms<import_stmt>math<import_stmt>numpy<as>np<import_stmt>torchvision.models<as>models<import_stmt>torch.utils.data<as>data<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.autograd Variable<import_stmt>pandas<as>pd<import_stmt>os<import_stmt>torch.nn<as>nn<import_stmt>time<import_stmt>argparse<line_sep>result=["Surprise" "Fear" "Disgust" "Happiness" "Sadness" "Anger" "Neutral"]<class_stmt>Res18Feature(nn.Module)<block_start><def_stmt>__init__ self pretrained num_classes=7<block_start>super(Res18Feature self).__init__()<line_sep>resnet=models.resnet18(pretrained)<line_sep>self.features=nn.Sequential(*list(resnet.children())[:-1])<line_sep>fc_in_dim=list(resnet.children())[-1].in_features<line_sep>self.fc=nn.Linear(fc_in_dim num_classes)<line_sep>self.alpha=nn.Sequential(nn.Linear(fc_in_dim 1) nn.Sigmoid())<block_end><def_stmt>forward self x<block_start>x=self.features(x)<line_sep>x=x.view(x.size(0) -1)<line_sep>attention_weights=self.alpha(x)<line_sep>out=attention_weights<times>self.fc(x)<line_sep><return>attention_weights out<block_end><block_end>model_save_path="./checkpoint/wiki2020.pth"# model checkpoint path <def_stmt>main args<block_start>preprocess_transform=transforms.Compose([transforms.ToPILImage() transforms.Resize((224 224)) transforms.ToTensor() transforms.Normalize(mean=[0.485 0.456 0.406] std=[0.229 0.224 0.225])])<line_sep>res18=Res18Feature(pretrained=<false>)<line_sep>checkpoint=torch.load(model_save_path)<line_sep>res18.load_state_dict(checkpoint['model_state_dict'])<line_sep>res18.cuda()<line_sep>res18.eval()<for_stmt>i [0]<block_start>time1=time.time()<line_sep>image=cv2.imread(args.img)<line_sep>image=image[: : ::-1]<line_sep>image_tensor=preprocess_transform(image)<line_sep>tensor=Variable(torch.unsqueeze(image_tensor dim=0).float() requires_grad=<false>)<line_sep>tensor=tensor.cuda()<line_sep>time2=time.time()<line_sep>_,outputs=res18(tensor)<line_sep>_,predicts=torch.max(outputs 1)<line_sep>print(result[int(predicts.cpu().data)])<block_end><block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Testing')<line_sep>parser.add_argument('--img' default="./img/suripse.jpg" type=str)<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_args()<line_sep>main(args)<block_end>
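# Illustrative sketch: the attention weighting used in Res18Feature.forward()
# above -- a sigmoid gate in (0, 1) per image scales the 7 class logits before
# torch.max picks the predicted expression. 512 is the pooled ResNet-18 feature size.
import torch
import torch.nn as nn

feat = torch.randn(2, 512)                       # pooled backbone features for 2 images
alpha = nn.Sequential(nn.Linear(512, 1), nn.Sigmoid())
fc = nn.Linear(512, 7)
out = alpha(feat) * fc(feat)                     # broadcast: (2, 1) * (2, 7) -> (2, 7)
print(out.shape, out.argmax(dim=1))              # logits shape and predicted class indices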
"""Normalize features"""<import_stmt>numpy<as>np<def_stmt>normalize features<block_start>"""Normalize features. Normalizes input features X. Returns a normalized version of X where the mean value of each feature is 0 and deviation is close to 1. :param features: set of features. :return: normalized set of features. """<line_sep># Copy original array to prevent it from changes. features_normalized=np.copy(features).astype(float)<line_sep># Get average values for each feature (column) in X. features_mean=np.mean(features 0)<line_sep># Calculate the standard deviation for each feature. features_deviation=np.std(features 0)<line_sep># Subtract mean values from each feature (column) of every example (row) # to make all features be spread around zero. <if_stmt>features.shape[0]<g>1<block_start>features_normalized<augsub>features_mean<block_end># Normalize each feature values so that all features are close to [-1:1] boundaries. # Also prevent division by zero error. features_deviation[features_deviation<eq>0]=1<line_sep>features_normalized<augdiv>features_deviation<line_sep><return>features_normalized features_mean features_deviation<block_end>
# Copyright 2020 The Forte Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Index processor """<import_from_stmt>abc ABC<import_from_stmt>typing Dict Any List Tuple<import_from_stmt>forte.common Resources<import_from_stmt>forte.common.configuration Config<import_from_stmt>forte.data.data_pack DataPack<import_from_stmt>forte.processors.base.pack_processor PackProcessor<line_sep>__all__=["IndexProcessor" "IndexProcessorWithDatapack"]<class_stmt>IndexProcessor(PackProcessor ABC)<block_start>r"""A base processor for indexing documents into traditional indexers like Elasticsearch and/or dense vector indexers like Faiss. Subclasses need to implement :meth:`IndexProcessor::_bulk_process`. """<line_sep># pylint: disable=useless-super-delegation <def_stmt>__init__ self<arrow><none><block_start>super().__init__()<line_sep>self.documents:List[Dict[str str]]=[]<block_end>@classmethod<def_stmt>default_configs cls<arrow>Dict[str Any]<block_start><return>{"batch_size":128}<block_end><def_stmt>_bulk_process self<block_start>r"""Subclasses of :class:`IndexProcessor` should implement this method to bulk add the documents into the index. """<line_sep><raise>NotImplementedError<block_end><def_stmt>_field_names self<arrow>List[str]<block_start>r"""Subclasses of :class:`IndexProcessor` should implement this method to provide the field name for indexing. The return value from :func:`_content_for_index` will be added into these fields. The length of the return value of this function should be the same as the return value for :func:`_content_for_index`. Returns: """<line_sep><raise>NotImplementedError<block_end><def_stmt>_content_for_index self input_pack:DataPack<arrow>List[str]<block_start><raise>NotImplementedError<block_end><def_stmt>_process self input_pack:DataPack# self.documents.append((str(input_pack.pack_id), input_pack.text)) <block_start>index_pairs:Dict[str str]=dict(zip(self._field_names() self._content_for_index(input_pack)))<line_sep>self.documents.append(index_pairs)<if_stmt>len(self.documents)<eq>self.configs.batch_size<block_start>self._bulk_process()<line_sep>self.documents=[]<block_end><block_end><def_stmt>flush self<block_start><if_stmt>len(self.documents)<g>0<block_start>self._bulk_process()<block_end><block_end><block_end><class_stmt>IndexProcessorWithDatapack(PackProcessor ABC)<block_start>r"""A base processor for indexing a document with its original datapack into traditional indexers like Elasticsearch. Subclasses need to implement :meth:`IndexProcessorWithDatapack::_bulk_process`. 
"""<line_sep># pylint: disable=useless-super-delegation <def_stmt>__init__ self<arrow><none><block_start>super().__init__()<line_sep>self.documents:List[Tuple[str str str]]=[]<block_end># pylint: disable=attribute-defined-outside-init <def_stmt>initialize self resources:Resources configs:Config<block_start>self.resources=resources<line_sep>self.config=configs<block_end>@classmethod<def_stmt>default_configs cls<arrow>Dict[str Any]<block_start>config=super().default_configs()<line_sep>config.update({"batch_size":128})<line_sep><return>config<block_end><def_stmt>_bulk_process self<block_start>r"""Subclasses of :class:`IndexProcessorWithDatapack` should implement this method to bulk add the documents into the index. """<line_sep><raise>NotImplementedError<block_end><def_stmt>_process self input_pack:DataPack<block_start>serialized_datapack:str=input_pack.to_string()<line_sep>self.documents.append((str(input_pack.pack_id) input_pack.text serialized_datapack))<if_stmt>len(self.documents)<eq>self.config.batch_size<block_start>self._bulk_process()<line_sep>self.documents=[]<block_end><block_end><def_stmt>flush self<block_start><if_stmt>len(self.documents)<g>0<block_start>self._bulk_process()<block_end><block_end><block_end>
# -*- coding: utf-8 -*- r""" werkzeug.contrib.sessions ~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains some helper classes that help one to add session support to a python WSGI application. For full client-side session storage see :mod:`~werkzeug.contrib.securecookie` which implements a secure, client-side session storage. Application Integration ======================= :: from werkzeug.contrib.sessions import SessionMiddleware, \ FilesystemSessionStore app = SessionMiddleware(app, FilesystemSessionStore()) The current session will then appear in the WSGI environment as `werkzeug.session`. However it's recommended to not use the middleware but the stores directly in the application. However for very simple scripts a middleware for sessions could be sufficient. This module does not implement methods or ways to check if a session is expired. That should be done by a cronjob and storage specific. For example to prune unused filesystem sessions one could check the modified time of the files. If sessions are stored in the database the new() method should add an expiration timestamp for the session. For better flexibility it's recommended to not use the middleware but the store and session object directly in the application dispatching:: session_store = FilesystemSessionStore() def application(environ, start_response): request = Request(environ) sid = request.cookies.get('cookie_name') if sid is None: request.session = session_store.new() else: request.session = session_store.get(sid) response = get_the_response_object(request) if request.session.should_save: session_store.save(request.session) response.set_cookie('cookie_name', request.session.sid) return response(environ, start_response) :copyright: 2007 Pallets :license: BSD-3-Clause """<import_stmt>os<import_stmt>re<import_stmt>tempfile<import_stmt>warnings<import_from_stmt>hashlib sha1<import_from_stmt>os path<import_from_stmt>pickle dump<import_from_stmt>pickle HIGHEST_PROTOCOL<import_from_stmt>pickle load<import_from_stmt>random random<import_from_stmt>time time<import_from_stmt>.._compat PY2<import_from_stmt>.._compat text_type<import_from_stmt>..datastructures CallbackDict<import_from_stmt>..filesystem get_filesystem_encoding<import_from_stmt>..posixemulation rename<import_from_stmt>..utils dump_cookie<import_from_stmt>..utils parse_cookie<import_from_stmt>..wsgi ClosingIterator<line_sep>warnings.warn("'werkzeug.contrib.sessions' is deprecated as of version 0.15 and"<concat>" will be removed in version 1.0. It has moved to"<concat>" https://github.com/pallets/secure-cookie." 
DeprecationWarning stacklevel=2 )<line_sep>_sha1_re=re.compile(r"^[a-f0-9]{40}$")<def_stmt>_urandom <block_start><if_stmt>hasattr(os "urandom")<block_start><return>os.urandom(30)<block_end><return>text_type(random()).encode("ascii")<block_end><def_stmt>generate_key salt=<none><block_start><if_stmt>salt<is><none><block_start>salt=repr(salt).encode("ascii")<block_end><return>sha1(b"".join([salt str(time()).encode("ascii") _urandom()])).hexdigest()<block_end><class_stmt>ModificationTrackingDict(CallbackDict)<block_start>__slots__=("modified" )<def_stmt>__init__ self *args **kwargs<block_start><def_stmt>on_update self<block_start>self.modified=<true><block_end>self.modified=<false><line_sep>CallbackDict.__init__(self on_update=on_update)<line_sep>dict.update(self *args **kwargs)<block_end><def_stmt>copy self<block_start>"""Create a flat copy of the dict."""<line_sep>missing=object()<line_sep>result=object.__new__(self.__class__)<for_stmt>name self.__slots__<block_start>val=getattr(self name missing)<if_stmt>val<is><not>missing<block_start>setattr(result name val)<block_end><block_end><return>result<block_end><def_stmt>__copy__ self<block_start><return>self.copy()<block_end><block_end><class_stmt>Session(ModificationTrackingDict)<block_start>"""Subclass of a dict that keeps track of direct object changes. Changes in mutable structures are not tracked, for those you have to set `modified` to `True` by hand. """<line_sep>__slots__=ModificationTrackingDict.__slots__+("sid" "new")<def_stmt>__init__ self data sid new=<false><block_start>ModificationTrackingDict.__init__(self data)<line_sep>self.sid=sid<line_sep>self.new=new<block_end><def_stmt>__repr__ self<block_start><return>"<%s %s%s>"%(self.__class__.__name__ dict.__repr__(self) "*"<if>self.should_save<else>"" )<block_end>@property<def_stmt>should_save self<block_start>"""True if the session should be saved. .. versionchanged:: 0.6 By default the session is now only saved if the session is modified, not if it is new like it was before. """<line_sep><return>self.modified<block_end><block_end><class_stmt>SessionStore(object)<block_start>"""Baseclass for all session stores. The Werkzeug contrib module does not implement any useful stores besides the filesystem store, application developers are encouraged to create their own stores. :param session_class: The session class to use. Defaults to :class:`Session`. """<def_stmt>__init__ self session_class=<none><block_start><if_stmt>session_class<is><none><block_start>session_class=Session<block_end>self.session_class=session_class<block_end><def_stmt>is_valid_key self key<block_start>"""Check if a key has the correct format."""<line_sep><return>_sha1_re.match(key)<is><not><none><block_end><def_stmt>generate_key self salt=<none><block_start>"""Simple function that generates a new session key."""<line_sep><return>generate_key(salt)<block_end><def_stmt>new self<block_start>"""Generate a new session."""<line_sep><return>self.session_class({} self.generate_key() <true>)<block_end><def_stmt>save self session<block_start>"""Save a session."""<block_end><def_stmt>save_if_modified self session<block_start>"""Save if a session class wants an update."""<if_stmt>session.should_save<block_start>self.save(session)<block_end><block_end><def_stmt>delete self session<block_start>"""Delete a session."""<block_end><def_stmt>get self sid<block_start>"""Get a session for this sid or a new session object. This method has to check if the session key is valid and create a new session if that wasn't the case. 
"""<line_sep><return>self.session_class({} sid <true>)<block_end><block_end>#: used for temporary files by the filesystem session store _fs_transaction_suffix=".__wz_sess"<class_stmt>FilesystemSessionStore(SessionStore)<block_start>"""Simple example session store that saves sessions on the filesystem. This store works best on POSIX systems and Windows Vista / Windows Server 2008 and newer. .. versionchanged:: 0.6 `renew_missing` was added. Previously this was considered `True`, now the default changed to `False` and it can be explicitly deactivated. :param path: the path to the folder used for storing the sessions. If not provided the default temporary directory is used. :param filename_template: a string template used to give the session a filename. ``%s`` is replaced with the session id. :param session_class: The session class to use. Defaults to :class:`Session`. :param renew_missing: set to `True` if you want the store to give the user a new sid if the session was not yet saved. """<def_stmt>__init__ self path=<none> filename_template="werkzeug_%s.sess" session_class=<none> renew_missing=<false> mode=0o644 <block_start>SessionStore.__init__(self session_class)<if_stmt>path<is><none><block_start>path=tempfile.gettempdir()<block_end>self.path=path<if_stmt>isinstance(filename_template text_type)<and>PY2<block_start>filename_template=filename_template.encode(get_filesystem_encoding())<block_end><assert_stmt><not>filename_template.endswith(_fs_transaction_suffix) ("filename templates may not end with %s"%_fs_transaction_suffix)<line_sep>self.filename_template=filename_template<line_sep>self.renew_missing=renew_missing<line_sep>self.mode=mode<block_end><def_stmt>get_session_filename self sid# out of the box, this should be a strict ASCII subset but # you might reconfigure the session object to have a more # arbitrary string. <block_start><if_stmt>isinstance(sid text_type)<and>PY2<block_start>sid=sid.encode(get_filesystem_encoding())<block_end><return>path.join(self.path self.filename_template%sid)<block_end><def_stmt>save self session<block_start>fn=self.get_session_filename(session.sid)<line_sep>fd,tmp=tempfile.mkstemp(suffix=_fs_transaction_suffix dir=self.path)<line_sep>f=os.fdopen(fd "wb")<try_stmt><block_start>dump(dict(session) f HIGHEST_PROTOCOL)<block_end><finally_stmt><block_start>f.close()<block_end><try_stmt><block_start>rename(tmp fn)<line_sep>os.chmod(fn self.mode)<block_end><except_stmt>(IOError OSError)<block_start><pass><block_end><block_end><def_stmt>delete self session<block_start>fn=self.get_session_filename(session.sid)<try_stmt><block_start>os.unlink(fn)<block_end><except_stmt>OSError<block_start><pass><block_end><block_end><def_stmt>get self sid<block_start><if_stmt><not>self.is_valid_key(sid)<block_start><return>self.new()<block_end><try_stmt><block_start>f=open(self.get_session_filename(sid) "rb")<block_end><except_stmt>IOError<block_start><if_stmt>self.renew_missing<block_start><return>self.new()<block_end>data={}<block_end><else_stmt><block_start><try_stmt><block_start><try_stmt><block_start>data=load(f)<block_end><except_stmt>Exception<block_start>data={}<block_end><block_end><finally_stmt><block_start>f.close()<block_end><block_end><return>self.session_class(data sid <false>)<block_end><def_stmt>list self<block_start>"""Lists all sessions in the store. .. 
versionadded:: 0.6 """<line_sep>before,after=self.filename_template.split("%s" 1)<line_sep>filename_re=re.compile(r"%s(.{5,})%s$"%(re.escape(before) re.escape(after)))<line_sep>result=[]<for_stmt>filename os.listdir(self.path)#: this is a session that is still being saved. <block_start><if_stmt>filename.endswith(_fs_transaction_suffix)<block_start><continue><block_end>match=filename_re.match(filename)<if_stmt>match<is><not><none><block_start>result.append(match.group(1))<block_end><block_end><return>result<block_end><block_end><class_stmt>SessionMiddleware(object)<block_start>"""A simple middleware that puts the session object of a store provided into the WSGI environ. It automatically sets cookies and restores sessions. However a middleware is not the preferred solution because it won't be as fast as sessions managed by the application itself and will put a key into the WSGI environment only relevant for the application which is against the concept of WSGI. The cookie parameters are the same as for the :func:`~dump_cookie` function just prefixed with ``cookie_``. Additionally `max_age` is called `cookie_age` and not `cookie_max_age` because of backwards compatibility. """<def_stmt>__init__ self app store cookie_name="session_id" cookie_age=<none> cookie_expires=<none> cookie_path="/" cookie_domain=<none> cookie_secure=<none> cookie_httponly=<false> cookie_samesite="Lax" environ_key="werkzeug.session" <block_start>self.app=app<line_sep>self.store=store<line_sep>self.cookie_name=cookie_name<line_sep>self.cookie_age=cookie_age<line_sep>self.cookie_expires=cookie_expires<line_sep>self.cookie_path=cookie_path<line_sep>self.cookie_domain=cookie_domain<line_sep>self.cookie_secure=cookie_secure<line_sep>self.cookie_httponly=cookie_httponly<line_sep>self.cookie_samesite=cookie_samesite<line_sep>self.environ_key=environ_key<block_end><def_stmt>__call__ self environ start_response<block_start>cookie=parse_cookie(environ.get("HTTP_COOKIE" ""))<line_sep>sid=cookie.get(self.cookie_name <none>)<if_stmt>sid<is><none><block_start>session=self.store.new()<block_end><else_stmt><block_start>session=self.store.get(sid)<block_end>environ[self.environ_key]=session<def_stmt>injecting_start_response status headers exc_info=<none><block_start><if_stmt>session.should_save<block_start>self.store.save(session)<line_sep>headers.append(("Set-Cookie" dump_cookie(self.cookie_name session.sid self.cookie_age self.cookie_expires self.cookie_path self.cookie_domain self.cookie_secure self.cookie_httponly samesite=self.cookie_samesite ) ))<block_end><return>start_response(status headers exc_info)<block_end><return>ClosingIterator(self.app(environ injecting_start_response) <lambda>:self.store.save_if_modified(session) )<block_end><block_end>
# coding=utf-8 # Copyright 2020 HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """BioCreative II gene mention recognition Corpus"""<import_stmt>datasets<line_sep>logger=datasets.logging.get_logger(__name__)<line_sep>_CITATION="""\ @article{smith2008overview, title={Overview of BioCreative II gene mention recognition}, author={<NAME> and <NAME>, <NAME>, <NAME> and <NAME> and others}, journal={Genome biology}, volume={9}, number={S2}, pages={S2}, year={2008}, publisher={Springer} } """<line_sep>_DESCRIPTION="""\ Nineteen teams presented results for the Gene Mention Task at the BioCreative II Workshop. In this task participants designed systems to identify substrings in sentences corresponding to gene name mentions. A variety of different methods were used and the results varied with a highest achieved F1 score of 0.8721. Here we present brief descriptions of all the methods used and a statistical analysis of the results. We also demonstrate that, by combining the results from all submissions, an F score of 0.9066 is feasible, and furthermore that the best result makes use of the lowest scoring submissions. For more details, see: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/ The original dataset can be downloaded from: https://biocreative.bioinformatics.udel.edu/resources/corpora/biocreative-ii-corpus/ This dataset has been converted to CoNLL format for NER using the following tool: https://github.com/spyysalo/standoff2conll """<line_sep>_HOMEPAGE="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/"<line_sep>_URL="https://github.com/spyysalo/bc2gm-corpus/raw/master/conll/"<line_sep>_TRAINING_FILE="train.tsv"<line_sep>_DEV_FILE="devel.tsv"<line_sep>_TEST_FILE="test.tsv"<class_stmt>Bc2gmCorpusConfig(datasets.BuilderConfig)<block_start>"""BuilderConfig for Bc2gmCorpus"""<def_stmt>__init__ self **kwargs<block_start>"""BuilderConfig for Bc2gmCorpus. Args: **kwargs: keyword arguments forwarded to super. 
"""<line_sep>super(Bc2gmCorpusConfig self).__init__(**kwargs)<block_end><block_end><class_stmt>Bc2gmCorpus(datasets.GeneratorBasedBuilder)<block_start>"""Bc2gmCorpus dataset."""<line_sep>BUILDER_CONFIGS=[Bc2gmCorpusConfig(name="bc2gm_corpus" version=datasets.Version("1.0.0") description="bc2gm corpus") ]<def_stmt>_info self<block_start><return>datasets.DatasetInfo(description=_DESCRIPTION features=datasets.Features({"id":datasets.Value("string") "tokens":datasets.Sequence(datasets.Value("string")) "ner_tags":datasets.Sequence(datasets.features.ClassLabel(names=["O" "B-GENE" "I-GENE" ])) }) supervised_keys=<none> homepage=_HOMEPAGE citation=_CITATION )<block_end><def_stmt>_split_generators self dl_manager<block_start>"""Returns SplitGenerators."""<line_sep>urls_to_download={"train":f"{_URL}{_TRAINING_FILE}" "dev":f"{_URL}{_DEV_FILE}" "test":f"{_URL}{_TEST_FILE}" }<line_sep>downloaded_files=dl_manager.download_and_extract(urls_to_download)<line_sep><return>[datasets.SplitGenerator(name=datasets.Split.TRAIN gen_kwargs={"filepath":downloaded_files["train"]}) datasets.SplitGenerator(name=datasets.Split.VALIDATION gen_kwargs={"filepath":downloaded_files["dev"]}) datasets.SplitGenerator(name=datasets.Split.TEST gen_kwargs={"filepath":downloaded_files["test"]}) ]<block_end><def_stmt>_generate_examples self filepath<block_start>logger.info("⏳ Generating examples from = %s" filepath)<with_stmt>open(filepath encoding="utf-8")<as>f<block_start>guid=0<line_sep>tokens=[]<line_sep>ner_tags=[]<for_stmt>line f<block_start><if_stmt>line<eq>""<or>line<eq>"\n"<block_start><if_stmt>tokens<block_start><yield>guid {"id":str(guid) "tokens":tokens "ner_tags":ner_tags }<line_sep>guid<augadd>1<line_sep>tokens=[]<line_sep>ner_tags=[]<block_end><block_end><else_stmt># tokens are tab separated <block_start>splits=line.split("\t")<line_sep>tokens.append(splits[0])<line_sep>ner_tags.append(splits[1].rstrip())<block_end><block_end># last example <yield>guid {"id":str(guid) "tokens":tokens "ner_tags":ner_tags }<block_end><block_end><block_end>
# $Id$ # # Copyright (C) 2003-2006 Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """basic unit testing code for the wrapper of the SMARTS matcher """<import_from_stmt>rdkit RDConfig<import_stmt>unittest<import_stmt>os.path<import_from_stmt>rdkit Chem<class_stmt>TestCase(unittest.TestCase)<block_start><def_stmt>setUp self#print '\n%s: '%self.shortDescription(), <block_start>fName=os.path.join(RDConfig.RDCodeDir 'Chem' 'test_data' 'quinone.mol')<line_sep>self.m=Chem.MolFromMolFile(fName)<assert_stmt>self.m.GetNumAtoms()<eq>8 'bad nAtoms'<block_end><def_stmt>testMatch self<block_start>" testing smarts match "<line_sep>p=Chem.MolFromSmarts('CC(=O)C')<line_sep>matches=self.m.GetSubstructMatches(p)<assert_stmt>len(matches)<eq>2 'bad UMapList: %s'%(str(matches))<for_stmt>match matches<block_start><assert_stmt>len(match)<eq>4 'bad match: %s'%(str(match))<block_end><block_end><def_stmt>testOrder self<block_start>" testing atom order in smarts match "<line_sep>p=Chem.MolFromSmarts('CC(=O)C')<line_sep>matches=self.m.GetSubstructMatches(p)<line_sep>m=matches[0]<line_sep>atomList=[self.m.GetAtomWithIdx(x).GetSymbol()<for>x m]<assert_stmt>atomList<eq>['C' 'C' 'O' 'C'] 'bad atom ordering: %s'%str(atomList)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. <import_from_stmt>abc abstractmethod<import_from_stmt>copy deepcopy<import_from_stmt>typing Callable Dict Union<import_from_stmt>jsonpickle.pickler Pickler<import_from_stmt>botbuilder.core.state_property_accessor StatePropertyAccessor<import_from_stmt>.bot_assert BotAssert<import_from_stmt>.turn_context TurnContext<import_from_stmt>.storage Storage<import_from_stmt>.property_manager PropertyManager<class_stmt>CachedBotState<block_start>""" Internal cached bot state. """<def_stmt>__init__ self state:Dict[str object]=<none><block_start>self.state=state<if>state<is><not><none><else>{}<line_sep>self.hash=self.compute_hash(state)<block_end>@property<def_stmt>is_changed self<arrow>bool<block_start><return>self.hash<ne>self.compute_hash(self.state)<block_end><def_stmt>compute_hash self obj:object<arrow>str<block_start><return>str(Pickler().flatten(obj))<block_end><block_end><class_stmt>BotState(PropertyManager)<block_start>""" Defines a state management object and automates the reading and writing of associated state properties to a storage layer. .. remarks:: Each state management object defines a scope for a storage layer. State properties are created within a state management scope, and the Bot Framework defines these scopes: :class:`ConversationState`, :class:`UserState`, and :class:`PrivateConversationState`. You can define additional scopes for your bot. """<def_stmt>__init__ self storage:Storage context_service_key:str<block_start>""" Initializes a new instance of the :class:`BotState` class. :param storage: The storage layer this state management object will use to store and retrieve state :type storage: :class:`bptbuilder.core.Storage` :param context_service_key: The key for the state cache for this :class:`BotState` :type context_service_key: str .. remarks:: This constructor creates a state management object and associated scope. The object uses the :param storage: to persist state property values and the :param context_service_key: to cache state within the context for each turn. :raises: It raises an argument null exception. """<line_sep>self.state_key="state"<line_sep>self._storage=storage<line_sep>self._context_service_key=context_service_key<block_end><def_stmt>get_cached_state self turn_context:TurnContext<block_start>""" Gets the cached bot state instance that wraps the raw cached data for this "BotState" from the turn context. :param turn_context: The context object for this turn. :type turn_context: :class:`TurnContext` :return: The cached bot state instance. """<line_sep>BotAssert.context_not_none(turn_context)<line_sep><return>turn_context.turn_state.get(self._context_service_key)<block_end><def_stmt>create_property self name:str<arrow>StatePropertyAccessor<block_start>""" Creates a property definition and registers it with this :class:`BotState`. 
:param name: The name of the property :type name: str :return: If successful, the state property accessor created :rtype: :class:`StatePropertyAccessor` """<if_stmt><not>name<block_start><raise>TypeError("BotState.create_property(): name cannot be None or empty.")<block_end><return>BotStatePropertyAccessor(self name)<block_end><def_stmt>get self turn_context:TurnContext<arrow>Dict[str object]<block_start>BotAssert.context_not_none(turn_context)<line_sep>cached=self.get_cached_state(turn_context)<line_sep><return>getattr(cached "state" <none>)<block_end><async_keyword><def_stmt>load self turn_context:TurnContext force:bool=<false><arrow><none><block_start>""" Reads the current state object and caches it in the context object for this turn. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param force: Optional, true to bypass the cache :type force: bool """<line_sep>BotAssert.context_not_none(turn_context)<line_sep>cached_state=self.get_cached_state(turn_context)<line_sep>storage_key=self.get_storage_key(turn_context)<if_stmt>force<or><not>cached_state<or><not>cached_state.state<block_start>items=<await>self._storage.read([storage_key])<line_sep>val=items.get(storage_key)<line_sep>turn_context.turn_state[self._context_service_key]=CachedBotState(val)<block_end><block_end><async_keyword><def_stmt>save_changes self turn_context:TurnContext force:bool=<false><arrow><none><block_start>""" Saves the state cached in the current context for this turn. If the state has changed, it saves the state cached in the current context for this turn. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param force: Optional, true to save state to storage whether or not there are changes :type force: bool """<line_sep>BotAssert.context_not_none(turn_context)<line_sep>cached_state=self.get_cached_state(turn_context)<if_stmt>force<or>(cached_state<is><not><none><and>cached_state.is_changed)<block_start>storage_key=self.get_storage_key(turn_context)<line_sep>changes:Dict[str object]={storage_key:cached_state.state}<line_sep><await>self._storage.write(changes)<line_sep>cached_state.hash=cached_state.compute_hash(cached_state.state)<block_end><block_end><async_keyword><def_stmt>clear_state self turn_context:TurnContext<block_start>""" Clears any state currently stored in this state scope. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :return: None .. remarks:: This function must be called in order for the cleared state to be persisted to the underlying store. """<line_sep>BotAssert.context_not_none(turn_context)<line_sep># Explicitly setting the hash will mean IsChanged is always true. And that will force a Save. cache_value=CachedBotState()<line_sep>cache_value.hash=""<line_sep>turn_context.turn_state[self._context_service_key]=cache_value<block_end><async_keyword><def_stmt>delete self turn_context:TurnContext<arrow><none><block_start>""" Deletes any state currently stored in this state scope. 
:param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :return: None """<line_sep>BotAssert.context_not_none(turn_context)<line_sep>turn_context.turn_state.pop(self._context_service_key)<line_sep>storage_key=self.get_storage_key(turn_context)<line_sep><await>self._storage.delete({storage_key})<block_end>@abstractmethod<def_stmt>get_storage_key self turn_context:TurnContext<arrow>str<block_start><raise>NotImplementedError()<block_end><async_keyword><def_stmt>get_property_value self turn_context:TurnContext property_name:str<block_start>""" Gets the value of the specified property in the turn context. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param property_name: The property name :type property_name: str :return: The value of the property """<line_sep>BotAssert.context_not_none(turn_context)<if_stmt><not>property_name<block_start><raise>TypeError("BotState.get_property_value(): property_name cannot be None.")<block_end>cached_state=self.get_cached_state(turn_context)<line_sep># if there is no value, this will throw, to signal to IPropertyAccesor that a default value should be computed # This allows this to work with value types <return>cached_state.state[property_name]<block_end><async_keyword><def_stmt>delete_property_value self turn_context:TurnContext property_name:str<arrow><none><block_start>""" Deletes a property from the state cache in the turn context. :param turn_context: The context object for this turn :type turn_context: :TurnContext` :param property_name: The name of the property to delete :type property_name: str :return: None """<line_sep>BotAssert.context_not_none(turn_context)<if_stmt><not>property_name<block_start><raise>TypeError("BotState.delete_property(): property_name cannot be None.")<block_end>cached_state=self.get_cached_state(turn_context)<del_stmt>cached_state.state[property_name]<block_end><async_keyword><def_stmt>set_property_value self turn_context:TurnContext property_name:str value:object<arrow><none><block_start>""" Sets a property to the specified value in the turn context. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param property_name: The property name :type property_name: str :param value: The value to assign to the property :type value: Object :return: None """<line_sep>BotAssert.context_not_none(turn_context)<if_stmt><not>property_name<block_start><raise>TypeError("BotState.delete_property(): property_name cannot be None.")<block_end>cached_state=self.get_cached_state(turn_context)<line_sep>cached_state.state[property_name]=value<block_end><block_end><class_stmt>BotStatePropertyAccessor(StatePropertyAccessor)<block_start>""" Defines methods for accessing a state property created in a :class:`BotState` object. """<def_stmt>__init__ self bot_state:BotState name:str<block_start>""" Initializes a new instance of the :class:`BotStatePropertyAccessor` class. :param bot_state: The state object to access :type bot_state: :class:`BotState` :param name: The name of the state property to access :type name: str """<line_sep>self._bot_state=bot_state<line_sep>self._name=name<block_end>@property<def_stmt>name self<arrow>str<block_start>""" The name of the property. """<line_sep><return>self._name<block_end><async_keyword><def_stmt>delete self turn_context:TurnContext<arrow><none><block_start>""" Deletes the property. 
:param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` """<line_sep><await>self._bot_state.load(turn_context <false>)<line_sep><await>self._bot_state.delete_property_value(turn_context self._name)<block_end><async_keyword><def_stmt>get self turn_context:TurnContext default_value_or_factory:Union[Callable object]=<none> <arrow>object<block_start>""" Gets the property value. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param default_value_or_factory: Defines the default value for the property """<line_sep><await>self._bot_state.load(turn_context <false>)<try_stmt><block_start>result=<await>self._bot_state.get_property_value(turn_context self._name)<line_sep><return>result<block_end><except_stmt># ask for default value from factory <block_start><if_stmt><not>default_value_or_factory<block_start><return><none><block_end>result=(default_value_or_factory()<if>callable(default_value_or_factory)<else>deepcopy(default_value_or_factory))<line_sep># save default value for any further calls <await>self.set(turn_context result)<line_sep><return>result<block_end><block_end><async_keyword><def_stmt>set self turn_context:TurnContext value:object<arrow><none><block_start>""" Sets the property value. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param value: The value to assign to the property """<line_sep><await>self._bot_state.load(turn_context <false>)<line_sep><await>self._bot_state.set_property_value(turn_context self._name value)<block_end><block_end>
<import_from_future_stmt> annotations<import_from_stmt>.__about__ __version__<import_from_stmt>.config Config<line_sep>__all__=("__version__" "Config")<line_sep>
"""@author: Young @license: (C) Copyright 2013-2017 @contact: <EMAIL> @file: main.py @time: 2018/1/17 10:02 """<import_stmt>gc<import_stmt>gym<import_from_stmt>agent.agent Agent<line_sep>MAX_EPISODES=5000<line_sep>env=gym.make('BipedalWalker-v2')<line_sep>state_size=env.observation_space.shape[0]<line_sep>action_size=env.action_space.shape[0]<line_sep>agent=Agent(state_size action_size)<line_sep>state=env.reset()<for_stmt>_ range(int(1e3))<block_start>action=agent.get_exploration_policy(state)<line_sep>next_state,reward,done,info=env.step(action)<line_sep>agent.append(state action reward done next_state)<line_sep>state=next_state<if_stmt>done<block_start>state=env.reset()<block_end><block_end><for_stmt>_ep range(MAX_EPISODES)<block_start>state=env.reset()<line_sep>count=0<while_stmt><true><block_start>count<augadd>1<line_sep># env.render() action=agent.get_exploration_policy(state)<line_sep>next_state,reward,done,info=env.step(action)<line_sep>agent.append(state action reward done next_state)<line_sep>state=next_state<line_sep>agent.optimize()<if_stmt>done<block_start>state=env.reset()<line_sep><break><block_end><block_end>gc.collect()<if_stmt>_ep%100<eq>0<block_start>print("{} - score: {}".format(_ep count))<line_sep>agent.save_models(_ep)<block_end><block_end>
<import_from_stmt>functools partial<import_stmt>numpy<as>np<import_from_stmt>keras.preprocessing.image img_to_array<import_from_stmt>keras.preprocessing.image load_img<import_from_stmt>toolbox.image bicubic_rescale<import_from_stmt>toolbox.image modcrop<import_from_stmt>toolbox.paths data_dir<def_stmt>load_set name lr_sub_size=11 lr_sub_stride=5 scale=3<block_start>hr_sub_size=lr_sub_size<times>scale<line_sep>hr_sub_stride=lr_sub_stride<times>scale<line_sep>lr_gen_sub=partial(generate_sub_images size=lr_sub_size stride=lr_sub_stride)<line_sep>hr_gen_sub=partial(generate_sub_images size=hr_sub_size stride=hr_sub_stride)<line_sep>lr_sub_arrays=[]<line_sep>hr_sub_arrays=[]<for_stmt>path (data_dir/name).glob('*')<block_start>lr_image,hr_image=load_image_pair(str(path) scale=scale)<line_sep>lr_sub_arrays<augadd>[img_to_array(img)<for>img lr_gen_sub(lr_image)]<line_sep>hr_sub_arrays<augadd>[img_to_array(img)<for>img hr_gen_sub(hr_image)]<block_end>x=np.stack(lr_sub_arrays)<line_sep>y=np.stack(hr_sub_arrays)<line_sep><return>x y<block_end><def_stmt>load_image_pair path scale=3<block_start>image=load_img(path)<line_sep>image=image.convert('YCbCr')<line_sep>hr_image=modcrop(image scale)<line_sep>lr_image=bicubic_rescale(hr_image 1/scale)<line_sep><return>lr_image hr_image<block_end><def_stmt>generate_sub_images image size stride<block_start><for_stmt>i range(0 image.size[0]-size+1 stride)<block_start><for_stmt>j range(0 image.size[1]-size+1 stride)<block_start><yield>image.crop([i j i+size j+size])<block_end><block_end><block_end>
# Copyright 2013 Google, Inc. All Rights Reserved. # # Google Author(s): <NAME>, <NAME> <import_from_stmt>fontTools.ttLib.tables.DefaultTable DefaultTable<import_stmt>logging<line_sep>log=logging.getLogger("fontTools.merge")<def_stmt>add_method *clazzes **kwargs<block_start>"""Returns a decorator function that adds a new method to one or more classes."""<line_sep>allowDefault=kwargs.get('allowDefaultTable' <false>)<def_stmt>wrapper method<block_start>done=[]<for_stmt>clazz clazzes<block_start><if_stmt>clazz<in>done<block_start><continue># Support multiple names of a clazz <block_end>done.append(clazz)<assert_stmt>allowDefault<or>clazz<ne>DefaultTable 'Oops, table class not found.'<assert_stmt>method.__name__<not><in>clazz.__dict__ "Oops, class '%s' has method '%s'."%(clazz.__name__ method.__name__)<line_sep>setattr(clazz method.__name__ method)<block_end><return><none><block_end><return>wrapper<block_end><def_stmt>mergeObjects lst<block_start>lst=[item<for>item lst<if>item<is><not>NotImplemented]<if_stmt><not>lst<block_start><return>NotImplemented<block_end>lst=[item<for>item lst<if>item<is><not><none>]<if_stmt><not>lst<block_start><return><none><block_end>clazz=lst[0].__class__<assert_stmt>all(type(item)<eq>clazz<for>item lst) lst<line_sep>logic=clazz.mergeMap<line_sep>returnTable=clazz()<line_sep>returnDict={}<line_sep>allKeys=set.union(set() *(vars(table).keys()<for>table lst))<for_stmt>key allKeys<block_start><try_stmt><block_start>mergeLogic=logic[key]<block_end><except_stmt>KeyError<block_start><try_stmt><block_start>mergeLogic=logic['*']<block_end><except_stmt>KeyError<block_start><raise>Exception("Don't know how to merge key %s of class %s"%(key clazz.__name__))<block_end><block_end><if_stmt>mergeLogic<is>NotImplemented<block_start><continue><block_end>value=mergeLogic(getattr(table key NotImplemented)<for>table lst)<if_stmt>value<is><not>NotImplemented<block_start>returnDict[key]=value<block_end><block_end>returnTable.__dict__=returnDict<line_sep><return>returnTable<block_end>@add_method(DefaultTable allowDefaultTable=<true>)<def_stmt>merge self m tables<block_start><if_stmt><not>hasattr(self 'mergeMap')<block_start>log.info("Don't know how to merge '%s'." self.tableTag)<line_sep><return>NotImplemented<block_end>logic=self.mergeMap<if_stmt>isinstance(logic dict)<block_start><return>m.mergeObjects(self self.mergeMap tables)<block_end><else_stmt><block_start><return>logic(tables)<block_end><block_end>
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the API /events methods. """<import_from_stmt>http client<as>http_client<import_from_stmt>unittest mock<import_from_stmt>keystonemiddleware auth_token<import_from_stmt>oslo_config cfg<import_from_stmt>ironic.api.controllers base<as>api_base<import_from_stmt>ironic.api.controllers.v1 event<import_from_stmt>ironic.api.controllers.v1 versions<import_from_stmt>ironic.common args<import_from_stmt>ironic.common exception<import_from_stmt>ironic.tests base<as>test_base<import_from_stmt>ironic.tests.unit.api base<as>test_api_base<def_stmt>get_fake_port_event <block_start><return>{'event':'network.bind_port' 'port_id':'11111111-aaaa-bbbb-cccc-555555555555' 'mac_address':'de:ad:ca:fe:ba:be' 'status':'ACTIVE' 'device_id':'22222222-aaaa-bbbb-cccc-555555555555' 'binding:host_id':'22222222-aaaa-bbbb-cccc-555555555555' 'binding:vnic_type':'baremetal'}<block_end><class_stmt>TestEventValidator(test_base.TestCase)<block_start><def_stmt>setUp self<block_start>super(TestEventValidator self).setUp()<line_sep>self.v_event=event.NETWORK_EVENT_VALIDATOR<line_sep>self.v_events=args.schema(event.EVENTS_SCHEMA)<block_end><def_stmt>test_simple_event_type self<block_start>self.v_events('body' {'events':[get_fake_port_event()]})<block_end><def_stmt>test_invalid_event_type self<block_start>value={'events':[{'event':'invalid.event'}]}<line_sep>self.assertRaisesRegex(exception.Invalid "Schema error for body: "<concat>"'invalid.event' is not one of" self.v_events 'body' value)<block_end><def_stmt>test_event_missing_madatory_field self<block_start>value={'invalid':'invalid'}<line_sep>self.assertRaisesRegex(exception.Invalid "Schema error for event: "<concat>"'event' is a required property" self.v_event 'event' value)<block_end><def_stmt>test_invalid_mac_network_port_event self<block_start>value={'event':'network.bind_port' 'port_id':'11111111-aaaa-bbbb-cccc-555555555555' 'mac_address':'INVALID_MAC_ADDRESS' 'status':'ACTIVE' 'device_id':'22222222-aaaa-bbbb-cccc-555555555555' 'binding:host_id':'22222222-aaaa-bbbb-cccc-555555555555' 'binding:vnic_type':'baremetal'}<line_sep>self.assertRaisesRegex(exception.Invalid 'Expected valid MAC address for mac_address: '<concat>'INVALID_MAC_ADDRESS' self.v_event 'event' value)<block_end><def_stmt>test_missing_mandatory_fields_network_port_event self<block_start>value={'event':'network.bind_port' 'device_id':'22222222-aaaa-bbbb-cccc-555555555555' 'binding:host_id':'22222222-aaaa-bbbb-cccc-555555555555' 'binding:vnic_type':'baremetal'}<line_sep>self.assertRaisesRegex(exception.Invalid "Schema error for event: "<concat>"'port_id' is a required property" self.v_event 'event' value)<block_end><block_end><class_stmt>TestPost(test_api_base.BaseApiTest)<block_start><def_stmt>setUp self<block_start>super(TestPost self).setUp()<line_sep>self.headers={api_base.Version.string:str(versions.max_version_string())}<block_end><def_stmt>test_events self<block_start>events_dict={'events':[get_fake_port_event()]}<line_sep>response=self.post_json('/events' 
events_dict headers=self.headers)<line_sep>self.assertEqual(http_client.NO_CONTENT response.status_int)<block_end><def_stmt>test_multiple_events self<block_start>events_dict={'events':[get_fake_port_event() get_fake_port_event() get_fake_port_event()]}<line_sep>response=self.post_json('/events' events_dict headers=self.headers)<line_sep>self.assertEqual(http_client.NO_CONTENT response.status_int)<block_end><def_stmt>test_events_does_not_contain_event self<block_start>events_dict={'events':[{'INVALID':'fake.event'}]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_events_invalid_event self<block_start>events_dict={'events':[{'event':'invalid.event'}]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_unknown_event_property self<block_start>events_dict={'events':[{'event':'network.unbind_port' 'UNKNOWN':'EVENT_PROPERTY'}]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_bind_port_events self<block_start>events_dict={'events':[get_fake_port_event()]}<line_sep>response=self.post_json('/events' events_dict headers=self.headers)<line_sep>self.assertEqual(http_client.NO_CONTENT response.status_int)<block_end><def_stmt>test_network_unbind_port_events self<block_start>events_dict={'events':[get_fake_port_event()]}<line_sep>events_dict['events'][0].update({'event':'network.unbind_port'})<line_sep>response=self.post_json('/events' events_dict headers=self.headers)<line_sep>self.assertEqual(http_client.NO_CONTENT response.status_int)<block_end><def_stmt>test_network_delete_port_events self<block_start>events_dict={'events':[get_fake_port_event()]}<line_sep>events_dict['events'][0].update({'event':'network.delete_port'})<line_sep>response=self.post_json('/events' events_dict headers=self.headers)<line_sep>self.assertEqual(http_client.NO_CONTENT response.status_int)<block_end><def_stmt>test_network_port_event_invalid_mac_address self<block_start>port_evt=get_fake_port_event()<line_sep>port_evt.update({'mac_address':'INVALID_MAC_ADDRESS'})<line_sep>events_dict={'events':[port_evt]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_port_event_invalid_device_id self<block_start>port_evt=get_fake_port_event()<line_sep>port_evt.update({'device_id':'DEVICE_ID_SHOULD_BE_UUID'})<line_sep>events_dict={'events':[port_evt]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST 
response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_port_event_invalid_port_id self<block_start>port_evt=get_fake_port_event()<line_sep>port_evt.update({'port_id':'PORT_ID_SHOULD_BE_UUID'})<line_sep>events_dict={'events':[port_evt]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_port_event_invalid_status self<block_start>port_evt=get_fake_port_event()<line_sep>port_evt.update({'status':['status' 'SHOULD' 'BE' 'TEXT']})<line_sep>events_dict={'events':[port_evt]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_port_event_invalid_binding_vnic_type self<block_start>port_evt=get_fake_port_event()<line_sep>port_evt.update({'binding:vnic_type':['binding:vnic_type' 'SHOULD' 'BE' 'TEXT']})<line_sep>events_dict={'events':[port_evt]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_network_port_event_invalid_binding_host_id self<block_start>port_evt=get_fake_port_event()<line_sep>port_evt.update({'binding:host_id':['binding:host_id' 'IS' 'NODE_UUID' 'IN' 'IRONIC']})<line_sep>events_dict={'events':[port_evt]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=self.headers)<line_sep>self.assertEqual(http_client.BAD_REQUEST response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><def_stmt>test_events_unsupported_api_version self<block_start>headers={api_base.Version.string:'1.50'}<line_sep>events_dict={'events':[get_fake_port_event()]}<line_sep>response=self.post_json('/events' events_dict expect_errors=<true> headers=headers)<line_sep>self.assertEqual(http_client.NOT_FOUND response.status_int)<line_sep>self.assertEqual('application/json' response.content_type)<line_sep>self.assertTrue(response.json['error_message'])<block_end><block_end>@mock.patch.object(auth_token.AuthProtocol 'process_request' <lambda>*_:<none>)<class_stmt>TestPostRBAC(TestPost)<block_start>"""Test class to execute the Event post tests with RBAC enforcement."""<def_stmt>setUp self<block_start>super(TestPostRBAC self).setUp()<line_sep>cfg.CONF.set_override('enforce_scope' <true> group='oslo_policy')<line_sep>cfg.CONF.set_override('enforce_new_defaults' <true> group='oslo_policy')<line_sep>cfg.CONF.set_override('auth_strategy' 'keystone')<line_sep># Headers required for this to pass in system scope restricted # authentication, as our default for api tests is noauth. self.headers={api_base.Version.string:str(versions.max_version_string()) 'X-Auth-Token':'<PASSWORD>' 'X-Roles':'admin' 'OpenStack-System-Scope':'all' }<block_end><block_end>
<class_stmt>AppTestCProfile(object)<block_start>spaceconfig={"usemodules":['_lsprof' 'time'] }<def_stmt>setup_class cls<block_start>cls.w_file=cls.space.wrap(__file__)<block_end><def_stmt>test_repr self<block_start><import_stmt>_lsprof<assert_stmt>repr(_lsprof.Profiler)<eq>"<type '_lsprof.Profiler'>"<block_end><def_stmt>test_builtins self<block_start><import_stmt>_lsprof<line_sep>prof=_lsprof.Profiler()<line_sep>lst=[]<line_sep>prof.enable()<line_sep>lst.append(len(lst))<line_sep>prof.disable()<line_sep>stats=prof.getstats()<line_sep>expected=("<len>" "<method 'append' of 'list' objects>" "<method 'disable' of '_lsprof.Profiler' objects>" )<for_stmt>entry stats<block_start><assert_stmt>entry.code<in>expected<block_end><block_end><def_stmt>test_builtins_callers self<block_start><import_stmt>_lsprof<line_sep>prof=_lsprof.Profiler(subcalls=<true>)<line_sep>lst=[]<def_stmt>f1 <block_start>lst.append(len(lst))<block_end>prof.enable(subcalls=<true>)<line_sep>f1()<line_sep>prof.disable()<line_sep>stats=prof.getstats()<line_sep>expected=("<len>" "<method 'append' of 'list' objects>" )<line_sep>by_id=set()<for_stmt>entry stats<block_start><if_stmt>entry.code<eq>f1.__code__<block_start><assert_stmt>len(entry.calls)<eq>2<for_stmt>subentry entry.calls<block_start><assert_stmt>subentry.code<in>expected<line_sep>by_id.add(id(subentry.code))<block_end><block_end><elif_stmt>entry.code<in>expected<block_start>by_id.add(id(entry.code))<block_end><block_end># :-( cProfile.py relies on the id() of the strings... <assert_stmt>len(by_id)<eq>len(expected)<block_end><def_stmt>test_direct self<block_start><import_stmt>_lsprof<def_stmt>getticks <block_start><return>len(ticks)<block_end>prof=_lsprof.Profiler(getticks 0.25 <true> <false>)<line_sep>ticks=[]<def_stmt>bar m<block_start>ticks.append(1)<if_stmt>m<eq>1<block_start>foo(42)<block_end>ticks.append(1)<block_end><def_stmt>spam m<block_start>bar(m)<block_end><def_stmt>foo n<block_start>bar(n)<line_sep>ticks.append(1)<line_sep>bar(n+1)<line_sep>ticks.append(1)<line_sep>spam(n+2)<block_end>prof.enable()<line_sep>foo(0)<line_sep>prof.disable()<assert_stmt>len(ticks)<eq>16<line_sep>stats=prof.getstats()<line_sep>entries={}<for_stmt>entry stats<block_start><assert_stmt>hasattr(entry.code 'co_name')<line_sep>entries[entry.code.co_name]=entry<block_end>efoo=entries['foo']<assert_stmt>efoo.callcount<eq>2<assert_stmt>efoo.reccallcount<eq>1<assert_stmt>efoo.inlinetime<eq>1.0<assert_stmt>efoo.totaltime<eq>4.0<assert_stmt>len(efoo.calls)<eq>2<line_sep>ebar=entries['bar']<assert_stmt>ebar.callcount<eq>6<assert_stmt>ebar.reccallcount<eq>3<assert_stmt>ebar.inlinetime<eq>3.0<assert_stmt>ebar.totaltime<eq>3.5<assert_stmt>len(ebar.calls)<eq>1<line_sep>espam=entries['spam']<assert_stmt>espam.callcount<eq>2<assert_stmt>espam.reccallcount<eq>0<assert_stmt>espam.inlinetime<eq>0.0<assert_stmt>espam.totaltime<eq>1.0<assert_stmt>len(espam.calls)<eq>1<line_sep>foo2spam,foo2bar=efoo.calls<if_stmt>foo2bar.code.co_name<eq>'spam'<block_start>foo2bar,foo2spam=foo2spam 
foo2bar<block_end><assert_stmt>foo2bar.code.co_name<eq>'bar'<assert_stmt>foo2bar.callcount<eq>4<assert_stmt>foo2bar.reccallcount<eq>2<assert_stmt>foo2bar.inlinetime<eq>2.0<assert_stmt>foo2bar.totaltime<eq>3.0<assert_stmt>foo2spam.code.co_name<eq>'spam'<assert_stmt>foo2spam.callcount<eq>2<assert_stmt>foo2spam.reccallcount<eq>0<assert_stmt>foo2spam.inlinetime<eq>0.0<assert_stmt>foo2spam.totaltime<eq>1.0<line_sep>bar2foo,=ebar.calls<assert_stmt>bar2foo.code.co_name<eq>'foo'<assert_stmt>bar2foo.callcount<eq>1<assert_stmt>bar2foo.reccallcount<eq>0<assert_stmt>bar2foo.inlinetime<eq>0.5<assert_stmt>bar2foo.totaltime<eq>2.0<line_sep>spam2bar,=espam.calls<assert_stmt>spam2bar.code.co_name<eq>'bar'<assert_stmt>spam2bar.callcount<eq>2<assert_stmt>spam2bar.reccallcount<eq>0<assert_stmt>spam2bar.inlinetime<eq>1.0<assert_stmt>spam2bar.totaltime<eq>1.0<block_end><def_stmt>test_scale_of_result self<block_start><import_stmt>_lsprof time<line_sep>prof=_lsprof.Profiler()<def_stmt>foo n<block_start>t=time.time()<while_stmt>abs(t-time.time())<l>1.0<block_start><pass># busy-wait for 1 second <block_end><block_end><def_stmt>bar n<block_start>foo(n)<block_end>prof.enable()<line_sep>bar(0)<line_sep>prof.disable()<line_sep>stats=prof.getstats()<line_sep>entries={}<for_stmt>entry stats<block_start>entries[entry.code]=entry<block_end>efoo=entries[foo.__code__]<line_sep>ebar=entries[bar.__code__]<assert_stmt>0.9<l>efoo.totaltime<l>2.9<line_sep># --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() #assert 0.9 < efoo.inlinetime < 2.9 <for_stmt>subentry ebar.calls<block_start><assert_stmt>0.9<l>subentry.totaltime<l>2.9<line_sep>#assert 0.9 < subentry.inlinetime < 2.9 <block_end><block_end><def_stmt>test_builtin_exception self<block_start><import_stmt>math<import_stmt>_lsprof<line_sep>prof=_lsprof.Profiler()<line_sep>prof.enable()<try_stmt><block_start>math.sqrt("a")<block_end><except_stmt>TypeError<block_start><pass><block_end>prof.disable()<line_sep>stats=prof.getstats()<assert_stmt>len(stats)<eq>2<block_end><block_end>
<import_from_stmt>argparse FileType<import_from_stmt>sys stdin stdout<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>devices.models Configuration<import_from_stmt>peering.models Router<class_stmt>Command(BaseCommand)<block_start>help="Render the configurations of routers."<def_stmt>add_arguments self parser<block_start>parser.add_argument("--limit" nargs="?" help="Limit the configuration to the given set of routers (comma separated)." )<line_sep>parser.add_argument("--input" nargs="?" type=FileType("r") default=stdin help="File to read the template from (default to stdin)." )<line_sep>parser.add_argument("--output" nargs="?" type=FileType("w") default=stdout help="File to write the configuration to (default to stdout)." )<line_sep>parser.add_argument("--trim" action="store_true" help="Remove new line after tag (keep them by default)." )<line_sep>parser.add_argument("--lstrip" action="store_true" help="Strip whitespaces before block (keep them by default)." )<block_end><def_stmt>handle self *args **options<block_start><if_stmt>options["verbosity"]<ge>2<block_start>self.stdout.write("[*] Loading template")<block_end>t=Configuration(name="tmp" template=options["input"].read() jinja2_trim=options["trim"] jinja2_lstrip=options["lstrip"] )<line_sep>routers=Router.objects.all()<if_stmt>options["limit"]<block_start>routers=routers.filter(hostname__in=options["limit"].split(","))<block_end>self.stdout.write("[*] Rendering configurations")<for_stmt>r routers<block_start><if_stmt>options["verbosity"]<ge>2<block_start>self.stdout.write(f" - Rendering {r.hostname} configuration")<block_end>r.configuration_template=t<line_sep>configuration=r.generate_configuration()<line_sep>self.stdout.write(configuration)<block_end><block_end><block_end>
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 <import_stmt>json<import_stmt>logging<import_stmt>mock<import_stmt>os<import_from_stmt>.common BaseTest<import_from_stmt>c7n.exceptions PolicyExecutionError<import_from_stmt>c7n.policy Policy<import_from_stmt>c7n handler<class_stmt>HandleTest(BaseTest)<block_start><def_stmt>test_init_config_exec_option_merge self<block_start>policy_config={'execution-options':{'region':'us-east-1' 'assume_role':'arn:::' 'profile':'dev' 'tracer':'xray' 'account_id':'004' 'dryrun':<true> 'cache':'/foobar.cache'} 'policies':[{'mode':{'type':'period' 'schedule':"rate(1 minute)" 'execution-options':{'metrics_enabled':<true> 'assume_role':'arn::::007:foo' 'output_dir':'s3://mybucket/output'}} 'resource':'aws.ec2' 'name':'check-dev'}]}<line_sep>self.assertEqual(dict(handler.init_config(policy_config)) {'assume_role':'arn::::007:foo' 'metrics_enabled':'aws' 'tracer':'xray' 'account_id':'007' 'region':'us-east-1' 'output_dir':'s3://mybucket/output' # defaults 'external_id':<none> 'dryrun':<false> 'profile':<none> 'authorization_file':<none> 'cache':'' 'regions':() 'cache_period':0 'log_group':<none> 'metrics':<none>})<block_end><def_stmt>setupLambdaEnv self policy_data environment=<none> err_execs=() log_level=logging.INFO<block_start>work_dir=self.change_cwd()<line_sep>self.patch(handler 'policy_data' <none>)<line_sep>self.patch(handler 'policy_config' <none>)<line_sep># don't require api creds to resolve account id <if_stmt>'execution-options'<not><in>policy_data<block_start>policy_data['execution-options']={'account_id':'007'}<block_end><elif_stmt>'account_id'<not><in>policy_data['execution-options']<block_start>policy_data['execution-options']['account_id']='007'<block_end><with_stmt>open(os.path.join(work_dir 'config.json') 'w')<as>fh<block_start>json.dump(policy_data fh indent=2)<block_end>output=self.capture_logging('custodian.lambda' level=log_level)<if_stmt>environment<block_start>self.change_environment(**environment)<block_end>policy_execution=[]<line_sep>validation_called=[]<def_stmt>validate self<block_start>validation_called.append(<true>)<block_end><def_stmt>push self event context<block_start>policy_execution.append((event context))<if_stmt>err_execs<block_start><raise>err_execs.pop(0)<block_end><block_end>self.patch(Policy "push" push)<line_sep>self.patch(Policy "validate" validate)<line_sep><return>output policy_execution<block_end><def_stmt>test_dispatch_log_event self<block_start>output,executions=self.setupLambdaEnv({'policies':[{'name':'ec2' 'resource':'ec2'}]} {'C7N_DEBUG_EVENT':<none>} log_level=logging.DEBUG)<line_sep>handler.dispatch_event({'detail':{'resource':'xyz'}} {})<line_sep>self.assertTrue('xyz'<in>output.getvalue())<line_sep>self.patch(handler 'C7N_DEBUG_EVENT' <false>)<line_sep>handler.dispatch_event({'detail':{'resource':'abc'}} {})<line_sep>self.assertFalse('abc'<in>output.getvalue())<line_sep>self.assertTrue(executions)<block_end>@mock.patch('c7n.handler.PolicyCollection')<def_stmt>test_dispatch_err_event self mock_collection<block_start>output,executions=self.setupLambdaEnv({'execution-options':{'output_dir':'s3://xyz' 'account_id':'004'} 'policies':[{'resource':'ec2' 'name':'xyz'}]} log_level=logging.DEBUG)<line_sep>mock_collection.from_data.return_value=[]<line_sep>handler.dispatch_event({'detail':{'errorCode':'unauthorized'}} <none>)<line_sep>self.assertTrue('Skipping failed operation: unauthorized'<in>output.getvalue())<line_sep>self.patch(handler 'C7N_SKIP_EVTERR' 
<false>)<line_sep>handler.dispatch_event({'detail':{'errorCode':'foi'}} <none>)<line_sep>self.assertFalse('Skipping failed operation: foi'<in>output.getvalue())<line_sep>mock_collection.from_data.assert_called_once()<block_end><def_stmt>test_dispatch_err_handle self<block_start>output,executions=self.setupLambdaEnv({'execution-options':{'output_dir':'s3://xyz' 'account_id':'004'} 'policies':[{'resource':'ec2' 'name':'xyz'}]} err_execs=[PolicyExecutionError("foo")]<times>2)<line_sep>self.assertRaises(PolicyExecutionError handler.dispatch_event {'detail':{'xyz':'oui'}} <none>)<line_sep>self.patch(handler 'C7N_CATCH_ERR' <true>)<line_sep>handler.dispatch_event({'detail':{'xyz':'oui'}} <none>)<line_sep>self.assertEqual(output.getvalue().count('error during') 2)<block_end><def_stmt>test_handler self<block_start>output,executions=self.setupLambdaEnv({'policies':[{'resource':'asg' 'name':'auto'}]} )<line_sep>self.assertEqual(handler.dispatch_event({"detail":{"errorCode":"404"}} <none>) <none>)<line_sep>self.assertEqual(handler.dispatch_event({"detail":{}} <none>) <true>)<line_sep>self.assertEqual(executions [({"detail":{} "debug":<true>} <none>)])<block_end><block_end>
""" Jump search algorithm iterates through a sorted list with a step of n^(1/2), until the element compared is bigger than the one searched.If the item is not in the particular step, it shifts the entire step. It will then perform a linear search on the step until it matches the target. If not found, it returns -1. Time Complexity: O(√n) Space Complexity: O(1) """<import_stmt>math<line_sep>arr=[0 1 2 8 13 17 19 25 31 32 42]<line_sep>target=25<def_stmt>jump_search arr:list x:int<arrow>int<block_start>""" >>> jump_search(arr, target) == (arr.index(target) if target in arr else -1) True """<line_sep>n=len(arr)<line_sep>step=int(math.floor(math.sqrt(n)))<line_sep>prev=0<while_stmt>arr[min(step n)-1]<l>x<block_start>prev=step<line_sep>step<augadd>int(math.floor(math.sqrt(n)))<if_stmt>prev<ge>n<block_start><return>-1<block_end><block_end><while_stmt>arr[prev]<l>x<block_start>prev=prev+1<if_stmt>prev<eq>min(step n)<block_start><return>-1<block_end><block_end><if_stmt>arr[prev]<eq>x<block_start><return>prev<block_end><return>-1<block_end><def_stmt>check_sort test:list<arrow>bool<block_start>"""checks whether the given list is sorted or not."""<if_stmt>sorted(test)<eq>test<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><if_stmt>check_sort(arr)<block_start>res=jump_search(arr target)<if_stmt>res<eq>-1<block_start>print("Number not found!")<block_end><else_stmt><block_start>print(f"Number {target} is at index {res}")<block_end><block_end><else_stmt><block_start>print("Given list is not sorted!")<block_end><block_end>
''' Inference code for SeqFormer '''<import_stmt>argparse<import_stmt>datetime<import_stmt>json<import_stmt>random<import_stmt>time<import_from_stmt>pathlib Path<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>datasets<import_stmt>util.misc<as>utils<import_from_stmt>models build_model<import_stmt>torchvision.transforms<as>T<import_stmt>matplotlib.pyplot<as>plt<import_stmt>os<import_from_stmt>PIL Image<import_stmt>math<import_stmt>torch.nn.functional<as>F<import_stmt>json<import_stmt>pycocotools.mask<as>mask_util<import_stmt>sys<import_stmt>cv2<def_stmt>get_args_parser <block_start>parser=argparse.ArgumentParser('Set transformer detector' add_help=<false>)<line_sep>parser.add_argument('--lr' default=1e-4 type=float)<line_sep>parser.add_argument('--lr_backbone' default=1e-5 type=float)<line_sep>parser.add_argument('--batch_size' default=2 type=int)<line_sep>parser.add_argument('--weight_decay' default=1e-4 type=float)<line_sep>parser.add_argument('--epochs' default=150 type=int)<line_sep>parser.add_argument('--lr_drop' default=100 type=int)<line_sep>parser.add_argument('--clip_max_norm' default=0.1 type=float help='gradient clipping max norm')<line_sep>parser.add_argument('--with_box_refine' default=<true> action='store_true')<line_sep># Model parameters parser.add_argument('--model_path' type=str default=<none> help="Path to the model weights.")<line_sep># * Backbone parser.add_argument('--backbone' default='resnet50' type=str help="Name of the convolutional backbone to use")<line_sep>parser.add_argument('--dilation' action='store_true' help="If true, we replace stride with dilation in the last convolutional block (DC5)")<line_sep>parser.add_argument('--position_embedding' default='sine' type=str choices=('sine' 'learned') help="Type of positional embedding to use on top of the image features")<line_sep>parser.add_argument('--position_embedding_scale' default=2<times>np.pi type=float help="position / size * scale")<line_sep>parser.add_argument('--num_feature_levels' default=4 type=int help='number of feature levels')<line_sep># * Transformer parser.add_argument('--enc_layers' default=6 type=int help="Number of encoding layers in the transformer")<line_sep>parser.add_argument('--dec_layers' default=6 type=int help="Number of decoding layers in the transformer")<line_sep>parser.add_argument('--dim_feedforward' default=1024 type=int help="Intermediate size of the feedforward layers in the transformer blocks")<line_sep>parser.add_argument('--hidden_dim' default=256 type=int help="Size of the embeddings (dimension of the transformer)")<line_sep>parser.add_argument('--dropout' default=0.1 type=float help="Dropout applied in the transformer")<line_sep>parser.add_argument('--nheads' default=8 type=int help="Number of attention heads inside the transformer's attentions")<line_sep>parser.add_argument('--num_queries' default=300 type=int help="Number of query slots")<line_sep>parser.add_argument('--dec_n_points' default=4 type=int)<line_sep>parser.add_argument('--enc_n_points' default=4 type=int)<line_sep>parser.add_argument('--rel_coord' default=<true> action='store_true')<line_sep># Segmentation parser.add_argument('--masks' action='store_true' help="Train segmentation head if the flag is provided")<line_sep>parser.add_argument('--mask_out_stride' default=4 type=int)<line_sep># Loss parser.add_argument('--no_aux_loss' dest='aux_loss' action='store_false' help="Disables auxiliary decoding losses (loss at each layer)")<line_sep># * Matcher parser.add_argument('--set_cost_class' default=1 
type=float help="Class coefficient in the matching cost")<line_sep>parser.add_argument('--set_cost_bbox' default=5 type=float help="L1 box coefficient in the matching cost")<line_sep>parser.add_argument('--set_cost_giou' default=2 type=float help="giou box coefficient in the matching cost")<line_sep># * Loss coefficients parser.add_argument('--mask_loss_coef' default=2 type=float)<line_sep>parser.add_argument('--dice_loss_coef' default=5 type=float)<line_sep>parser.add_argument('--cls_loss_coef' default=2 type=float)<line_sep>parser.add_argument('--bbox_loss_coef' default=5 type=float)<line_sep>parser.add_argument('--giou_loss_coef' default=2 type=float)<line_sep>parser.add_argument('--focal_alpha' default=0.25 type=float)<line_sep># dataset parameters parser.add_argument('--img_path' default='../ytvis/val/JPEGImages/')<line_sep>parser.add_argument('--ann_path' default='../ytvis/annotations/instances_val_sub.json')<line_sep>parser.add_argument('--save_path' default='results.json')<line_sep>parser.add_argument('--dataset_file' default='YoutubeVIS')<line_sep>parser.add_argument('--coco_path' type=str)<line_sep>parser.add_argument('--coco_panoptic_path' type=str)<line_sep>parser.add_argument('--remove_difficult' action='store_true')<line_sep>parser.add_argument('--output_dir' default='output_ytvos' help='path where to save, empty for no saving')<line_sep>parser.add_argument('--device' default='cuda' help='device to use for training / testing')<line_sep>parser.add_argument('--seed' default=42 type=int)<line_sep>parser.add_argument('--resume' default='' help='resume from checkpoint')<line_sep>parser.add_argument('--start_epoch' default=0 type=int metavar='N' help='start epoch')<line_sep>#parser.add_argument('--eval', action='store_true') parser.add_argument('--eval' action='store_false')<line_sep>parser.add_argument('--num_workers' default=0 type=int)<line_sep>parser.add_argument('--num_frames' default=1 type=int help='number of frames')<line_sep># distributed training parameters parser.add_argument('--world_size' default=1 type=int help='number of distributed processes')<line_sep>parser.add_argument('--dist_url' default='env://' help='url used to set up distributed training')<line_sep><return>parser<block_end>CLASSES=['person' 'giant_panda' 'lizard' 'parrot' 'skateboard' 'sedan' 'ape' 'dog' 'snake' 'monkey' 'hand' 'rabbit' 'duck' 'cat' 'cow' 'fish' 'train' 'horse' 'turtle' 'bear' 'motorbike' 'giraffe' 'leopard' 'fox' 'deer' 'owl' 'surfboard' 'airplane' 'truck' 'zebra' 'tiger' 'elephant' 'snowboard' 'boat' 'shark' 'mouse' 'frog' 'eagle' 'earless_seal' 'tennis_racket']<line_sep>transform=T.Compose([T.Resize(360) T.ToTensor() T.Normalize([0.485 0.456 0.406] [0.229 0.224 0.225])])<def_stmt>main args<block_start>device=torch.device(args.device)<line_sep># fix the seed for reproducibility seed=args.seed+utils.get_rank()<line_sep>torch.manual_seed(seed)<line_sep>np.random.seed(seed)<line_sep>random.seed(seed)<with_stmt>torch.no_grad()<block_start>model,criterion,postprocessors=build_model(args)<line_sep>model.to(device)<line_sep>state_dict=torch.load(args.model_path)['model']<line_sep>model.load_state_dict(state_dict)<line_sep>model.eval()<line_sep>folder=args.img_path<line_sep>videos=json.load(open(args.ann_path 'rb'))['videos']#[:5] # videos = [videos[1],videos[8],videos[22],videos[34]] vis_num=len(videos)<line_sep># postprocess = PostProcessSegm_ifc() result=[]<for_stmt>i range(vis_num)<block_start>print("Process video: " 
i)<line_sep>id_=videos[i]['id']<line_sep>vid_len=videos[i]['length']<line_sep>file_names=videos[i]['file_names']<line_sep>video_name_len=10<line_sep>pred_masks=<none><line_sep>pred_logits=<none><line_sep>img_set=[]<for_stmt>k range(vid_len)<block_start>im=Image.open(os.path.join(folder file_names[k]))<line_sep>w,h=im.size<line_sep>sizes=torch.as_tensor([int(h) int(w)])<line_sep>img_set.append(transform(im).unsqueeze(0).cuda())<block_end>img=torch.cat(img_set 0)<line_sep>model.detr.num_frames=vid_len<line_sep>outputs=model.inference(img img.shape[-1] img.shape[-2])<line_sep>logits=outputs['pred_logits'][0]<line_sep>output_mask=outputs['pred_masks'][0]<line_sep>output_boxes=outputs['pred_boxes'][0]<line_sep>H=output_mask.shape[-2]<line_sep>W=output_mask.shape[-1]<line_sep>scores=logits.sigmoid().cpu().detach().numpy()<line_sep>hit_dict={}<line_sep>topkv,indices10=torch.topk(logits.sigmoid().cpu().detach().flatten(0) k=10)<line_sep>indices10=indices10.tolist()<for_stmt>idx indices10<block_start>queryid=idx<floordiv>42<if_stmt>queryid<in>hit_dict.keys()<block_start>hit_dict[queryid].append(idx%42)<block_end><else_stmt><block_start>hit_dict[queryid]=[idx%42]<block_end><block_end><for_stmt>inst_id hit_dict.keys()<block_start>masks=output_mask[inst_id]<line_sep>pred_masks=F.interpolate(masks[: <none> : :] (im.size[1] im.size[0]) mode="bilinear")<line_sep>pred_masks=pred_masks.sigmoid().cpu().detach().numpy()<g>0.5#shape [100, 36, 720, 1280] <if_stmt>pred_masks.max()<eq>0<block_start>print('skip')<line_sep><continue><block_end><for_stmt>class_id hit_dict[inst_id]<block_start>category_id=class_id<line_sep>score=scores[inst_id class_id]<line_sep># print('name:',CLASSES[category_id-1],', score',score) instance={'video_id':id_ 'video_name':file_names[0][:video_name_len] 'score':float(score) 'category_id':int(category_id)}<line_sep>segmentation=[]<for_stmt>n range(vid_len)<block_start><if_stmt>score<l>0.001<block_start>segmentation.append(<none>)<block_end><else_stmt><block_start>mask=(pred_masks[n 0]).astype(np.uint8)<line_sep>rle=mask_util.encode(np.array(mask[: : np.newaxis] order='F'))[0]<line_sep>rle["counts"]=rle["counts"].decode("utf-8")<line_sep>segmentation.append(rle)<block_end><block_end>instance['segmentations']=segmentation<line_sep>result.append(instance)<block_end><block_end><block_end><block_end><with_stmt>open(args.save_path 'w' encoding='utf-8')<as>f<block_start>json.dump(result f)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(' inference script' parents=[get_args_parser()])<line_sep>args=parser.parse_args()<line_sep>main(args)<block_end>
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A place to put test of the replacers. """<import_stmt>os<import_stmt>unittest<import_from_stmt>gabbi case<import_from_stmt>gabbi exception<class_stmt>EnvironReplaceTest(unittest.TestCase)<block_start><def_stmt>test_environ_boolean self<block_start>"""Environment variables are always strings That doesn't always suit our purposes, so test that "True" and "False" become booleans as a special case. """<line_sep>http_case=case.HTTPTestCase('test_request')<line_sep>message="$ENVIRON['moo']"<line_sep>os.environ['moo']="True"<line_sep>self.assertEqual(<true> http_case._environ_replace(message))<line_sep>os.environ['moo']="False"<line_sep>self.assertEqual(<false> http_case._environ_replace(message))<line_sep>os.environ['moo']="true"<line_sep>self.assertEqual(<true> http_case._environ_replace(message))<line_sep>os.environ['moo']="faLse"<line_sep>self.assertEqual(<false> http_case._environ_replace(message))<line_sep>os.environ['moo']="null"<line_sep>self.assertEqual(<none> http_case._environ_replace(message))<line_sep>os.environ['moo']="1"<line_sep>self.assertEqual(1 http_case._environ_replace(message))<line_sep>os.environ['moo']="cow"<line_sep>self.assertEqual("cow" http_case._environ_replace(message))<line_sep>message='$ENVIRON["moo"]'<line_sep>os.environ['moo']="True"<line_sep>self.assertEqual(<true> http_case._environ_replace(message))<block_end><block_end><class_stmt>TestReplaceHeaders(unittest.TestCase)<block_start><def_stmt>test_empty_headers self<block_start>"""A None value in headers should cause a GabbiFormatError."""<line_sep>http_case=case.HTTPTestCase('test_request')<line_sep>self.assertRaises(exception.GabbiFormatError http_case._replace_headers_template 'foo' <none>)<block_end><block_end>
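# --- Added note: an illustrative re-implementation of the coercion behaviour exercised by
# test_environ_boolean above. This is NOT gabbi's actual _environ_replace (which also performs
# the $ENVIRON[...] substitution); it only sketches the string -> bool/None/int special-casing
# that the assertions rely on.
def coerce_env_value(value):
    specials = {"true": True, "false": False, "null": None}
    if value.lower() in specials:
        return specials[value.lower()]
    try:
        return int(value)
    except ValueError:
        return value            # plain strings pass through unchanged

assert coerce_env_value("True") is True
assert coerce_env_value("faLse") is False
assert coerce_env_value("null") is None
assert coerce_env_value("1") == 1
assert coerce_env_value("cow") == "cow"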
# coding=utf-8 <import_stmt>os<import_from_stmt>pprint pprint# noqa # Third party libraries <import_stmt>heroku3<line_sep># import socket # import httplib # import logging # httplib.HTTPConnection.debuglevel = 1 # logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests # logging.getLogger().setLevel(logging.INFO) # requests_log = logging.getLogger("requests.packages.urllib3") # requests_log.setLevel(logging.INFO) # requests_log.propagate = True HEROKU_API_KEY=os.environ.get("HEROKU_API_KEY" <false>)<line_sep>HEROKU_APPNAME=os.environ.get("HEROKU_APPNAME" <false>)<line_sep>TEST_EMAIL=os.environ.get("TEST_EMAIL" <false>)<line_sep>heroku_conn=heroku3.from_key(HEROKU_API_KEY)<line_sep># app = heroku_conn.create_app(name='testy2124app', stack_id_or_name='cedar', region_id_or_name='us') # print(app.addons()) # print(heroku_conn.addons('testy123app')) # for addon in app.addons(): # addon.delete() # del config['TEST1'] # del config['TEST2'] # del config['TEST3'] # del config['Z01'] # del config['Z02'] # print(config) # config['TEST1'] = u'MM1' # config['TEST2'] = u'MM2' # config['TEST3'] = u'MM3' # config2 = heroku_conn.update_appconfig('testy123app', {u'Z01': u'A1', u'Z02': u'A2'}) # config2 = config.update({u'Z01': u'A1', u'Z02': u'A2'}) # config3 = app.config() # print(config) # print("======") # print(config2) # print("======") # print(config3) # print(config['TEST1']) # print(config['TEST3']) # app = heroku_conn.app('kdsjhkszdjhgksjdhfkj') # procs = app.process_formation() # proc = app.process_formation()['web'] # print(proc.size) # print(proc.quantity) # print(procs) # proc.scale(0) # app.scale_formation_process('web', 1) # output = app.run_command('pgbackups:url') # collab = app.add_collaborator(email=TEST_EMAIL, silent=False) # collab = app.remove_collaborator(TEST_EMAIL) # print(newapp.collaborators()) # config = newapp.config() # config['TEST2'] = None # print(newapp.domains()) # domain2 = newapp.add_domain('testy123.testing.com') # print(newapp.domains()) # newapp.remove_domain('testy123.testing.com') # domain.remove() # print(newapp.domains()) # app = heroku_conn.app(HEROKU_APPNAME) # pprint(app.addons()) # dynos = app.dynos() # dyno = dynos['web.1'] # print(dyno) # releases = app.releases(sort='asc') # for release in releases: # print("{0} {1} {2} {3}".format(release.id, release.commit, release.user, release.description)) # releases = app.releases()._items.reverse() # print(releases.pop()) # print(releases.pop()) # app.rollback('v108') # apps = heroku_conn.apps(order_by='name', limit=1, sort='asc') # apps = heroku_conn.apps(order_by='name', limit=1) apps=heroku_conn.apps(order_by="name" sort="asc")<for_stmt>app apps<block_start>print(app.name)<block_end># app.rename('testy223') # print(app.enable_maintenance_mode()) # print(app.disable_maintenance_mode()) # app.enable_feature('user-env-compile') # app.disable_feature('user-env-compile') # print(app.labs()) # print(heroku_conn.features()) # domain = app.add_domain('test123-1.testing.com') # domain = app.add_domain('test123-2.testing.com') # domain = app.add_domain('test123-3.testing.com') # domain = app.add_domain('test123-4.testing.com') # domain = app.add_domain('test123-5.testing.com') # domain = app.add_domain('test123-6.testing.com') # domain = app.add_domain('test123-7.testing.com') # iterator = app.stream_log(lines=1) # for line in iterator: # filter out keep-alive new lines # if line: # print("{0}".format(line)) # logs = app.get_log(lines=100) # print(logs) # 
print(app.domains(limit=1)) # dyno = app.run_command('fab -l', printout=True) # dyno.remove() # proc = heroku_conn.apps()['testy123app'].process_formation()['web'] # print(proc.size) # print(proc.quantity) # formations = app.process_formation() # print(formations['web']) # for formation in formations: # formation.resize(1) # print(app._h._last_request_id) # print(app.dynos()['web.1']) # print(dynos['web.1']) # print(heroku_conn.apps()['testy123app']) # print(heroku_conn.apps()['d32b74d8-f5cf-4e3e-95dd-a601668fdb0c']) # for dyno in app.dynos(): # print(dyno) # print(dyno.command) # dyno.restart() # app.restart() # del config['TEST2'] # newapp.remove_collaborator('<EMAIL>') # collab.remove() # pprint(newapp.addons) # app = heroku_conn.app('testy123app') # for addon in app.addons: # print(addon.app.name, " - ", addon.plan.name) # addons = heroku_conn.addon_services() # pprint(addons) # pg_addon = heroku_conn.addon_services('6235c964-8b3c-47e0-952f-8d8f6a2d53f5') # pg_addon = heroku_conn.addon_services(id_or_name='heroku-postgresql') # pprint(pg_addon) # for addon in addons: # print(addon.name, " - ", addon.id, " - ", addon.id, " - ", addon.price) # addon.upgrade(plan_id_or_name='heroku-postgresql:basic') # addon.delete() # buildpack_urls = [ # 'https://github.com/some/buildpack', 'https://github.com/another/buildpack' # ] # app.update_buildpacks([buildpack_urls]) # buildpack_urls can also be empty. This clears all buildpacks: # app.update_buildpacks([]) # app.delete() print(heroku_conn._last_request_id)<line_sep>
"""3. Extracting video features from pre-trained models ======================================================= Feature extraction is a very useful tool when you don't have large annotated dataset or don't have the computing resources to train a model from scratch for your use case. It's also useful to visualize what the model have learned. In this tutorial, we provide a simple unified solution. The only thing you need to prepare is a text file containing the information of your videos (e.g., the path to your videos), we will take care of the rest. You can extract strong video features from many popular pre-trained models in the GluonCV video model zoo using a single command line. .. note:: Feel free to skip the tutorial because the feature extraction script is self-complete and ready to launch. :download:`Download Full Python Script: feat_extract_pytorch.py<../../../scripts/action-recognition/feat_extract_pytorch.py>` Please checkout the `model_zoo <../model_zoo/index.html#action_recognition>`_ to select your preferred pretrained model. ``python feat_extract_pytorch.py --config-file CONFIG`` """<line_sep>###################################################################### # Prepare Data # ------------ # # Your data can be stored in any hierarchy. # Just use the format we adopt for training models in the previous tutorial and save the data annotation file as ``video.txt``. # :: # # /home/ubuntu/your_data/video_001.mp4 200 0 # /home/ubuntu/your_data/video_001.mp4 300 1 # /home/ubuntu/your_data/video_002.mp4 100 2 # /home/ubuntu/your_data/video_003.mp4 400 2 # /home/ubuntu/your_data/video_004.mp4 200 1 # ...... # /home/ubuntu/your_data/video_100.mp4.100 3 # # Each line has three things, the path to each video, the number of video frames and the video label. # However, the second and third things are not gonna used in the code, they are just a placeholder. # So you can put any postive number in these two places. # # Note that, at this moment, we only support extracting features from videos directly. ###################################################################### # Once you prepare the ``video.txt``, you can start extracting feature by: # # :: # # python feat_extract_pytorch.py --config-file ./scripts/action-recognition/configuration/i3d_resnet50_v1_feat.yaml ###################################################################### # The extracted features will be saved to a directory defined in the config file. Each video will have one feature file. # For example, ``video_001.mp4`` will have a feature named ``i3d_resnet50_v1_kinetics400_video_001_feat.npy``. # The feature is extracted from the center of the video by using a 32-frames clip. 
###################################################################### # There are many other options and other models you can choose, # e.g., `resnet50_v1b_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/resnet50_v1b_feat.yaml>`_, # `slowfast_4x16_resnet50_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/slowfast_4x16_resnet50_feat.yaml>`_, # `tpn_resnet50_f32s2_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/tpn_resnet50_f32s2_feat.yaml>`_, # `r2plus1d_v1_resnet50_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/r2plus1d_v1_resnet50_feat.yaml>`_, # `i3d_slow_resnet50_f32s2_feat.yaml <https://raw.githubusercontent.com/dmlc/gluon-cv/master/scripts/action-recognition/configuration/i3d_slow_resnet50_f32s2_feat.yaml>`_. # Try extracting features from these SOTA video models on your own dataset and see which one performs better.
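# --- Added sketch of the surrounding workflow: writing the ``video.txt`` list programmatically
# and loading one extracted feature afterwards. The video directory, the ``features/`` output
# folder and the printed shape are assumptions for illustration; the real output location and
# feature shape depend on the model chosen in the config file.
import os
import numpy as np

video_dir = '/home/ubuntu/your_data'                        # assumed location of your videos
with open('video.txt', 'w') as f:
    for name in sorted(os.listdir(video_dir)):
        if name.endswith('.mp4'):
            # the frame count and label are placeholders; any positive number is accepted
            f.write('%s 100 0\n' % os.path.join(video_dir, name))

# after running feat_extract_pytorch.py with your config:
feat = np.load('features/i3d_resnet50_v1_kinetics400_video_001_feat.npy')
print(feat.shape)                                           # e.g. a (1, 2048)-like feature vector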
<import_from_stmt>typing Any List Dict Union Optional<import_stmt>time<import_stmt>gym<import_stmt>gym_hybrid<import_stmt>copy<import_stmt>numpy<as>np<import_from_stmt>easydict EasyDict<import_from_stmt>ding.envs BaseEnv BaseEnvTimestep BaseEnvInfo<import_from_stmt>ding.envs.common EnvElementInfo affine_transform<import_from_stmt>ding.torch_utils to_ndarray to_list<import_from_stmt>ding.utils ENV_REGISTRY<line_sep>@ENV_REGISTRY.register('gym_hybrid')<class_stmt>GymHybridEnv(BaseEnv)<block_start>default_env_id=['Sliding-v0' 'Moving-v0']<def_stmt>__init__ self cfg:EasyDict<arrow><none><block_start>self._cfg=cfg<line_sep>self._env_id=cfg.env_id<assert_stmt>self._env_id<in>self.default_env_id<line_sep>self._act_scale=cfg.act_scale<line_sep>self._init_flag=<false><line_sep>self._replay_path=<none><block_end><def_stmt>reset self<arrow>np.ndarray<block_start><if_stmt><not>self._init_flag<block_start>self._env=gym.make(self._env_id)<if_stmt>self._replay_path<is><not><none><block_start>self._env=gym.wrappers.Monitor(self._env self._replay_path video_callable=<lambda>episode_id:<true> force=<true>)<line_sep>self._env.metadata["render.modes"]=["human" "rgb_array"]<block_end>self._init_flag=<true><block_end><if_stmt>hasattr(self '_seed')<and>hasattr(self '_dynamic_seed')<and>self._dynamic_seed<block_start>np_seed=100<times>np.random.randint(1 1000)<line_sep>self._env.seed(self._seed+np_seed)<block_end><elif_stmt>hasattr(self '_seed')<block_start>self._env.seed(self._seed)<block_end>self._final_eval_reward=0<line_sep>obs=self._env.reset()<line_sep>obs=to_ndarray(obs).astype(np.float32)<line_sep><return>obs<block_end><def_stmt>close self<arrow><none><block_start><if_stmt>self._init_flag<block_start>self._env.close()<block_end>self._init_flag=<false><block_end><def_stmt>seed self seed:int dynamic_seed:bool=<true><arrow><none><block_start>self._seed=seed<line_sep>self._dynamic_seed=dynamic_seed<line_sep>np.random.seed(self._seed)<block_end><def_stmt>step self action:Dict<arrow>BaseEnvTimestep<block_start><if_stmt>self._act_scale# acceleration_value. <block_start>action['action_args'][0]=affine_transform(action['action_args'][0] min_val=0 max_val=1)<line_sep># rotation_value. 
Following line can be omitted, because in the affine_transform function, # we have already done the clip(-1,1) operation action['action_args'][1]=affine_transform(action['action_args'][1] min_val=-1 max_val=1)<line_sep>action=[action['action_type'] action['action_args']]<block_end>obs,rew,done,info=self._env.step(action)<line_sep>self._final_eval_reward<augadd>rew<if_stmt>done<block_start>info['final_eval_reward']=self._final_eval_reward<block_end>obs=to_ndarray(obs)<if_stmt>isinstance(obs list)# corner case <block_start><for_stmt>i range(len(obs))<block_start><if_stmt>len(obs[i].shape)<eq>0<block_start>obs[i]=np.array([obs[i]])<block_end><block_end>obs=np.concatenate(obs)<block_end><assert_stmt>isinstance(obs np.ndarray)<and>obs.shape<eq>(10 )<line_sep>obs=obs.astype(np.float32)<line_sep>rew=to_ndarray([rew])# wrapped to be transfered to a numpy array with shape (1,) <if_stmt>isinstance(rew list)<block_start>rew=rew[0]<block_end><assert_stmt>isinstance(rew np.ndarray)<and>rew.shape<eq>(1 )<line_sep>info['action_args_mask']=np.array([[1 0] [0 1] [0 0]])<line_sep><return>BaseEnvTimestep(obs rew done info)<block_end><def_stmt>get_random_action self<arrow>Dict# action_type: 0, 1, 2 # action_args: # - acceleration_value: [0, 1] # - rotation_value: [-1, 1] <block_start>raw_action=self._env.action_space.sample()<line_sep><return>{'action_type':raw_action[0] 'action_args':raw_action[1]}<block_end><def_stmt>info self<arrow>BaseEnvInfo<block_start>T=EnvElementInfo<line_sep><return>BaseEnvInfo(agent_num=1 obs_space=T((10 ) {'min':-1 'max':2 'dtype':np.float32 } ) # [min, max) act_space=T((3 ) {'min':0 'max':3 'dtype':int } ) rew_space=T((1 ) {'min':-1.0 'max':1.0} ) use_wrappers=<none> )<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>"DI-engine gym hybrid Env"<block_end><def_stmt>enable_save_replay self replay_path:Optional[str]=<none><arrow><none><block_start><if_stmt>replay_path<is><none><block_start>replay_path='./video'<block_end>self._replay_path=replay_path<block_end><block_end>
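# --- Added sketch of what the act_scale branch in step() above does to a raw hybrid action.
# affine_transform_sketch is an illustrative re-implementation, not DI-engine's own
# affine_transform; it assumes the policy emits continuous parameters in [-1, 1], which are
# clipped and then mapped onto each parameter's own range. The raw action values below are
# made up for illustration.
import numpy as np

def affine_transform_sketch(x, min_val, max_val):
    x = np.clip(x, -1, 1)
    return (x + 1) / 2 * (max_val - min_val) + min_val

raw_action = {'action_type': 0,                            # discrete action id (one of 3)
              'action_args': np.array([-0.2, 0.7])}        # (acceleration, rotation) in [-1, 1]

acceleration = affine_transform_sketch(raw_action['action_args'][0], 0, 1)   # mapped to [0, 1]
rotation = affine_transform_sketch(raw_action['action_args'][1], -1, 1)      # stays in [-1, 1]
print([raw_action['action_type'], [acceleration, rotation]])                 # the list format passed to env.step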
<import_stmt>numpy<as>np<import_stmt>joblib<import_from_stmt>lithops.util.joblib register_lithops<import_from_stmt>lithops.utils setup_lithops_logger<import_from_stmt>sklearn.datasets load_digits<import_from_stmt>sklearn.model_selection RandomizedSearchCV<import_from_stmt>sklearn.svm SVC<line_sep>digits=load_digits()<line_sep>param_space={'C':np.logspace(-6 6 30) 'gamma':np.logspace(-8 8 30) 'tol':np.logspace(-4 -1 30) 'class_weight':[<none> 'balanced'] }<line_sep>model=SVC(kernel='rbf')<line_sep>search=RandomizedSearchCV(model param_space cv=2 n_iter=50 verbose=10)<line_sep>register_lithops()<line_sep>setup_lithops_logger('INFO')<with_stmt>joblib.parallel_backend('lithops')<block_start>search.fit(digits.data digits.target)<block_end>
<import_stmt>pytest<import_from_stmt>pycox.models BCESurv<import_stmt>torchtuples<as>tt<import_from_stmt>utils_model_testing make_dataset fit_model assert_survs<line_sep>@pytest.mark.parametrize('numpy' [<true> <false>])@pytest.mark.parametrize('num_durations' [2 5])<def_stmt>test_pmf_runs numpy num_durations<block_start>data=make_dataset(<true>)<line_sep>input,target=data<line_sep>labtrans=BCESurv.label_transform(num_durations)<line_sep>target=labtrans.fit_transform(*target)<line_sep>data=tt.tuplefy(input target)<if_stmt><not>numpy<block_start>data=data.to_tensor()<block_end>net=tt.practical.MLPVanilla(input.shape[1] [4] labtrans.out_features)<line_sep>model=BCESurv(net)<line_sep>fit_model(data model)<line_sep>assert_survs(input model)<line_sep>model.duration_index=labtrans.cuts<line_sep>assert_survs(input model)<line_sep>cdi=model.interpolate(3 'const_pdf')<line_sep>assert_survs(input cdi)<block_end>
# pylint: disable=unused-import <import_stmt>os<import_stmt>tempfile<import_stmt>docker<import_stmt>kubernetes<import_stmt>pytest<import_from_stmt>dagster.core.instance DagsterInstance<import_from_stmt>dagster_k8s.launcher K8sRunLauncher<import_from_stmt>dagster_k8s_test_infra.cluster dagster_instance_for_k8s_run_launcher define_cluster_provider_fixture helm_postgres_url_for_k8s_run_launcher <import_from_stmt>dagster_k8s_test_infra.helm TEST_AWS_CONFIGMAP_NAME TEST_IMAGE_PULL_SECRET_NAME TEST_SECRET_NAME TEST_VOLUME_CONFIGMAP_NAME <import_from_stmt>dagster_k8s_test_infra.integration_utils image_pull_policy<import_from_stmt>dagster_test.test_project build_and_tag_test_image get_test_project_docker_image<line_sep>pytest_plugins=["dagster_k8s_test_infra.helm"]<line_sep>IS_BUILDKITE=os.getenv("BUILDKITE")<is><not><none><line_sep>@pytest.fixture(scope="session" autouse=<true>)<def_stmt>dagster_home <block_start>old_env=os.getenv("DAGSTER_HOME")<line_sep>os.environ["DAGSTER_HOME"]="/opt/dagster/dagster_home"<line_sep><yield><if_stmt>old_env<is><not><none><block_start>os.environ["DAGSTER_HOME"]=old_env<block_end><block_end>cluster_provider=define_cluster_provider_fixture(additional_kind_images=["docker.io/bitnami/rabbitmq" "docker.io/bitnami/postgresql"])<line_sep>@pytest.yield_fixture<def_stmt>schedule_tempdir <block_start><with_stmt>tempfile.TemporaryDirectory()<as>tempdir<block_start><yield>tempdir<block_end><block_end>@pytest.fixture()<def_stmt>run_launcher cluster_provider helm_namespace_for_k8s_run_launcher# pylint: disable=redefined-outer-name,unused-argument <block_start><return>K8sRunLauncher(image_pull_secrets=[{"name":TEST_IMAGE_PULL_SECRET_NAME}] service_account_name="dagit-admin" instance_config_map="dagster-instance" postgres_password_secret="<PASSWORD>" dagster_home="/opt/dagster/dagster_home" job_image=get_test_project_docker_image() load_incluster_config=<false> kubeconfig_file=cluster_provider.kubeconfig_file image_pull_policy=image_pull_policy() job_namespace=helm_namespace_for_k8s_run_launcher env_config_maps=["dagster-pipeline-env" "test-env-configmap"]+([TEST_AWS_CONFIGMAP_NAME]<if><not>IS_BUILDKITE<else>[]) env_secrets=["test-env-secret"] volume_mounts=[{"name":"test-volume" "mountPath":"/opt/dagster/test_mount_path/volume_mounted_file.yaml" "subPath":"volume_mounted_file.yaml" }] volumes=[{"name":"test-volume" "configMap":{"name":TEST_VOLUME_CONFIGMAP_NAME}}] )<block_end>@pytest.fixture(scope="session")<def_stmt>dagster_docker_image <block_start>docker_image=get_test_project_docker_image()<if_stmt><not>IS_BUILDKITE<block_start><try_stmt><block_start>client=docker.from_env()<line_sep>client.images.get(docker_image)<line_sep>print(# pylint: disable=print-call "Found existing image tagged {image}, skipping image build. To rebuild, first run: "<concat>"docker rmi {image}".format(image=docker_image))<block_end><except_stmt>docker.errors.ImageNotFound<block_start>build_and_tag_test_image(docker_image)<block_end><block_end><return>docker_image<block_end># See: https://stackoverflow.com/a/31526934/324449 <def_stmt>pytest_addoption parser# We catch the ValueError to support cases where we are loading multiple test suites, e.g., in # the VSCode test explorer. When pytest tries to add an option twice, we get, e.g. # # ValueError: option names {'--cluster-provider'} already added # Use kind or some other cluster provider? 
<block_start><try_stmt><block_start>parser.addoption("--cluster-provider" action="store" default="kind")<block_end><except_stmt>ValueError<block_start><pass><block_end># Specify an existing kind cluster name to use <try_stmt><block_start>parser.addoption("--kind-cluster" action="store")<block_end><except_stmt>ValueError<block_start><pass><block_end># Keep resources around after tests are done <try_stmt><block_start>parser.addoption("--no-cleanup" action="store_true" default=<false>)<block_end><except_stmt>ValueError<block_start><pass><block_end># Use existing Helm chart/namespace <try_stmt><block_start>parser.addoption("--existing-helm-namespace" action="store")<block_end><except_stmt>ValueError<block_start><pass><block_end><block_end>
<import_from_future_stmt> absolute_import division print_function<def_stmt>exercise_oset <block_start><import_from_stmt>libtbx.containers OrderedSet<as>oset<line_sep>o=oset()<assert_stmt>repr(o)<eq>"OrderedSet()"<assert_stmt>len(o)<eq>0<line_sep>o=oset([3 5 2 5 4 2 1])<assert_stmt>list(o)<eq>[3 5 2 4 1]<assert_stmt>3<in>o<assert_stmt>6<not><in>o<line_sep>o.add(3)<assert_stmt>len(o)<eq>5<line_sep>o.add(6)<assert_stmt>6<in>o<assert_stmt>list(reversed(o))<eq>[6 1 4 2 5 3]<assert_stmt>o.pop()<eq>6<assert_stmt>len(o)<eq>5<assert_stmt>o.pop(last=<false>)<eq>3<assert_stmt>len(o)<eq>4<assert_stmt>repr(o)<eq>"OrderedSet([5, 2, 4, 1])"<assert_stmt>o<eq>oset([5 2 4 1])<assert_stmt>o<ne>oset([5 4 2 1])<assert_stmt>o<eq>set([5 2 4 1])<assert_stmt>o<eq>set([5 4 2 1])<line_sep>o1=oset([6 5 4 3 2 1])<line_sep>o2=o1-o<assert_stmt>o2<eq>oset([6 3])<block_end><def_stmt>exercise_odict <block_start><import_from_stmt>libtbx.containers OrderedDict<as>odict<line_sep>d=odict([('banana' 3) ('apple' 4) ('pear' 1)])<line_sep>d.setdefault('orange' 2)<assert_stmt>'orange'<in>d<assert_stmt>d['orange']<eq>2<assert_stmt>list(d.keys())<eq>['banana' 'apple' 'pear' 'orange']<assert_stmt>list(d.values())<eq>[3 4 1 2]<line_sep>d=odict.fromkeys(('b' 'c' 'a'))<assert_stmt>list(d.keys())<eq>['b' 'c' 'a']<block_end><def_stmt>run args<block_start><assert_stmt>len(args)<eq>0<line_sep>exercise_oset()<line_sep>exercise_odict()<line_sep>print("OK")<block_end><if_stmt>(__name__<eq>"__main__")<block_start><import_stmt>sys<line_sep>run(args=sys.argv[1:])<block_end>
"""Test module of the images."""<import_stmt>unittest<import_from_stmt>books Books<import_stmt>fnmatch<import_stmt>os<import_stmt>re<import_stmt>sys<class_stmt>TestImages(unittest.TestCase)<block_start>"""Unit test of the images."""<def_stmt>test_images_are_valid self<block_start>"""Test that the MD files refer to valid URLs."""<line_sep>books=Books()<for_stmt>book books.books<block_start><for_stmt>md_path book.md_paths<block_start>args={}<if>sys.version_info[0]<l>3<else>{'encoding':'utf-8'}<with_stmt>open(md_path **args)<as>f<block_start>content=f.read()<block_end><for_stmt>match re.finditer(r"!\[(.*?)\]\((.*?)\)" content)# remove parameters <block_start>is_youtube_video=match.group(1)<eq>"youtube video"<line_sep>image_ref=match.group(2).split(' ')[0]<if_stmt><not>is_youtube_video<and><not>image_ref.startswith('http')<block_start>image_path=os.path.join(book.path image_ref)<line_sep>self.assertTrue(os.path.isfile(image_path) msg='%s: "%s" not found'%(md_path image_path))<block_end><block_end><block_end><block_end><block_end><def_stmt>test_all_images_are_used self<block_start>"""Test that all the image files are referenced somewhere."""<line_sep>books=Books()<for_stmt>book books.books# search for all images <block_start>images_paths=[]# ['image/sonar.png', 'image/sphere.png', ...] <for_stmt>root,dirnames,filenames os.walk(book.path)<block_start><if_stmt>'scenes'<in>root.replace(books.project_path '')<block_start><continue><block_end><for_stmt>filename fnmatch.filter(filenames '*.png')+fnmatch.filter(filenames '*.jpg')<block_start>image_path=os.path.join(root filename)<line_sep>image_path=image_path[(len(book.path)+1):]<line_sep>images_paths.append(image_path.replace('\\' '/'))<block_end><block_end>self.assertGreater(len(images_paths) 0 msg='No image found in book "%s"'%book.name)<line_sep># check the image reference can be found in at least one MD file <for_stmt>image_path images_paths<block_start>found=<false><for_stmt>md_path book.md_paths<block_start>args={}<if>sys.version_info[0]<l>3<else>{'encoding':'utf-8'}<with_stmt>open(md_path **args)<as>file<block_start><if_stmt>(image_path<in>file.read()<or>image_path.replace('.png' '.thumbnail.jpg')<in>images_paths<or>image_path.replace('.png' '.thumbnail.png')<in>images_paths)<block_start>found=<true><line_sep><break><block_end><block_end><block_end>self.assertTrue(found msg='Image "%s" not referenced in any MD file.'%image_path)<line_sep># in case of thumbnail make sure the original file is available <if_stmt>image_path.endswith('.thumbnail.jpg')<block_start>self.assertTrue(image_path.replace('.thumbnail.jpg' '.png')<in>images_paths msg='Missing original file for thumbnail "%s".'%image_path)<block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>scipy.fftpack<import_stmt>scipy.signal<import_from_stmt>matplotlib pyplot<line_sep># from eulerian_magnification.io import play_vid_data <import_from_stmt>eulerian_magnification.pyramid create_laplacian_video_pyramid collapse_laplacian_video_pyramid<import_from_stmt>eulerian_magnification.transforms temporal_bandpass_filter<def_stmt>eulerian_magnification vid_data fps freq_min freq_max amplification pyramid_levels=4 skip_levels_at_top=2<block_start>vid_pyramid=create_laplacian_video_pyramid(vid_data pyramid_levels=pyramid_levels)<for_stmt>i,vid enumerate(vid_pyramid)<block_start><if_stmt>i<l>skip_levels_at_top<or>i<ge>len(vid_pyramid)-1# ignore the top and bottom of the pyramid. One end has too much noise and the other end is the # gaussian representation <block_start><continue><block_end>bandpassed=temporal_bandpass_filter(vid fps freq_min=freq_min freq_max=freq_max amplification_factor=amplification)<line_sep># play_vid_data(bandpassed) vid_pyramid[i]<augadd>bandpassed<line_sep># play_vid_data(vid_pyramid[i]) <block_end>vid_data=collapse_laplacian_video_pyramid(vid_pyramid)<line_sep><return>vid_data<block_end><def_stmt>show_frequencies vid_data fps bounds=<none><block_start>"""Graph the average value of the video as well as the frequency strength"""<line_sep>averages=[]<if_stmt>bounds<block_start><for_stmt>x range(1 vid_data.shape[0]-1)<block_start>averages.append(vid_data[x bounds[2]:bounds[3] bounds[0]:bounds[1] :].sum())<block_end><block_end><else_stmt><block_start><for_stmt>x range(1 vid_data.shape[0]-1)<block_start>averages.append(vid_data[x : : :].sum())<block_end><block_end>averages=averages-min(averages)<line_sep>charts_x=1<line_sep>charts_y=2<line_sep>pyplot.figure(figsize=(20 10))<line_sep>pyplot.subplots_adjust(hspace=.7)<line_sep>pyplot.subplot(charts_y charts_x 1)<line_sep>pyplot.title("Pixel Average")<line_sep>pyplot.xlabel("Time")<line_sep>pyplot.ylabel("Brightness")<line_sep>pyplot.plot(averages)<line_sep>freqs=scipy.fftpack.fftfreq(len(averages) d=1.0/fps)<line_sep>fft=abs(scipy.fftpack.fft(averages))<line_sep>idx=np.argsort(freqs)<line_sep>pyplot.subplot(charts_y charts_x 2)<line_sep>pyplot.title("FFT")<line_sep>pyplot.xlabel("Freq (Hz)")<line_sep>freqs=freqs[idx]<line_sep>fft=fft[idx]<line_sep>freqs=freqs[len(freqs)<floordiv>2+1:]<line_sep>fft=fft[len(fft)<floordiv>2+1:]<line_sep>pyplot.plot(freqs abs(fft))<line_sep>pyplot.show()<block_end><def_stmt>gaussian_video video shrink_multiple<block_start>"""Create a gaussian representation of a video"""<line_sep>vid_data=<none><for_stmt>x range(0 video.shape[0])<block_start>frame=video[x]<line_sep>gauss_copy=np.ndarray(shape=frame.shape dtype="float")<line_sep>gauss_copy[:]=frame<for_stmt>i range(shrink_multiple)<block_start>gauss_copy=cv2.pyrDown(gauss_copy)<block_end><if_stmt>x<eq>0<block_start>vid_data=np.zeros((video.shape[0] gauss_copy.shape[0] gauss_copy.shape[1] 3))<block_end>vid_data[x]=gauss_copy<block_end><return>vid_data<block_end><def_stmt>laplacian_video video shrink_multiple<block_start>vid_data=<none><line_sep>frame_count,height,width,colors=video.shape<for_stmt>i,frame enumerate(video)<block_start>gauss_copy=np.ndarray(shape=frame.shape dtype="float")<line_sep>gauss_copy[:]=frame<for_stmt>_ range(shrink_multiple)<block_start>prev_copy=gauss_copy[:]<line_sep>gauss_copy=cv2.pyrDown(gauss_copy)<block_end>laplacian=prev_copy-cv2.pyrUp(gauss_copy)<if_stmt>vid_data<is><none><block_start>vid_data=np.zeros((frame_count laplacian.shape[0] laplacian.shape[1] 
3))<block_end>vid_data[i]=laplacian<block_end><return>vid_data<block_end><def_stmt>combine_pyramid_and_save g_video orig_video enlarge_multiple fps save_filename='media/output.avi'<block_start>"""Combine a gaussian video representation with the original and save to file"""<line_sep>width,height=get_frame_dimensions(orig_video[0])<line_sep>fourcc=cv2.VideoWriter_fourcc(*'MJPG')<line_sep>print("Outputting to %s"%save_filename)<line_sep>writer=cv2.VideoWriter(save_filename fourcc fps (width height) 1)<for_stmt>x range(0 g_video.shape[0])<block_start>img=np.ndarray(shape=g_video[x].shape dtype='float')<line_sep>img[:]=g_video[x]<for_stmt>i range(enlarge_multiple)<block_start>img=cv2.pyrUp(img)<block_end>img[:height :width]=img[:height :width]+orig_video[x]<line_sep>res=cv2.convertScaleAbs(img[:height :width])<line_sep>writer.write(res)<block_end><block_end><def_stmt>get_frame_dimensions frame<block_start>"""Get the dimensions of a single frame"""<line_sep>height,width=frame.shape[:2]<line_sep><return>width height<block_end><def_stmt>butter_bandpass lowcut highcut fs order=5<block_start>nyq=0.5<times>fs<line_sep>low=lowcut/nyq<line_sep>high=highcut/nyq<line_sep>b,a=scipy.signal.butter(order [low high] btype='band')<line_sep><return>b a<block_end><def_stmt>butter_bandpass_filter data lowcut highcut fs order=5<block_start>b,a=butter_bandpass(lowcut highcut fs order=order)<line_sep>y=scipy.signal.lfilter(b a data axis=0)<line_sep><return>y<block_end>
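# --- Added sanity check for the Butterworth band-pass helpers defined above, run on a synthetic
# 1-D signal instead of a video: a 1 Hz "pulse" mixed with a stronger 10 Hz component, sampled
# at 30 fps. The frequencies and amplitudes are made-up illustration values; only numpy
# (already imported in this module) is needed.
def _bandpass_demo():
    fs = 30.0                                       # frames per second
    t = np.arange(0, 10, 1.0 / fs)
    sig = np.sin(2 * np.pi * 1.0 * t) + 2.0 * np.sin(2 * np.pi * 10.0 * t)
    filtered = butter_bandpass_filter(sig, lowcut=0.8, highcut=1.2, fs=fs, order=3)
    freqs = np.fft.rfftfreq(len(sig), d=1.0 / fs)
    dominant_raw = freqs[np.argmax(np.abs(np.fft.rfft(sig)))]
    dominant_filtered = freqs[np.argmax(np.abs(np.fft.rfft(filtered)))]
    # the 10 Hz component dominates the raw signal but is strongly attenuated by the filter
    print(dominant_raw, dominant_filtered)          # ~10.0 Hz before filtering, ~1.0 Hz after

if __name__ == "__main__":
    _bandpass_demo()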
<import_from_stmt>. localpaths<as>_localpaths# noqa: F401
<import_stmt>os<import_stmt>time<import_stmt>json<import_stmt>datetime<as>datetime<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_stmt>torch.distributed<as>dist<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torchvision transforms<import_from_stmt>dataloaders.train_datasets DAVIS2017_Train YOUTUBEVOS_Train StaticTrain TEST<import_stmt>dataloaders.video_transforms<as>tr<import_from_stmt>utils.meters AverageMeter<import_from_stmt>utils.image label2colormap masked_image save_image<import_from_stmt>utils.checkpoint load_network_and_optimizer load_network save_network<import_from_stmt>utils.learning adjust_learning_rate get_trainable_params<import_from_stmt>utils.metric pytorch_iou<import_from_stmt>utils.ema ExponentialMovingAverage get_param_buffer_for_ema<import_from_stmt>networks.models build_vos_model<import_from_stmt>networks.engines build_engine<class_stmt>Trainer(object)<block_start><def_stmt>__init__ self rank cfg enable_amp=<true><block_start>self.gpu=rank+cfg.DIST_START_GPU<line_sep>self.gpu_num=cfg.TRAIN_GPUS<line_sep>self.rank=rank<line_sep>self.cfg=cfg<line_sep>self.print_log("Exp {}:".format(cfg.EXP_NAME))<line_sep>self.print_log(json.dumps(cfg.__dict__ indent=4 sort_keys=<true>))<line_sep>print("Use GPU {} for training VOS.".format(self.gpu))<line_sep>torch.cuda.set_device(self.gpu)<line_sep>torch.backends.cudnn.benchmark=<true><if>cfg.DATA_RANDOMCROP[0]<eq>cfg.DATA_RANDOMCROP[1]<and>'swin'<not><in>cfg.MODEL_ENCODER<else><false><line_sep>self.print_log('Build VOS model.')<line_sep>self.model=build_vos_model(cfg.MODEL_VOS cfg).cuda(self.gpu)<line_sep>self.model_encoder=self.model.encoder<line_sep>self.engine=build_engine(cfg.MODEL_ENGINE 'train' aot_model=self.model gpu_id=self.gpu long_term_mem_gap=cfg.TRAIN_LONG_TERM_MEM_GAP)<if_stmt>cfg.MODEL_FREEZE_BACKBONE<block_start><for_stmt>param self.model_encoder.parameters()<block_start>param.requires_grad=<false><block_end><block_end><if_stmt>cfg.DIST_ENABLE<block_start>dist.init_process_group(backend=cfg.DIST_BACKEND init_method=cfg.DIST_URL world_size=cfg.TRAIN_GPUS rank=rank timeout=datetime.timedelta(seconds=300))<line_sep>self.model.encoder=nn.SyncBatchNorm.convert_sync_batchnorm(self.model.encoder).cuda(self.gpu)<line_sep>self.dist_engine=torch.nn.parallel.DistributedDataParallel(self.engine device_ids=[self.gpu] output_device=self.gpu find_unused_parameters=<true> broadcast_buffers=<false>)<block_end><else_stmt><block_start>self.dist_engine=self.engine<block_end>self.use_frozen_bn=<false><if_stmt>'swin'<in>cfg.MODEL_ENCODER<block_start>self.print_log('Use LN in Encoder!')<block_end><elif_stmt><not>cfg.MODEL_FREEZE_BN<block_start><if_stmt>cfg.DIST_ENABLE<block_start>self.print_log('Use Sync BN in Encoder!')<block_end><else_stmt><block_start>self.print_log('Use BN in Encoder!')<block_end><block_end><else_stmt><block_start>self.use_frozen_bn=<true><line_sep>self.print_log('Use Frozen BN in Encoder!')<block_end><if_stmt>self.rank<eq>0<block_start><try_stmt><block_start>total_steps=float(cfg.TRAIN_TOTAL_STEPS)<line_sep>ema_decay=1.-1./(total_steps<times>cfg.TRAIN_EMA_RATIO)<line_sep>self.ema_params=get_param_buffer_for_ema(self.model update_buffer=(<not>cfg.MODEL_FREEZE_BN))<line_sep>self.ema=ExponentialMovingAverage(self.ema_params decay=ema_decay)<line_sep>self.ema_dir=cfg.DIR_EMA_CKPT<block_end><except_stmt>Exception<as>inst<block_start>self.print_log(inst)<line_sep>self.print_log('Error: failed to create EMA 
model!')<block_end><block_end>self.print_log('Build optimizer.')<line_sep>trainable_params=get_trainable_params(model=self.dist_engine base_lr=cfg.TRAIN_LR use_frozen_bn=self.use_frozen_bn weight_decay=cfg.TRAIN_WEIGHT_DECAY exclusive_wd_dict=cfg.TRAIN_WEIGHT_DECAY_EXCLUSIVE no_wd_keys=cfg.TRAIN_WEIGHT_DECAY_EXEMPTION)<if_stmt>cfg.TRAIN_OPT<eq>'sgd'<block_start>self.optimizer=optim.SGD(trainable_params lr=cfg.TRAIN_LR momentum=cfg.TRAIN_SGD_MOMENTUM nesterov=<true>)<block_end><else_stmt><block_start>self.optimizer=optim.AdamW(trainable_params lr=cfg.TRAIN_LR weight_decay=cfg.TRAIN_WEIGHT_DECAY)<block_end>self.enable_amp=enable_amp<if_stmt>enable_amp<block_start>self.scaler=torch.cuda.amp.GradScaler()<block_end><else_stmt><block_start>self.scaler=<none><block_end>self.prepare_dataset()<line_sep>self.process_pretrained_model()<if_stmt>cfg.TRAIN_TBLOG<and>self.rank<eq>0<block_start><import_from_stmt>tensorboardX SummaryWriter<line_sep>self.tblogger=SummaryWriter(cfg.DIR_TB_LOG)<block_end><block_end><def_stmt>process_pretrained_model self<block_start>cfg=self.cfg<line_sep>self.step=cfg.TRAIN_START_STEP<line_sep>self.epoch=0<if_stmt>cfg.TRAIN_AUTO_RESUME<block_start>ckpts=os.listdir(cfg.DIR_CKPT)<if_stmt>len(ckpts)<g>0<block_start>ckpts=list(map(<lambda>x:int(x.split('_')[-1].split('.')[0]) ckpts))<line_sep>ckpt=np.sort(ckpts)[-1]<line_sep>cfg.TRAIN_RESUME=<true><line_sep>cfg.TRAIN_RESUME_CKPT=ckpt<line_sep>cfg.TRAIN_RESUME_STEP=ckpt<block_end><else_stmt><block_start>cfg.TRAIN_RESUME=<false><block_end><block_end><if_stmt>cfg.TRAIN_RESUME<block_start><if_stmt>self.rank<eq>0<block_start><try_stmt><block_start>ema_ckpt_dir=os.path.join(self.ema_dir 'save_step_%s.pth'%(cfg.TRAIN_RESUME_CKPT))<line_sep>ema_model,removed_dict=load_network(self.model ema_ckpt_dir self.gpu)<if_stmt>len(removed_dict)<g>0<block_start>self.print_log('Remove {} from EMA model.'.format(removed_dict))<block_end>ema_decay=self.ema.decay<del_stmt>(self.ema)<line_sep>ema_params=get_param_buffer_for_ema(ema_model update_buffer=(<not>cfg.MODEL_FREEZE_BN))<line_sep>self.ema=ExponentialMovingAverage(ema_params decay=ema_decay)<line_sep>self.ema.num_updates=cfg.TRAIN_RESUME_CKPT<block_end><except_stmt>Exception<as>inst<block_start>self.print_log(inst)<line_sep>self.print_log('Error: EMA model not found!')<block_end><block_end><try_stmt><block_start>resume_ckpt=os.path.join(cfg.DIR_CKPT 'save_step_%s.pth'%(cfg.TRAIN_RESUME_CKPT))<line_sep>self.model,self.optimizer,removed_dict=load_network_and_optimizer(self.model self.optimizer resume_ckpt self.gpu scaler=self.scaler)<block_end><except_stmt>Exception<as>inst<block_start>self.print_log(inst)<line_sep>resume_ckpt=os.path.join('saved_models' 'save_step_%s.pth'%(cfg.TRAIN_RESUME_CKPT))<line_sep>self.model,self.optimizer,removed_dict=load_network_and_optimizer(self.model self.optimizer resume_ckpt self.gpu scaler=self.scaler)<block_end><if_stmt>len(removed_dict)<g>0<block_start>self.print_log('Remove {} from checkpoint.'.format(removed_dict))<block_end>self.step=cfg.TRAIN_RESUME_STEP<if_stmt>cfg.TRAIN_TOTAL_STEPS<le>self.step<block_start>self.print_log("Your training has finished!")<line_sep>exit()<block_end>self.epoch=int(np.ceil(self.step/len(self.train_loader)))<line_sep>self.print_log('Resume from step {}'.format(self.step))<block_end><elif_stmt>cfg.PRETRAIN<block_start><if_stmt>cfg.PRETRAIN_FULL<block_start>self.model,removed_dict=load_network(self.model cfg.PRETRAIN_MODEL self.gpu)<if_stmt>len(removed_dict)<g>0<block_start>self.print_log('Remove {} from pretrained 
model.'.format(removed_dict))<block_end>self.print_log('Load pretrained VOS model from {}.'.format(cfg.PRETRAIN_MODEL))<block_end><else_stmt><block_start>model_encoder,removed_dict=load_network(self.model_encoder cfg.MODEL_ENCODER_PRETRAIN self.gpu)<if_stmt>len(removed_dict)<g>0<block_start>self.print_log('Remove {} from pretrained model.'.format(removed_dict))<block_end>self.print_log('Load pretrained backbone model from {}.'.format(cfg.PRETRAIN_MODEL))<block_end><block_end><block_end><def_stmt>prepare_dataset self<block_start>cfg=self.cfg<line_sep>self.enable_prev_frame=cfg.TRAIN_ENABLE_PREV_FRAME<line_sep>self.print_log('Process dataset...')<line_sep>composed_transforms=transforms.Compose([tr.RandomScale(cfg.DATA_MIN_SCALE_FACTOR cfg.DATA_MAX_SCALE_FACTOR cfg.DATA_SHORT_EDGE_LEN) tr.BalancedRandomCrop(cfg.DATA_RANDOMCROP max_obj_num=cfg.MODEL_MAX_OBJ_NUM) tr.RandomHorizontalFlip(cfg.DATA_RANDOMFLIP) tr.Resize(cfg.DATA_RANDOMCROP use_padding=<true>) tr.ToTensor()])<line_sep>train_datasets=[]<if_stmt>'static'<in>cfg.DATASETS<block_start>pretrain_vos_dataset=StaticTrain(cfg.DIR_STATIC cfg.DATA_RANDOMCROP seq_len=cfg.DATA_SEQ_LEN merge_prob=cfg.DATA_DYNAMIC_MERGE_PROB max_obj_n=cfg.MODEL_MAX_OBJ_NUM)<line_sep>train_datasets.append(pretrain_vos_dataset)<line_sep>self.enable_prev_frame=<false><block_end><if_stmt>'davis2017'<in>cfg.DATASETS<block_start>train_davis_dataset=DAVIS2017_Train(root=cfg.DIR_DAVIS full_resolution=cfg.TRAIN_DATASET_FULL_RESOLUTION transform=composed_transforms repeat_time=cfg.DATA_DAVIS_REPEAT seq_len=cfg.DATA_SEQ_LEN rand_gap=cfg.DATA_RANDOM_GAP_DAVIS rand_reverse=cfg.DATA_RANDOM_REVERSE_SEQ merge_prob=cfg.DATA_DYNAMIC_MERGE_PROB enable_prev_frame=self.enable_prev_frame max_obj_n=cfg.MODEL_MAX_OBJ_NUM)<line_sep>train_datasets.append(train_davis_dataset)<block_end><if_stmt>'youtubevos'<in>cfg.DATASETS<block_start>train_ytb_dataset=YOUTUBEVOS_Train(root=cfg.DIR_YTB transform=composed_transforms seq_len=cfg.DATA_SEQ_LEN rand_gap=cfg.DATA_RANDOM_GAP_YTB rand_reverse=cfg.DATA_RANDOM_REVERSE_SEQ merge_prob=cfg.DATA_DYNAMIC_MERGE_PROB enable_prev_frame=self.enable_prev_frame max_obj_n=cfg.MODEL_MAX_OBJ_NUM)<line_sep>train_datasets.append(train_ytb_dataset)<block_end><if_stmt>'test'<in>cfg.DATASETS<block_start>test_dataset=TEST(transform=composed_transforms seq_len=cfg.DATA_SEQ_LEN)<line_sep>train_datasets.append(test_dataset)<block_end><if_stmt>len(train_datasets)<g>1<block_start>train_dataset=torch.utils.data.ConcatDataset(train_datasets)<block_end><elif_stmt>len(train_datasets)<eq>1<block_start>train_dataset=train_datasets[0]<block_end><else_stmt><block_start>self.print_log('No dataset!')<line_sep>exit(0)<block_end>self.train_sampler=torch.utils.data.distributed.DistributedSampler(train_dataset)<line_sep>self.train_loader=DataLoader(train_dataset batch_size=int(cfg.TRAIN_BATCH_SIZE/cfg.TRAIN_GPUS) shuffle=<false> num_workers=cfg.DATA_WORKERS pin_memory=<true> sampler=self.train_sampler drop_last=<true> prefetch_factor=4)<line_sep>self.print_log('Done!')<block_end><def_stmt>sequential_training self<block_start>cfg=self.cfg<if_stmt>self.enable_prev_frame<block_start>frame_names=['Ref' 'Prev']<block_end><else_stmt><block_start>frame_names=['Ref(Prev)']<block_end><for_stmt>i range(cfg.DATA_SEQ_LEN-1)<block_start>frame_names.append('Curr{}'.format(i+1))<block_end>seq_len=len(frame_names)<line_sep>running_losses=[]<line_sep>running_ious=[]<for_stmt>_ 
range(seq_len)<block_start>running_losses.append(AverageMeter())<line_sep>running_ious.append(AverageMeter())<block_end>batch_time=AverageMeter()<line_sep>avg_obj=AverageMeter()<line_sep>optimizer=self.optimizer<line_sep>model=self.dist_engine<line_sep>train_sampler=self.train_sampler<line_sep>train_loader=self.train_loader<line_sep>step=self.step<line_sep>epoch=self.epoch<line_sep>max_itr=cfg.TRAIN_TOTAL_STEPS<line_sep>start_seq_training_step=int(cfg.TRAIN_SEQ_TRAINING_START_RATIO<times>max_itr)<line_sep>use_prev_prob=cfg.MODEL_USE_PREV_PROB<line_sep>self.print_log('Start training:')<line_sep>model.train()<while_stmt>step<l>cfg.TRAIN_TOTAL_STEPS<block_start>train_sampler.set_epoch(epoch)<line_sep>epoch<augadd>1<line_sep>last_time=time.time()<for_stmt>frame_idx,sample enumerate(train_loader)<block_start><if_stmt>step<g>cfg.TRAIN_TOTAL_STEPS<block_start><break><block_end><if_stmt>step%cfg.TRAIN_TBLOG_STEP<eq>0<and>self.rank<eq>0<and>cfg.TRAIN_TBLOG<block_start>tf_board=<true><block_end><else_stmt><block_start>tf_board=<false><block_end><if_stmt>step<ge>start_seq_training_step<block_start>use_prev_pred=<true><line_sep>freeze_params=cfg.TRAIN_SEQ_TRAINING_FREEZE_PARAMS<block_end><else_stmt><block_start>use_prev_pred=<false><line_sep>freeze_params=[]<block_end><if_stmt>step%cfg.TRAIN_LR_UPDATE_STEP<eq>0<block_start>now_lr=adjust_learning_rate(optimizer=optimizer base_lr=cfg.TRAIN_LR p=cfg.TRAIN_LR_POWER itr=step max_itr=max_itr restart=cfg.TRAIN_LR_RESTART warm_up_steps=cfg.TRAIN_LR_WARM_UP_RATIO<times>max_itr is_cosine_decay=cfg.TRAIN_LR_COSINE_DECAY min_lr=cfg.TRAIN_LR_MIN encoder_lr_ratio=cfg.TRAIN_LR_ENCODER_RATIO freeze_params=freeze_params)<block_end>ref_imgs=sample['ref_img']# batch_size * 3 * h * w prev_imgs=sample['prev_img']<line_sep>curr_imgs=sample['curr_img']<line_sep>ref_labels=sample['ref_label']# batch_size * 1 * h * w prev_labels=sample['prev_label']<line_sep>curr_labels=sample['curr_label']<line_sep>obj_nums=sample['meta']['obj_num']<line_sep>bs,_,h,w=curr_imgs[0].size()<line_sep>ref_imgs=ref_imgs.cuda(self.gpu non_blocking=<true>)<line_sep>prev_imgs=prev_imgs.cuda(self.gpu non_blocking=<true>)<line_sep>curr_imgs=[curr_img.cuda(self.gpu non_blocking=<true>)<for>curr_img curr_imgs]<line_sep>ref_labels=ref_labels.cuda(self.gpu non_blocking=<true>)<line_sep>prev_labels=prev_labels.cuda(self.gpu non_blocking=<true>)<line_sep>curr_labels=[curr_label.cuda(self.gpu non_blocking=<true>)<for>curr_label curr_labels]<line_sep>obj_nums=list(obj_nums)<line_sep>obj_nums=[int(obj_num)<for>obj_num obj_nums]<line_sep>batch_size=ref_imgs.size(0)<line_sep>all_frames=torch.cat([ref_imgs prev_imgs]+curr_imgs dim=0)<line_sep>all_labels=torch.cat([ref_labels prev_labels]+curr_labels dim=0)<line_sep>self.engine.restart_engine(batch_size <true>)<line_sep>optimizer.zero_grad(set_to_none=<true>)<if_stmt>self.enable_amp<block_start><with_stmt>torch.cuda.amp.autocast(enabled=<true>)<block_start>loss,all_pred,all_loss,boards=model(all_frames all_labels batch_size use_prev_pred=use_prev_pred obj_nums=obj_nums step=step tf_board=tf_board enable_prev_frame=self.enable_prev_frame use_prev_prob=use_prev_prob)<line_sep>loss=torch.mean(loss)<block_end>self.scaler.scale(loss).backward()<line_sep>self.scaler.unscale_(optimizer)<line_sep>torch.nn.utils.clip_grad_norm_(model.parameters() cfg.TRAIN_CLIP_GRAD_NORM)<line_sep>self.scaler.step(optimizer)<line_sep>self.scaler.update()<block_end><else_stmt><block_start>loss,all_pred,all_loss,boards=model(all_frames all_labels ref_imgs.size(0) use_prev_pred=use_prev_pred 
obj_nums=obj_nums step=step tf_board=tf_board enable_prev_frame=self.enable_prev_frame use_prev_prob=use_prev_prob)<line_sep>loss=torch.mean(loss)<line_sep>torch.nn.utils.clip_grad_norm_(model.parameters() cfg.TRAIN_CLIP_GRAD_NORM)<line_sep>loss.backward()<line_sep>optimizer.step()<block_end><for_stmt>idx range(seq_len)<block_start>now_pred=all_pred[idx].detach()<line_sep>now_label=all_labels[idx<times>bs:(idx+1)<times>bs].detach()<line_sep>now_loss=torch.mean(all_loss[idx].detach())<line_sep>now_iou=pytorch_iou(now_pred.unsqueeze(1) now_label obj_nums)<times>100<line_sep>dist.all_reduce(now_loss)<line_sep>dist.all_reduce(now_iou)<line_sep>now_loss<augdiv>self.gpu_num<line_sep>now_iou<augdiv>self.gpu_num<if_stmt>self.rank<eq>0<block_start>running_losses[idx].update(now_loss.item())<line_sep>running_ious[idx].update(now_iou.item())<block_end><block_end><if_stmt>self.rank<eq>0<block_start>self.ema.update(self.ema_params)<line_sep>avg_obj.update(sum(obj_nums)/float(len(obj_nums)))<line_sep>curr_time=time.time()<line_sep>batch_time.update(curr_time-last_time)<line_sep>last_time=curr_time<if_stmt>step%cfg.TRAIN_TBLOG_STEP<eq>0<block_start>all_f=[ref_imgs prev_imgs]+curr_imgs<line_sep>self.process_log(ref_imgs all_f[-2] all_f[-1] ref_labels all_pred[-2] now_label now_pred boards running_losses running_ious now_lr step)<block_end><if_stmt>step%cfg.TRAIN_LOG_STEP<eq>0<block_start>strs='I:{}, LR:{:.5f}, T:{:.1f}({:.1f})s, Obj:{:.1f}({:.1f})'.format(step now_lr batch_time.val batch_time.moving_avg avg_obj.val avg_obj.moving_avg)<line_sep>batch_time.reset()<line_sep>avg_obj.reset()<for_stmt>idx range(seq_len)<block_start>strs<augadd>', {}: L {:.3f}({:.3f}) IoU {:.1f}({:.1f})%'.format(frame_names[idx] running_losses[idx].val running_losses[idx].moving_avg running_ious[idx].val running_ious[idx].moving_avg)<line_sep>running_losses[idx].reset()<line_sep>running_ious[idx].reset()<block_end>self.print_log(strs)<block_end><block_end>step<augadd>1<if_stmt>step%cfg.TRAIN_SAVE_STEP<eq>0<and>self.rank<eq>0<block_start>max_mem=torch.cuda.max_memory_allocated(device=self.gpu)/(1024.<power>3)<line_sep>ETA=str(datetime.timedelta(seconds=int(batch_time.moving_avg<times>(cfg.TRAIN_TOTAL_STEPS-step))))<line_sep>self.print_log('ETA: {}, Max Mem: {:.2f}G.'.format(ETA max_mem))<line_sep>self.print_log('Save CKPT (Step {}).'.format(step))<line_sep>save_network(self.model optimizer step cfg.DIR_CKPT cfg.TRAIN_MAX_KEEP_CKPT scaler=self.scaler)<try_stmt><block_start>torch.cuda.empty_cache()<line_sep># First save original parameters before replacing with EMA version self.ema.store(self.ema_params)<line_sep># Copy EMA parameters to model self.ema.copy_to(self.ema_params)<line_sep># Save EMA model save_network(self.model optimizer step self.ema_dir cfg.TRAIN_MAX_KEEP_CKPT backup_dir='./saved_ema_models' scaler=self.scaler)<line_sep># Restore original parameters to resume training later self.ema.restore(self.ema_params)<block_end><except_stmt>Exception<as>inst<block_start>self.print_log(inst)<line_sep>self.print_log('Error: failed to save EMA model!')<block_end><block_end><block_end><block_end>self.print_log('Stop training!')<block_end><def_stmt>print_log self string<block_start><if_stmt>self.rank<eq>0<block_start>print(string)<block_end><block_end><def_stmt>process_log self ref_imgs prev_imgs curr_imgs ref_labels prev_labels curr_labels curr_pred boards running_losses running_ious now_lr step<block_start>cfg=self.cfg<line_sep>mean=np.array([[[0.485]] [[0.456]] [[0.406]]])<line_sep>sigma=np.array([[[0.229]] [[0.224]] 
[[0.225]]])<line_sep>show_ref_img,show_prev_img,show_curr_img=[img.cpu().numpy()[0]<times>sigma+mean<for>img [ref_imgs prev_imgs curr_imgs]]<line_sep>show_gt,show_prev_gt,show_ref_gt,show_preds_s=[label.cpu()[0].squeeze(0).numpy()<for>label [curr_labels prev_labels ref_labels curr_pred]]<line_sep>show_gtf,show_prev_gtf,show_ref_gtf,show_preds_sf=[label2colormap(label).transpose((2 0 1))<for>label [show_gt show_prev_gt show_ref_gt show_preds_s]]<if_stmt>cfg.TRAIN_IMG_LOG<or>cfg.TRAIN_TBLOG<block_start>show_ref_img=masked_image(show_ref_img show_ref_gtf show_ref_gt)<if_stmt>cfg.TRAIN_IMG_LOG<block_start>save_image(show_ref_img os.path.join(cfg.DIR_IMG_LOG '%06d_ref_img.jpeg'%(step)))<block_end>show_prev_img=masked_image(show_prev_img show_prev_gtf show_prev_gt)<if_stmt>cfg.TRAIN_IMG_LOG<block_start>save_image(show_prev_img os.path.join(cfg.DIR_IMG_LOG '%06d_prev_img.jpeg'%(step)))<block_end>show_img_pred=masked_image(show_curr_img show_preds_sf show_preds_s)<if_stmt>cfg.TRAIN_IMG_LOG<block_start>save_image(show_img_pred os.path.join(cfg.DIR_IMG_LOG '%06d_prediction.jpeg'%(step)))<block_end>show_curr_img=masked_image(show_curr_img show_gtf show_gt)<if_stmt>cfg.TRAIN_IMG_LOG<block_start>save_image(show_curr_img os.path.join(cfg.DIR_IMG_LOG '%06d_groundtruth.jpeg'%(step)))<block_end><if_stmt>cfg.TRAIN_TBLOG<block_start><for_stmt>seq_step,running_loss,running_iou zip(range(len(running_losses)) running_losses running_ious)<block_start>self.tblogger.add_scalar('S{}/Loss'.format(seq_step) running_loss.avg step)<line_sep>self.tblogger.add_scalar('S{}/IoU'.format(seq_step) running_iou.avg step)<block_end>self.tblogger.add_scalar('LR' now_lr step)<line_sep>self.tblogger.add_image('Ref/Image' show_ref_img step)<line_sep>self.tblogger.add_image('Ref/GT' show_ref_gtf step)<line_sep>self.tblogger.add_image('Prev/Image' show_prev_img step)<line_sep>self.tblogger.add_image('Prev/GT' show_prev_gtf step)<line_sep>self.tblogger.add_image('Curr/Image_GT' show_curr_img step)<line_sep>self.tblogger.add_image('Curr/Image_Pred' show_img_pred step)<line_sep>self.tblogger.add_image('Curr/Mask_GT' show_gtf step)<line_sep>self.tblogger.add_image('Curr/Mask_Pred' show_preds_sf step)<for_stmt>key boards['image'].keys()<block_start>tmp=boards['image'][key]<for_stmt>seq_step range(len(tmp))<block_start>self.tblogger.add_image('S{}/'.format(seq_step)+key tmp[seq_step].detach().cpu().numpy() step)<block_end><block_end><for_stmt>key boards['scalar'].keys()<block_start>tmp=boards['scalar'][key]<for_stmt>seq_step range(len(tmp))<block_start>self.tblogger.add_scalar('S{}/'.format(seq_step)+key tmp[seq_step].detach().cpu().numpy() step)<block_end><block_end>self.tblogger.flush()<block_end><block_end><del_stmt>(boards)<block_end><block_end>
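# --- Added note: a minimal, generic sketch of the EMA bookkeeping used in sequential_training
# above (ema.update after each optimizer step, then store -> copy_to -> save -> restore at
# checkpoint time). This is an illustration with a toy model, not the project's
# ExponentialMovingAverage class; the Linear layer sizes and decay=0.999 are made-up values.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
ema_params = [p.clone().detach() for p in model.parameters()]
decay = 0.999

def ema_update():
    # shadow <- decay * shadow + (1 - decay) * current, called after each optimizer.step()
    with torch.no_grad():
        for shadow, p in zip(ema_params, model.parameters()):
            shadow.mul_(decay).add_(p, alpha=1 - decay)

# at checkpoint time: keep the training weights, swap the EMA weights in, save, then restore
backup = [p.clone() for p in model.parameters()]             # "store"
with torch.no_grad():
    for p, shadow in zip(model.parameters(), ema_params):    # "copy_to"
        p.copy_(shadow)
# torch.save(model.state_dict(), 'ema_ckpt.pth')             # save the EMA weights
with torch.no_grad():
    for p, b in zip(model.parameters(), backup):             # "restore" to resume training
        p.copy_(b)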
"""Same as the table_movie.py but uses Live to update"""<import_stmt>time<import_from_stmt>contextlib contextmanager<import_from_stmt>rich box<import_from_stmt>rich.align Align<import_from_stmt>rich.console Console<import_from_stmt>rich.live Live<import_from_stmt>rich.table Table<import_from_stmt>rich.text Text<line_sep>TABLE_DATA=[["May 25, 1977" "Star Wars Ep. [b]IV[/]: [i]A New Hope" "$11,000,000" "$1,554,475" "$775,398,007" ] ["May 21, 1980" "Star Wars Ep. [b]V[/]: [i]The Empire Strikes Back" "$23,000,000" "$4,910,483" "$547,969,004" ] ["May 25, 1983" "Star Wars Ep. [b]VI[/b]: [i]Return of the Jedi" "$32,500,000" "$23,019,618" "$475,106,177" ] ["May 19, 1999" "Star Wars Ep. [b]I[/b]: [i]The phantom Menace" "$115,000,000" "$64,810,870" "$1,027,044,677" ] ["May 16, 2002" "Star Wars Ep. [b]II[/b]: [i]Attack of the Clones" "$115,000,000" "$80,027,814" "$656,695,615" ] ["May 19, 2005" "Star Wars Ep. [b]III[/b]: [i]Revenge of the Sith" "$115,500,000" "$380,270,577" "$848,998,877" ] ]<line_sep>console=Console()<line_sep>BEAT_TIME=0.04<line_sep>@contextmanager<def_stmt>beat length:int=1<arrow><none><block_start><yield><line_sep>time.sleep(length<times>BEAT_TIME)<block_end>table=Table(show_footer=<false>)<line_sep>table_centered=Align.center(table)<line_sep>console.clear()<with_stmt>Live(table_centered console=console screen=<false> refresh_per_second=20)<block_start><with_stmt>beat(10)<block_start>table.add_column("Release Date" no_wrap=<true>)<block_end><with_stmt>beat(10)<block_start>table.add_column("Title" Text.from_markup("[b]Total" justify="right"))<block_end><with_stmt>beat(10)<block_start>table.add_column("Budget" "[u]$412,000,000" no_wrap=<true>)<block_end><with_stmt>beat(10)<block_start>table.add_column("Opening Weekend" "[u]$577,703,455" no_wrap=<true>)<block_end><with_stmt>beat(10)<block_start>table.add_column("Box Office" "[u]$4,331,212,357" no_wrap=<true>)<block_end><with_stmt>beat(10)<block_start>table.title="Star Wars Box Office"<block_end><with_stmt>beat(10)<block_start>table.title=("[not italic]:popcorn:[/] Star Wars Box Office [not italic]:popcorn:[/]")<block_end><with_stmt>beat(10)<block_start>table.caption="Made with Rich"<block_end><with_stmt>beat(10)<block_start>table.caption="Made with [b]Rich[/b]"<block_end><with_stmt>beat(10)<block_start>table.caption="Made with [b magenta not dim]Rich[/]"<block_end><for_stmt>row TABLE_DATA<block_start><with_stmt>beat(10)<block_start>table.add_row(*row)<block_end><block_end><with_stmt>beat(10)<block_start>table.show_footer=<true><block_end>table_width=console.measure(table).maximum<with_stmt>beat(10)<block_start>table.columns[2].justify="right"<block_end><with_stmt>beat(10)<block_start>table.columns[3].justify="right"<block_end><with_stmt>beat(10)<block_start>table.columns[4].justify="right"<block_end><with_stmt>beat(10)<block_start>table.columns[2].header_style="bold red"<block_end><with_stmt>beat(10)<block_start>table.columns[3].header_style="bold green"<block_end><with_stmt>beat(10)<block_start>table.columns[4].header_style="bold blue"<block_end><with_stmt>beat(10)<block_start>table.columns[2].style="red"<block_end><with_stmt>beat(10)<block_start>table.columns[3].style="green"<block_end><with_stmt>beat(10)<block_start>table.columns[4].style="blue"<block_end><with_stmt>beat(10)<block_start>table.columns[0].style="cyan"<line_sep>table.columns[0].header_style="bold cyan"<block_end><with_stmt>beat(10)<block_start>table.columns[1].style="magenta"<line_sep>table.columns[1].header_style="bold 
magenta"<block_end><with_stmt>beat(10)<block_start>table.columns[2].footer_style="bright_red"<block_end><with_stmt>beat(10)<block_start>table.columns[3].footer_style="bright_green"<block_end><with_stmt>beat(10)<block_start>table.columns[4].footer_style="bright_blue"<block_end><with_stmt>beat(10)<block_start>table.row_styles=["none" "dim"]<block_end><with_stmt>beat(10)<block_start>table.border_style="bright_yellow"<block_end><for_stmt>box_style [box.SQUARE box.MINIMAL box.SIMPLE box.SIMPLE_HEAD ]<block_start><with_stmt>beat(10)<block_start>table.box=box_style<block_end><block_end><with_stmt>beat(10)<block_start>table.pad_edge=<false><block_end>original_width=console.measure(table).maximum<for_stmt>width range(original_width console.width 2)<block_start><with_stmt>beat(1)<block_start>table.width=width<block_end><block_end><for_stmt>width range(console.width original_width -2)<block_start><with_stmt>beat(1)<block_start>table.width=width<block_end><block_end><for_stmt>width range(original_width 90 -2)<block_start><with_stmt>beat(1)<block_start>table.width=width<block_end><block_end><for_stmt>width range(90 original_width+1 2)<block_start><with_stmt>beat(1)<block_start>table.width=width<block_end><block_end><with_stmt>beat(2)<block_start>table.width=<none><block_end><block_end>
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2021, <NAME>, <EMAIL> # <import_stmt>unittest<import_from_stmt>io StringIO<import_from_stmt>...styles Styles<class_stmt>TestWriteColors(unittest.TestCase)<block_start>""" Test the Styles _write_colors() method. """<def_stmt>setUp self<block_start>self.fh=StringIO()<line_sep>self.styles=Styles()<line_sep>self.styles._set_filehandle(self.fh)<block_end><def_stmt>test_write_colors1 self<block_start>"""Test the _write_colors() method"""<line_sep>self.styles.custom_colors=['FF26DA55']<line_sep>self.styles._write_colors()<line_sep>exp="""<colors><mruColors><color rgb="FF26DA55"/></mruColors></colors>"""<line_sep>got=self.fh.getvalue()<line_sep>self.assertEqual(got exp)<block_end><def_stmt>test_write_colors2 self<block_start>"""Test the _write_colors() method"""<line_sep>self.styles.custom_colors=['FF26DA55' 'FF792DC8' 'FF646462']<line_sep>self.styles._write_colors()<line_sep>exp="""<colors><mruColors><color rgb="FF646462"/><color rgb="FF792DC8"/><color rgb="FF26DA55"/></mruColors></colors>"""<line_sep>got=self.fh.getvalue()<line_sep>self.assertEqual(got exp)<block_end><def_stmt>test_write_colors3 self<block_start>"""Test the _write_colors() method"""<line_sep>self.styles.custom_colors=['FF792DC8' 'FF646462' 'FF5EA29C' 'FF583AC6' 'FFE31DAF' 'FFA1A759' 'FF600FF1' 'FF0CF49C' 'FFE3FA06' 'FF913AC6' 'FFB97847' 'FFD97827']<line_sep>self.styles._write_colors()<line_sep>exp="""<colors><mruColors><color rgb="FFD97827"/><color rgb="FFB97847"/><color rgb="FF913AC6"/><color rgb="FFE3FA06"/><color rgb="FF0CF49C"/><color rgb="FF600FF1"/><color rgb="FFA1A759"/><color rgb="FFE31DAF"/><color rgb="FF583AC6"/><color rgb="FF5EA29C"/></mruColors></colors>"""<line_sep>got=self.fh.getvalue()<line_sep>self.assertEqual(got exp)<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('knox' '0001_initial') ]<line_sep>operations=[migrations.DeleteModel('AuthToken') migrations.CreateModel(name='AuthToken' fields=[('digest' models.CharField(max_length=64 serialize=<false> primary_key=<true>)) ('salt' models.CharField(max_length=16 serialize=<false> unique=<true>)) ('created' models.DateTimeField(auto_now_add=<true>)) ('user' models.ForeignKey(to=settings.AUTH_USER_MODEL related_name='auth_token_set' on_delete=models.CASCADE)) ] )]<block_end>
# Time: O(m * nlogn) # Space: O(n) <class_stmt>Solution(object)<block_start><def_stmt>longestCommonSubpath self n paths<block_start>""" :type n: int :type paths: List[List[int]] :rtype: int """<def_stmt>RabinKarp arr x# double hashing <block_start>hashes=tuple([reduce(<lambda>h x:(h<times>p+x)%MOD (arr[i]<for>i xrange(x)) 0)<for>p P])<line_sep>powers=[pow(p x MOD)<for>p P]<line_sep>lookup={hashes}<for_stmt>i xrange(x len(arr))<block_start>hashes=tuple([(hashes[j]<times>P[j]-arr[i-x]<times>powers[j]+arr[i])%MOD<for>j xrange(len(P))])# in smaller datasets, tuple from list is much faster than tuple from generator, see https://stackoverflow.com/questions/16940293/why-is-there-no-tuple-comprehension-in-python lookup.add(hashes)<block_end><return>lookup<block_end><def_stmt>check paths x<block_start>intersect=RabinKarp(paths[0] x)<for_stmt>i xrange(1 len(paths))<block_start>intersect=set.intersection(intersect RabinKarp(paths[i] x))<if_stmt><not>intersect<block_start><return><false><block_end><block_end><return><true><block_end>MOD,P=10<power>9+7 (113 109)# MOD could be the min prime of 7-digit number (10**6+3), P could be (2, 3) left,right=1 min(len(p)<for>p paths)<while_stmt>left<le>right<block_start>mid=left+(right-left)<floordiv>2<if_stmt><not>check(paths mid)<block_start>right=mid-1<block_end><else_stmt><block_start>left=mid+1<block_end><block_end><return>right<block_end><block_end># Time: O(m * nlogn) # Space: O(n) <class_stmt>Solution2(object)<block_start><def_stmt>longestCommonSubpath self n paths<block_start>""" :type n: int :type paths: List[List[int]] :rtype: int """<def_stmt>RabinKarp arr x<block_start>h=reduce(<lambda>h x:(h<times>P+x)%MOD (arr[i]<for>i xrange(x)) 0)<line_sep>power=pow(P x MOD)<line_sep>lookup={h}<for_stmt>i xrange(x len(arr))<block_start>h=(h<times>P-arr[i-x]<times>power+arr[i])%MOD<line_sep>lookup.add(h)<block_end><return>lookup<block_end><def_stmt>check paths x<block_start>intersect=RabinKarp(paths[0] x)<for_stmt>i xrange(1 len(paths))<block_start>intersect=set.intersection(intersect RabinKarp(paths[i] x))<if_stmt><not>intersect<block_start><return><false><block_end><block_end><return><true><block_end>MOD,P=10<power>11+19 max(x<for>p paths<for>x p)+1# MOD is the min prime of 12-digit number left,right=1 min(len(p)<for>p paths)<while_stmt>left<le>right<block_start>mid=left+(right-left)<floordiv>2<if_stmt><not>check(paths mid)<block_start>right=mid-1<block_end><else_stmt><block_start>left=mid+1<block_end><block_end><return>right<block_end><block_end>
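# A minimal Python 3 sketch of the rolling-hash window used above (the
# solutions themselves are written against Python 2's xrange/reduce builtins;
# the function name, prime and modulus below are illustrative, not taken from
# the solution):
from functools import reduce

def rolling_hashes(arr, x, p=113, mod=10**9 + 7):
    """Return the set of polynomial hashes of every length-x window of arr."""
    h = reduce(lambda acc, v: (acc * p + v) % mod, arr[:x], 0)
    power = pow(p, x, mod)            # p**x, used to drop the outgoing element
    seen = {h}
    for i in range(x, len(arr)):
        h = (h * p - arr[i - x] * power + arr[i]) % mod   # slide the window by one
        seen.add(h)
    return seen

# e.g. rolling_hashes([2, 3, 4], 2) & rolling_hashes([1, 2, 3, 4], 2) is
# non-empty, which is exactly the per-length intersection test check() performs.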
# Copyright 2015 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>numpy iinfo uint32 <import_from_stmt>zipline.data.us_equity_pricing BcolzDailyBarReader SQLiteAdjustmentReader <import_from_stmt>zipline.lib.adjusted_array AdjustedArray<import_from_stmt>zipline.errors NoFurtherDataError<import_from_stmt>zipline.utils.calendars get_calendar<import_from_stmt>.base PipelineLoader<line_sep>UINT32_MAX=iinfo(uint32).max<class_stmt>USEquityPricingLoader(PipelineLoader)<block_start>""" PipelineLoader for US Equity Pricing data Delegates loading of baselines and adjustments. """<def_stmt>__init__ self raw_price_loader adjustments_loader<block_start>self.raw_price_loader=raw_price_loader<line_sep>self.adjustments_loader=adjustments_loader<line_sep>cal=self.raw_price_loader.trading_calendar<or>get_calendar("NYSE")<line_sep>self._all_sessions=cal.all_sessions<block_end>@classmethod<def_stmt>from_files cls pricing_path adjustments_path<block_start>""" Create a loader from a bcolz equity pricing dir and a SQLite adjustments path. Parameters ---------- pricing_path : str Path to a bcolz directory written by a BcolzDailyBarWriter. adjusments_path : str Path to an adjusments db written by a SQLiteAdjustmentWriter. """<line_sep><return>cls(BcolzDailyBarReader(pricing_path) SQLiteAdjustmentReader(adjustments_path))<block_end><def_stmt>load_adjusted_array self columns dates assets mask# load_adjusted_array is called with dates on which the user's algo # will be shown data, which means we need to return the data that would # be known at the start of each date. We assume that the latest data # known on day N is the data from day (N - 1), so we shift all query # dates back by a day. <block_start>start_date,end_date=_shift_dates(self._all_sessions dates[0] dates[-1] shift=1 )<line_sep>colnames=[c.name<for>c columns]<line_sep>raw_arrays=self.raw_price_loader.load_raw_arrays(colnames start_date end_date assets )<line_sep>adjustments=self.adjustments_loader.load_adjustments(colnames dates assets )<line_sep>out={}<for_stmt>c,c_raw,c_adjs zip(columns raw_arrays adjustments)<block_start>out[c]=AdjustedArray(c_raw.astype(c.dtype) mask c_adjs c.missing_value )<block_end><return>out<block_end><block_end><def_stmt>_shift_dates dates start_date end_date shift<block_start><try_stmt><block_start>start=dates.get_loc(start_date)<block_end><except_stmt>KeyError<block_start><if_stmt>start_date<l>dates[0]<block_start><raise>NoFurtherDataError(msg=("Pipeline Query requested data starting on {query_start}, "<concat>"but first known date is {calendar_start}").format(query_start=str(start_date) calendar_start=str(dates[0]) ))<block_end><else_stmt><block_start><raise>ValueError("Query start %s not in calendar"%start_date)<block_end><block_end># Make sure that shifting doesn't push us out of the calendar. 
<if_stmt>start<l>shift<block_start><raise>NoFurtherDataError(msg=("Pipeline Query requested data from {shift}"<concat>" days before {query_start}, but first known date is only "<concat>"{start} days earlier.").format(shift=shift query_start=start_date start=start) )<block_end><try_stmt><block_start>end=dates.get_loc(end_date)<block_end><except_stmt>KeyError<block_start><if_stmt>end_date<g>dates[-1]<block_start><raise>NoFurtherDataError(msg=("Pipeline Query requesting data up to {query_end}, "<concat>"but last known date is {calendar_end}").format(query_end=end_date calendar_end=dates[-1] ))<block_end><else_stmt><block_start><raise>ValueError("Query end %s not in calendar"%end_date)<block_end><block_end><return>dates[start-shift] dates[end-shift]<block_end>
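# Hedged illustration of _shift_dates on a toy calendar (not zipline's real
# trading sessions): querying [sessions[1], sessions[3]] with shift=1 returns
# the window one session earlier.
import pandas as pd

sessions = pd.DatetimeIndex(["2015-01-05", "2015-01-06", "2015-01-07", "2015-01-08"])
start, end = _shift_dates(sessions, sessions[1], sessions[3], shift=1)
assert (start, end) == (sessions[0], sessions[2])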
# Copyright (c) 2015-2019, Activision Publishing, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. <import_stmt>datetime<import_from_stmt>assertpy assert_that fail<line_sep>d1=datetime.datetime.today()<def_stmt>test_is_before <block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_before(d2)<block_end><def_stmt>test_is_before_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d2).is_before(d1)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be before <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_before_bad_val_type_failure <block_start><try_stmt><block_start>assert_that(123).is_before(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_before_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_before(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_after <block_start>d2=datetime.datetime.today()<line_sep>assert_that(d2).is_after(d1)<block_end><def_stmt>test_is_after_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_after(d2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be after <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_after_bad_val_type_failure <block_start><try_stmt><block_start>assert_that(123).is_after(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('val must be 
datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_after_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_after(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_milliseconds <block_start>assert_that(d1).is_equal_to_ignoring_milliseconds(d1)<block_end><def_stmt>test_is_equal_to_ignoring_milliseconds_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()+datetime.timedelta(days=1)<line_sep>assert_that(d1).is_equal_to_ignoring_milliseconds(d2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_milliseconds_bad_val_type_failure <block_start><try_stmt><block_start>assert_that(123).is_equal_to_ignoring_milliseconds(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_milliseconds_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_equal_to_ignoring_milliseconds(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_seconds <block_start>assert_that(d1).is_equal_to_ignoring_seconds(d1)<block_end><def_stmt>test_is_equal_to_ignoring_seconds_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()+datetime.timedelta(days=1)<line_sep>assert_that(d1).is_equal_to_ignoring_seconds(d2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_seconds_bad_val_type_failure <block_start><try_stmt><block_start>assert_that(123).is_equal_to_ignoring_seconds(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_seconds_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_equal_to_ignoring_seconds(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_time <block_start>assert_that(d1).is_equal_to_ignoring_time(d1)<block_end><def_stmt>test_is_equal_to_ignoring_time_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()+datetime.timedelta(days=1)<line_sep>assert_that(d1).is_equal_to_ignoring_time(d2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2}> to be equal to <\d{4}-\d{2}-\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_time_bad_val_type_failure 
<block_start><try_stmt><block_start>assert_that(123).is_equal_to_ignoring_time(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_equal_to_ignoring_time_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_equal_to_ignoring_time(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')<block_end><block_end><def_stmt>test_is_greater_than <block_start>d2=datetime.datetime.today()<line_sep>assert_that(d2).is_greater_than(d1)<block_end><def_stmt>test_is_greater_than_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_greater_than(d2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_greater_than_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_greater_than(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')<block_end><block_end><def_stmt>test_is_greater_than_or_equal_to <block_start>assert_that(d1).is_greater_than_or_equal_to(d1)<block_end><def_stmt>test_is_greater_than_or_equal_to_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_greater_than_or_equal_to(d2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_greater_than_or_equal_to_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_greater_than_or_equal_to(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')<block_end><block_end><def_stmt>test_is_less_than <block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_less_than(d2)<block_end><def_stmt>test_is_less_than_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d2).is_less_than(d1)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_less_than_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_less_than(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')<block_end><block_end><def_stmt>test_is_less_than_or_equal_to <block_start>assert_that(d1).is_less_than_or_equal_to(d1)<block_end><def_stmt>test_is_less_than_or_equal_to_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d2).is_less_than_or_equal_to(d1)<line_sep>fail('should have raised 
error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_less_than_or_equal_to_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_less_than_or_equal_to(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')<block_end><block_end><def_stmt>test_is_between <block_start>d2=datetime.datetime.today()<line_sep>d3=datetime.datetime.today()<line_sep>assert_that(d2).is_between(d1 d3)<block_end><def_stmt>test_is_between_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>d3=datetime.datetime.today()<line_sep>assert_that(d1).is_between(d2 d3)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_between_bad_arg1_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_between(123 456)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')<block_end><block_end><def_stmt>test_is_between_bad_arg2_type_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_between(d2 123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')<block_end><block_end><def_stmt>test_is_not_between <block_start>d2=d1+datetime.timedelta(minutes=5)<line_sep>d3=d1+datetime.timedelta(minutes=10)<line_sep>assert_that(d1).is_not_between(d2 d3)<block_end><def_stmt>test_is_not_between_failure <block_start><try_stmt><block_start>d2=d1+datetime.timedelta(minutes=5)<line_sep>d3=d1+datetime.timedelta(minutes=10)<line_sep>assert_that(d2).is_not_between(d1 d3)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was.')<block_end><block_end><def_stmt>test_is_not_between_bad_arg1_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_not_between(123 456)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')<block_end><block_end><def_stmt>test_is_not_between_bad_arg2_type_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_not_between(d2 123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')<block_end><block_end><def_stmt>test_is_close_to <block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_close_to(d2 datetime.timedelta(minutes=5))<block_end><def_stmt>test_is_close_to_failure 
<block_start><try_stmt><block_start>d2=d1+datetime.timedelta(minutes=5)<line_sep>assert_that(d1).is_close_to(d2 datetime.timedelta(minutes=1))<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_close_to_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_close_to(123 456)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')<block_end><block_end><def_stmt>test_is_close_to_bad_tolerance_arg_type_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_close_to(d2 123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')<block_end><block_end><def_stmt>test_is_not_close_to <block_start>d2=d1+datetime.timedelta(minutes=5)<line_sep>assert_that(d1).is_not_close_to(d2 datetime.timedelta(minutes=4))<block_end><def_stmt>test_is_not_close_to_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_not_close_to(d2 datetime.timedelta(minutes=5))<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was.')<block_end><block_end><def_stmt>test_is_not_close_to_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(d1).is_not_close_to(123 456)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')<block_end><block_end><def_stmt>test_is_not_close_to_bad_tolerance_arg_type_failure <block_start><try_stmt><block_start>d2=datetime.datetime.today()<line_sep>assert_that(d1).is_not_close_to(d2 123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')<block_end><block_end>t1=datetime.timedelta(seconds=60)<def_stmt>test_is_greater_than_timedelta <block_start>d2=datetime.timedelta(seconds=120)<line_sep>assert_that(d2).is_greater_than(t1)<block_end><def_stmt>test_is_greater_than_timedelta_failure <block_start><try_stmt><block_start>t2=datetime.timedelta(seconds=90)<line_sep>assert_that(t1).is_greater_than(t2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than <\d{1,2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_greater_than_timedelta_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(t1).is_greater_than(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')<block_end><block_end><def_stmt>test_is_greater_than_or_equal_to_timedelta 
<block_start>assert_that(t1).is_greater_than_or_equal_to(t1)<block_end><def_stmt>test_is_greater_than_or_equal_to_timedelta_failure <block_start><try_stmt><block_start>t2=datetime.timedelta(seconds=90)<line_sep>assert_that(t1).is_greater_than_or_equal_to(t2)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_greater_than_or_equal_to_timedelta_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(t1).is_greater_than_or_equal_to(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')<block_end><block_end><def_stmt>test_is_less_than_timedelta <block_start>t2=datetime.timedelta(seconds=90)<line_sep>assert_that(t1).is_less_than(t2)<block_end><def_stmt>test_is_less_than_timedelta_failure <block_start><try_stmt><block_start>t2=datetime.timedelta(seconds=90)<line_sep>assert_that(t2).is_less_than(t1)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than <\d{1,2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_less_than_timedelta_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(t1).is_less_than(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')<block_end><block_end><def_stmt>test_is_less_than_or_equal_to_timedelta <block_start>assert_that(t1).is_less_than_or_equal_to(t1)<block_end><def_stmt>test_is_less_than_or_equal_to_timedelta_failure <block_start><try_stmt><block_start>t2=datetime.timedelta(seconds=90)<line_sep>assert_that(t2).is_less_than_or_equal_to(t1)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_less_than_or_equal_to_timedelta_bad_arg_type_failure <block_start><try_stmt><block_start>assert_that(t1).is_less_than_or_equal_to(123)<line_sep>fail('should have raised error')<block_end><except_stmt>TypeError<as>ex<block_start>assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')<block_end><block_end><def_stmt>test_is_between_timedelta <block_start>d2=datetime.timedelta(seconds=90)<line_sep>d3=datetime.timedelta(seconds=120)<line_sep>assert_that(d2).is_between(t1 d3)<block_end><def_stmt>test_is_between_timedelta_failure <block_start><try_stmt><block_start>d2=datetime.timedelta(seconds=30)<line_sep>d3=datetime.timedelta(seconds=40)<line_sep>assert_that(t1).is_between(d2 d3)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was not.')<block_end><block_end><def_stmt>test_is_not_between_timedelta <block_start>d2=datetime.timedelta(seconds=90)<line_sep>d3=datetime.timedelta(seconds=120)<line_sep>assert_that(t1).is_not_between(d2 d3)<block_end><def_stmt>test_is_not_between_timedelta_failure 
<block_start><try_stmt><block_start>d2=datetime.timedelta(seconds=90)<line_sep>d3=datetime.timedelta(seconds=120)<line_sep>assert_that(d2).is_not_between(t1 d3)<line_sep>fail('should have raised error')<block_end><except_stmt>AssertionError<as>ex<block_start>assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to not be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was.')<block_end><block_end>
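# Compact illustration of the assertions exercised by the tests above (the
# values are arbitrary):
from assertpy import assert_that
import datetime

now = datetime.datetime.now()
assert_that(now).is_before(now + datetime.timedelta(seconds=1))
assert_that(now).is_close_to(now + datetime.timedelta(seconds=30), datetime.timedelta(minutes=1))
assert_that(datetime.timedelta(seconds=60)).is_less_than(datetime.timedelta(seconds=90))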
# -*- coding: utf-8 -*- # Copyright 2012 splinter authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. <import_from_stmt>splinter.exceptions ElementDoesNotExist<class_stmt>ElementList(object)<block_start>""" List of elements. Each member of the list is (usually) an instance of :class:`ElementAPI <splinter.driver.ElementAPI>`. Beyond the traditional list methods, the ``ElementList`` provides some other methods, listed below. There is a peculiar behavior on ElementList: you never get an ``IndexError``. Instead, you can an :class:`ElementDoesNotExist <splinter.exceptions.ElementDoesNotExist>` exception when trying to access an inexistent item in the list: >>> element_list = ElementList([]) >>> element_list[0] # raises ElementDoesNotExist """<def_stmt>__init__ self list driver=<none> find_by=<none> query=<none><block_start>self._container=[]<line_sep>self._container.extend(list)<line_sep>self.driver=driver<line_sep>self.find_by=find_by<line_sep>self.query=query<block_end><def_stmt>__getitem__ self index<block_start><if_stmt><not>isinstance(index int)<and><not>isinstance(index slice)<block_start><return>self.first[index]<block_end><try_stmt><block_start><return>self._container[index]<block_end><except_stmt>IndexError<block_start><raise>ElementDoesNotExist(u'no elements could be found with {0} "{1}"'.format(self.find_by self.query))<block_end><block_end>@property<def_stmt>first self<block_start>"""An alias to the first element of the list. Example: >>> assert element_list[0] == element_list.first """<line_sep><return>self[0]<block_end>@property<def_stmt>last self<block_start>"""An alias to the last element of the list. Example: >>> assert element_list[-1] == element_list.last """<line_sep><return>self[-1]<block_end><def_stmt>is_empty self<block_start>"""Check if the ElementList is empty. Returns: bool: True if the list is empty, else False """<line_sep><return>len(self)<eq>0<block_end><def_stmt>__getattr__ self name<block_start><try_stmt><block_start><return>getattr(self.first name)<block_end><except_stmt>AttributeError<block_start><try_stmt><block_start><return>getattr(self._container name)<block_end><except_stmt>AttributeError<block_start><raise>AttributeError(u"'{0}' object has no attribute '{1}'".format(self.__class__.__name__ name))<block_end><block_end><block_end><def_stmt>__iter__ self<block_start><for_stmt>item self._container<block_start><yield>item<block_end><block_end><def_stmt>__len__ self<block_start>"""__len__ checks the internal container."""<line_sep><return>len(self._container)<block_end><block_end>
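# Minimal sketch of the "no IndexError" behaviour documented above, using the
# class directly instead of a real browser session:
from splinter.exceptions import ElementDoesNotExist

empty = ElementList([], find_by="css", query=".missing")
try:
    empty.first
except ElementDoesNotExist as exc:
    print(exc)  # no elements could be found with css ".missing"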
<import_stmt>os<import_from_stmt>web3 HTTPProvider<import_from_stmt>ethereumetl.providers.rpc BatchHTTPProvider<import_from_stmt>tests.ethereumetl.job.mock_batch_web3_provider MockBatchWeb3Provider<import_from_stmt>tests.ethereumetl.job.mock_web3_provider MockWeb3Provider<def_stmt>get_web3_provider provider_type read_resource_lambda=<none> batch=<false><block_start><if_stmt>provider_type<eq>'mock'<block_start><if_stmt>read_resource_lambda<is><none><block_start><raise>ValueError('read_resource_lambda must not be None for provider type {}'.format(provider_type))<block_end><if_stmt>batch<block_start>provider=MockBatchWeb3Provider(read_resource_lambda)<block_end><else_stmt><block_start>provider=MockWeb3Provider(read_resource_lambda)<block_end><block_end><elif_stmt>provider_type<eq>'infura'<block_start>provider_url=os.environ.get('PROVIDER_URL' 'https://mainnet.infura.io/v3/7aef3f0cd1f64408b163814b22cc643c')<if_stmt>batch<block_start>provider=BatchHTTPProvider(provider_url)<block_end><else_stmt><block_start>provider=HTTPProvider(provider_url)<block_end><block_end><else_stmt><block_start><raise>ValueError('Provider type {} is unexpected'.format(provider_type))<block_end><return>provider<block_end>
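# Hypothetical calls showing the provider branches (the mock lambda below is a
# stand-in for the resource-reading helper the real tests pass in):
infura_single = get_web3_provider('infura')               # plain web3 HTTPProvider
infura_batched = get_web3_provider('infura', batch=True)  # BatchHTTPProvider
mock = get_web3_provider('mock', read_resource_lambda=lambda name: '{}')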
<import_from_stmt>. base<import_from_stmt>. fields<import_from_stmt>. mixins<import_from_stmt>.photo_size PhotoSize<class_stmt>Video(base.TelegramObject mixins.Downloadable)<block_start>""" This object represents a video file. https://core.telegram.org/bots/api#video """<line_sep>file_id:base.String=fields.Field()<line_sep>file_unique_id:base.String=fields.Field()<line_sep>width:base.Integer=fields.Field()<line_sep>height:base.Integer=fields.Field()<line_sep>duration:base.Integer=fields.Field()<line_sep>thumb:PhotoSize=fields.Field(base=PhotoSize)<line_sep>file_name:base.String=fields.Field()<line_sep>mime_type:base.String=fields.Field()<line_sep>file_size:base.Integer=fields.Field()<block_end>
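# Usage note (hedged, dispatcher/handler wiring assumed and not shown): because
# Video mixes in mixins.Downloadable, a handler can save the file with the
# usual helper, e.g.
#     await message.video.download()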
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin """ The Relay Virtual Machine. Implements a Python interface to compiling and executing on the Relay VM. """<import_stmt>numpy<as>np<import_stmt>tvm.runtime.ndarray<as>_nd<import_stmt>tvm.runtime.vm<as>vm_rt<import_from_stmt>tvm autotvm<import_from_stmt>tvm.relay expr<as>_expr<import_from_stmt>tvm.relay.backend.interpreter Executor<import_from_stmt>tvm.target Target<import_from_stmt>. _vm<def_stmt>compile mod target=<none> target_host=<none> params=<none><block_start>"""Compile the module to VM executable. A helper function for VMCompiler. Parameters ---------- mod : tvm.IRModule The Relay module to build. target : any multi-target like object, see Target.canon_multi_target For homogeneous compilation, the unique build target. For heterogeneous compilation, a dictionary or list of possible build targets. target_host : None, or any target-like object, see Target.canon_target Host compilation target, if target is device. When TVM compiles device specific program such as CUDA, we also need host(CPU) side code to interact with the driver to setup the dimensions and parameters correctly. target_host is used to specify the host side codegen target. By default, llvm is used if it is enabled, otherwise a stackvm intepreter is used. params : dict of str to NDArray Input parameters to the graph that do not change during inference time. Used for constant folding. Returns ------- exec : tvm.runtime.vm.Executable The VM executable that contains both library code and bytecode. """<line_sep>compiler=VMCompiler()<if_stmt>params<block_start>compiler.set_params(params)<block_end>compiler.lower(mod target target_host)<line_sep>compiler.codegen()<line_sep><return>compiler.get_exec()<block_end><class_stmt>VMCompiler(object)<block_start>"""Compiler that compiles Relay module to VM executable."""<def_stmt>__init__ self<block_start>self.mod=_vm._VMCompiler()<line_sep>self._lower=self.mod["lower"]<line_sep>self._codegen=self.mod["codegen"]<line_sep>self._get_exec=self.mod["get_executable"]<line_sep>self._set_params_func=self.mod["set_params"]<line_sep>self._get_params_func=self.mod["get_params"]<line_sep>self._optimize=self.mod["optimize"]<block_end><def_stmt>set_params self params<block_start>"""Set constant parameters for the model. Parameters ---------- params : dict of str to NDArray Input parameters to the graph that do not change during inference time. Used for constant folding. 
"""<line_sep>inputs={}<for_stmt>name,param params.items()<block_start><if_stmt>isinstance(param np.ndarray)<block_start>param=_nd.array(param)<block_end>inputs[name]=_expr.const(param)<block_end>self._set_params_func(inputs)<block_end><def_stmt>get_params self<block_start>"""Return the updated weights."""<line_sep>params=self._get_params_func()<line_sep>ret={}<for_stmt>key,value params.items()<block_start>ret[key]=value.data<block_end><return>ret<block_end><def_stmt>lower self mod target=<none> target_host=<none><block_start>"""Lower the module to VM bytecode. Parameters ---------- mod : tvm.IRModule The Relay module to build. target : any multi-target like object, see Target.canon_multi_target For homogeneous compilation, the unique build target. For heterogeneous compilation, a dictionary or list of possible build targets. target_host : any target-like object, see Target.canon_target Host compilation target, if target is device. """<line_sep>raw_targets=Target.canon_multi_target_and_host(target target_host)<line_sep>tophub_context=self._tophub_context(raw_targets)<with_stmt>tophub_context<block_start>self._lower(mod raw_targets)<block_end><block_end><def_stmt>codegen self<block_start>"""Generate the kernel library."""<line_sep>self._codegen()<block_end><def_stmt>optimize self mod target=<none> target_host=<none> params=<none><block_start>"""Helper method that optimizes a Relay module via VM. Parameters ---------- mod : tvm.IRModule target : any multi-target like object, see Target.canon_multi_target For homogeneous compilation, the unique build target. For heterogeneous compilation, a dictionary or list of possible build targets. target_host : any target-like object, see Target.canon_target Host compilation target, if target is device. params : dict of str to NDArray Input parameters to the graph that do not change during inference time. Used for constant folding. Returns ------- mod : tvm.IRModule The optimized relay module. params : dict The parameters of the final module. """<line_sep>raw_targets=Target.canon_multi_target_and_host(target target_host)<if_stmt>params<block_start>self.set_params(params)<block_end><return>self._optimize(mod raw_targets) self.get_params()<block_end><def_stmt>get_exec self<block_start>"""Get the VM executable. Returns ------- exec : tvm.runtime.vm.Executable The VM executable that contains both library code and bytecode. """<line_sep><return>vm_rt.Executable(self._get_exec())<block_end><def_stmt>_tophub_context self raw_targets<block_start>"""Get the autotvm context."""<line_sep># If current dispatch context is fallback context (the default root context), # then load pre-tuned parameters from TopHub <if_stmt>isinstance(autotvm.DispatchContext.current autotvm.FallbackContext)<block_start>tophub_context=autotvm.tophub.context(raw_targets)<block_end><else_stmt><block_start>tophub_context=autotvm.utils.EmptyContext()<block_end><return>tophub_context<block_end><block_end><class_stmt>VMExecutor(Executor)<block_start>""" An implementation of the executor interface for the Relay VM. Useful interface for experimentation and debugging the VM can also be used directly from the API. supported by `tvm.runtime.vm`. Parameters ---------- mod : :py:class:`~tvm.IRModule` The module to support the execution. device : :py:class:`~tvm.runtime.Device` The runtime device to run the code on. target : :py:class:`Target` The target option to build the function. 
"""<def_stmt>__init__ self mod device target<block_start><if_stmt>mod<is><none><block_start><raise>RuntimeError("Must provide module to get VM executor.")<block_end>self.mod=mod<line_sep>self.device=device<line_sep>self.target=target<line_sep>self.executable=<none><line_sep>self.vm=<none><block_end><def_stmt>_make_executor self expr=<none><block_start><if_stmt>expr<block_start>self.mod["main"]=expr<block_end>self.executable=compile(self.mod self.target)<line_sep>self.vm=vm_rt.VirtualMachine(self.executable self.device)<def_stmt>_vm_wrapper *args **kwargs<block_start>args=self._convert_args(self.mod["main"] args kwargs)<line_sep><return>self.vm.run(*args)<block_end><return>_vm_wrapper<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.snow_dates snow_dates<def_stmt>test_snow_dates <block_start>"""Test module snow_dates.py by downloading snow_dates.csv and testing shape of extracted data has 44 rows and 3 columns """<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=snow_dates(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(44 3)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise><block_end><block_end>
<import_from_stmt>monk.tf_keras_1.finetune.imports *<import_from_stmt>monk.system.imports *<import_from_stmt>monk.tf_keras_1.finetune.level_12_losses_main prototype_losses<class_stmt>prototype_updates(prototype_losses)<block_start>''' Main class for all parametric update functions Args: verbose (int): Set verbosity levels 0 - Print Nothing 1 - Print desired details '''<line_sep>@accepts("self" verbose=int post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>__init__ self verbose=1<block_start>super().__init__(verbose=verbose)<line_sep><block_end>########################################################################################################################################################## @warning_checks(<none> ["gte" 32 "lte" 1024] post_trace=<false>)@error_checks(<none> ["gt" 0] post_trace=<false>)@accepts("self" int post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_input_size self input_size<block_start>''' Update input size. Args: input_size (int): New input size Returns: None '''<line_sep>self.system_dict=set_input_size(input_size self.system_dict)<line_sep>self.custom_print("Update: Input size - {}".format(self.system_dict["dataset"]["params"]["input_size"]))<line_sep>self.custom_print("")<line_sep><block_end>@warning_checks(<none> ["lte" 128] post_trace=<false>)@error_checks(<none> ["gt" 0] post_trace=<false>)@accepts("self" int post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_batch_size self batch_size<block_start>''' Update batch size. Args: batch_size (int): New batch size Returns: None '''<line_sep>self.system_dict=set_batch_size(batch_size self.system_dict)<line_sep>self.custom_print("Update: Batch size - {}".format(self.system_dict["dataset"]["params"]["batch_size"]))<line_sep>self.custom_print("")<line_sep><block_end>@accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_shuffle_data self shuffle<block_start>''' Update to shuffle data or not. Args: shuffle (bool): If True, will shuffle data Returns: None '''<line_sep>self.system_dict=set_data_shuffle(shuffle self.system_dict)<line_sep>self.custom_print("Update: Data shuffle - {}".format(self.system_dict["dataset"]["params"]["train_shuffle"]))<line_sep>self.custom_print("")<line_sep><block_end>@warning_checks(<none> ["lte" psutil.cpu_count()] post_trace=<false>)@error_checks(<none> ["gt" 0] post_trace=<false>)@accepts("self" int post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_num_processors self num_processors<block_start>''' Update num processors for data loader. 
Args: num_processors (int): Max CPUs for data sampling Returns: None '''<line_sep>self.system_dict=set_num_processors(num_processors self.system_dict)<line_sep>self.custom_print("Update: Num processors - {}".format(self.system_dict["dataset"]["params"]["num_workers"]))<line_sep>self.custom_print("")<line_sep><block_end>@accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_weighted_sampling self sample<block_start>''' Function inactive '''<line_sep>self.system_dict=set_weighted_sampling(sample self.system_dict)<line_sep>self.custom_print("Update: Weighted Sampling - {}".format(self.system_dict["dataset"]["params"]["weighted_sample"]))<line_sep>self.custom_print("")<line_sep><block_end>@warning_checks(<none> ["gt" 0.5 "lt" 1] post_trace=<false>)@error_checks(<none> ["gt" 0 "lt" 1] post_trace=<false>)@accepts("self" float post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_trainval_split self value<block_start>''' Update training-validation split Args: split (float): Indicating train validation split Division happens as follows: train - total dataset * split * 100 val - total dataset * (1-split) * 100 Returns: None '''<if_stmt>(self.system_dict["dataset"]["dataset_type"]<eq>"train")<block_start>dataset_path=self.system_dict["dataset"]["train_path"]<line_sep>path_to_csv=<false><block_end><elif_stmt>(self.system_dict["dataset"]["dataset_type"]<eq>"train-val")<block_start>dataset_path=[self.system_dict["dataset"]["train_path"] self.system_dict["dataset"]["val_path"]]<line_sep>path_to_csv=<false><block_end><elif_stmt>(self.system_dict["dataset"]["dataset_type"]<eq>"csv_train")<block_start>dataset_path=self.system_dict["dataset"]["train_path"]<line_sep>path_to_csv=self.system_dict["dataset"]["csv_train"]<block_end><elif_stmt>(self.system_dict["dataset"]["dataset_type"]<eq>"csv_train-val")<block_start>dataset_path=[self.system_dict["dataset"]["train_path"] self.system_dict["dataset"]["val_path"]]<line_sep>path_to_csv=[self.system_dict["dataset"]["csv_train"] self.system_dict["dataset"]["csv_val"]]<block_end><else_stmt><block_start>msg="Dataset Type invalid.\n"<line_sep>msg<augadd>"Cannot update split"<line_sep>ConstraintsWarning(msg)<block_end>self.system_dict=set_dataset_train_path(self.system_dict dataset_path value path_to_csv self.system_dict["dataset"]["params"]["delimiter"])<line_sep><block_end>@warning_checks(<none> dataset_path=<none> split=["gt" 0.5 "lt" 1] path_to_csv=<none> delimiter=<none> post_trace=<false>)@error_checks(<none> dataset_path=["folder" 'r'] split=["gt" 0 "lt" 1] path_to_csv=["file" 'r'] delimiter=["in" ["," ";" "-" " "]] post_trace=<false>)@accepts("self" dataset_path=[str list] split=float path_to_csv=[str list bool] delimiter=str post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_dataset self dataset_path=<false> split=0.9 path_to_csv=<false> delimiter=","<block_start>''' Update dataset path Args: dataset_path (str, list): Path to Dataset folder 1) Single string if validation data does not exist 2) List [train_path, val_path] in case of separate train and val data path_to_csv (str, list): Path to csv file pointing towards images 1) Single string if validation data does not exist 2) List [train_path, val_path] in case of separate train and val data value (float): Indicating train validation split Division happens as follows: train - total dataset * split * 100 val - total dataset * (1-split) * 100 delimiter (str): Delimiter for csv file Returns: None 
'''<line_sep>self.system_dict=set_dataset_train_path(self.system_dict dataset_path split path_to_csv delimiter)<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" str force=bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_model_name self model_name force=<false><block_start>''' Update model name Args: model_name (str): Select from available models. Check via List_Models() function force (bool): Dummy function Returns: None '''<if_stmt>(<not>force)<block_start><if_stmt>(self.system_dict["training"]["status"])<block_start>ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]))<line_sep>ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n")<line_sep>inp=input("Do you wish to continue further (y/n):")<if_stmt>(inp<eq>"y")<block_start>self.system_dict=set_model_name(model_name self.system_dict)<line_sep>self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]))<line_sep>self.custom_print("")<block_end><else_stmt><block_start>self.custom_print("Model not updated.")<line_sep>self.custom_print("")<block_end><block_end><else_stmt><block_start>self.system_dict=set_model_name(model_name self.system_dict)<line_sep>self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]))<line_sep>self.custom_print("")<block_end><block_end><else_stmt><block_start>self.system_dict=set_model_name(model_name self.system_dict)<line_sep>self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]))<line_sep>self.custom_print("")<line_sep><block_end><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" [str list] force=bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_model_path self model_path force=<false><block_start>''' Update to use gpu or cpu Args: gpu (bool): If True, then use GPU Returns: None '''<if_stmt>(<not>force)<block_start><if_stmt>(self.system_dict["training"]["status"])<block_start>ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]))<line_sep>ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n")<line_sep>inp=input("Do you wish to continue further (y/n):")<if_stmt>(inp<eq>"y")<block_start>self.system_dict=set_model_path(model_path self.system_dict)<line_sep>self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]))<line_sep>self.custom_print("")<block_end><else_stmt><block_start>self.custom_print("Model not updated.")<line_sep>self.custom_print("")<block_end><block_end><else_stmt><block_start>self.system_dict=set_model_path(model_path self.system_dict)<line_sep>self.custom_print("Update: Model path - 
{}".format(self.system_dict["model"]["params"]["model_path"]))<line_sep>self.custom_print("")<block_end><block_end><else_stmt><block_start>self.system_dict=set_model_path(model_path self.system_dict)<line_sep>self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]))<line_sep>self.custom_print("")<line_sep><block_end><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_use_gpu self gpu<block_start>''' Update to use gpu or cpu Args: gpu (bool): If True, then use GPU Returns: None '''<line_sep>self.system_dict=set_device(gpu self.system_dict)<line_sep>self.custom_print("Update: Use Gpu - {}".format(self.system_dict["model"]["params"]["use_gpu"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_use_pretrained self pretrained<block_start>''' Update to use pretrained wights or randomly initialized weights Args: pretrained (bool): If True, use pretrained weights else, use randomly initialized weights Returns: None '''<line_sep>self.system_dict=set_pretrained(pretrained self.system_dict)<line_sep>self.custom_print("Update: Use pretrained - {}".format(self.system_dict["model"]["params"]["use_pretrained"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_freeze_base_network self freeze<block_start>''' Update whether freeze base network or not Args: freeze (bool): If True, then base network is non-trainable, works as a feature extractor Returns: None '''<line_sep>self.system_dict=set_freeze_base_network(freeze self.system_dict)<line_sep>self.custom_print("Update: Freeze Base Network - {}".format(self.system_dict["model"]["params"]["freeze_base_network"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @error_checks(<none> ["gte" 0] post_trace=<false>)@accepts("self" int post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_freeze_layers self num_freeze<block_start>''' Update to freeze certain layers in the network Args: num_freeze (int): Number of layers to freeze in network starting from top Returns: None 
'''<line_sep>self.system_dict["model"]["params"]["num_freeze"]=num_freeze<line_sep>self.custom_print("Update: Freeze layers - {}".format(self.system_dict["model"]["params"]["num_freeze"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @warning_checks(<none> ["lt" 100] post_trace=<false>)@error_checks(<none> ["gt" 0] post_trace=<false>)@accepts("self" int post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_num_epochs self num_epochs<block_start>''' Update number of epochs to train the network Args: num_epochs (int): New number of epochs Returns: None '''<line_sep>self.system_dict=set_num_epochs(num_epochs self.system_dict)<line_sep>self.custom_print("Update: Num Epochs - {}".format(self.system_dict["hyper-parameters"]["num_epochs"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @warning_checks(<none> ["lt" 1] post_trace=<false>)@error_checks(<none> ["gt" 0] post_trace=<false>)@accepts("self" [int float] post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_learning_rate self learning_rate<block_start>''' Update base learning rate for training Args: learning_rate (float): New base learning rate Returns: None '''<line_sep>self.system_dict["hyper-parameters"]["learning_rate"]=learning_rate<line_sep>self.system_dict["hyper-parameters"]["optimizer"]["params"]["lr"]=learning_rate<line_sep>self.custom_print("Update: Learning Rate - {}".format(self.system_dict["hyper-parameters"]["learning_rate"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_display_progress_realtime self value<block_start>''' Update display progress param Args: value (bool): If True, then real time progress is displayed Returns: None '''<line_sep>self.system_dict=set_display_progress_realtime(value self.system_dict)<line_sep>self.custom_print("Update: Display progress realtime - {}".format(self.system_dict["training"]["settings"]["display_progress_realtime"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_display_progress self value<block_start>''' Update display progress param Args: value (bool): If True, then per epoch 
progress is displayed Returns: None '''<line_sep>self.system_dict=set_display_progress(value self.system_dict)<line_sep>self.custom_print("Update: Display progress - {}".format(self.system_dict["training"]["settings"]["display_progress"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @error_checks(<none> <none> prefix=["name" ["A-Z" "a-z" "0-9" "-" "_"]] post_trace=<false>)@accepts("self" bool prefix=str post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_save_intermediate_models self value prefix="intermediate_model_"<block_start>''' Update whether to save intermediate models or not Args: value (bool): If True, saves model weight post every epoch prefix (str): Appends a prefix to intermediate weights Returns: None '''<if_stmt>(value)<block_start><if_stmt>(<not>os.access(self.system_dict["model_dir"] os.W_OK))<block_start>msg="Folder \"{}\" has no read access".format(self.system_dict["model_dir"])<line_sep>msg<augadd>"Cannot save Intermediate models"<line_sep><raise>ConstraintError(msg)<line_sep><block_end><block_end>self.system_dict=set_save_intermediate_models(value self.system_dict)<line_sep>self.system_dict=set_intermediate_model_prefix(prefix self.system_dict)<line_sep>self.custom_print("Update: Save Intermediate models - {}".format(self.system_dict["training"]["settings"]["save_intermediate_models"]))<if_stmt>(self.system_dict["training"]["settings"]["save_intermediate_models"])<block_start>self.custom_print("Update: Intermediate model prefix - {}".format(self.system_dict["training"]["settings"]["intermediate_model_prefix"]))<line_sep><block_end>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## ########################################################################################################################################################## @accepts("self" bool post_trace=<false>)#@TraceFunction(trace_args=True, trace_rv=True) <def_stmt>update_save_training_logs self value<block_start>''' Update whether to save training logs or not Args: value (bool): If True, saves all training and validation metrics. Required for comparison. Returns: None '''<line_sep>self.system_dict=set_save_training_logs(value self.system_dict)<line_sep>self.custom_print("Update: Save Training logs - {}".format(self.system_dict["training"]["settings"]["save_training_logs"]))<line_sep>self.custom_print("")<line_sep><block_end>########################################################################################################################################################## <block_end>
#coding:utf-8 <import_from_stmt>flask render_template redirect request url_for current_app flash Markup<import_from_stmt>flask_babelex lazy_gettext<as>_<import_from_stmt>flask_login current_user<import_from_stmt>datetime datetime timedelta date<import_from_stmt>. report<import_from_stmt>.forms WriteForm ReadDepartmentForm ReadCrewForm EmailReminderForm<import_from_stmt>.. db<import_from_stmt>..email send_email<import_from_stmt>..models Permission User Report Department<import_from_stmt>..utils get_week_count permission_required get_this_monday get_last_week get_last_week_start_at get_last_week_end_at get_last_week_content<line_sep>@report.route('/write/' methods=['GET' 'POST'])@permission_required(Permission.WRITE_REPORT)<def_stmt>write <block_start>form=WriteForm()<line_sep>last_content_display=""<line_sep>report=Report.query.filter_by(author_id=current_user.id week_count=get_week_count() year=datetime.today().year).first()<line_sep>last_report=Report.query.filter_by(author_id=current_user.id week_count=get_week_count()-1 year=datetime.today().year).first()<if_stmt>form.submit.data<and>form.validate_on_submit()<block_start><if_stmt>report<block_start>report.content=form.body.data.replace('<br>' '')<line_sep>report.last_content=form.last_content.data.replace('<br>' '')<line_sep>db.session.add(report)<block_end><else_stmt><block_start>report=Report(content=form.body.data.replace('<br>' '') last_content=form.last_content.data.replace('<br>' '') author_id=current_user.id week_count=get_week_count() year=datetime.today().year)<line_sep>db.session.add(report)<block_end>db.session.commit()<line_sep>flash(_('Successfully submitted report'))<line_sep>current_app.logger.info('{} submitted report'.format(current_user.email))<line_sep><return>redirect(url_for('report.write'))<block_end><if_stmt>report<block_start>form.body.data=report.content<block_end><else_stmt><block_start>form.body.data=current_app.config['DEFAULT_CONTENT']<block_end><if_stmt>last_report<block_start>form.last_content.data=last_report.content<line_sep>last_content_display=get_last_week_content(last_report.content)<block_end><return>render_template('report/write.html' form=form week_count=get_week_count() start_at=get_this_monday() end_at=get_this_monday()+timedelta(days=6) last_content_display=last_content_display)<block_end>@report.route('/write/last_week' methods=['GET' 'POST'])@permission_required(Permission.WRITE_REPORT)<def_stmt>write_last_week <block_start>form=WriteForm()<line_sep>last_content_display=""<line_sep>report=Report.query.filter_by(author_id=current_user.id week_count=get_week_count(get_last_week()) year=get_last_week().year).first()<line_sep>last_report=Report.query.filter_by(author_id=current_user.id week_count=get_week_count(get_last_week())-1 year=get_last_week().year).first()<if_stmt>form.submit.data<and>form.validate_on_submit()<block_start><if_stmt>report<block_start>report.content=form.body.data.replace('<br>' '')<line_sep>report.last_content=form.last_content.data.replace('<br>' '')<block_end><else_stmt><block_start>report=Report(content=form.body.data.replace('<br>' '') author_id=current_user.id week_count=get_week_count(get_last_week()) year=get_last_week().year)<block_end>db.session.add(report)<line_sep>db.session.commit()<line_sep>flash(_('Successfully submitted report'))<line_sep>current_app.logger.info("{} edited last week's 
report".format(current_user.email))<line_sep><return>redirect(url_for('report.write_last_week'))<block_end><if_stmt>report<block_start>form.body.data=report.content<block_end><else_stmt><block_start>form.body.data=current_app.config['DEFAULT_CONTENT']<block_end><if_stmt>last_report<block_start>form.last_content.data=last_report.content<line_sep>last_content_display=get_last_week_content(last_report.content)<block_end><return>render_template('report/write.html' form=form week_count=get_week_count(get_last_week()) start_at=get_last_week_start_at() end_at=get_last_week_end_at()-timedelta(days=1) last_content_display=last_content_display)<block_end>@report.route('/read/' methods=['GET'])@report.route('/read/<int:page_count>' methods=['GET'])@permission_required(Permission.WRITE_REPORT)<def_stmt>read page_count=1<block_start><if_stmt><not>Report.query.filter_by(author_id=current_user.id week_count=get_week_count(get_last_week()) year=get_last_week().year).first()<block_start>flash(Markup(_("Do you want to <a href='/report/write/last_week'>"<concat>"edit last week's report?</a>")))<block_end>pagination=Report.query.filter_by(author_id=current_user.id).order_by(Report.year.desc()).order_by(Report.week_count.desc()).paginate(page=page_count per_page=current_app.config['PER_PAGE'])<if_stmt><not>Report.query.filter_by(author_id=current_user.id week_count=get_week_count() year=datetime.today().year)<block_start>flash(_("You haven't submitted your weekly report"))<block_end><return>render_template('report/read.html' pagination=pagination)<block_end>@report.route('/read/department/' methods=['GET' 'POST'])@permission_required(Permission.READ_DEPARTMENT_REPORT)<def_stmt>read_department <block_start>form=ReadDepartmentForm()<line_sep>user_choices=[('0' '*')]<line_sep>user_choices.extend([(str(user.id) user.username)<for>user User.query.all()])<line_sep>form.user.choices=user_choices<line_sep>page=request.args.get('page' 1 type=int)<line_sep>user_id=request.args.get('user' 0 type=int)<line_sep>start_at=request.args.get('start_at' '' type=str)<line_sep>end_at=request.args.get('end_at' '' type=str)<line_sep>start_at=get_last_week_start_at()<if><not>start_at<else>datetime.strptime(start_at[:10] '%Y-%m-%d')<line_sep>end_at=date.today()+timedelta(hours=24)<if><not>end_at<else>datetime.strptime(end_at[:10] '%Y-%m-%d')<line_sep>form.start_at.data=start_at<line_sep>form.end_at.data=end_at<line_sep>form.user.data=str(user_id)<line_sep>ids=[user.id<for>user User.query.filter_by(department_id=current_user.department_id)]<line_sep>qst=Report.query.filter_by().filter(Report.created_at.between(start_at end_at)).filter(Report.author_id.in_(ids))<if_stmt>user_id<block_start>qst=qst.filter_by(author_id=user_id)<block_end><if_stmt>form.validate_on_submit()<block_start><pass><block_end>pagination=qst.filter_by().order_by(Report.year.desc()).order_by(Report.week_count.desc()).order_by(Report.created_at.desc()).paginate(page=page per_page=current_app.config['PER_PAGE'])<line_sep><return>render_template('report/read_department.html' form=form pagination=pagination)<block_end>@report.route('/read/crew/' methods=['GET' 'POST'])@permission_required(Permission.READ_ALL_REPORT)<def_stmt>read_crew <block_start>form=ReadCrewForm()<line_sep>user_choices=[('0' '*')]<line_sep>department_choices=user_choices[:]<for_stmt>dept Department.query.all()<block_start>department_choices.extend([(str(dept.id) dept.name)])<line_sep>user_choices.extend([(str(user.id) user.username)<for>user 
User.query.filter_by(department_id=dept.id)])<block_end>form.user.choices=user_choices<line_sep>form.department.choices=department_choices<line_sep>page=request.args.get('page' 1 type=int)<line_sep>department_id=request.args.get('department' 0 type=int)<line_sep>user_id=request.args.get('user' 0 type=int)<line_sep>start_at=request.args.get('start_at' '' type=str)<line_sep>end_at=request.args.get('end_at' '' type=str)<line_sep>start_at=get_last_week_start_at()<if><not>start_at<else>datetime.strptime(start_at[:10] '%Y-%m-%d')<line_sep>end_at=date.today()+timedelta(hours=24)<if><not>end_at<else>datetime.strptime(end_at[:10] '%Y-%m-%d')<line_sep>form.start_at.data=start_at<line_sep>form.end_at.data=end_at<line_sep>form.user.data=str(user_id)<line_sep>form.department.data=str(department_id)<line_sep>qst=Report.query.filter_by().filter(Report.created_at.between(start_at end_at))<if_stmt>department_id<block_start>ids=[user.id<for>user User.query.filter_by(department_id=department_id)]<line_sep>qst=qst.filter(Report.author_id.in_(ids))<block_end><if_stmt>user_id<block_start>qst=qst.filter_by(author_id=user_id)<block_end><if_stmt>form.validate_on_submit()<block_start><pass><block_end>pagination=qst.filter_by().order_by(Report.year.desc()).order_by(Report.week_count.desc()).order_by(Report.created_at.desc()).paginate(page=page per_page=current_app.config['PER_PAGE'])<line_sep><return>render_template('report/read_crew.html' form=form pagination=pagination)<block_end>@report.route('/statistics/department/' methods=['GET'])@permission_required(Permission.READ_DEPARTMENT_REPORT)<def_stmt>statistics_department <block_start>qst=Report.query.filter_by()<line_sep>dept_users=[user<for>user User.query.filter_by(department_id=current_user.department_id)<if><not>user.is_ignored]<line_sep>ids=[user.id<for>user dept_users]<if_stmt>ids<block_start>qst=qst.filter(Report.author_id.in_(ids))<block_end><else_stmt><block_start>qst=qst.filter(<false>)<block_end>submitted_users=[report.author<for>report qst.filter_by(week_count=get_week_count() year=datetime.today().year)]<line_sep>unsubmitted_users=set(dept_users)-set(submitted_users)<line_sep>data={'已交':len(submitted_users) '未交':len(unsubmitted_users)}<line_sep>names={'has_submitted':[user.username<for>user submitted_users] 'not_yet':[user.username<for>user unsubmitted_users]}<line_sep><return>render_template('report/statistics_department.html' data=data names=names week_count=get_week_count() start_at=get_this_monday() end_at=get_this_monday()+timedelta(days=6))<block_end>@report.route('/statistics/department/last_week' methods=['GET'])@permission_required(Permission.READ_DEPARTMENT_REPORT)<def_stmt>statistics_department_last_week <block_start>qst=Report.query.filter_by()<line_sep>dept_users=[user<for>user User.query.filter_by(department_id=current_user.department_id)<if><not>user.is_ignored]<line_sep>ids=[user.id<for>user dept_users]<if_stmt>ids<block_start>qst=qst.filter(Report.author_id.in_(ids))<block_end><else_stmt><block_start>qst=qst.filter(<false>)<block_end>submitted_users=[report.author<for>report qst.filter_by(week_count=get_week_count(get_last_week()) year=get_last_week().year)]<line_sep>unsubmitted_users=set(dept_users)-set(submitted_users)<line_sep>data={'已交':len(submitted_users) '未交':len(unsubmitted_users)}<line_sep>names={'has_submitted':[user.username<for>user submitted_users] 'not_yet':[user.username<for>user unsubmitted_users]}<line_sep><return>render_template('report/statistics_department.html' data=data names=names 
week_count=get_week_count(get_last_week()) start_at=get_last_week_start_at() end_at=get_last_week_end_at()-timedelta(days=1))<block_end>@report.route('/statistics/crew/' methods=['GET' 'POST'])@permission_required(Permission.READ_ALL_REPORT)<def_stmt>statistics_crew <block_start>stash=[]<line_sep>contrast={}<line_sep>reminder_emails=set()<line_sep>form=EmailReminderForm()<for_stmt>dept Department.query.filter_by()<block_start>qst=Report.query.filter_by()<line_sep>dept_users=[user<for>user User.query.filter_by(department_id=dept.id)<if><not>user.is_ignored]<line_sep>ids=[user.id<for>user dept_users]<if_stmt>ids<block_start>qst=qst.filter(Report.author_id.in_(ids))<block_end><else_stmt><block_start>qst=qst.filter(<false>)<block_end>submitted_users=[report.author<for>report qst.filter_by(week_count=get_week_count() year=datetime.today().year)]<line_sep>unsubmitted_users=set(dept_users)-set(submitted_users)<line_sep>reminder_emails<augor>set([user.email<for>user unsubmitted_users])<line_sep>names={'has_submitted':[user.username<for>user submitted_users] 'not_yet':[user.username<for>user unsubmitted_users]}<line_sep>stash.append({'names':names 'dept_name':dept.name})<line_sep>contrast[dept.name]=len(dept_users)-len(submitted_users)<block_end><if_stmt>form.validate_on_submit()<block_start>subject='Reminder of Report of week'+str(get_week_count())+' From:'+str(get_this_monday())+' To:'+str(get_this_monday()+timedelta(days=6))<line_sep>send_email(reminder_emails subject 'email/reminder' user=current_user week_count=get_week_count() start_at=get_this_monday() end_at=get_this_monday()+timedelta(days=6))<line_sep>flash(_('Email has been sent to:')+'\n{}'.format(reminder_emails))<block_end><return>render_template('report/statistics_crew.html' contrast=contrast stash=stash week_count=get_week_count() form=form start_at=get_this_monday() end_at=get_this_monday()+timedelta(days=6))<block_end>@report.route('/statistics/crew/last_week' methods=['GET' 'POST'])@permission_required(Permission.READ_ALL_REPORT)<def_stmt>statistics_crew_last_week <block_start>stash=[]<line_sep>contrast={}<line_sep>reminder_emails=set()<line_sep>form=EmailReminderForm()<for_stmt>dept Department.query.filter_by()<block_start>qst=Report.query.filter_by()<line_sep>dept_users=[user<for>user User.query.filter_by(department_id=dept.id)<if><not>user.is_ignored]<line_sep>ids=[user.id<for>user dept_users]<if_stmt>ids<block_start>qst=qst.filter(Report.author_id.in_(ids))<block_end><else_stmt><block_start>qst=qst.filter(<false>)<block_end>submitted_users=[report.author<for>report qst.filter_by(week_count=get_week_count(get_last_week()) year=get_last_week().year)]<line_sep>unsubmitted_users=set(dept_users)-set(submitted_users)<line_sep>reminder_emails<augor>set([user.email<for>user unsubmitted_users])<line_sep>names={'has_submitted':[user.username<for>user submitted_users] 'not_yet':[user.username<for>user unsubmitted_users]}<line_sep>stash.append({'names':names 'dept_name':dept.name})<line_sep>contrast[dept.name]=len(dept_users)-len(submitted_users)<block_end><if_stmt>form.validate_on_submit()<block_start>subject='Reminder of Report of week'+str(get_week_count(get_last_week()))+' From:'+str(get_last_week_start_at())+' To:'+str(get_last_week_end_at()-timedelta(days=1))<line_sep>send_email(reminder_emails subject 'email/reminder' user=current_user week_count=get_week_count(get_last_week()) start_at=get_last_week_start_at() end_at=get_last_week_end_at()-timedelta(days=1))<line_sep>flash(_('Email has been sent 
to:')+'\n{}'.format(reminder_emails))<block_end><return>render_template('report/statistics_crew.html' contrast=contrast stash=stash form=form week_count=get_week_count(get_last_week()) start_at=get_last_week_start_at() end_at=get_last_week_end_at()-timedelta(days=1))<block_end>
# -*- coding: utf-8 -*- # Copyright 2015-2019 grafana-dashboard-builder contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> unicode_literals<import_stmt>base64<import_stmt>json<import_stmt>logging<try_stmt><block_start><import_from_stmt>cookielib CookieJar<block_end><except_stmt>ImportError<block_start><import_from_stmt>http.cookiejar CookieJar<block_end><try_stmt><block_start><import_from_stmt>urllib2 build_opener HTTPHandler HTTPSHandler HTTPCookieProcessor HTTPDefaultErrorHandler Request BaseHandler<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib.request build_opener HTTPHandler HTTPSHandler HTTPCookieProcessor HTTPDefaultErrorHandler Request BaseHandler<block_end><try_stmt><block_start><import_from_stmt>urlparse urlparse<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib.parse urlparse<block_end><import_stmt>requests<import_from_stmt>requests_kerberos HTTPKerberosAuth<line_sep>__author__='<NAME> <<EMAIL>>'<line_sep>logger=logging.getLogger(__name__)<class_stmt>BaseConnection(object)<block_start>_headers={'Content-type':'application/json' 'Accept':'application/json'}<def_stmt>__init__ self host auth_header debug=0<block_start>self._host=host<line_sep>self._headers['Authorization']=auth_header<line_sep>self._opener=build_opener(HTTPHandler(debuglevel=debug) HTTPSHandler(debuglevel=debug) HTTPCookieProcessor(CookieJar()) LoggingHandler() HTTPDefaultErrorHandler())<block_end><def_stmt>make_request self uri body=<none><block_start>request=Request('{0}{1}'.format(self._host uri) json.dumps(body).encode('utf-8')<if>body<else><none> headers=self._headers)<line_sep>response_body=self._opener.open(request).read()<line_sep><return>{}<if>(response_body<is><none><or>response_body<eq>'')<else>json.loads(response_body)<block_end><block_end><class_stmt>BasicAuthConnection(BaseConnection)<block_start><def_stmt>__init__ self username password host debug=0<block_start>logger.debug('Creating new connection with username=%s host=%s' username host)<line_sep>base64string=base64.encodestring(('%s:%s'%(username password)).encode('utf-8')).replace(b'\n' b'')<line_sep>super(BasicAuthConnection self).__init__(host b'Basic '+base64string debug)<block_end><block_end><class_stmt>BearerAuthConnection(BaseConnection)<block_start><def_stmt>__init__ self token host debug=0<block_start>logger.debug('Creating new connection with token=%s host=%s' token[:5] host)<line_sep>super(BearerAuthConnection self).__init__(host 'Bearer %s'%token.strip() debug)<block_end><block_end><class_stmt>LoggingHandler(BaseHandler)<block_start><def_stmt>__init__ self<block_start><pass><block_end># noinspection PyMethodMayBeStatic <def_stmt>http_request self request<block_start>path=urlparse(request.get_full_url()).path<line_sep>logger.debug('Sending request: method=%s uri=%s' request.get_method() path)<line_sep><return>request<block_end># noinspection PyMethodMayBeStatic,PyUnusedLocal <def_stmt>http_response self request response<block_start>logger.debug('Response received: 
status=%s msg=%s' response.getcode() response.msg)<line_sep><return>response<block_end>https_request=http_request<line_sep>https_response=http_response<block_end><class_stmt>KerberosConnection(object)<block_start><def_stmt>__init__ self host<block_start>logger.debug('Creating new kerberos connection with host=%s' host)<line_sep>self._host=host<block_end><def_stmt>make_request self uri body=<none><block_start>response=requests.post('{0}{1}'.format(self._host uri) json=body auth=HTTPKerberosAuth() verify=<false>)<line_sep><return>response.json()<block_end><block_end>
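# A minimal usage sketch, not part of the original module: the token, host URL and URI below are placeholder assumptions, meant only to show how the connection classes above are typically driven. <def_stmt>_example_connection_usage <block_start>connection=BearerAuthConnection('example-token' 'http://localhost:3000')<line_sep>home=connection.make_request('/api/dashboards/home')<line_sep>logger.debug('Fetched dashboard: %s' home)<line_sep><return>home<block_end>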
# -*- coding: utf-8 -*- """ walle-web :copyright: © 2015-2019 walle-web.io :created time: 2018-11-26 16:06:44 :author: <EMAIL> """<import_from_stmt>datetime datetime<import_from_stmt>sqlalchemy String Integer DateTime<import_from_stmt>walle.model.database SurrogatePK<import_from_stmt>walle.model.database db Model<import_from_stmt>walle.model.user UserModel<import_from_stmt>walle.service.extensions permission<import_from_stmt>walle.service.rbac.role *<class_stmt>RoleModel(object)<block_start>_role_super='SUPER'<line_sep>_role_owner='OWNER'<line_sep>_role_master='MASTER'<line_sep>_role_developer='DEVELOPER'<line_sep>_role_reporter='REPORTER'<line_sep>@classmethod<def_stmt>list cls<block_start>roles=[{'id':cls._role_super 'name':'超级管理员'} {'id':cls._role_owner 'name':'空间所有者'} {'id':cls._role_master 'name':'项目管理员'} {'id':cls._role_developer 'name':'开发者'} {'id':cls._role_reporter 'name':'访客'} ]<line_sep><return>roles len(roles)<block_end>@classmethod<def_stmt>item cls role_id<block_start><return><none><block_end>@classmethod<def_stmt>menu_url cls url<block_start><if_stmt>url<eq>'/'<block_start><return>url<block_end>prefix='admin'<if>current_user.role<eq>SUPER<else>session['space_info']['name']<line_sep><return>'/'+prefix+url<block_end><block_end>
# Youtube Trending Feed Reader # Written by XZANATOL <import_from_stmt>optparse OptionParser<import_from_stmt>pymongo MongoClient<import_stmt>pandas<as>pd<import_stmt>sys<line_sep># Help menu usage=""" <Script> [Options] [Options] -h, --help Shows this help message and exit -c, --csv Reads data from "Youtube.csv" file -m, --mongo Reads data from MongoDB """<line_sep># Load args parser=OptionParser()<line_sep>parser.add_option("-c" "--csv" action="store_true" dest="csv" help="Reads data from the Youtube.csv file.")<line_sep>parser.add_option("-m" "--mongo" action="store_true" dest="mongo" help="Reads data from MongoDB.")<def_stmt>read_mongo # Connect to service <block_start>client=MongoClient("127.0.0.1")<line_sep># Select the Youtube trending collection db=client.Youtube.trending<line_sep><return>db.find()<block_end># Return all values <def_stmt>read_csv # read the database <block_start>df=pd.read_csv("Youtube.csv")<line_sep>data=[]<for_stmt>index,row df.iterrows()<block_start>data.append(row)# Append each row to the list <block_end><return>data<block_end># Return all values <def_stmt>display data<block_start>i=0<for_stmt>card data# For every 10 cards, ask before printing the next section <block_start><if_stmt>i%10<eq>0<block_start>c=input("Show Section? [y/n] > ")<if_stmt>c.lower()<eq>"y"<block_start>print("***********************************")<line_sep>print(f"""{card["section"]} section""")<line_sep>print("***********************************")<block_end><else_stmt><block_start>sys.exit()# Exit if the user has had enough of reading <block_end><block_end>i<augadd>1# Increment print("Title:" card["title"])<line_sep>print("Link:" card["link"])<line_sep>print("Channel:" card["channel"])<line_sep>print("Views:" card["views"])<line_sep>print("Time:" card["date"])<line_sep>print("==============================================")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>(options args)=parser.parse_args()<line_sep># Flags csv=options.csv<line_sep>mongo=options.mongo<line_sep># Validate flags <if_stmt><not>(bool(csv)^bool(mongo))# XNOR Gate <block_start>print(usage)<line_sep>sys.exit()<block_end><if_stmt>mongo<block_start>data=read_mongo()<block_end><else_stmt><block_start>data=read_csv()<block_end>display(data)<block_end>
<import_from_stmt>django.contrib.postgres.fields ArrayField<import_from_stmt>django.db migrations models<def_stmt>convert_reimbursement_numbers_to_array apps schema_editor<block_start>Reimbursement=apps.get_model("chamber_of_deputies" "Reimbursement")<for_stmt>record Reimbursement.objects.all()<block_start>record.numbers=record.reimbursement_numbers.split(", ")<line_sep>record.save()<block_end><block_end><def_stmt>convert_reimbursement_numbers_to_array_rollback apps schema_editor<block_start>Reimbursement=apps.get_model("chamber_of_deputies" "Reimbursement")<for_stmt>record Reimbursement.objects.all()<block_start>record.reimbursement_numbers=", ".join(record.numbers)<line_sep>record.save()<block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("chamber_of_deputies" "0003_remove_available_in_latest_dataset_field")]<line_sep>operations=[migrations.AlterField(model_name="reimbursement" name="document_id" field=models.IntegerField(db_index=<true>) ) migrations.AlterField(model_name="reimbursement" name="supplier" field=models.CharField(max_length=256) ) migrations.AlterField(model_name="reimbursement" name="issue_date" field=models.DateField(null=<true>) ) migrations.RenameField(model_name="reimbursement" old_name="total_reimbursement_value" new_name="total_value" ) migrations.RenameField(model_name="reimbursement" old_name="subquota_id" new_name="subquota_number" ) migrations.AddField(model_name="reimbursement" name="numbers" field=ArrayField(models.CharField(max_length=128) default=list) ) migrations.RunPython(convert_reimbursement_numbers_to_array convert_reimbursement_numbers_to_array_rollback ) migrations.RemoveField(model_name="reimbursement" name="net_values") migrations.RemoveField(model_name="reimbursement" name="reimbursement_numbers") migrations.RemoveField(model_name="reimbursement" name="reimbursement_values") ]<block_end>
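# A small illustrative sketch, not part of the migration: the reimbursement numbers are made-up sample values, shown only to spell out the string-to-array conversion performed by the forward data migration above and the join used by its rollback. <def_stmt>_example_conversion <block_start>reimbursement_numbers="5917, 5930"<line_sep>numbers=reimbursement_numbers.split(", ")<line_sep># forward migration stores ['5917', '5930'] restored=", ".join(numbers)<line_sep># the rollback re-joins them to "5917, 5930" <return>numbers restored<block_end>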
error_map={0:<none> -1:"连接服务失败" -2:"链路认证失败" -3:"主机地址不可用" -4:"发送数据错误" -5:"测试编号不合法" -6:"没准备好测试网络" -7:"当前网络测试还没结束" -8:"没用可用的接入前置" -9:"数据路径不可用" -10:"重复登录" -11:"内部错误" -12:"上一次请求还没有结束" -13:"输入参数非法" -14:"授权码不合法" -15:"授权码超期" -16:"授权码类型不匹配" -17:"API还没有准备好" -18:"UDP端口监听失败" -19:"UDP正在监听" -20:"接口未实现" -21:"每次登陆只允许调用一次" -22:"超过下单频率。" -10000:"输入数据为NULL" -10001:"输入错误的:TAPIYNFLAG" -10002:"输入错误的:TAPILOGLEVEL" -10003:"输入错误的:TAPICommodityType" -10004:"输入错误的:TAPICallOrPutFlagType" -12001:"输入错误的:TAPIAccountType" -12003:"输入错误的:TAPIAccountState" -12004:"输入错误的:TAPIAccountFamilyType" -12005:"输入错误的:TAPIOrderTypeType" -12006:"输入错误的:TAPIOrderSourceType" -12007:"输入错误的:TAPITimeInForceType" -12008:"输入错误的:TAPISideType" -12009:"输入错误的:TAPIPositionEffectType" -12010:"输入错误的:TAPIHedgeFlagType" -12011:"输入错误的:TAPIOrderStateType" -12012:"输入错误的:TAPICalculateModeType" -12013:"输入错误的:TAPIMatchSourceType" -12014:"输入错误的:TAPIOpenCloseModeType" -12015:"输入错误的:TAPIFutureAlgType" -12016:"输入错误的:TAPIOptionAlgType" -12017:"输入错误的:TAPIBankAccountLWFlagType" -12021:"输入错误的:TAPIMarginCalculateModeType" -12022:"输入错误的:TAPIOptionMarginCalculateModeType" -12023:"输入错误的:TAPICmbDirectType" -12024:"输入错误的:TAPIDeliveryModeType" -12025:"输入错误的:TAPIContractTypeType" -12035:"输入错误的:TAPITacticsTypeType" -12036:"输入错误的:TAPIORDERACT" -12041:"输入错误的:TAPITriggerConditionType" -12042:"输入错误的:TAPITriggerPriceTypeType" -12043:"输入错误的:TAPITradingStateType" -12044:"输入错误的:TAPIMarketLevelType" -12045:"输入错误的:TAPIOrderQryTypeType" 1:"主动断开" 2:"被动断开" 3:"读错误" 4:"写错误" 5:"缓冲区满" 6:"异步操作错误" 7:"解析数据错误" 8:"连接超时" 9:"初始化失败" 10:"已经连接" 11:"工作线程已结束" 12:"操作正在进行,请稍后重试" 13:"心跳检测失败" 10001:"登录过程执行错误" 10002:"登录用户不存在" 10003:"需要进行动态认证" 10004:"登录用户未授权" 10005:"登录模块不正确" 10006:"需要强制修改密码" 10007:"登录状态禁止登陆" 10008:"登录密码不正确" 10009:"没有该模块登录权限" 10010:"登录数量超限" 10011:"登录用户不在服务器标识下可登录用户列表中" 10012:"登录用户已被冻结" 10013:"密码错误,用户冻结" 10014:"客户状态不允许登录" 10015:"需要进行二次认证" 10016:<none> 10017:<none> 10018:"登录用户密码超过有效天数" 10101:"登录用户信息查询失败" 11001:"数据库操作失败" 11501:"登录用户下属所有资金账号查询失败" 11701:"登录用户密码修改失败" 11702:"登录用户密码修改失败——原始密码错误" 11703:"登录用户密码修改失败——不能与前n次密码相同" 11704:"新密码不符合密码复杂度要求" 20201:"资金账号信息查询失败" 20701:"客户交易编码查询失败" 22801:"合约信息查询失败" 22901:"特殊期权标的查询失败" 25501:"品种委托类型查询失败" 25601:"品种委托时间有效性查询失败" 28901:"用户下单频率查询失败" 60001:"资金账号不存在" 60002:"资金账号状态不正确" 60003:"资金账号交易中心不一致" 60004:"资金账号无期权交易权限" 60005:"资金账号无品种交易权限" 60006:"资金账号无开仓权限" 60007:"资金账号风控项检查失败" 60011:"下单无效的合约" 60021:"客户权限禁止交易" 60022:"客户品种分组禁止交易" 60023:"客户合约特设禁止交易" 60024:"系统权限禁止交易" 60031:"持仓量超过最大限制" 60032:"下单超过单笔最大量" 60033:"下单合约无交易路由" 60034:"委托价格超出偏离范围" 60035:"超过GiveUp最大持仓量" 60036:"下单自动审核失败" 60037:"LME未准备就绪" 60038:"平仓方式错误" 60039:"下单对应的父账号资金不足" 60040:"互换单的合约格式错误" 60051:"下单资金不足" 60052:"手续费参数错误" 60053:"保证金参数错误" 60061:"撤单无此系统号" 60062:"此状态不允许撤单" 60063:"录单不允许撤单" 60071:"此状态不允许改单" 60072:"人工单不允许改单" 60081:"已删除报单不能转移" 60082:"人工单不允许改单" 60091:"录单重复" 60092:"保证金参数错误" 60100:"操作账号只可查询" 60101:"合约行情价格修改失败" 60102:"即使子帐号又是做市商不能应价" 60103:"下单找不到交易编码" 60104:"操作账号只可开仓" 60105:"操作账号没有上期挂单查询权限" 60106:"限期有效单不能小于当前交易日" 60107:"该编码不允许申请或拆分组合" 60108:"非本服务器标记下的账号不允许操作" 60109:"行权或弃权量超过可用量" 60110:"没有订单审核权限" 60111:"下单超过上手单笔最大量" 60115:"非大连应价单不允许两笔委托量不一致" 60117:"申请不允许重复提交" 60118:"超过账号下单频率限制" 60119:"组合表不存在的组合方向或投保标志" 61001:"订单操作频率过高" 61002:"委托查询返回前不能进行下次查询" 72001:"超过行情最大总订阅数" 72002:"超过该交易所行情最大订阅数" 72101:"没有该行情的订阅权限" 72102:"没有该交易所下行情的订阅权限" 72103:"品种不存在" 72104:"合约可能不存在" 83001:"不支持的行情协议" 14001:"二次验证失败" 14002:"二次验证超时" 11000:"数据库连接失败" 11002:"不允许一对多" 11003:"删除失败-存在关联信息," 11004:"删除分组失败-分组有下属或在操作员下属中" 12001:"登录用户密码修改失败-原始密码错误" 12002:"登录用户密码修改失败-不能与前n次密码相同" 12003:"登录用户密码修改失败-新密码不符合密码复杂度要求" 13001:"一个币种组只能设置一个基币" 13002:"基币只能是美元或港币" 
60012:"LME未准备就绪" 60013:"不支持的下单类型" 60014:"错误的埋单类型" 60015:"不合法的委托类型" 60025:"客户权限只可平仓" 60026:"客户合约特设只可平仓" 60027:"系统权限只可平仓" 60028:"只可平仓提前天数限制只可平仓" 60029:"客户品种风控权限禁止交易" 60030:"客户品种风控权限只可平仓" 60041:"未登录网关" 60042:"未找到网关信息" 60054:"总基币资金不足" 60055:"超过保证金额度" 60056:"总基币超过开仓比例限制" 60057:"独立币种组超过开仓比例限制" 60058:"风险阵列参数错误" 60073:"风险报单不允许改单" 60074:"成交量大于改单量" 60075:"预埋单不允许改单" 60112:"下单超过上手最大持仓量" 60121:"开平方式错误" 60122:"委托平仓持仓不足" 60123:"成交平仓失败" 60131:"未找到本地委托" 60132:"与网关断开连接" 60141:"录单成交重复" 60142:"录单成交未找到对应委托" 60143:"录单成交合约不存在" 60144:"录单成交参数错误" 60145:"录单成交委托状态错误" 60151:"成交删除未找到成交" 60152:"此状态成交不可删" 60161:"不允许录入此状态订单" 60162:"错误的修改订单请求" 60163:"订单不可删,存在对应成交" 60164:"不合法的委托状态" 60165:"此状态不允许订单转移" 60166:"订单不允许删除" 60171:"做市商双边撤单未找到委托" 60172:"做市商双边撤单客户不一致" 60173:"做市商双边撤单品种不一致" 60174:"做市商双边撤单合约不一致" 60175:"做市商双边撤单买卖方向相同" 60176:"做市商双边撤单买卖方向错误" 60177:"做市商单边检查未通过" 60181:"埋单激活失败,订单未找到" 60182:"埋单激活失败,非有效状态" 80001:"网关未就绪,未连接上手" 80002:"品种错误" 80003:"合约错误" 80004:"报单字段有误" 80005:"价格不合法" 80006:"数量不合法" 80007:"报单类型不合法" 80008:"委托模式不合法" 80009:"委托不存在(改单、撤单)" 80010:"发送报单失败" 80011:"被上手拒绝" 90001:"前置不允许该模块登录" 90002:"一次请求太多数据" 90003:"前置没有所要数据" 90004:"所查询的操作员信息不存在" 90011:"前置与交易断开" 90012:"前置与管理断开" 90021:"下属资金账号不存在" 90022:"该操作员不允许交易" 90023:"查询频率过快" 90024:"该授权不予许登录" 90025:"自成交验证不通过" -23:"查询频率太快。" -24:"不符合调用条件。" -25:"改单撤单时没有找到对应订单。" -26:"日志路径为空。" -27:"打开日志文件失败" -28:"没有交易员登录权限" -29:"没有订单录入或者成交录入" -30:"没有订单修改和订单删除权限,成交删除权限" -31:"没有订单转移权限" -32:"成交录入时系统号为空" -33:"成交删除时成交号为空。" -34:"成交删除时没有找到对应的成交" -35:"订单修改时客户账号变动。" -36:"订单转移时客户账号没有变动" -37:"修改的电话密码位数不对或者包含特殊字符。" -38:"未绑定的二次认证信息" -39:"二次认证有效期内不能再申请二次认证码" -40:"没有设置客户密码的权限。" -41:"风险保单单客户无法撤销或更改" -42:"改单是客户账号填写与订单客户账号不一致" -11001:"输入错误的:TAPIBucketDateFlag" -11002:"输入错误的:TAPIHisQuoteType" -12002:"输入错误的:TAPIUserTypeType" -12018:"输入错误的:TAPIBankAccountStateType" -12019:"输入错误的:TAPIBankAccountSwapStateType" -12020:"输入错误的:TAPIBankAccountTransferStateType" -12026:"输入错误的:TAPIPartyTypeType" -12027:"输入错误的:TAPIPartyCertificateTypeType" -12028:"输入错误的:TAPIMsgReceiverType" -12029:"输入错误的:TAPIMsgTypeType" -12030:"输入错误的:TAPIMsgLevelType" -12031:"输入错误的:TAPITransferDirectType" -12032:"输入错误的:TAPITransferStateType" -12033:"输入错误的:TAPITransferTypeType" -12034:"输入错误的:TAPITransferDeviceIDType" -12037:"输入错误的:TAPIBillTypeType" -12038:"输入错误的:TAPIBillFileTypeType" -12039:"输入错误的:TAPIOFFFlagType" -12040:"输入错误的:TAPICashAdjustTypeType" -12046:"输入错误的: ClientID,ClientID包含特殊字符。" -13001:"历史行情查询参数不合法" -13002:"价格和数量中包含NAN或者INF不合法的数值" -12047:"输入错误的到期日" -12048:"错误的密码类型" -12049:"错误的结算数据类型" }<line_sep>
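# A small sketch, not part of the original mapping: the helper name and the fallback text are my own, shown only to illustrate turning a TAP API return code into a readable message with the error_map above. <def_stmt>describe_error err_id<block_start><return>error_map.get(err_id "unknown error code: {}".format(err_id))<block_end>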
<import_from_stmt>rpython.translator.tool.cbuild ExternalCompilationInfo<import_from_stmt>rpython.rtyper.tool rffi_platform<as>platform<import_from_stmt>rpython.rtyper.lltypesystem rffi lltype<import_from_stmt>hippy.tool.platform get_gmake<import_stmt>subprocess<import_stmt>py<line_sep>LIBDIR=py.path.local(__file__).join('..' 'lib' 'whirlpool/')<line_sep>subprocess.check_call([get_gmake() '-C' str(LIBDIR)])<line_sep>eci=ExternalCompilationInfo(includes=['whirlpool.h'] library_dirs=[str(LIBDIR)] libraries=['whirlpool1'] testonly_libraries=['whirlpool'] include_dirs=[str(LIBDIR)])<class_stmt>CConfig<block_start>_compilation_info_=eci<line_sep>WHIRLPOOL_CTX=platform.Struct('WHIRLPOOL_CTX' [])<block_end>globals().update(platform.configure(CConfig))<def_stmt>external name args result<block_start><return>rffi.llexternal(name args result compilation_info=eci releasegil=<false>)<block_end>PTR_WHIRLPOOL_CTX=lltype.Ptr(WHIRLPOOL_CTX)<line_sep>c_WHIRLPOOLInit=external('WHIRLPOOLInit' [PTR_WHIRLPOOL_CTX] lltype.Void)<line_sep>c_WHIRLPOOLUpdate=external('WHIRLPOOLUpdate' [PTR_WHIRLPOOL_CTX rffi.CCHARP rffi.UINT] lltype.Void)<line_sep>c_WHIRLPOOLFinal=external('WHIRLPOOLFinal' [rffi.CCHARP PTR_WHIRLPOOL_CTX] lltype.Void)<line_sep>
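# A hedged usage sketch, not part of the original bindings: it assumes the usual RPython raw-memory idioms (lltype.malloc/lltype.free with flavor='raw', rffi.str2charp, rffi.charpsize2str) and the 64-byte Whirlpool digest size, and only illustrates how the three llexternal wrappers above fit together. <def_stmt>whirlpool_digest_example data<block_start>ctx=lltype.malloc(WHIRLPOOL_CTX flavor='raw')<line_sep>buf=rffi.str2charp(data)<line_sep>out=lltype.malloc(rffi.CCHARP.TO 64 flavor='raw')<line_sep>c_WHIRLPOOLInit(ctx)<line_sep>c_WHIRLPOOLUpdate(ctx buf rffi.cast(rffi.UINT len(data)))<line_sep>c_WHIRLPOOLFinal(out ctx)<line_sep>digest=rffi.charpsize2str(out 64)<line_sep>lltype.free(out flavor='raw')<line_sep>rffi.free_charp(buf)<line_sep>lltype.free(ctx flavor='raw')<line_sep><return>digest<block_end>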
<import_from_stmt>collections.abc Iterator<import_from_stmt>functools wraps<import_from_stmt>numbers Number<import_stmt>numpy<as>np<import_from_stmt>tlz merge<import_from_stmt>..base tokenize<import_from_stmt>..highlevelgraph HighLevelGraph<import_from_stmt>.core Array<line_sep>@wraps(np.percentile)<def_stmt>_percentile a q interpolation="linear"<block_start>n=len(a)<if_stmt><not>len(a)<block_start><return><none> n<block_end><if_stmt>isinstance(q Iterator)<block_start>q=list(q)<block_end><if_stmt>a.dtype.name<eq>"category"<block_start>result=np.percentile(a.cat.codes q interpolation=interpolation)<import_stmt>pandas<as>pd<line_sep><return>pd.Categorical.from_codes(result a.dtype.categories a.dtype.ordered) n<block_end><if_stmt>type(a.dtype).__name__<eq>"DatetimeTZDtype"<block_start><import_stmt>pandas<as>pd<if_stmt>isinstance(a (pd.Series pd.Index))<block_start>a=a.values<block_end><block_end><if_stmt>np.issubdtype(a.dtype np.datetime64)<block_start>values=a<line_sep>a2=values.view("i8")<line_sep>result=np.percentile(a2 q interpolation=interpolation).astype(values.dtype)<if_stmt>q[0]<eq>0# https://github.com/dask/dask/issues/6864 <block_start>result[0]=min(result[0] values.min())<block_end><return>result n<block_end><if_stmt><not>np.issubdtype(a.dtype np.number)<block_start>interpolation="nearest"<block_end><return>np.percentile(a q interpolation=interpolation) n<block_end><def_stmt>_tdigest_chunk a<block_start><import_from_stmt>crick TDigest<line_sep>t=TDigest()<line_sep>t.update(a)<line_sep><return>t<block_end><def_stmt>_percentiles_from_tdigest qs digests<block_start><import_from_stmt>crick TDigest<line_sep>t=TDigest()<line_sep>t.merge(*digests)<line_sep><return>np.array(t.quantile(qs/100.0))<block_end><def_stmt>percentile a q interpolation="linear" method="default"<block_start>"""Approximate percentile of 1-D array Parameters ---------- a : Array q : array_like of float Percentile or sequence of percentiles to compute, which must be between 0 and 100 inclusive. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional The interpolation method to use when the desired percentile lies between two data points ``i < j``. Only valid for ``method='dask'``. - 'linear': ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. - 'lower': ``i``. - 'higher': ``j``. - 'nearest': ``i`` or ``j``, whichever is nearest. - 'midpoint': ``(i + j) / 2``. method : {'default', 'dask', 'tdigest'}, optional What method to use. By default will use dask's internal custom algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for floats and ints and fallback to the ``'dask'`` otherwise. 
See Also -------- numpy.percentile : Numpy's equivalent Percentile function """<import_from_stmt>.dispatch percentile_lookup<as>_percentile<import_from_stmt>.utils array_safe meta_from_array<if_stmt><not>a.ndim<eq>1<block_start><raise>NotImplementedError("Percentiles only implemented for 1-d arrays")<block_end><if_stmt>isinstance(q Number)<block_start>q=[q]<block_end>q=array_safe(q like=meta_from_array(a))<line_sep>token=tokenize(a q interpolation)<line_sep>dtype=a.dtype<if_stmt>np.issubdtype(dtype np.integer)<block_start>dtype=(array_safe([] dtype=dtype like=meta_from_array(a))/0.5).dtype<block_end>meta=meta_from_array(a dtype=dtype)<line_sep>allowed_methods=["default" "dask" "tdigest"]<if_stmt>method<not><in>allowed_methods<block_start><raise>ValueError("method can only be 'default', 'dask' or 'tdigest'")<block_end><if_stmt>method<eq>"default"<block_start>internal_method="dask"<block_end><else_stmt><block_start>internal_method=method<block_end># Allow using t-digest if interpolation is allowed and dtype is of floating or integer type <if_stmt>(internal_method<eq>"tdigest"<and>interpolation<eq>"linear"<and>(np.issubdtype(dtype np.floating)<or>np.issubdtype(dtype np.integer)))<block_start><import_from_stmt>dask.utils import_required<line_sep>import_required("crick" "crick is a required dependency for using the t-digest method.")<line_sep>name="percentile_tdigest_chunk-"+token<line_sep>dsk={(name i):(_tdigest_chunk key)<for>i,key enumerate(a.__dask_keys__())}<line_sep>name2="percentile_tdigest-"+token<line_sep>dsk2={(name2 0):(_percentiles_from_tdigest q sorted(dsk))}<block_end># Otherwise use the custom percentile algorithm <else_stmt># Add 0 and 100 during calculation for more robust behavior (hopefully) <block_start>calc_q=np.pad(q 1 mode="constant")<line_sep>calc_q[-1]=100<line_sep>name="percentile_chunk-"+token<line_sep>dsk={(name i):(_percentile key calc_q interpolation)<for>i,key enumerate(a.__dask_keys__())}<line_sep>name2="percentile-"+token<line_sep>dsk2={(name2 0):(merge_percentiles q [calc_q]<times>len(a.chunks[0]) sorted(dsk) interpolation )}<block_end>dsk=merge(dsk dsk2)<line_sep>graph=HighLevelGraph.from_collections(name2 dsk dependencies=[a])<line_sep><return>Array(graph name2 chunks=((len(q) ) ) meta=meta)<block_end><def_stmt>merge_percentiles finalq qs vals interpolation="lower" Ns=<none> raise_on_nan=<true><block_start>"""Combine several percentile calculations of different data. Parameters ---------- finalq : numpy.array Percentiles to compute (must use same scale as ``qs``). qs : sequence of :class:`numpy.array`s Percentiles calculated on different sets of data. vals : sequence of :class:`numpy.array`s Resulting values associated with percentiles ``qs``. Ns : sequence of integers The number of data elements associated with each data set. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Specify the type of interpolation to use to calculate final percentiles. For more information, see :func:`numpy.percentile`. 
Examples -------- >>> finalq = [10, 20, 30, 40, 50, 60, 70, 80] >>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]] >>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])] >>> Ns = [100, 100] # Both original arrays had 100 elements >>> merge_percentiles(finalq, qs, vals, Ns=Ns) array([ 1, 2, 3, 4, 10, 11, 12, 13]) """<import_from_stmt>.utils array_safe<if_stmt>isinstance(finalq Iterator)<block_start>finalq=list(finalq)<block_end>finalq=array_safe(finalq like=finalq)<line_sep>qs=list(map(list qs))<line_sep>vals=list(vals)<if_stmt>Ns<is><none><block_start>vals,Ns=zip(*vals)<block_end>Ns=list(Ns)<line_sep>L=list(zip(*[(q val N)<for>q,val,N zip(qs vals Ns)<if>N]))<if_stmt><not>L<block_start><if_stmt>raise_on_nan<block_start><raise>ValueError("No non-trivial arrays found")<block_end><return>np.full(len(qs[0])-2 np.nan)<block_end>qs,vals,Ns=L<line_sep># TODO: Perform this check above in percentile once dtype checking is easy # Here we silently change meaning <if_stmt>vals[0].dtype.name<eq>"category"<block_start>result=merge_percentiles(finalq qs [v.codes<for>v vals] interpolation Ns raise_on_nan)<import_stmt>pandas<as>pd<line_sep><return>pd.Categorical.from_codes(result vals[0].categories vals[0].ordered)<block_end><if_stmt><not>np.issubdtype(vals[0].dtype np.number)<block_start>interpolation="nearest"<block_end><if_stmt>len(vals)<ne>len(qs)<or>len(Ns)<ne>len(qs)<block_start><raise>ValueError("qs, vals, and Ns parameters must be the same length")<block_end># transform qs and Ns into number of observations between percentiles counts=[]<for_stmt>q,N zip(qs Ns)<block_start>count=np.empty_like(finalq shape=len(q))<line_sep>count[1:]=np.diff(array_safe(q like=q[0]))<line_sep>count[0]=q[0]<line_sep>count<augmul>N<line_sep>counts.append(count)<block_end># Sort by calculated percentile values, then number of observations. combined_vals=np.concatenate(vals)<line_sep>combined_counts=array_safe(np.concatenate(counts) like=combined_vals)<line_sep>sort_order=np.argsort(combined_vals)<line_sep>combined_vals=np.take(combined_vals sort_order)<line_sep>combined_counts=np.take(combined_counts sort_order)<line_sep># percentile-like, but scaled by total number of observations combined_q=np.cumsum(combined_counts)<line_sep># rescale finalq percentiles to match combined_q finalq=array_safe(finalq like=combined_vals)<line_sep>desired_q=finalq<times>sum(Ns)<line_sep># the behavior of different interpolation methods should be # investigated further. 
<if_stmt>interpolation<eq>"linear"<block_start>rv=np.interp(desired_q combined_q combined_vals)<block_end><else_stmt><block_start>left=np.searchsorted(combined_q desired_q side="left")<line_sep>right=np.searchsorted(combined_q desired_q side="right")-1<line_sep>np.minimum(left len(combined_vals)-1 left)# don't exceed max index lower=np.minimum(left right)<line_sep>upper=np.maximum(left right)<if_stmt>interpolation<eq>"lower"<block_start>rv=combined_vals[lower]<block_end><elif_stmt>interpolation<eq>"higher"<block_start>rv=combined_vals[upper]<block_end><elif_stmt>interpolation<eq>"midpoint"<block_start>rv=0.5<times>(combined_vals[lower]+combined_vals[upper])<block_end><elif_stmt>interpolation<eq>"nearest"<block_start>lower_residual=np.abs(combined_q[lower]-desired_q)<line_sep>upper_residual=np.abs(combined_q[upper]-desired_q)<line_sep>mask=lower_residual<g>upper_residual<line_sep>index=lower# alias; we no longer need lower index[mask]=upper[mask]<line_sep>rv=combined_vals[index]<block_end><else_stmt><block_start><raise>ValueError("interpolation can only be 'linear', 'lower', "<concat>"'higher', 'midpoint', or 'nearest'")<block_end><block_end><return>rv<block_end>
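# A short usage sketch, not part of the module: the random array below is an arbitrary example, intended only to show how the percentile function defined above is called on a chunked dask array and then evaluated. <def_stmt>_example_percentile_usage <block_start><import_stmt>dask.array<as>da<line_sep>x=da.random.random(size=100000 chunks=10000)<line_sep>p=percentile(x [25 50 75] interpolation="linear")<line_sep><return>p.compute()<block_end>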
<import_from_stmt>typing Tuple<import_from_stmt>PyQt5 QtCore QtOpenGL QtWidgets QtGui<import_from_stmt>moderngl_window.context.base BaseWindow<import_from_stmt>moderngl_window.context.pyqt5.keys Keys<class_stmt>Window(BaseWindow)<block_start>""" A basic window implementation using PyQt5 with the goal of creating an OpenGL context and handle keyboard and mouse input. This window bypasses Qt's own event loop to make things as flexible as possible. If you need to use the event loop and are using other features in Qt as well, this example can still be useful as a reference when creating your own window. """<line_sep>#: Name of the window name="pyqt5"<line_sep>#: PyQt5 specific key constants keys=Keys<line_sep># PyQt supports mode buttons, but we are limited by other libraries _mouse_button_map={1:1 2:2 4:3 }<def_stmt>__init__ self **kwargs<block_start>super().__init__(**kwargs)<line_sep># Specify OpenGL context parameters gl=QtOpenGL.QGLFormat()<line_sep>gl.setVersion(self.gl_version[0] self.gl_version[1])<line_sep>gl.setProfile(QtOpenGL.QGLFormat.CoreProfile)<line_sep>gl.setDepthBufferSize(24)<line_sep>gl.setDoubleBuffer(<true>)<line_sep>gl.setSwapInterval(1<if>self.vsync<else>0)<line_sep># Configure multisampling if needed <if_stmt>self.samples<g>1<block_start>gl.setSampleBuffers(<true>)<line_sep>gl.setSamples(int(self.samples))<block_end># We need an application object, but we are bypassing the library's # internal event loop to avoid unnecessary work self._app=QtWidgets.QApplication([])<line_sep># Create the OpenGL widget self._widget=QtOpenGL.QGLWidget(gl)<line_sep>self.title=self._title<line_sep># If fullscreen we change the window to match the desktop on the primary screen <if_stmt>self.fullscreen<block_start>rect=QtWidgets.QDesktopWidget().screenGeometry()<line_sep>self._width=rect.width()<line_sep>self._height=rect.height()<line_sep>self._buffer_width=rect.width()<times>self._widget.devicePixelRatio()<line_sep>self._buffer_height=rect.height()<times>self._widget.devicePixelRatio()<block_end><if_stmt>self.resizable# Ensure a valid resize policy when window is resizable <block_start>size_policy=QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding QtWidgets.QSizePolicy.Expanding )<line_sep>self._widget.setSizePolicy(size_policy)<line_sep>self._widget.resize(self.width self.height)<block_end><else_stmt><block_start>self._widget.setFixedSize(self.width self.height)<block_end># Center the window on the screen if in window mode <if_stmt><not>self.fullscreen<block_start>center_window_position=(self.position[0]-self.width/2 self.position[1]-self.height/2 )<line_sep>self._widget.move(*center_window_position)<block_end># Needs to be set before show() self._widget.resizeGL=self.resize<line_sep>self.cursor=self._cursor<if_stmt>self.fullscreen<block_start>self._widget.showFullScreen()<block_end><else_stmt><block_start>self._widget.show()<block_end># We want mouse position events self._widget.setMouseTracking(<true>)<line_sep># Override event functions in qt self._widget.keyPressEvent=self.key_pressed_event<line_sep>self._widget.keyReleaseEvent=self.key_release_event<line_sep>self._widget.mouseMoveEvent=self.mouse_move_event<line_sep>self._widget.mousePressEvent=self.mouse_press_event<line_sep>self._widget.mouseReleaseEvent=self.mouse_release_event<line_sep>self._widget.wheelEvent=self.mouse_wheel_event<line_sep>self._widget.closeEvent=self.close_event<line_sep>self._widget.showEvent=self.show_event<line_sep>self._widget.hideEvent=self.hide_event<line_sep># Attach to the context 
self.init_mgl_context()<line_sep># Ensure retina and 4k displays get the right viewport self._buffer_width=self._width<times>self._widget.devicePixelRatio()<line_sep>self._buffer_height=self._height<times>self._widget.devicePixelRatio()<line_sep>self.set_default_viewport()<block_end><def_stmt>_set_fullscreen self value:bool<arrow><none><block_start><if_stmt>value<block_start>self._widget.showFullScreen()<block_end><else_stmt><block_start>self._widget.showNormal()<block_end><block_end>@property<def_stmt>size self<arrow>Tuple[int int]<block_start>"""Tuple[int, int]: current window size. This property also support assignment:: # Resize the window to 1000 x 1000 window.size = 1000, 1000 """<line_sep><return>self._width self._height<block_end>@size.setter<def_stmt>size self value:Tuple[int int]<block_start>pos=self.position<line_sep>self._widget.setGeometry(pos[0] pos[1] value[0] value[1])<block_end>@property<def_stmt>position self<arrow>Tuple[int int]<block_start>"""Tuple[int, int]: The current window position. This property can also be set to move the window:: # Move window to 100, 100 window.position = 100, 100 """<line_sep>geo=self._widget.geometry()<line_sep><return>geo.x() geo.y()<block_end>@position.setter<def_stmt>position self value:Tuple[int int]<block_start>self._widget.setGeometry(value[0] value[1] self._width self._height)<block_end><def_stmt>swap_buffers self<arrow><none><block_start>"""Swap buffers, set viewport, trigger events and increment frame counter"""<line_sep>self._widget.swapBuffers()<line_sep>self.set_default_viewport()<line_sep>self._app.processEvents()<line_sep>self._frames<augadd>1<block_end>@property<def_stmt>cursor self<arrow>bool<block_start>"""bool: Should the mouse cursor be visible inside the window? This property can also be assigned to:: # Disable cursor window.cursor = False """<line_sep><return>self._cursor<block_end>@cursor.setter<def_stmt>cursor self value:bool<block_start><if_stmt>value<is><true><block_start>self._widget.setCursor(QtCore.Qt.ArrowCursor)<block_end><else_stmt><block_start>self._widget.setCursor(QtCore.Qt.BlankCursor)<block_end>self._cursor=value<block_end>@property<def_stmt>title self<arrow>str<block_start>"""str: Window title. This property can also be set:: window.title = "New Title" """<line_sep><return>self._title<block_end>@title.setter<def_stmt>title self value:str<block_start>self._widget.setWindowTitle(value)<line_sep>self._title=value<block_end><def_stmt>resize self width:int height:int<arrow><none><block_start>"""Replacement for Qt's ``resizeGL`` method. 
Args: width: New window width height: New window height """<line_sep>self._width=width<floordiv>self._widget.devicePixelRatio()<line_sep>self._height=height<floordiv>self._widget.devicePixelRatio()<line_sep>self._buffer_width=width<line_sep>self._buffer_height=height<if_stmt>self._ctx<block_start>self.set_default_viewport()<block_end># Make sure we notify the example about the resize super().resize(self._buffer_width self._buffer_height)<block_end><def_stmt>_handle_modifiers self mods<arrow><none><block_start>"""Update modifiers"""<line_sep>self._modifiers.shift=bool(mods&QtCore.Qt.ShiftModifier)<line_sep>self._modifiers.ctrl=bool(mods&QtCore.Qt.ControlModifier)<line_sep>self._modifiers.alt=bool(mods&QtCore.Qt.AltModifier)<block_end><def_stmt>_set_icon self icon_path:str<arrow><none><block_start>self._widget.setWindowIcon(QtGui.QIcon(icon_path))<block_end><def_stmt>key_pressed_event self event<arrow><none><block_start>"""Process Qt key press events forwarding them to standard methods Args: event: The qtevent instance """<if_stmt>self._exit_key<is><not><none><and>event.key()<eq>self._exit_key<block_start>self.close()<block_end><if_stmt>self._fs_key<is><not><none><and>event.key()<eq>self._fs_key<block_start>self.fullscreen=<not>self.fullscreen<block_end>self._handle_modifiers(event.modifiers())<line_sep>self._key_pressed_map[event.key()]=<true><line_sep>self._key_event_func(event.key() self.keys.ACTION_PRESS self._modifiers)<line_sep>text=event.text()<if_stmt>text.strip()<or>event.key()<eq>self.keys.SPACE<block_start>self._unicode_char_entered_func(text)<block_end><block_end><def_stmt>key_release_event self event<arrow><none><block_start>"""Process Qt key release events forwarding them to standard methods Args: event: The qtevent instance """<line_sep>self._handle_modifiers(event.modifiers())<line_sep>self._key_pressed_map[event.key()]=<false><line_sep>self._key_event_func(event.key() self.keys.ACTION_RELEASE self._modifiers)<block_end><def_stmt>mouse_move_event self event<arrow><none><block_start>"""Forward mouse cursor position events to standard methods Args: event: The qtevent instance """<line_sep>x,y=event.x() event.y()<line_sep>dx,dy=self._calc_mouse_delta(x y)<if_stmt>self.mouse_states.any<block_start>self._mouse_drag_event_func(x y dx dy)<block_end><else_stmt><block_start>self._mouse_position_event_func(x y dx dy)<block_end><block_end><def_stmt>mouse_press_event self event<arrow><none><block_start>"""Forward mouse press events to standard methods Args: event: The qtevent instance """<line_sep>self._handle_modifiers(event.modifiers())<line_sep>button=self._mouse_button_map.get(event.button())<if_stmt>button<is><none><block_start><return><block_end>self._handle_mouse_button_state_change(button <true>)<line_sep>self._mouse_press_event_func(event.x() event.y() button)<block_end><def_stmt>mouse_release_event self event<arrow><none><block_start>"""Forward mouse release events to standard methods Args: event: The qtevent instance """<line_sep>self._handle_modifiers(event.modifiers())<line_sep>button=self._mouse_button_map.get(event.button())<if_stmt>button<is><none><block_start><return><block_end>self._handle_mouse_button_state_change(button <false>)<line_sep>self._mouse_release_event_func(event.x() event.y() button)<block_end><def_stmt>mouse_wheel_event self event<block_start>"""Forward mouse wheel events to standard metods. From Qt docs: Returns the distance that the wheel is rotated, in eighths of a degree. 
A positive value indicates that the wheel was rotated forwards away from the user; a negative value indicates that the wheel was rotated backwards toward the user. Most mouse types work in steps of 15 degrees, in which case the delta value is a multiple of 120; i.e., 120 units * 1/8 = 15 degrees. However, some mice have finer-resolution wheels and send delta values that are less than 120 units (less than 15 degrees). To support this possibility, you can either cumulatively add the delta values from events until the value of 120 is reached, then scroll the widget, or you can partially scroll the widget in response to each wheel event. Args: event (QWheelEvent): Mouse wheel event """<line_sep>self._handle_modifiers(event.modifiers())<line_sep>point=event.angleDelta()<line_sep>self._mouse_scroll_event_func(point.x()/120.0 point.y()/120.0)<block_end><def_stmt>close_event self event<arrow><none><block_start>"""The standard PyQt close events Args: event: The qtevent instance """<line_sep>self.close()<block_end><def_stmt>close self<block_start>"""Close the window"""<line_sep>super().close()<line_sep>self._close_func()<block_end><def_stmt>show_event self event<block_start>"""The standard Qt show event"""<line_sep>self._iconify_func(<false>)<block_end><def_stmt>hide_event self event<block_start>"""The standard Qt hide event"""<line_sep>self._iconify_func(<true>)<block_end><def_stmt>destroy self<arrow><none><block_start>"""Quit the Qt application to exit the window gracefully"""<line_sep>QtCore.QCoreApplication.instance().quit()<block_end><block_end>
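# A tiny worked example, not part of the window class: the delta values are illustrative, spelling out the scaling described in mouse_wheel_event above, where Qt reports eighths of a degree and one standard 15-degree notch arrives as 120 units. <def_stmt>_example_wheel_scaling <block_start>one_notch=120/120.0<line_sep># a full notch is forwarded to the scroll callback as 1.0 fine_step=30/120.0<line_sep># a high-resolution wheel may report fractions such as 0.25 <return>one_notch fine_step<block_end>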
# Copyright 2018 The CapsLayer Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================== <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.keras.utils.data_utils get_file<import_from_stmt>tensorflow.python.keras.datasets.cifar load_batch<import_from_stmt>capslayer.data.utils.TFRecordHelper int64_feature bytes_feature<line_sep>URL="https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"<line_sep>md5sum='eb9058c3a382ffc7106e4002c42a8d85'<def_stmt>load_cifar100 split path=<none><block_start><if_stmt>path<is><none><block_start>cache_path=os.path.join(os.path.expanduser('~') ".capslayer")<line_sep>path=get_file('cifar-100-python' cache_dir=cache_path file_hash=md5sum origin=URL untar=<true>)<block_end>split=split.lower()<if_stmt>split<eq>'test'<block_start>fpath=os.path.join(path 'test')<line_sep>images,labels=load_batch(fpath label_key='fine_labels')<block_end><else_stmt><block_start>fpath=os.path.join(path 'train')<line_sep>images,labels=load_batch(fpath label_key='fine_labels')<line_sep>idx=np.arange(len(images))<line_sep>np.random.seed(201808)<line_sep>np.random.shuffle(idx)<line_sep>labels=np.reshape(labels (-1 ))<line_sep>images=images[idx[:45000]]<if>split<eq>"train"<else>images[idx[45000:]]<line_sep>labels=labels[idx[:45000]]<if>split<eq>"train"<else>labels[idx[45000:]]<block_end>images=np.reshape(images.transpose(0 2 3 1) (-1 3072)).astype(np.float32)<line_sep>labels=np.reshape(labels (-1 )).astype(np.int32)<line_sep><return>(zip(images labels))<block_end><def_stmt>encode_and_write dataset filename<block_start><with_stmt>tf.python_io.TFRecordWriter(filename)<as>writer<block_start><for_stmt>image,label dataset# serialize each image/label pair into a tf.train.Example record <block_start>image_raw=image.tostring()<line_sep>example=tf.train.Example(features=tf.train.Features(feature={'image':bytes_feature(image_raw) 'label':int64_feature(label)}))<line_sep>writer.write(example.SerializeToString())<block_end><block_end><block_end><def_stmt>tfrecord_runner path=<none> force=<true><block_start>train_set=load_cifar100(path=path split='train')<line_sep>eval_set=load_cifar100(path=path split='eval')<line_sep>test_set=load_cifar100(path=path split='test')<if_stmt>path<is><none><block_start>path=os.path.join(os.path.expanduser('~') ".capslayer" "datasets" "cifar100")<block_end><if_stmt><not>os.path.exists(path)<block_start>os.makedirs(path)<block_end>train_set_outpath=os.path.join(path "train_cifar100.tfrecord")<line_sep>eval_set_outpath=os.path.join(path "eval_cifar100.tfrecord")<line_sep>test_set_outpath=os.path.join(path "test_cifar100.tfrecord")<if_stmt><not>os.path.exists(train_set_outpath)<or>force<block_start>encode_and_write(train_set train_set_outpath)<block_end><if_stmt><not>os.path.exists(eval_set_outpath)<or>force<block_start>encode_and_write(eval_set
eval_set_outpath)<block_end><if_stmt><not>os.path.exists(test_set_outpath)<or>force<block_start>encode_and_write(test_set test_set_outpath)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>data=load_cifar100(split='train')<line_sep>print(data)<block_end>
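# A minimal sketch of reading back the records written by encode_and_write()
# above. The feature spec mirrors the Example written there (raw float32 image
# bytes, 3072 values per record, plus an int64 label); the path is the train
# output produced by tfrecord_runner(), and the TF 2.x-style tf.io/tf.data APIs
# are assumed here rather than the tf.python_io writer used above.
import os
import tensorflow as tf

def parse_cifar100_example(serialized):
    features = tf.io.parse_single_example(
        serialized,
        {
            "image": tf.io.FixedLenFeature([], tf.string),
            "label": tf.io.FixedLenFeature([], tf.int64),
        },
    )
    # 3072 float32 values per record, stored in HWC order (32 * 32 * 3).
    image = tf.reshape(tf.io.decode_raw(features["image"], tf.float32), [32, 32, 3])
    return image, features["label"]

record_path = os.path.join(
    os.path.expanduser("~"), ".capslayer", "datasets", "cifar100", "train_cifar100.tfrecord"
)
dataset = tf.data.TFRecordDataset(record_path).map(parse_cifar100_example).batch(32)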
PARSING_SCHEME={'name':'a' 'games_played':'td[data-stat="g"]:first' 'minutes_played':'td[data-stat="mp"]:first' 'field_goals':'td[data-stat="fg"]:first' 'field_goal_attempts':'td[data-stat="fga"]:first' 'field_goal_percentage':'td[data-stat="fg_pct"]:first' 'three_point_field_goals':'td[data-stat="fg3"]:first' 'three_point_field_goal_attempts':'td[data-stat="fg3a"]:first' 'three_point_field_goal_percentage':'td[data-stat="fg3_pct"]:first' 'two_point_field_goals':'td[data-stat="fg2"]:first' 'two_point_field_goal_attempts':'td[data-stat="fg2a"]:first' 'two_point_field_goal_percentage':'td[data-stat="fg2_pct"]:first' 'free_throws':'td[data-stat="ft"]:first' 'free_throw_attempts':'td[data-stat="fta"]:first' 'free_throw_percentage':'td[data-stat="ft_pct"]:first' 'offensive_rebounds':'td[data-stat="orb"]:first' 'defensive_rebounds':'td[data-stat="drb"]:first' 'total_rebounds':'td[data-stat="trb"]:first' 'assists':'td[data-stat="ast"]:first' 'steals':'td[data-stat="stl"]:first' 'blocks':'td[data-stat="blk"]:first' 'turnovers':'td[data-stat="tov"]:first' 'personal_fouls':'td[data-stat="pf"]:first' 'points':'td[data-stat="pts"]:first' 'opp_minutes_played':'td[data-stat="mp"]:first' 'opp_field_goals':'td[data-stat="opp_fg"]:first' 'opp_field_goal_attempts':'td[data-stat="opp_fga"]:first' 'opp_field_goal_percentage':'td[data-stat="opp_fg_pct"]:first' 'opp_three_point_field_goals':'td[data-stat="opp_fg3"]:first' 'opp_three_point_field_goal_attempts':'td[data-stat="opp_fg3a"]:first' 'opp_three_point_field_goal_percentage':'td[data-stat="opp_fg3_pct"]:first' 'opp_two_point_field_goals':'td[data-stat="opp_fg2"]:first' 'opp_two_point_field_goal_attempts':'td[data-stat="opp_fg2a"]:first' 'opp_two_point_field_goal_percentage':'td[data-stat="opp_fg2_pct"]:first' 'opp_free_throws':'td[data-stat="opp_ft"]:first' 'opp_free_throw_attempts':'td[data-stat="opp_fta"]:first' 'opp_free_throw_percentage':'td[data-stat="opp_ft_pct"]:first' 'opp_offensive_rebounds':'td[data-stat="opp_orb"]:first' 'opp_defensive_rebounds':'td[data-stat="opp_drb"]:first' 'opp_total_rebounds':'td[data-stat="opp_trb"]:first' 'opp_assists':'td[data-stat="opp_ast"]:first' 'opp_steals':'td[data-stat="opp_stl"]:first' 'opp_blocks':'td[data-stat="opp_blk"]:first' 'opp_turnovers':'td[data-stat="opp_tov"]:first' 'opp_personal_fouls':'td[data-stat="opp_pf"]:first' 'opp_points':'td[data-stat="opp_pts"]:first'}<line_sep>SCHEDULE_SCHEME={'game':'th[data-stat="g"]:first' 'date':'td[data-stat="date_game"]:first' 'time':'td[data-stat="game_start_time"]:first' 'boxscore':'td[data-stat="box_score_text"]:first' 'location':'td[data-stat="game_location"]:first' 'opponent_abbr':'td[data-stat="opp_id"]:first' 'opponent_name':'td[data-stat="opp_name"]:first' 'result':'td[data-stat="game_result"]:first' 'points_scored':'td[data-stat="pts"]:first' 'points_allowed':'td[data-stat="opp_pts"]:first' 'wins':'td[data-stat="wins"]:first' 'losses':'td[data-stat="losses"]:first' 'streak':'td[data-stat="game_streak"]:first'}<line_sep>BOXSCORE_SCHEME={'date':'div[class="scorebox_meta"]' 'location':'div[class="scorebox_meta"]' 'away_name':'a[itemprop="name"]:first' 'home_name':'a[itemprop="name"]:last' 'winning_name':'' 'winning_abbr':'' 'losing_name':'' 'losing_abbr':'' 'summary':'table#line_score' 'pace':'td[data-stat="pace"]:first' 'away_record':'div[class="table_wrapper"] h2' 'away_minutes_played':'tfoot td[data-stat="mp"]' 'away_field_goals':'tfoot td[data-stat="fg"]' 'away_field_goal_attempts':'tfoot td[data-stat="fga"]' 'away_field_goal_percentage':'tfoot 
td[data-stat="fg_pct"]' 'away_two_point_field_goals':'tfoot td[data-stat="fg2"]' 'away_two_point_field_goal_attempts':'tfoot td[data-stat="fg2a"]' 'away_two_point_field_goal_percentage':'tfoot td[data-stat="fg2_pct"]' 'away_three_point_field_goals':'tfoot td[data-stat="fg3"]' 'away_three_point_field_goal_attempts':'tfoot td[data-stat="fg3a"]' 'away_three_point_field_goal_percentage':'tfoot td[data-stat="fg3_pct"]' 'away_free_throws':'tfoot td[data-stat="ft"]' 'away_free_throw_attempts':'tfoot td[data-stat="fta"]' 'away_free_throw_percentage':'tfoot td[data-stat="ft_pct"]' 'away_offensive_rebounds':'tfoot td[data-stat="orb"]' 'away_defensive_rebounds':'tfoot td[data-stat="drb"]' 'away_total_rebounds':'tfoot td[data-stat="trb"]' 'away_assists':'tfoot td[data-stat="ast"]' 'away_steals':'tfoot td[data-stat="stl"]' 'away_blocks':'tfoot td[data-stat="blk"]' 'away_turnovers':'tfoot td[data-stat="tov"]' 'away_personal_fouls':'tfoot td[data-stat="pf"]' 'away_points':'tfoot td[data-stat="pts"]' 'away_true_shooting_percentage':'tfoot td[data-stat="ts_pct"]' 'away_effective_field_goal_percentage':'tfoot td[data-stat="efg_pct"]' 'away_three_point_attempt_rate':'tfoot td[data-stat="fg3a_per_fga_pct"]' 'away_free_throw_attempt_rate':'tfoot td[data-stat="fta_per_fga_pct"]' 'away_offensive_rebound_percentage':'tfoot td[data-stat="orb_pct"]' 'away_defensive_rebound_percentage':'tfoot td[data-stat="drb_pct"]' 'away_total_rebound_percentage':'tfoot td[data-stat="trb_pct"]' 'away_assist_percentage':'tfoot td[data-stat="ast_pct"]' 'away_steal_percentage':'tfoot td[data-stat="stl_pct"]' 'away_block_percentage':'tfoot td[data-stat="blk_pct"]' 'away_turnover_percentage':'tfoot td[data-stat="tov_pct"]' 'away_offensive_rating':'tfoot td[data-stat="off_rtg"]' 'away_defensive_rating':'tfoot td[data-stat="def_rtg"]' 'home_record':'div[class="table_wrapper"] h2' 'home_minutes_played':'tfoot td[data-stat="mp"]' 'home_field_goals':'tfoot td[data-stat="fg"]' 'home_field_goal_attempts':'tfoot td[data-stat="fga"]' 'home_field_goal_percentage':'tfoot td[data-stat="fg_pct"]' 'home_two_point_field_goals':'tfoot td[data-stat="fg2"]' 'home_two_point_field_goal_attempts':'tfoot td[data-stat="fg2a"]' 'home_two_point_field_goal_percentage':'tfoot td[data-stat="fg2_pct"]' 'home_three_point_field_goals':'tfoot td[data-stat="fg3"]' 'home_three_point_field_goal_attempts':'tfoot td[data-stat="fg3a"]' 'home_three_point_field_goal_percentage':'tfoot td[data-stat="fg3_pct"]' 'home_free_throws':'tfoot td[data-stat="ft"]' 'home_free_throw_attempts':'tfoot td[data-stat="fta"]' 'home_free_throw_percentage':'tfoot td[data-stat="ft_pct"]' 'home_offensive_rebounds':'tfoot td[data-stat="orb"]' 'home_defensive_rebounds':'tfoot td[data-stat="drb"]' 'home_total_rebounds':'tfoot td[data-stat="trb"]' 'home_assists':'tfoot td[data-stat="ast"]' 'home_steals':'tfoot td[data-stat="stl"]' 'home_blocks':'tfoot td[data-stat="blk"]' 'home_turnovers':'tfoot td[data-stat="tov"]' 'home_personal_fouls':'tfoot td[data-stat="pf"]' 'home_points':'div[class="score"]' 'home_true_shooting_percentage':'tfoot td[data-stat="ts_pct"]' 'home_effective_field_goal_percentage':'tfoot td[data-stat="efg_pct"]' 'home_three_point_attempt_rate':'tfoot td[data-stat="fg3a_per_fga_pct"]' 'home_free_throw_attempt_rate':'tfoot td[data-stat="fta_per_fga_pct"]' 'home_offensive_rebound_percentage':'tfoot td[data-stat="orb_pct"]' 'home_defensive_rebound_percentage':'tfoot td[data-stat="drb_pct"]' 'home_total_rebound_percentage':'tfoot td[data-stat="trb_pct"]' 'home_assist_percentage':'tfoot 
td[data-stat="ast_pct"]' 'home_steal_percentage':'tfoot td[data-stat="stl_pct"]' 'home_block_percentage':'tfoot td[data-stat="blk_pct"]' 'home_turnover_percentage':'tfoot td[data-stat="tov_pct"]' 'home_offensive_rating':'tfoot td[data-stat="off_rtg"]' 'home_defensive_rating':'tfoot td[data-stat="def_rtg"]'}<line_sep>BOXSCORE_ELEMENT_INDEX={'date':0 'location':1 'home_record':-1 'home_minutes_played':7 'home_field_goals':7 'home_field_goal_attempts':7 'home_field_goal_percentage':7 'home_two_point_field_goals':7 'home_two_point_field_goal_attempts':7 'home_two_point_field_goal_percentage':7 'home_three_point_field_goals':7 'home_three_point_field_goal_attempts':7 'home_three_point_field_goal_percentage':7 'home_free_throws':7 'home_free_throw_attempts':7 'home_free_throw_percentage':7 'home_offensive_rebounds':7 'home_defensive_rebounds':7 'home_total_rebounds':7 'home_assists':7 'home_steals':7 'home_blocks':7 'home_turnovers':7 'home_personal_fouls':7 'home_points':-1 'home_true_shooting_percentage':7 'home_effective_field_goal_percentage':7 'home_three_point_attempt_rate':7 'home_free_throw_attempt_rate':7 'home_offensive_rebound_percentage':7 'home_defensive_rebound_percentage':7 'home_total_rebound_percentage':7 'home_assist_percentage':7 'home_steal_percentage':7 'home_block_percentage':7 'home_turnover_percentage':7 'home_offensive_rating':7 'home_defensive_rating':7}<line_sep>PLAYER_SCHEME={'summary':'[data-template="Partials/Teams/Summary"]' 'season':'th[data-stat="season"]:first' 'name':'h1' 'team_abbreviation':'td[data-stat="team_id"]' 'position':'td[data-stat="pos"]' 'height':'span[itemprop="height"]' 'weight':'span[itemprop="weight"]' 'birth_date':'td[data-stat=""]' 'nationality':'td[data-stat=""]' 'age':'nobr' 'games_played':'td[data-stat="g"]' 'games_started':'td[data-stat="gs"]' 'minutes_played':'td[data-stat="mp"]' 'field_goals':'td[data-stat="fg"]' 'field_goal_attempts':'td[data-stat="fga"]' 'field_goal_percentage':'td[data-stat="fg_pct"]' 'three_pointers':'td[data-stat="fg3"]' 'three_point_attempts':'td[data-stat="fg3a"]' 'three_point_percentage':'td[data-stat="fg3_pct"]' 'two_pointers':'td[data-stat="fg2"]' 'two_point_attempts':'td[data-stat="fg2a"]' 'two_point_percentage':'td[data-stat="fg2_pct"]' 'effective_field_goal_percentage':'td[data-stat="efg_pct"]' 'free_throws':'td[data-stat="ft"]' 'free_throw_attempts':'td[data-stat="fta"]' 'free_throw_percentage':'td[data-stat="ft_pct"]' 'offensive_rebounds':'td[data-stat="orb"]' 'defensive_rebounds':'td[data-stat="drb"]' 'total_rebounds':'td[data-stat="trb"]' 'assists':'td[data-stat="ast"]' 'steals':'td[data-stat="stl"]' 'blocks':'td[data-stat="blk"]' 'turnovers':'td[data-stat="tov"]' 'personal_fouls':'td[data-stat="pf"]' 'points':'td[data-stat="pts"]' 'player_efficiency_rating':'td[data-stat="per"]' 'true_shooting_percentage':'td[data-stat="ts_pct"]' 'three_point_attempt_rate':'td[data-stat="fg3a_per_fga_pct"]' 'free_throw_attempt_rate':'td[data-stat="fta_per_fga_pct"]' 'offensive_rebound_percentage':'td[data-stat="orb_pct"]' 'defensive_rebound_percentage':'td[data-stat="drb_pct"]' 'total_rebound_percentage':'td[data-stat="trb_pct"]' 'assist_percentage':'td[data-stat="ast_pct"]' 'steal_percentage':'td[data-stat="stl_pct"]' 'block_percentage':'td[data-stat="blk_pct"]' 'turnover_percentage':'td[data-stat="tov_pct"]' 'usage_percentage':'td[data-stat="usg_pct"]' 'offensive_win_shares':'td[data-stat="ows"]' 'defensive_win_shares':'td[data-stat="dws"]' 'win_shares':'td[data-stat="ws"]' 
'win_shares_per_48_minutes':'td[data-stat="ws_per_48"]' 'offensive_box_plus_minus':'td[data-stat="obpm"]' 'defensive_box_plus_minus':'td[data-stat="dbpm"]' 'box_plus_minus':'td[data-stat="bpm"]' 'defensive_rating':'td[data-stat="def_rtg"]' 'offensive_rating':'td[data-stat="off_rtg"]' 'boxscore_box_plus_minus':'td[data-stat="plus_minus"]' 'value_over_replacement_player':'td[data-stat="vorp"]' 'shooting_distance':'td[data-stat="avg_dist"]' 'percentage_shots_two_pointers':'td[data-stat="fg2a_pct_fga"]' 'percentage_zero_to_three_footers':'td[data-stat="pct_fga_00_03"]' 'percentage_three_to_ten_footers':'td[data-stat="pct_fga_03_10"]' 'percentage_ten_to_sixteen_footers':'td[data-stat="pct_fga_10_16"]' 'percentage_sixteen_foot_plus_two_pointers':'td[data-stat="pct_fga_16_xx"]' 'percentage_shots_three_pointers':'td[data-stat="fg3a_pct_fga"]' 'field_goal_perc_zero_to_three_feet':'td[data-stat="fg_pct_00_03"]' 'field_goal_perc_three_to_ten_feet':'td[data-stat="fg_pct_03_10"]' 'field_goal_perc_ten_to_sixteen_feet':'td[data-stat="fg_pct_10_16"]' 'field_goal_perc_sixteen_foot_plus_two_pointers':'td[data-stat="fg_pct_16_xx"]' 'two_pointers_assisted_percentage':'td[data-stat="fg2_pct_ast"]' 'percentage_field_goals_as_dunks':'td[data-stat="pct_fg2_dunk"]' 'dunks':'td[data-stat="fg2_dunk"]' 'three_pointers_assisted_percentage':'td[data-stat="fg3_pct_ast"]' 'percentage_of_three_pointers_from_corner':'td[data-stat="pct_fg3a_corner"]' 'three_point_shot_percentage_from_corner':'td[data-stat="fg3_pct_corner"]' 'half_court_heaves':'td[data-stat="fg3a_heave"]' 'half_court_heaves_made':'td[data-stat="fg3_heave"]' 'point_guard_percentage':'td[data-stat="pct_1"]' 'shooting_guard_percentage':'td[data-stat="pct_2"]' 'small_forward_percentage':'td[data-stat="pct_3"]' 'power_forward_percentage':'td[data-stat="pct_4"]' 'center_percentage':'td[data-stat="pct_5"]' 'on_court_plus_minus':'td[data-stat="plus_minus_on"]' 'net_plus_minus':'td[data-stat="plus_minus_net"]' 'passing_turnovers':'td[data-stat="tov_bad_pass"]' 'lost_ball_turnovers':'td[data-stat="tov_lost_ball"]' 'other_turnovers':'td[data-stat="tov_other"]' 'shooting_fouls':'td[data-stat="fouls_shooting"]' 'blocking_fouls':'td[data-stat="fouls_blocking"]' 'offensive_fouls':'td[data-stat="fouls_offensive"]' 'take_fouls':'td[data-stat="fouls_take"]' 'points_generated_by_assists':'td[data-stat="astd_pts"]' 'shooting_fouls_drawn':'td[data-stat="drawn_shooting"]' 'and_ones':'td[data-stat="and1s"]' 'shots_blocked':'td[data-stat="fga_blkd"]' 'salary':'td[data-stat="salary"]' 'field_goals_per_poss':'td[data-stat="fg_per_poss"]' 'field_goal_attempts_per_poss':'td[data-stat="fga_per_poss"]' 'three_pointers_per_poss':'td[data-stat="fg3_per_poss"]' 'three_point_attempts_per_poss':'td[data-stat="fg3a_per_poss"]' 'two_pointers_per_poss':'td[data-stat="fg2_per_poss"]' 'two_point_attempts_per_poss':'td[data-stat="fg2a_per_poss"]' 'free_throws_per_poss':'td[data-stat="ft_per_poss"]' 'free_throw_attempts_per_poss':'td[data-stat="fta_per_poss"]' 'offensive_rebounds_per_poss':'td[data-stat="orb_per_poss"]' 'defensive_rebounds_per_poss':'td[data-stat="drb_per_poss"]' 'total_rebounds_per_poss':'td[data-stat="trb_per_poss"]' 'assists_per_poss':'td[data-stat="ast_per_poss"]' 'steals_per_poss':'td[data-stat="stl_per_poss"]' 'blocks_per_poss':'td[data-stat="blk_per_poss"]' 'turnovers_per_poss':'td[data-stat="tov_per_poss"]' 'personal_fouls_per_poss':'td[data-stat="pf_per_poss"]' 'points_per_poss':'td[data-stat="pts_per_poss"]'}<line_sep>NATIONALITY={'ao':'Angola' 'ag':'Antigua and Barbuda' 
'ar':'Argentina' 'au':'Australia' 'at':'Austria' 'bs':'Bahamas' 'be':'Belgium' 'ba':'Bosnia and Herzegovina' 'br':'Brazil' 'bg':'Bulgaria' 'cm':'Cameroon' 'ca':'Canada' 'td':'Chad' 'co':'Colombia' 'cv':'Cape Verde' 'cn':'China' 'hr':'Croatia' 'cu':'Cuba' 'cz':'Czech Republic' 'cd':'Democratic Republic of Congo' 'dk':'Denmark' 'dm':'Dominica' 'do':'Dominican Republic' 'eg':'Egypt' 'ee':'Estonia' 'fi':'Finland' 'fr':'France' 'gf':'French Guiana' 'ga':'Gabon' 'ge':'Georgia' 'de':'Germany' 'gh':'Ghana' 'gr':'Greece' 'gp':'Guadeloupe' 'gn':'Guinea' 'gy':'Guyana' 'ht':'Haiti' 'hu':'Hungary' 'is':'Iceland' 'ie':'Ireland' 'ir':'Islamic Republic of Iran' 'il':'Israel' 'it':'Italy' 'jm':'Jamaica' 'jp':'Japan' 'lv':'Latvia' 'lb':'Lebanon' 'lt':'Lithuania' 'lu':'Luxembourg' 'ml':'Mali' 'mq':'Martinique' 'mx':'Mexico' 'me':'Montenegro' 'ma':'Morocco' 'nl':'Netherlands' 'nz':'New Zealand' 'ng':'Nigeria' 'no':'Norway' 'pa':'Panama' 'pl':'Poland' 'pr':'Puerto Rico' 'ke':'Kenya' 'kr':'Republic of Korea' 'mk':'Republic of Macedonia' 'cg':'Republic of Congo' 'ro':'Romania' 'ru':'Russian Federation' 'lc':'Saint Lucia' 'vc':'Saint Vincent and the Grenadines' 'sd':'Sudan' 'sn':'Senegal' 'rs':'Serbia' 'sk':'Slovakia' 'si':'Slovenia' 'za':'South Africa' 'ss':'South Sudan' 'es':'Spain' 'se':'Sweden' 'ch':'Switzerland' 'tw':'Taiwan' 'tt':'Trinidad and Tobago' 'tn':'Tunisia' 'tr':'Turkey' 'us':'United States of America' 'vi':'U.S. Virgin Islands' 'ua':'Ukraine' 'gb':'United Kingdom' 'tz':'United Republic of Tanzania' 'uy':'Uruguay' 've':'Venezuela'}<line_sep>SEASON_PAGE_URL='http://www.basketball-reference.com/leagues/NBA_%s.html'<line_sep>SCHEDULE_URL='http://www.basketball-reference.com/teams/%s/%s_games.html'<line_sep>BOXSCORE_URL='https://www.basketball-reference.com/boxscores/%s.html'<line_sep>BOXSCORES_URL=('https://www.basketball-reference.com/boxscores/'<concat>'?month=%s&day=%s&year=%s')<line_sep>PLAYER_URL='https://www.basketball-reference.com/players/%s/%s.html'<line_sep>ROSTER_URL='https://www.basketball-reference.com/teams/%s/%s.html'<line_sep>
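# A minimal sketch of how a selector map such as PARSING_SCHEME above might be
# applied. The jQuery-style ":first" pseudo-selectors suggest pyquery; the HTML
# snippet, the two selectors pulled from the map, and the surrounding loop are
# illustrative only and not part of this constants module.
from pyquery import PyQuery as pq

html = """
<table>
  <tr>
    <td data-stat="g">82</td>
    <td data-stat="pts">9470</td>
  </tr>
</table>
"""

doc = pq(html)
stats = {
    field: doc(selector).text()
    for field, selector in (
        ("games_played", 'td[data-stat="g"]:first'),
        ("points", 'td[data-stat="pts"]:first'),
    )
}
print(stats)  # expected: {'games_played': '82', 'points': '9470'}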
<import_from_stmt>flask_assistant.manager Context<import_from_stmt>tests.helpers build_payload get_query_response<def_stmt>test_intents_with_different_formatting simple_client intent_payload<block_start>resp=get_query_response(simple_client intent_payload)<assert_stmt>"Message"<in>resp["fulfillmentText"]<line_sep>resp=simple_client.post("/" data=intent_payload)<assert_stmt>resp.status_code<eq>200<assert_stmt>b"Message"<in>resp.data<block_end><def_stmt>test_add_context_to_response context_assist<block_start>client=context_assist.app.test_client()<line_sep>payload=build_payload("AddContext")<line_sep>resp=get_query_response(client payload)<line_sep># full_name = f"projects/{context_assist._project_id}/agent/sessions/{context_assist.session_id}/contexts/SampleContext" # context_item = {"lifespanCount": 5, "name": full_name, "parameters": {}} # TODO access context_manager from assist, check for full context name <assert_stmt>"SampleContext"<in>resp["outputContexts"][0]["name"]<line_sep># assert context_item in resp["outputContexts"] <block_end><def_stmt>test_add_context_to_manager context_assist# with statement allows context locals to be accessed # remains within the actual request to the flask app <block_start><with_stmt>context_assist.app.test_client()<as>client<block_start>payload=build_payload("AddContext")<line_sep>resp=get_query_response(client payload)<line_sep>context_obj=Context("SampleContext")<assert_stmt>context_obj<in>context_assist.context_manager.active<block_end><block_end># def test_need_context_to_match_action(context_assist): # with context_assist.app.test_client() as client: # payload = build_payload('ContextRequired') # resp = get_query_response(client, payload) # assert 'Matched because SampleContext was active' not in resp['speech'] <def_stmt>test_docs_context docs_assist# adds 'vegetarian' context <block_start><with_stmt>docs_assist.app.test_client()<as>client<block_start>payload=build_payload("give-diet" params={"diet":"vegetarian"})<line_sep>resp=get_query_response(client payload)<line_sep>context_obj=Context("vegetarian")<assert_stmt>context_obj<in>docs_assist.context_manager.active<line_sep>next_payload=build_payload("get-food" contexts=resp["outputContexts"])<line_sep>next_resp=get_query_response(client next_payload)<assert_stmt>"farmers market"<in>next_resp["fulfillmentText"]<block_end># adds 'carnivore' context <with_stmt>docs_assist.app.test_client()<as>client<block_start>payload=build_payload("give-diet" params={"diet":"carnivore"})<line_sep>resp=get_query_response(client payload)<line_sep>context_obj=Context("carnivore")<assert_stmt>context_obj<in>docs_assist.context_manager.active<line_sep>next_payload=build_payload("get-food" contexts=resp["outputContexts"])<line_sep>next_resp=get_query_response(client next_payload)<assert_stmt>"farmers market"<not><in>next_resp["fulfillmentText"]<assert_stmt>"BBQ"<in>next_resp["fulfillmentText"]<block_end><block_end>
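# A minimal sketch of why the tests above open the test client in a `with`
# block: Flask keeps the final request context alive inside the block, so
# request-bound state (flask.request here; the assistant's context_manager in
# the tests above) can still be inspected after the response has returned. The
# tiny app below is illustrative, not the fixtures used by these tests.
from flask import Flask, request

app = Flask(__name__)

@app.route("/", methods=["POST"])
def webhook():
    return "ok"

with app.test_client() as client:
    client.post("/", json={"queryResult": {}})
    # Accessible only because the with-block preserves the request context.
    assert request.method == "POST"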
<import_stmt>unittest<import_from_stmt>ofxclient Account<import_from_stmt>ofxclient BankAccount<import_from_stmt>ofxclient BrokerageAccount<import_from_stmt>ofxclient CreditCardAccount<import_from_stmt>ofxclient Institution<class_stmt>OfxAccountTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>institution=Institution(id='1' org='Example' url='http://example.com' username='username' password='password')<line_sep>self.institution=institution<block_end><def_stmt>testNumberRequired self<block_start>a={'institution':self.institution}<line_sep>self.assertRaises(TypeError Account **a)<block_end><def_stmt>testInstitutionRequired self<block_start>a={'number':'12345'}<line_sep>self.assertRaises(TypeError Account **a)<block_end><def_stmt>testMasked self<block_start>account=Account(number='12345' institution=self.institution)<line_sep>self.assertEqual(account.number_masked() '***2345')<line_sep>account.number='1234'<line_sep>self.assertEqual(account.number_masked() '***1234')<line_sep>account.number='123'<line_sep>self.assertEqual(account.number_masked() '***123')<line_sep>account.number='12'<line_sep>self.assertEqual(account.number_masked() '***12')<line_sep>account.number='1'<line_sep>self.assertEqual(account.number_masked() '***1')<block_end><def_stmt>testDescription self<block_start>account=Account(number='12345' institution=self.institution)<line_sep>self.assertEqual(account.description '***2345' 'kwarg is not required and defaults')<line_sep>account=Account(number='12345' institution=self.institution description=<none>)<line_sep>self.assertEqual(account.description '***2345' 'None defaults')<line_sep>account=Account(number='12345' institution=self.institution description='')<line_sep>self.assertEqual(account.description '***2345' 'empty string desc defaults')<line_sep>account=Account(number='12345' institution=self.institution description='0')<line_sep>self.assertEqual(account.description '0' '0 string is preserved')<line_sep>account=Account(number='12345' institution=self.institution description='passed')<line_sep>self.assertEqual(account.description 'passed')<block_end><def_stmt>testNoInstitution self<block_start>account=Account(number='12345' institution=<none>)<block_end><block_end>
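# A minimal sketch of the masking contract the assertions above imply: keep at
# most the last four characters of the account number and prefix three
# asterisks. This is an illustration of the expected behaviour, not the actual
# ofxclient implementation.
def number_masked(number: str) -> str:
    return "***%s" % number[-4:]

assert number_masked("12345") == "***2345"
assert number_masked("123") == "***123"
assert number_masked("1") == "***1"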
<import_from_stmt>django.shortcuts render<import_from_stmt>django.contrib.auth.decorators login_required<line_sep># Create your views here. @login_required<def_stmt>testHtml request<block_start>user=request.user.username<line_sep><return>render(request 'test.html')<block_end>@login_required<def_stmt>testIndex request<block_start>user=request.user.username<line_sep><return>render(request 'anew/index.html')<block_end>
<import_from_stmt>datetime datetime<import_from_stmt>os.path dirname join<import_stmt>pytest# noqa <import_from_stmt>city_scrapers_core.constants COMMISSION PASSED<import_from_stmt>city_scrapers_core.utils file_response<import_from_stmt>freezegun freeze_time<import_from_stmt>city_scrapers.spiders.chi_ssa_24 ChiSsa24Spider<line_sep>test_response=file_response(join(dirname(__file__) "files" "chi_ssa_24.html") url="https://rpba.org/ssa-24/" )<line_sep>test_detail_response=file_response(join(dirname(__file__) "files" "chi_ssa_24_detail.html") url=("https://business.rpba.org/events/details/clark-morse-glenwood-ssa-24-commissioners-meeting-6355"# noqa ) )<line_sep>spider=ChiSsa24Spider()<line_sep>freezer=freeze_time("2019-12-10")<line_sep>freezer.start()<line_sep>spider.link_date_map=spider._parse_links(test_response)<line_sep>parsed_item=[item<for>item spider._parse_detail(test_detail_response)][0]<line_sep>freezer.stop()<def_stmt>test_title <block_start><assert_stmt>parsed_item["title"]<eq>"Commission"<block_end><def_stmt>test_description <block_start><assert_stmt>parsed_item["description"]<eq>""<block_end><def_stmt>test_start <block_start><assert_stmt>parsed_item["start"]<eq>datetime(2019 9 4 9 0)<block_end><def_stmt>test_end <block_start><assert_stmt>parsed_item["end"]<eq>datetime(2019 9 4 10 0)<block_end><def_stmt>test_time_notes <block_start><assert_stmt>parsed_item["time_notes"]<eq>""<block_end><def_stmt>test_id <block_start><assert_stmt>parsed_item["id"]<eq>"chi_ssa_24/201909040900/x/commission"<block_end><def_stmt>test_status <block_start><assert_stmt>parsed_item["status"]<eq>PASSED<block_end><def_stmt>test_location <block_start><assert_stmt>parsed_item["location"]<eq>{"address":"1448 W. Morse Ave. Chicago, IL 60626" "name":"Rogers Park Business Alliance" }<block_end><def_stmt>test_source <block_start><assert_stmt>parsed_item["source"]<eq>test_detail_response.url<block_end><def_stmt>test_links <block_start><assert_stmt>parsed_item["links"]<eq>[{"href":"https://rpba.org/wp-content/uploads/2019/09/24-9.4.19-Agenda.pdf" "title":"Agenda" } {"href":"https://rpba.org/wp-content/uploads/2019/11/24-9.4.19-Minutes.pdf" "title":"Minutes" } ]<block_end><def_stmt>test_classification <block_start><assert_stmt>parsed_item["classification"]<eq>COMMISSION<block_end><def_stmt>test_all_day <block_start><assert_stmt>parsed_item["all_day"]<is><false><block_end>
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training """<line_sep># If checking the tensors placement # tf.debugging.set_log_device_placement(True) <import_stmt>argparse<import_stmt>csv<import_stmt>timeit<import_from_stmt>time time<import_from_stmt>typing List<import_from_stmt>transformers AutoConfig AutoTokenizer is_tf_available is_torch_available<if_stmt>is_tf_available()<block_start><import_stmt>tensorflow<as>tf<import_from_stmt>transformers TFAutoModel<block_end><if_stmt>is_torch_available()<block_start><import_stmt>torch<import_from_stmt>transformers AutoModel<block_end>input_text="""Bent over their instruments, three hundred Fertilizers were plunged, as the Director of Hatcheries and Conditioning entered the room, in the scarcely breathing silence, the absent-minded, soliloquizing hum or whistle, of absorbed concentration. A troop of newly arrived students, very young, pink and callow, followed nervously, rather abjectly, at the Director's heels. Each of them carried a notebook, in which, whenever the great man spoke, he desperately scribbled. Straight from the horse's mouth. It was a rare privilege. The D. H. C. for Central London always made a point of personally conducting his new students round the various departments. "Just to give you a general idea," he would explain to them. For of course some sort of general idea they must have, if they were to do their work intelligently-though as little of one, if they were to be good and happy members of society, as possible. For particulars, as every one knows, make for virtue and happiness; generalities are intellectu- ally necessary evils. Not philosophers but fret-sawyers and stamp col- lectors compose the backbone of society. "To-morrow," he would add, smiling at them with a slightly menacing geniality, "you'll be settling down to serious work. You won't have time for generalities. Meanwhile ..." Meanwhile, it was a privilege. Straight from the horse's mouth into the notebook. The boys scribbled like mad. Tall and rather thin but upright, the Director advanced into the room. He had a long chin and big rather prominent teeth, just covered, when he was not talking, by his full, floridly curved lips. Old, young? Thirty? Fifty? Fifty-five? It was hard to say. And anyhow the question didn't arise; in this year of stability, A. F. 632, it didn't occur to you to ask it. "I shall begin at the beginning," said the D.H.C. and the more zealous students recorded his intention in their notebooks: Begin at the begin- ning. "These," he waved his hand, "are the incubators." And opening an insulated door he showed them racks upon racks of numbered test- tubes. "The week's supply of ova. Kept," he explained, "at blood heat; whereas the male gametes," and here he opened another door, "they have to be kept at thirty-five instead of thirty-seven. Full blood heat sterilizes." 
Rams wrapped in theremogene beget no lambs. Still leaning against the incubators he gave them, while the pencils scurried illegibly across the pages, a brief description of the modern fertilizing process; spoke first, of course, of its surgical introduc- tion-"the operation undergone voluntarily for the good of Society, not to mention the fact that it carries a bonus amounting to six months' salary"; continued with some account of the technique for preserving the excised ovary alive and actively developing; passed on to a consid- eration of optimum temperature, salinity, viscosity; referred to the liq- uor in which the detached and ripened eggs were kept; and, leading his charges to the work tables, actually showed them how this liquor was drawn off from the test-tubes; how it was let out drop by drop onto the specially warmed slides of the microscopes; how the eggs which it contained were inspected for abnormalities, counted and transferred to a porous receptacle; how (and he now took them to watch the operation) this receptacle was immersed in a warm bouillon containing free-swimming spermatozoa-at a minimum concentration of one hundred thousand per cubic centimetre, he insisted; and how, after ten minutes, the container was lifted out of the liquor and its contents re-examined; how, if any of the eggs remained unfertilized, it was again immersed, and, if necessary, yet again; how the fertilized ova went back to the incubators; where the Alphas and Betas re- mained until definitely bottled; while the Gammas, Deltas and Epsilons were brought out again, after only thirty-six hours, to undergo Bo- kanovsky's Process. "Bokanovsky's Process," repeated the Director, and the students un- derlined the words in their little notebooks. One egg, one embryo, one adult-normality. But a bokanovskified egg will bud, will proliferate, will divide. From eight to ninety-six buds, and every bud will grow into a perfectly formed embryo, and every embryo into a full-sized adult. Making ninety-six human beings grow where only one grew before. Progress. "Essentially," the D.H.C. concluded, "bokanovskification consists of a series of arrests of development. We check the normal growth and, paradoxically enough, the egg responds by budding." Responds by budding. The pencils were busy. He pointed. On a very slowly moving band a rack-full of test-tubes was entering a large metal box, another, rack-full was emerging. Machinery faintly purred. It took eight minutes for the tubes to go through, he told them. Eight minutes of hard X-rays being about as much as an egg can stand. A few died; of the rest, the least susceptible divided into two; most put out four buds; some eight; all were returned to the incubators, where the buds began to develop; then, after two days, were suddenly chilled, chilled and checked. Two, four, eight, the buds in their turn budded; and having budded were dosed almost to death with alcohol; consequently burgeoned again and having budded-bud out of bud out of bud-were thereafter-further arrest being generally fatal-left to develop in peace. By which time the original egg was in a fair way to becoming anything from eight to ninety-six embryos- a prodigious improvement, you will agree, on nature. Identical twins-but not in piddling twos and threes as in the old viviparous days, when an egg would sometimes accidentally divide; actually by dozens, by scores at a time. "Scores," the Director repeated and flung out his arms, as though he were distributing largesse. "Scores." 
But one of the students was fool enough to ask where the advantage lay. "My good boy!" The Director wheeled sharply round on him. "Can't you see? Can't you see?" He raised a hand; his expression was solemn. "Bokanovsky's Process is one of the major instruments of social stabil- ity!" Major instruments of social stability. Standard men and women; in uniform batches. The whole of a small factory staffed with the products of a single bokanovskified egg. "Ninety-six identical twins working ninety-six identical machines!" The voice was almost tremulous with enthusiasm. "You really know where you are. For the first time in history." He quoted the planetary motto. "Community, Identity, Stability." Grand words. "If we could bo- kanovskify indefinitely the whole problem would be solved." Solved by standard Gammas, unvarying Deltas, uniform Epsilons. Mil- lions of identical twins. The principle of mass production at last applied to biology. "But, alas," the Director shook his head, "we can't bokanovskify indefi- nitely." Ninety-six seemed to be the limit; seventy-two a good average. From the same ovary and with gametes of the same male to manufacture as many batches of identical twins as possible-that was the best (sadly a second best) that they could do. And even that was difficult. "For in nature it takes thirty years for two hundred eggs to reach ma- turity. But our business is to stabilize the population at this moment, here and now. Dribbling out twins over a quarter of a century-what would be the use of that?" Obviously, no use at all. But Podsnap's Technique had immensely ac- celerated the process of ripening. They could make sure of at least a hundred and fifty mature eggs within two years. Fertilize and bo- kanovskify-in other words, multiply by seventy-two-and you get an average of nearly eleven thousand brothers and sisters in a hundred and fifty batches of identical twins, all within two years of the same age. "And in exceptional cases we can make one ovary yield us over fifteen thousand adult individuals." Beckoning to a fair-haired, ruddy young man who happened to be passing at the moment. "<NAME>," he called. The ruddy young man approached. "Can you tell us the record for a single ovary, <NAME>?" "Sixteen thousand and twelve in this Centre," <NAME> replied with- out hesitation. He spoke very quickly, had a vivacious blue eye, and took an evident pleasure in quoting figures. "Sixteen thousand and twelve; in one hundred and eighty-nine batches of identicals. But of course they've done much better," he rattled on, "in some of the tropi- cal Centres. Singapore has often produced over sixteen thousand five hundred; and Mombasa has actually touched the seventeen thousand mark. But then they have unfair advantages. You should see the way a negro ovary responds to pituitary! It's quite astonishing, when you're used to working with European material. Still," he added, with a laugh (but the light of combat was in his eyes and the lift of his chin was challenging), "still, we mean to beat them if we can. I'm working on a wonderful Delta-Minus ovary at this moment. Only just eighteen months old. Over twelve thousand seven hundred children already, ei- ther decanted or in embryo. And still going strong. We'll beat them yet." "That's the spirit I like!" cried the Director, and clapped <NAME> on the shoulder. "Come along with us, and give these boys the benefit of your expert knowledge." Mr. Foster smiled modestly. "With pleasure." They went. 
In the Bottling Room all was harmonious bustle and ordered activity. Flaps of fresh sow's peritoneum ready cut to the proper size came shooting up in little lifts from the Organ Store in the sub-basement. Whizz and then, click! the lift-hatches hew open; the bottle-liner had only to reach out a hand, take the flap, insert, smooth-down, and be- fore the lined bottle had had time to travel out of reach along the end- less band, whizz, click! another flap of peritoneum had shot up from the depths, ready to be slipped into yet another bottle, the next of that slow interminable procession on the band. Next to the Liners stood the Matriculators. The procession advanced; one by one the eggs were transferred from their test-tubes to the larger containers; deftly the peritoneal lining was slit, the morula dropped into place, the saline solution poured in ... and already the bottle had passed, and it was the turn of the labellers. Heredity, date of fertilization, membership of Bokanovsky Group-details were trans- ferred from test-tube to bottle. No longer anonymous, but named, identified, the procession marched slowly on; on through an opening in the wall, slowly on into the Social Predestination Room. "Eighty-eight cubic metres of card-index," said Mr. Foster with relish, as they entered."""<def_stmt>create_setup_and_compute model_names:List[str] gpu:bool=<true> tensorflow:bool=<false> average_over:int=3 torchscript:bool=<false> xla:bool=<false> amp:bool=<false> fp16:bool=<false> save_to_csv:bool=<false> csv_filename:str=f"results_{round(time())}.csv" <block_start><if_stmt>xla<block_start>tf.config.optimizer.set_jit(<true>)<block_end><if_stmt>amp<block_start>tf.config.optimizer.set_experimental_options({"auto_mixed_precision":<true>})<block_end><if_stmt>tensorflow<block_start>dictionary={model_name:{}<for>model_name model_names}<line_sep>results=_compute_tensorflow(model_names dictionary average_over amp)<block_end><else_stmt><block_start>device="cuda"<if>(gpu<and>torch.cuda.is_available())<else>"cpu"<line_sep>dictionary={model_name:{}<for>model_name model_names}<line_sep>results=_compute_pytorch(model_names dictionary average_over device torchscript fp16)<block_end>print("=========== RESULTS ===========")<for_stmt>model_name model_names<block_start>print("\t"+f"======= MODEL CHECKPOINT: {model_name} =======")<for_stmt>batch_size results[model_name]["bs"]<block_start>print("\t\t"+f"===== BATCH SIZE: {batch_size} =====")<for_stmt>slice_size results[model_name]["ss"]<block_start>result=results[model_name]["results"][batch_size][slice_size]<if_stmt>isinstance(result str)<block_start>print(f"\t\t{model_name}/{batch_size}/{slice_size}: "<concat>f"{result}")<block_end><else_stmt><block_start>print(f"\t\t{model_name}/{batch_size}/{slice_size}: "<concat>f"{(round(1000<times>result)/1000)}"<concat>f"s")<block_end><block_end><block_end><block_end><if_stmt>save_to_csv<block_start><with_stmt>open(csv_filename mode="w")<as>csv_file<block_start>fieldnames=["model" "1x8" "1x64" "1x128" "1x256" "1x512" "1x1024" "2x8" "2x64" "2x128" "2x256" "2x512" "2x1024" "4x8" "4x64" "4x128" "4x256" "4x512" "4x1024" "8x8" "8x64" "8x128" "8x256" "8x512" "8x1024" ]<line_sep>writer=csv.DictWriter(csv_file fieldnames=fieldnames)<line_sep>writer.writeheader()<for_stmt>model_name model_names<block_start>model_results={f"{bs}x{ss}":results[model_name]["results"][bs][ss]<for>bs results[model_name]["results"]<for>ss results[model_name]["results"][bs]}<line_sep>writer.writerow({"model":model_name 
**model_results})<block_end><block_end><block_end><block_end><def_stmt>_compute_pytorch model_names dictionary average_over device torchscript fp16<block_start><for_stmt>c,model_name enumerate(model_names)<block_start>print(f"{c+1} / {len(model_names)}")<line_sep>config=AutoConfig.from_pretrained(model_name torchscript=torchscript)<line_sep>model=AutoModel.from_pretrained(model_name config=config)<line_sep>tokenizer=AutoTokenizer.from_pretrained(model_name)<line_sep>tokenized_sequence=tokenizer.encode(input_text add_special_tokens=<false>)<line_sep>max_input_size=tokenizer.max_model_input_sizes[model_name]<line_sep>batch_sizes=[1 2 4 8]<line_sep>slice_sizes=[8 64 128 256 512 1024]<line_sep>dictionary[model_name]={"bs":batch_sizes "ss":slice_sizes "results":{}}<line_sep>dictionary[model_name]["results"]={i:{}<for>i batch_sizes}<for_stmt>batch_size batch_sizes<block_start><if_stmt>fp16<block_start>model.half()<block_end>model.to(device)<line_sep>model.eval()<for_stmt>slice_size slice_sizes<block_start><if_stmt>max_input_size<is><not><none><and>slice_size<g>max_input_size<block_start>dictionary[model_name]["results"][batch_size][slice_size]="N/A"<block_end><else_stmt><block_start>sequence=torch.tensor(tokenized_sequence[:slice_size] device=device).repeat(batch_size 1)<try_stmt><block_start><if_stmt>torchscript<block_start>print("Tracing model with sequence size" sequence.shape)<line_sep>inference=torch.jit.trace(model sequence)<line_sep>inference(sequence)<block_end><else_stmt><block_start>inference=model<line_sep>inference(sequence)<block_end>print("Going through model with sequence of shape" sequence.shape)<line_sep>runtimes=timeit.repeat(<lambda>:inference(sequence) repeat=average_over number=3)<line_sep>average_time=sum(runtimes)/float(len(runtimes))/3.0<line_sep>dictionary[model_name]["results"][batch_size][slice_size]=average_time<block_end><except_stmt>RuntimeError<as>e<block_start>print("Doesn't fit on GPU." 
e)<line_sep>torch.cuda.empty_cache()<line_sep>dictionary[model_name]["results"][batch_size][slice_size]="N/A"<block_end><block_end><block_end><block_end><block_end><return>dictionary<block_end><def_stmt>_compute_tensorflow model_names dictionary average_over amp<block_start><for_stmt>c,model_name enumerate(model_names)<block_start>print(f"{c+1} / {len(model_names)}")<line_sep>config=AutoConfig.from_pretrained(model_name)<line_sep>model=TFAutoModel.from_pretrained(model_name config=config)<line_sep>tokenizer=AutoTokenizer.from_pretrained(model_name)<line_sep>tokenized_sequence=tokenizer.encode(input_text add_special_tokens=<false>)<line_sep>max_input_size=tokenizer.max_model_input_sizes[model_name]<line_sep>batch_sizes=[1 2 4 8]<line_sep>slice_sizes=[8 64 128 256 512 1024]<line_sep>dictionary[model_name]={"bs":batch_sizes "ss":slice_sizes "results":{}}<line_sep>dictionary[model_name]["results"]={i:{}<for>i batch_sizes}<line_sep>print("Using model" model)<line_sep>@tf.function<def_stmt>inference inputs<block_start><return>model(inputs)<block_end><for_stmt>batch_size batch_sizes<block_start><for_stmt>slice_size slice_sizes<block_start><if_stmt>max_input_size<is><not><none><and>slice_size<g>max_input_size<block_start>dictionary[model_name]["results"][batch_size][slice_size]="N/A"<block_end><else_stmt><block_start>sequence=tf.stack([tf.squeeze(tf.constant(tokenized_sequence[:slice_size])[<none> :])]<times>batch_size)<try_stmt><block_start>print("Going through model with sequence of shape" sequence.shape)<line_sep># To make sure that the model is traced + that the tensors are on the appropriate device inference(sequence)<line_sep>runtimes=timeit.repeat(<lambda>:inference(sequence) repeat=average_over number=3)<line_sep>average_time=sum(runtimes)/float(len(runtimes))/3.0<line_sep>dictionary[model_name]["results"][batch_size][slice_size]=average_time<block_end><except_stmt>tf.errors.ResourceExhaustedError<as>e<block_start>print("Doesn't fit on GPU." e)<line_sep>torch.cuda.empty_cache()<line_sep>dictionary[model_name]["results"][batch_size][slice_size]="N/A"<block_end><block_end><block_end><block_end><block_end><return>dictionary<block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--models" required=<false> type=str default="all" help="Model checkpoints to be provided "<concat>"to the AutoModel classes. Leave "<concat>"blank to benchmark the base version "<concat>"of all available model "<concat>"architectures." )<line_sep>parser.add_argument("--torch" required=<false> action="store_true" help="Benchmark the Pytorch version of the "<concat>"models")<line_sep>parser.add_argument("--torch_cuda" required=<false> action="store_true" help="Pytorch only: run on available "<concat>"cuda devices")<line_sep>parser.add_argument("--torchscript" required=<false> action="store_true" help="Pytorch only: trace the models "<concat>"using torchscript" )<line_sep>parser.add_argument("--tensorflow" required=<false> action="store_true" help="Benchmark the TensorFlow version "<concat>"of the models. Will run on GPU if "<concat>"the correct dependencies are "<concat>"installed" )<line_sep>parser.add_argument("--xla" required=<false> action="store_true" help="TensorFlow only: use XLA acceleration.")<line_sep>parser.add_argument("--amp" required=<false> action="store_true" help="TensorFlow only: use automatic mixed precision acceleration." 
)<line_sep>parser.add_argument("--fp16" required=<false> action="store_true" help="PyTorch only: use FP16 to accelerate inference.")<line_sep>parser.add_argument("--keras_predict" required=<false> action="store_true" help="Whether to use model.predict "<concat>"instead of model() to do a "<concat>"forward pass." )<line_sep>parser.add_argument("--save_to_csv" required=<false> action="store_true" help="Save to a CSV file.")<line_sep>parser.add_argument("--csv_filename" required=<false> default=<none> help="CSV filename used if saving results to csv.")<line_sep>parser.add_argument("--average_over" required=<false> default=30 type=int help="Times an experiment will be run.")<line_sep>args=parser.parse_args()<if_stmt>args.models<eq>"all"<block_start>args.models=["gpt2" "bert-base-cased" "xlnet-base-cased" "xlm-mlm-en-2048" "transfo-xl-wt103" "openai-gpt" "distilbert-base-uncased" "distilgpt2" "roberta-base" "ctrl" ]<block_end><else_stmt><block_start>args.models=args.models.split()<block_end>print("Running with arguments" args)<if_stmt>args.torch<block_start><if_stmt>is_torch_available()<block_start>create_setup_and_compute(model_names=args.models tensorflow=<false> gpu=args.torch_cuda torchscript=args.torchscript fp16=args.fp16 save_to_csv=args.save_to_csv csv_filename=args.csv_filename average_over=args.average_over )<block_end><else_stmt><block_start><raise>ImportError("Trying to run a PyTorch benchmark but PyTorch was not found in the environment.")<block_end><block_end><if_stmt>args.tensorflow<block_start><if_stmt>is_tf_available()<block_start>create_setup_and_compute(model_names=args.models tensorflow=<true> xla=args.xla amp=args.amp save_to_csv=args.save_to_csv csv_filename=args.csv_filename average_over=args.average_over )<block_end><else_stmt><block_start><raise>ImportError("Trying to run a TensorFlow benchmark but TensorFlow was not found in the environment.")<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
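# A minimal sketch of driving the benchmark above programmatically; the model
# list and CSV filename are illustrative. An equivalent CLI call is shown first
# (the script filename is an assumption), using only flags defined in main():
#
#   python benchmark.py --models "bert-base-cased distilgpt2" --torch \
#       --torch_cuda --fp16 --average_over 10 --save_to_csv --csv_filename results.csv
#
if is_torch_available():
    create_setup_and_compute(
        model_names=["bert-base-cased", "distilgpt2"],
        tensorflow=False,
        gpu=True,
        fp16=True,
        average_over=10,
        save_to_csv=True,
        csv_filename="results.csv",
    )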
""" Copyright 2020 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>unittest<import_stmt>warnings<import_from_stmt>concurrent.futures Future<import_from_stmt>lte.protos.mconfig.mconfigs_pb2 PipelineD<import_from_stmt>lte.protos.pipelined_pb2 FlowRequest<import_from_stmt>lte.protos.policydb_pb2 FlowMatch<import_from_stmt>magma.pipelined.app.dpi DPIController<import_from_stmt>magma.pipelined.bridge_util BridgeTools<import_from_stmt>magma.pipelined.policy_converters convert_ipv4_str_to_ip_proto<import_from_stmt>magma.pipelined.tests.app.start_pipelined PipelinedController TestSetup <import_from_stmt>magma.pipelined.tests.pipelined_test_util SnapshotVerifier create_service_manager start_ryu_app_thread stop_ryu_app_thread <import_from_stmt>nose.tools nottest<class_stmt>InternalPktIpfixExportTest(unittest.TestCase)<block_start>BRIDGE='testing_br'<line_sep>IFACE='testing_br'<line_sep>MAC_DEST="5e:cc:cc:b1:49:4b"<line_sep>BRIDGE_IP='192.168.128.1'<line_sep>DPI_PORT='mon1'<line_sep>DPI_IP='1.1.1.1'<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>""" Starts the thread which launches ryu apps Create a testing bridge, add a port, setup the port interfaces. Then launch the ryu apps for testing pipelined. Gets the references to apps launched by using futures, mocks the redis policy_dictionary of dpi_controller """<line_sep>super(InternalPktIpfixExportTest cls).setUpClass()<line_sep>warnings.simplefilter('ignore')<line_sep>cls._static_rule_dict={}<line_sep>cls.service_manager=create_service_manager([PipelineD.DPI] ['ue_mac' 'ipfix'] )<line_sep>cls._tbl_num=cls.service_manager.get_table_num(DPIController.APP_NAME )<line_sep>ue_mac_controller_reference=Future()<line_sep>dpi_controller_reference=Future()<line_sep>ipfix_controller_reference=Future()<line_sep>testing_controller_reference=Future()<line_sep>test_setup=TestSetup(apps=[PipelinedController.UEMac PipelinedController.DPI PipelinedController.IPFIX PipelinedController.Testing PipelinedController.StartupFlows ] references={PipelinedController.UEMac:ue_mac_controller_reference PipelinedController.DPI:dpi_controller_reference PipelinedController.Arp:Future() PipelinedController.IPFIX:ipfix_controller_reference PipelinedController.Testing:testing_controller_reference PipelinedController.StartupFlows:Future() } config={'bridge_name':cls.BRIDGE 'bridge_ip_address':'192.168.128.1' 'internal_ip_subnet':'192.168.0.0/16' 'nat_iface':'eth2' 'enodeb_iface':'eth1' 'enable_queue_pgm':<false> 'clean_restart':<true> 'setup_type':'CWF' 'dpi':{'enabled':<true> 'mon_port':'mon1' 'mon_port_number':32769 'idle_timeout':42 } 'ipfix':{'enabled':<true> 'probability':65 'collector_set_id':1 'collector_ip':'1.1.1.1' 'collector_port':65010 'cache_timeout':60 'obs_domain_id':1 'obs_point_id':1 } 'conntrackd':{'enabled':<true> } 'ovs_gtp_port_number':32768 } mconfig=PipelineD() loop=<none> service_manager=cls.service_manager integ_test=<false> )<line_sep>BridgeTools.create_bridge(cls.BRIDGE cls.IFACE)<line_sep>BridgeTools.create_internal_iface(cls.BRIDGE cls.DPI_PORT cls.DPI_IP 
)<line_sep>cls.thread=start_ryu_app_thread(test_setup)<line_sep>cls.ue_mac_controller=ue_mac_controller_reference.result()<line_sep>cls.dpi_controller=dpi_controller_reference.result()<line_sep>cls.ipfix_controller=ipfix_controller_reference.result()<line_sep>cls.testing_controller=testing_controller_reference.result()<line_sep>cls.dpi_controller._policy_dict=cls._static_rule_dict<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>stop_ryu_app_thread(cls.thread)<line_sep>BridgeTools.destroy_bridge(cls.BRIDGE)<block_end><def_stmt>test_subscriber_policy self<block_start>""" Classify DPI flow, verify internal packet is generated Assert: snapshots match """<line_sep>imsi='IMSI010000000088888'<line_sep>ue_mac='5e:cc:cc:b1:49:4b'<line_sep>self.ue_mac_controller.add_ue_mac_flow(imsi ue_mac)<line_sep>flow_match=FlowMatch(ip_proto=FlowMatch.IPPROTO_TCP ip_dst=convert_ipv4_str_to_ip_proto('192.168.3.11') ip_src=convert_ipv4_str_to_ip_proto('1.2.3.0') tcp_dst=80 tcp_src=51115 direction=FlowMatch.UPLINK )<line_sep>self.dpi_controller.add_classify_flow(flow_match FlowRequest.FLOW_FINAL_CLASSIFICATION 'base.ip.http.facebook' 'tbd' )<line_sep>self.ipfix_controller.add_ue_sample_flow(imsi "magma_is_awesome_msisdn" "00:11:22:33:44:55" "apn_name123456789" 145 )<line_sep>snapshot_verifier=SnapshotVerifier(self self.BRIDGE self.service_manager include_stats=<false> )<with_stmt>snapshot_verifier<block_start><pass><block_end>self.ipfix_controller.delete_ue_sample_flow(imsi)<line_sep>snapshot_verifier=SnapshotVerifier(self self.BRIDGE self.service_manager 'after_deletion' include_stats=<false> )<with_stmt>snapshot_verifier<block_start><pass><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
""" Tests for chunked adjustments. """<import_from_stmt>collections namedtuple<import_from_stmt>itertools chain product<import_from_stmt>string ascii_lowercase ascii_uppercase<import_from_stmt>textwrap dedent<import_from_stmt>unittest TestCase<import_from_stmt>nose_parameterized parameterized<import_from_stmt>numpy arange array asarray dtype full <import_from_stmt>six.moves zip_longest<import_from_stmt>toolz curry<import_from_stmt>zipline.errors WindowLengthNotPositive WindowLengthTooLong<import_from_stmt>zipline.lib.adjustment Boolean1DArrayOverwrite BooleanOverwrite Datetime641DArrayOverwrite Datetime64Overwrite Float641DArrayOverwrite Float64Multiply Float64Overwrite Int64Overwrite Object1DArrayOverwrite ObjectOverwrite <import_from_stmt>zipline.lib.adjusted_array AdjustedArray<import_from_stmt>zipline.lib.labelarray LabelArray<import_from_stmt>zipline.testing check_arrays<import_from_stmt>zipline.testing.predicates assert_equal<import_from_stmt>zipline.utils.compat unicode<import_from_stmt>zipline.utils.numpy_utils coerce_to_dtype datetime64ns_dtype default_missing_value_for_dtype bool_dtype float64_dtype int64_dtype object_dtype <def_stmt>moving_window array nrows<block_start>""" Simple moving window generator over a 2D numpy array. """<line_sep>count=num_windows_of_length_M_on_buffers_of_length_N(nrows len(array))<for_stmt>i range(count)<block_start><yield>array[i:i+nrows]<block_end><block_end><def_stmt>num_windows_of_length_M_on_buffers_of_length_N M N<block_start>""" For a window of length M rolling over a buffer of length N, there are (N - M) + 1 legal windows. Example: If my array has N=4 rows, and I want windows of length M=2, there are 3 legal windows: data[0:2], data[1:3], and data[2:4]. """<line_sep><return>N-M+1<block_end><def_stmt>valid_window_lengths underlying_buffer_length<block_start>""" An iterator of all legal window lengths on a buffer of a given length. Returns values from 1 to underlying_buffer_length. """<line_sep><return>iter(range(1 underlying_buffer_length+1))<block_end>@curry<def_stmt>as_dtype dtype data<block_start>""" Curried wrapper around array.astype for when you have the dtype before you have the data. """<line_sep><return>asarray(data).astype(dtype)<block_end>@curry<def_stmt>as_labelarray initial_dtype missing_value array<block_start>""" Curried wrapper around LabelArray, that round-trips the input data through `initial_dtype` first. 
"""<line_sep><return>LabelArray(array.astype(initial_dtype) missing_value=initial_dtype.type(missing_value) )<block_end>bytes_dtype=dtype('S3')<line_sep>unicode_dtype=dtype('U3')<line_sep>AdjustmentCase=namedtuple('AdjustmentCase' ['name' 'baseline' 'window_length' 'adjustments' 'missing_value' 'perspective_offset' 'expected_result' ])<def_stmt>_gen_unadjusted_cases name make_input make_expected_output missing_value<block_start>nrows=6<line_sep>ncols=3<line_sep>raw_data=arange(nrows<times>ncols).reshape(nrows ncols)<line_sep>input_array=make_input(raw_data)<line_sep>expected_output_array=make_expected_output(raw_data)<for_stmt>windowlen valid_window_lengths(nrows)<block_start>num_legal_windows=num_windows_of_length_M_on_buffers_of_length_N(windowlen nrows)<line_sep><yield>AdjustmentCase(name="%s_length_%d"%(name windowlen) baseline=input_array window_length=windowlen adjustments={} missing_value=missing_value perspective_offset=0 expected_result=[expected_output_array[offset:offset+windowlen]<for>offset range(num_legal_windows)] )<block_end><block_end><def_stmt>_gen_multiplicative_adjustment_cases dtype<block_start>""" Generate expected moving windows on a buffer with adjustments. We proceed by constructing, at each row, the view of the array we expect in in all windows anchored on that row. In general, if we have an adjustment to be applied once we process the row at index N, should see that adjustment applied to the underlying buffer for any window containing the row at index N. We then build all legal windows over these buffers. """<line_sep>adjustment_type={float64_dtype:Float64Multiply }[dtype]<line_sep>nrows,ncols=6 3<line_sep>adjustments={}<line_sep>buffer_as_of=[<none>]<times>6<line_sep>baseline=buffer_as_of[0]=full((nrows ncols) 1 dtype=dtype)<line_sep># Note that row indices are inclusive! adjustments[1]=[adjustment_type(0 0 0 0 coerce_to_dtype(dtype 2)) ]<line_sep>buffer_as_of[1]=array([[2 1 1] [1 1 1] [1 1 1] [1 1 1] [1 1 1] [1 1 1]] dtype=dtype)<line_sep># No adjustment at index 2. buffer_as_of[2]=buffer_as_of[1]<line_sep>adjustments[3]=[adjustment_type(1 2 1 1 coerce_to_dtype(dtype 3)) adjustment_type(0 1 0 0 coerce_to_dtype(dtype 4)) ]<line_sep>buffer_as_of[3]=array([[8 1 1] [4 3 1] [1 3 1] [1 1 1] [1 1 1] [1 1 1]] dtype=dtype)<line_sep>adjustments[4]=[adjustment_type(0 3 2 2 coerce_to_dtype(dtype 5))]<line_sep>buffer_as_of[4]=array([[8 1 5] [4 3 5] [1 3 5] [1 1 5] [1 1 1] [1 1 1]] dtype=dtype)<line_sep>adjustments[5]=[adjustment_type(0 4 1 1 coerce_to_dtype(dtype 6)) adjustment_type(2 2 2 2 coerce_to_dtype(dtype 7)) ]<line_sep>buffer_as_of[5]=array([[8 6 5] [4 18 5] [1 18 35] [1 6 5] [1 6 1] [1 1 1]] dtype=dtype)<line_sep><return>_gen_expectations(baseline default_missing_value_for_dtype(dtype) adjustments buffer_as_of nrows perspective_offsets=(0 1) )<block_end><def_stmt>_gen_overwrite_adjustment_cases dtype<block_start>""" Generate test cases for overwrite adjustments. The algorithm used here is the same as the one used above for multiplicative adjustments. The only difference is the semantics of how the adjustments are expected to modify the arrays. This is parameterized on `make_input` and `make_expected_output` functions, which take 2-D lists of values and transform them into desired input/output arrays. We do this so that we can easily test both vanilla numpy ndarrays and our own LabelArray class for strings. 
"""<line_sep>adjustment_type={float64_dtype:Float64Overwrite datetime64ns_dtype:Datetime64Overwrite int64_dtype:Int64Overwrite bytes_dtype:ObjectOverwrite unicode_dtype:ObjectOverwrite object_dtype:ObjectOverwrite bool_dtype:BooleanOverwrite }[dtype]<line_sep>make_expected_dtype=as_dtype(dtype)<line_sep>missing_value=default_missing_value_for_dtype(datetime64ns_dtype)<if_stmt>dtype<eq>object_dtype# When we're testing object dtypes, we expect to have strings, but # coerce_to_dtype(object, 3) just gives 3 as a Python integer. <block_start><def_stmt>make_overwrite_value dtype value<block_start><return>str(value)<block_end><block_end><else_stmt><block_start>make_overwrite_value=coerce_to_dtype<block_end>adjustments={}<line_sep>buffer_as_of=[<none>]<times>6<line_sep>baseline=make_expected_dtype([[2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep>buffer_as_of[0]=make_expected_dtype([[2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep># Note that row indices are inclusive! adjustments[1]=[adjustment_type(0 0 0 0 make_overwrite_value(dtype 1)) ]<line_sep>buffer_as_of[1]=make_expected_dtype([[1 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep># No adjustment at index 2. buffer_as_of[2]=buffer_as_of[1]<line_sep>adjustments[3]=[adjustment_type(1 2 1 1 make_overwrite_value(dtype 3)) adjustment_type(0 1 0 0 make_overwrite_value(dtype 4)) ]<line_sep>buffer_as_of[3]=make_expected_dtype([[4 2 2] [4 3 2] [2 3 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep>adjustments[4]=[adjustment_type(0 3 2 2 make_overwrite_value(dtype 5))]<line_sep>buffer_as_of[4]=make_expected_dtype([[4 2 5] [4 3 5] [2 3 5] [2 2 5] [2 2 2] [2 2 2]])<line_sep>adjustments[5]=[adjustment_type(0 4 1 1 make_overwrite_value(dtype 6)) adjustment_type(2 2 2 2 make_overwrite_value(dtype 7)) ]<line_sep>buffer_as_of[5]=make_expected_dtype([[4 6 5] [4 6 5] [2 6 7] [2 6 5] [2 6 2] [2 2 2]])<line_sep><return>_gen_expectations(baseline missing_value adjustments buffer_as_of nrows=6 perspective_offsets=(0 1) )<block_end><def_stmt>_gen_overwrite_1d_array_adjustment_case dtype<block_start>""" Generate test cases for overwrite adjustments. The algorithm used here is the same as the one used above for multiplicative adjustments. The only difference is the semantics of how the adjustments are expected to modify the arrays. This is parameterized on `make_input` and `make_expected_output` functions, which take 1-D lists of values and transform them into desired input/output arrays. We do this so that we can easily test both vanilla numpy ndarrays and our own LabelArray class for strings. """<line_sep>adjustment_type={bool_dtype:Boolean1DArrayOverwrite float64_dtype:Float641DArrayOverwrite datetime64ns_dtype:Datetime641DArrayOverwrite }[dtype]<line_sep>make_expected_dtype=as_dtype(dtype)<line_sep>missing_value=default_missing_value_for_dtype(datetime64ns_dtype)<line_sep>adjustments={}<line_sep>buffer_as_of=[<none>]<times>6<line_sep>baseline=make_expected_dtype([[2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep>buffer_as_of[0]=make_expected_dtype([[2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep>vals1=[1]<line_sep># Note that row indices are inclusive! adjustments[1]=[adjustment_type(0 0 0 0 array([coerce_to_dtype(dtype val)<for>val vals1]))]<line_sep>buffer_as_of[1]=make_expected_dtype([[1 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep># No adjustment at index 2. 
buffer_as_of[2]=buffer_as_of[1]<line_sep>vals3=[4 4 1]<line_sep>adjustments[3]=[adjustment_type(0 2 0 0 array([coerce_to_dtype(dtype val)<for>val vals3]))]<line_sep>buffer_as_of[3]=make_expected_dtype([[4 2 2] [4 2 2] [1 2 2] [2 2 2] [2 2 2] [2 2 2]])<line_sep>vals4=[5]<times>4<line_sep>adjustments[4]=[adjustment_type(0 3 2 2 array([coerce_to_dtype(dtype val)<for>val vals4]))]<line_sep>buffer_as_of[4]=make_expected_dtype([[4 2 5] [4 2 5] [1 2 5] [2 2 5] [2 2 2] [2 2 2]])<line_sep>vals5=range(1 6)<line_sep>adjustments[5]=[adjustment_type(0 4 1 1 array([coerce_to_dtype(dtype val)<for>val vals5])) ]<line_sep>buffer_as_of[5]=make_expected_dtype([[4 1 5] [4 2 5] [1 3 5] [2 4 5] [2 5 2] [2 2 2]])<line_sep><return>_gen_expectations(baseline missing_value adjustments buffer_as_of nrows=6 perspective_offsets=(0 1) )<block_end><def_stmt>_gen_expectations baseline missing_value adjustments buffer_as_of nrows perspective_offsets<block_start><for_stmt>windowlen,perspective_offset product(valid_window_lengths(nrows) perspective_offsets)# How long is an iterator of length-N windows on this buffer? # For example, for a window of length 3 on a buffer of length 6, there # are four valid windows. <block_start>num_legal_windows=num_windows_of_length_M_on_buffers_of_length_N(windowlen nrows)<line_sep># Build the sequence of regions in the underlying buffer we expect to # see. For example, with a window length of 3 on a buffer of length 6, # we expect to see: # (buffer[0:3], buffer[1:4], buffer[2:5], buffer[3:6]) # slices=[slice(i i+windowlen)<for>i range(num_legal_windows)]<line_sep># The sequence of perspectives we expect to take on the underlying # data. For example, with a window length of 3 and a perspective offset # of 1, we expect to see: # (buffer_as_of[3], buffer_as_of[4], buffer_as_of[5], buffer_as_of[5]) # initial_perspective=windowlen+perspective_offset-1<line_sep>perspectives=range(initial_perspective initial_perspective+num_legal_windows)<def_stmt>as_of p# perspective_offset can push us past the end of the underlying # buffer/adjustments. When it does, we should always see the latest # version of the buffer. 
<block_start><if_stmt>p<ge>len(buffer_as_of)<block_start><return>buffer_as_of[-1]<block_end><return>buffer_as_of[p]<block_end>expected_iterator_results=[as_of(perspective)[slice_]<for>slice_,perspective zip(slices perspectives)]<line_sep>test_name="dtype_{}_length_{}_perpective_offset_{}".format(baseline.dtype windowlen perspective_offset )<line_sep><yield>AdjustmentCase(name=test_name baseline=baseline window_length=windowlen adjustments=adjustments missing_value=missing_value perspective_offset=perspective_offset expected_result=expected_iterator_results)<block_end><block_end><class_stmt>AdjustedArrayTestCase(TestCase)<block_start><def_stmt>test_traverse_invalidating self<block_start>data=arange(5<times>3 dtype='f8').reshape(5 3)<line_sep>original_data=data.copy()<line_sep>adjustments={2:[Float64Multiply(0 4 0 2 2.0)]}<line_sep>adjusted_array=AdjustedArray(data adjustments float('nan'))<for_stmt>_ adjusted_array.traverse(1 copy=<false>)<block_start><pass><block_end>assert_equal(data original_data<times>2)<with_stmt>self.assertRaises(ValueError)<as>e<block_start>adjusted_array.traverse(1)<block_end>assert_equal(str(e.exception) 'cannot traverse invalidated AdjustedArray' )<block_end><def_stmt>test_copy self<block_start>data=arange(5<times>3 dtype='f8').reshape(5 3)<line_sep>original_data=data.copy()<line_sep>adjustments={2:[Float64Multiply(0 4 0 2 2.0)]}<line_sep>adjusted_array=AdjustedArray(data adjustments float('nan'))<line_sep>traverse_copy=adjusted_array.copy()<line_sep>clean_copy=adjusted_array.copy()<line_sep>a_it=adjusted_array.traverse(2 copy=<false>)<line_sep>b_it=traverse_copy.traverse(2 copy=<false>)<for_stmt>a,b zip(a_it b_it)<block_start>assert_equal(a b)<block_end><with_stmt>self.assertRaises(ValueError)<as>e<block_start>adjusted_array.copy()<block_end>assert_equal(str(e.exception) 'cannot copy invalidated AdjustedArray' )<line_sep># the clean copy should have the original data even though the # original adjusted array has it's data mutated in place assert_equal(clean_copy.data original_data)<line_sep>assert_equal(adjusted_array.data original_data<times>2)<block_end>@parameterized.expand(chain(_gen_unadjusted_cases('float' make_input=as_dtype(float64_dtype) make_expected_output=as_dtype(float64_dtype) missing_value=default_missing_value_for_dtype(float64_dtype) ) _gen_unadjusted_cases('datetime' make_input=as_dtype(datetime64ns_dtype) make_expected_output=as_dtype(datetime64ns_dtype) missing_value=default_missing_value_for_dtype(datetime64ns_dtype) ) # Test passing an array of strings to AdjustedArray. _gen_unadjusted_cases('bytes_ndarray' make_input=as_dtype(bytes_dtype) make_expected_output=as_labelarray(bytes_dtype b'') missing_value=b'' ) _gen_unadjusted_cases('unicode_ndarray' make_input=as_dtype(unicode_dtype) make_expected_output=as_labelarray(unicode_dtype u'') missing_value=u'' ) _gen_unadjusted_cases('object_ndarray' make_input=<lambda>a:a.astype(unicode).astype(object) make_expected_output=as_labelarray(unicode_dtype u'') missing_value='' ) # Test passing a LabelArray directly to AdjustedArray. 
_gen_unadjusted_cases('bytes_labelarray' make_input=as_labelarray(bytes_dtype b'') make_expected_output=as_labelarray(bytes_dtype b'') missing_value=b'' ) _gen_unadjusted_cases('unicode_labelarray' make_input=as_labelarray(unicode_dtype <none>) make_expected_output=as_labelarray(unicode_dtype <none>) missing_value=u'' ) _gen_unadjusted_cases('object_labelarray' make_input=(<lambda>a:LabelArray(a.astype(unicode).astype(object) u'')) make_expected_output=as_labelarray(unicode_dtype '') missing_value='' ) ))<def_stmt>test_no_adjustments self name data lookback adjustments missing_value perspective_offset expected_output<block_start>array=AdjustedArray(data adjustments missing_value)<for_stmt>_ range(2)# Iterate 2x ensure adjusted_arrays are re-usable. <block_start>in_out=zip(array.traverse(lookback) expected_output)<for_stmt>yielded,expected_yield in_out<block_start>check_arrays(yielded expected_yield)<block_end><block_end><block_end>@parameterized.expand(_gen_multiplicative_adjustment_cases(float64_dtype))<def_stmt>test_multiplicative_adjustments self name data lookback adjustments missing_value perspective_offset expected<block_start>array=AdjustedArray(data adjustments missing_value)<for_stmt>_ range(2)# Iterate 2x ensure adjusted_arrays are re-usable. <block_start>window_iter=array.traverse(lookback perspective_offset=perspective_offset )<for_stmt>yielded,expected_yield zip_longest(window_iter expected)<block_start>check_arrays(yielded expected_yield)<block_end><block_end><block_end>@parameterized.expand(chain(_gen_overwrite_adjustment_cases(bool_dtype) _gen_overwrite_adjustment_cases(int64_dtype) _gen_overwrite_adjustment_cases(float64_dtype) _gen_overwrite_adjustment_cases(datetime64ns_dtype) _gen_overwrite_1d_array_adjustment_case(float64_dtype) _gen_overwrite_1d_array_adjustment_case(datetime64ns_dtype) _gen_overwrite_1d_array_adjustment_case(bool_dtype) # There are six cases here: # Using np.bytes/np.unicode/object arrays as inputs. # Passing np.bytes/np.unicode/object arrays to LabelArray, # and using those as input. # # The outputs should always be LabelArrays. _gen_unadjusted_cases('bytes_ndarray' make_input=as_dtype(bytes_dtype) make_expected_output=as_labelarray(bytes_dtype b'') missing_value=b'' ) _gen_unadjusted_cases('unicode_ndarray' make_input=as_dtype(unicode_dtype) make_expected_output=as_labelarray(unicode_dtype u'') missing_value=u'' ) _gen_unadjusted_cases('object_ndarray' make_input=<lambda>a:a.astype(unicode).astype(object) make_expected_output=as_labelarray(unicode_dtype u'') missing_value=u'' ) _gen_unadjusted_cases('bytes_labelarray' make_input=as_labelarray(bytes_dtype b'') make_expected_output=as_labelarray(bytes_dtype b'') missing_value=b'' ) _gen_unadjusted_cases('unicode_labelarray' make_input=as_labelarray(unicode_dtype u'') make_expected_output=as_labelarray(unicode_dtype u'') missing_value=u'' ) _gen_unadjusted_cases('object_labelarray' make_input=(<lambda>a:LabelArray(a.astype(unicode).astype(object) <none> )) make_expected_output=as_labelarray(unicode_dtype u'') missing_value=<none> ) ))<def_stmt>test_overwrite_adjustment_cases self name baseline lookback adjustments missing_value perspective_offset expected<block_start>array=AdjustedArray(baseline adjustments missing_value)<for_stmt>_ range(2)# Iterate 2x ensure adjusted_arrays are re-usable. 
<block_start>window_iter=array.traverse(lookback perspective_offset=perspective_offset )<for_stmt>yielded,expected_yield zip_longest(window_iter expected)<block_start>check_arrays(yielded expected_yield)<block_end><block_end><block_end><def_stmt>test_object1darrayoverwrite self<block_start>pairs=[u+l<for>u,l product(ascii_uppercase ascii_lowercase)]<line_sep>categories=pairs+['~'+c<for>c pairs]<line_sep>baseline=LabelArray(array([[''.join((r c))<for>c 'abc']<for>r ascii_uppercase]) <none> categories )<line_sep>full_expected=baseline.copy()<def_stmt>flip cs<block_start><if_stmt>cs<is><none><block_start><return><none><block_end><if_stmt>cs[0]<ne>'~'<block_start><return>'~'+cs<block_end><return>cs<block_end><def_stmt>make_overwrite fr lr fc lc<block_start>fr,lr,fc,lc=map(ord (fr lr fc lc))<line_sep>fr<augsub>ord('A')<line_sep>lr<augsub>ord('A')<line_sep>fc<augsub>ord('a')<line_sep>lc<augsub>ord('a')<line_sep><return>Object1DArrayOverwrite(fr lr fc lc baseline[fr:lr+1 fc].map(flip) )<block_end>overwrites={3:[make_overwrite('A' 'B' 'a' 'a')] 4:[make_overwrite('A' 'C' 'b' 'c')] 5:[make_overwrite('D' 'D' 'a' 'b')] }<line_sep>it=AdjustedArray(baseline overwrites <none>).traverse(3)<line_sep>window=next(it)<line_sep>expected=full_expected[:3]<line_sep>check_arrays(window expected)<line_sep>window=next(it)<line_sep>full_expected[0:2 0]=LabelArray(['~Aa' '~Ba'] <none>)<line_sep>expected=full_expected[1:4]<line_sep>check_arrays(window expected)<line_sep>window=next(it)<line_sep>full_expected[0:3 1:3]=LabelArray([['~Ab' '~Ac'] ['~Bb' '~Bc'] ['~Cb' '~Cb']] <none>)<line_sep>expected=full_expected[2:5]<line_sep>check_arrays(window expected)<line_sep>window=next(it)<line_sep>full_expected[3 :2]='~Da'<line_sep>expected=full_expected[3:6]<line_sep>check_arrays(window expected)<block_end><def_stmt>test_invalid_lookback self<block_start>data=arange(30 dtype=float).reshape(6 5)<line_sep>adj_array=AdjustedArray(data {} float('nan'))<with_stmt>self.assertRaises(WindowLengthTooLong)<block_start>adj_array.traverse(7)<block_end><with_stmt>self.assertRaises(WindowLengthNotPositive)<block_start>adj_array.traverse(0)<block_end><with_stmt>self.assertRaises(WindowLengthNotPositive)<block_start>adj_array.traverse(-1)<block_end><block_end><def_stmt>test_array_views_arent_writable self<block_start>data=arange(30 dtype=float).reshape(6 5)<line_sep>adj_array=AdjustedArray(data {} float('nan'))<for_stmt>frame adj_array.traverse(3)<block_start><with_stmt>self.assertRaises(ValueError)<block_start>frame[0 0]=5.0<block_end><block_end><block_end><def_stmt>test_inspect self<block_start>data=arange(15 dtype=float).reshape(5 3)<line_sep>adj_array=AdjustedArray(data {4:[Float64Multiply(2 3 0 0 4.0)]} float('nan') )<line_sep>expected=dedent("""\ Adjusted Array (float64): Data: array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.], [ 9., 10., 11.], [ 12., 13., 14.]]) Adjustments: {4: [Float64Multiply(first_row=2, last_row=3, first_col=0, \ last_col=0, value=4.000000)]} """)<line_sep>got=adj_array.inspect()<line_sep>self.assertEqual(expected got)<block_end><def_stmt>test_update_labels self<block_start>data=array([['aaa' 'bbb' 'ccc'] ['ddd' 'eee' 'fff'] ['ggg' 'hhh' 'iii'] ['jjj' 'kkk' 'lll'] ['mmm' 'nnn' 'ooo'] ])<line_sep>label_array=LabelArray(data missing_value='')<line_sep>adj_array=AdjustedArray(data=label_array adjustments={4:[ObjectOverwrite(2 3 0 0 'ppp')]} missing_value='' )<line_sep>expected_data=array([['aaa-foo' 'bbb-foo' 'ccc-foo'] ['ddd-foo' 'eee-foo' 'fff-foo'] ['ggg-foo' 'hhh-foo' 'iii-foo'] ['jjj-foo' 'kkk-foo' 'lll-foo'] 
['mmm-foo' 'nnn-foo' 'ooo-foo'] ])<line_sep>expected_label_array=LabelArray(expected_data missing_value='')<line_sep>expected_adj_array=AdjustedArray(data=expected_label_array adjustments={4:[ObjectOverwrite(2 3 0 0 'ppp-foo')]} missing_value='' )<line_sep>adj_array.update_labels(<lambda>x:x+'-foo')<line_sep># Check that the mapped AdjustedArray has the expected baseline # values and adjustment values. check_arrays(adj_array.data expected_adj_array.data)<line_sep>self.assertEqual(adj_array.adjustments expected_adj_array.adjustments)<block_end>A=Float64Multiply(0 4 1 1 0.5)<line_sep>B=Float64Overwrite(3 3 4 4 4.2)<line_sep>C=Float64Multiply(0 2 0 0 0.14)<line_sep>D=Float64Overwrite(0 3 0 0 4.0)<line_sep>E=Float64Overwrite(0 0 1 1 3.7)<line_sep>F=Float64Multiply(0 4 3 3 10.0)<line_sep>G=Float64Overwrite(5 5 4 4 1.7)<line_sep>H=Float64Multiply(0 4 2 2 0.99)<line_sep>S=Float64Multiply(0 1 4 4 5.06)<line_sep>@parameterized.expand([(# Initial adjustments {1:[A B] 2:[C] 4:[D] } # Adjustments to add {1:[E] 2:[F G] 3:[H S] } # Expected adjustments with 'append' {1:[A B E] 2:[C F G] 3:[H S] 4:[D] } # Expected adjustments with 'prepend' {1:[E A B] 2:[F G C] 3:[H S] 4:[D] } )])<def_stmt>test_update_adjustments self initial_adjustments adjustments_to_add expected_adjustments_with_append expected_adjustments_with_prepend<block_start>methods=['append' 'prepend']<line_sep>expected_outputs=[expected_adjustments_with_append expected_adjustments_with_prepend]<for_stmt>method,expected_output zip(methods expected_outputs)<block_start>data=arange(30 dtype=float).reshape(6 5)<line_sep>adjusted_array=AdjustedArray(data initial_adjustments float('nan'))<line_sep>adjusted_array.update_adjustments(adjustments_to_add method)<line_sep>self.assertEqual(adjusted_array.adjustments expected_output)<block_end><block_end><block_end>
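# Illustrative sketch (not part of the original test suite): the window-count
# arithmetic that moving_window and num_windows_of_length_M_on_buffers_of_length_N
# above rely on, shown on a concrete 4x3 buffer.
import numpy as np

data = np.arange(12).reshape(4, 3)          # N = 4 rows
M = 2                                       # window length
windows = [data[i:i + M] for i in range((4 - M) + 1)]
assert len(windows) == 3                    # (N - M) + 1 legal windows
assert (windows[-1] == data[2:4]).all()     # the last window ends at the end of the buffer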
""" Write a function that calculates the number of days between two given dates. Input Data: Date1 = 2011-1-1 Date2 = 2021-1-1' """<import_stmt>datetime<def_stmt>date_diff Date1 Date2<block_start>delta=Date2-Date1<line_sep><return>(delta)<block_end>
# Computes expected results for `testGRU()` in `Tests/TensorFlowTests/LayerTests.swift`. # Requires 'tensorflow>=2.0.0a0' (e.g. "pip install tensorflow==2.2.0"). <import_stmt>sys<import_stmt>numpy<import_stmt>tensorflow<as>tf<line_sep># Set random seed for repetable results tf.random.set_seed(0)<def_stmt>indented s<block_start><return>'\n'.join([' '+l<for>l s.split('\n')])<block_end><def_stmt>swift_tensor name tensor<block_start><if_stmt>hasattr(tensor 'numpy')<block_start>tensor=tensor.numpy()<block_end><def_stmt>format_float x<block_start>formatted=numpy.format_float_positional(x unique=<true>)<if_stmt>formatted[-1]<eq>'.'<block_start><return>formatted+'0'<block_end><return>formatted<block_end>formatter={'float_kind':format_float}<line_sep><return>'let {} = Tensor<Float>(\n{}\n)'.format(name indented(numpy.array2string(tensor separator=',' formatter=formatter)))<block_end>units=4<line_sep>input_dim=3<line_sep>input_length=4<line_sep>go_backwards="go_backwards"<in>sys.argv<line_sep># Initialize the keras model with the GRU. gru=tf.keras.layers.GRU(input_dim=input_dim units=units activation="tanh" recurrent_activation="sigmoid" return_sequences=<true> return_state=<true> go_backwards=go_backwards)<line_sep>x_input=tf.keras.Input(shape=[input_length input_dim])<line_sep>initial_state=tf.keras.Input(shape=[units])<line_sep>initial_state_input=[initial_state]<line_sep>output=gru(x_input initial_state=initial_state_input)<line_sep>model=tf.keras.Model(inputs=[x_input initial_state_input] outputs=[output])<line_sep>[kernel recurrent_kernel bias]=gru.get_weights()<line_sep>update_kernel=kernel[: :units]<line_sep>update_recurrent_kernel=recurrent_kernel[: :units]<line_sep>reset_kernel=kernel[: units:units<times>2]<line_sep>reset_recurrent_kernel=recurrent_kernel[: units:units<times>2]<line_sep>new_kernel=kernel[: units<times>2:]<line_sep>new_recurrent_kernel=recurrent_kernel[: units<times>2:]<line_sep>update_bias=bias[0][:units]<line_sep>update_recurrent_bias=bias[1][:units]<line_sep>reset_bias=bias[0][units:units<times>2]<line_sep>reset_recurrent_bias=bias[1][units:units<times>2]<line_sep>new_bias=bias[0][units<times>2:]<line_sep>new_recurrent_bias=bias[1][units<times>2:]<line_sep># Print the GRU weights. print(swift_tensor('updateKernel' update_kernel))<line_sep>print(swift_tensor('resetKernel' reset_kernel))<line_sep>print(swift_tensor('outputKernel' new_kernel))<line_sep>print(swift_tensor('updateRecurrentKernel' update_recurrent_kernel))<line_sep>print(swift_tensor('resetRecurrentKernel' reset_recurrent_kernel))<line_sep>print(swift_tensor('outputRecurrentKernel' new_recurrent_kernel))<line_sep>print(swift_tensor('updateBias' update_bias))<line_sep>print(swift_tensor('resetBias' reset_bias))<line_sep>print(swift_tensor('outputBias' new_bias))<line_sep>print(swift_tensor('updateRecurrentBias' update_recurrent_bias))<line_sep>print(swift_tensor('resetRecurrentBias' reset_recurrent_bias))<line_sep>print(swift_tensor('outputRecurrentBias' new_recurrent_bias))<line_sep># Initialize input data and print it. x=tf.keras.initializers.GlorotUniform()(shape=[1 input_length input_dim])<line_sep>initial_state=[tf.keras.initializers.GlorotUniform()(shape=[1 units]) ]<line_sep>print(swift_tensor('x' x))<line_sep>print(swift_tensor('initialState' initial_state[0]))<line_sep># Run forwards and backwards pass and print the results. 
<with_stmt>tf.GradientTape()<as>tape<block_start>tape.watch(x)<line_sep>tape.watch(initial_state)<line_sep>[[states final_state]]=model([x initial_state])<line_sep>sum_output=tf.reduce_sum(states[0][-1])<block_end>[grad_model grad_x grad_initial_state]=tape.gradient(sum_output [model.variables x initial_state])<line_sep>[grad_kernel grad_recurrent_kernel grad_bias]=grad_model<line_sep>[grad_initial_state]=grad_initial_state<line_sep>grad_update_kernel=grad_kernel[: :units]<line_sep>grad_update_recurrent_kernel=grad_recurrent_kernel[: :units]<line_sep>grad_reset_kernel=grad_kernel[: units:units<times>2]<line_sep>grad_reset_recurrent_kernel=grad_recurrent_kernel[: units:units<times>2]<line_sep>grad_new_kernel=grad_kernel[: units<times>2:]<line_sep>grad_new_recurrent_kernel=grad_recurrent_kernel[: units<times>2:]<line_sep>grad_update_bias=grad_bias[0][:units]<line_sep>grad_update_recurrent_bias=grad_bias[1][:units]<line_sep>grad_reset_bias=grad_bias[0][units:units<times>2]<line_sep>grad_reset_recurrent_bias=grad_bias[1][units:units<times>2]<line_sep>grad_new_bias=grad_bias[0][units<times>2:]<line_sep>grad_new_recurrent_bias=grad_bias[1][units<times>2:]<line_sep>print(swift_tensor('expectedSum' sum_output))<line_sep>print(swift_tensor('expectedStates' states))<line_sep>print(swift_tensor('expectedFinalState' final_state))<line_sep>print(swift_tensor('expectedGradX' grad_x))<line_sep>print(swift_tensor('expectedGradInitialState' grad_initial_state))<line_sep>print(swift_tensor('expectedGradUpdateKernel' grad_update_kernel))<line_sep>print(swift_tensor('expectedGradResetKernel' grad_reset_kernel))<line_sep>print(swift_tensor('expectedGradOutputKernel' grad_new_kernel))<line_sep>print(swift_tensor('expectedGradUpdateRecurrentKernel' grad_update_recurrent_kernel))<line_sep>print(swift_tensor('expectedGradResetRecurrentKernel' grad_reset_recurrent_kernel))<line_sep>print(swift_tensor('expectedGradOutputRecurrentKernel' grad_new_recurrent_kernel))<line_sep>print(swift_tensor('expectedGradUpdateBias' grad_update_bias))<line_sep>print(swift_tensor('expectedGradResetBias' grad_reset_bias))<line_sep>print(swift_tensor('expectedGradOutputBias' grad_new_bias))<line_sep>print(swift_tensor('expectedGradUpdateRecurrentBias' grad_update_recurrent_bias))<line_sep>print(swift_tensor('expectedGradResetRecurrentBias' grad_reset_recurrent_bias))<line_sep>print(swift_tensor('expectedGradOutputRecurrentBias' grad_new_recurrent_bias))<line_sep>
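# Illustrative sketch (mirrors the slicing above; nothing new is assumed about
# Keras): the fused GRU kernel has shape (input_dim, 3 * units) with columns
# ordered [update | reset | new], so it splits into three equal blocks.
import numpy as np

units, input_dim = 4, 3
kernel = np.arange(input_dim * 3 * units).reshape(input_dim, 3 * units)
update_kernel = kernel[:, :units]
reset_kernel = kernel[:, units:units * 2]
new_kernel = kernel[:, units * 2:]
assert update_kernel.shape == reset_kernel.shape == new_kernel.shape == (input_dim, units)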
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>gin<import_stmt>tensorflow<as>tf<line_sep>@gin.configurable<def_stmt>split_observation_fn o<block_start>dimo=o.get_shape().as_list()[-1]<assert_stmt>dimo<eq>23 ("The dimension does not match.")<line_sep>task_specific_ob,agent_pose,agent_vel,internal_states,action=tf.split(o [3 6 6 6 2] axis=-1)<line_sep><return>(action task_specific_ob)<block_end>
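# Usage sketch for split_observation_fn above (assumes the function is in scope;
# the [3, 6, 6, 6, 2] layout of the 23-dim observation comes from the code itself).
import tensorflow as tf

obs = tf.zeros([8, 23])                      # a dummy batch of observations
action, task_ob = split_observation_fn(obs)
print(action.shape, task_ob.shape)           # (8, 2) (8, 3)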
# Author: <NAME> <<EMAIL>> # # License: BSD 3 clause <import_from_future_stmt> division<import_stmt>numpy<as>np<import_stmt>scipy.sparse<as>sp<import_stmt>operator<import_stmt>array<import_from_stmt>sklearn.utils check_random_state<import_from_stmt>sklearn.utils.fixes astype<import_from_stmt>._random sample_without_replacement<line_sep>__all__=['sample_without_replacement' 'choice']<line_sep># This is a backport of np.random.choice from numpy 1.7 # The function can be removed when we bump the requirements to >=1.7 <def_stmt>choice a size=<none> replace=<true> p=<none> random_state=<none><block_start>""" choice(a, size=None, replace=True, p=None) Generates a random sample from a given 1-D array .. versionadded:: 1.7.0 Parameters ----------- a : 1-D array-like or int If an ndarray, a random sample is generated from its elements. If an int, the random sample is generated as if a was np.arange(n) size : int or tuple of ints, optional Output shape. Default is None, in which case a single value is returned. replace : boolean, optional Whether the sample is with or without replacement. p : 1-D array-like, optional The probabilities associated with each entry in a. If not given the sample assumes a uniform distribution over all entries in a. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns -------- samples : 1-D ndarray, shape (size,) The generated random samples Raises ------- ValueError If a is an int and less than zero, if a or p are not 1-dimensional, if a is an array-like of size 0, if p is not a vector of probabilities, if a and p have different lengths, or if replace=False and the sample size is greater than the population size See Also --------- randint, shuffle, permutation Examples --------- Generate a uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3) # doctest: +SKIP array([0, 3, 4]) >>> #This is equivalent to np.random.randint(0,5,3) Generate a non-uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP array([3, 3, 0]) Generate a uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False) # doctest: +SKIP array([3,1,0]) >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3] Generate a non-uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0]) ... # doctest: +SKIP array([2, 3, 0]) Any of the above can be repeated with an arbitrary array-like instead of just integers. For instance: >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher'] >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3]) ... # doctest: +SKIP array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], dtype='|S11') """<line_sep>random_state=check_random_state(random_state)<line_sep># Format and Verify input a=np.array(a copy=<false>)<if_stmt>a.ndim<eq>0<block_start><try_stmt># __index__ must return an integer by python rules. 
<block_start>pop_size=operator.index(a.item())<block_end><except_stmt>TypeError<block_start><raise>ValueError("a must be 1-dimensional or an integer")<block_end><if_stmt>pop_size<le>0<block_start><raise>ValueError("a must be greater than 0")<block_end><block_end><elif_stmt>a.ndim<ne>1<block_start><raise>ValueError("a must be 1-dimensional")<block_end><else_stmt><block_start>pop_size=a.shape[0]<if_stmt>pop_size<eq>0<block_start><raise>ValueError("a must be non-empty")<block_end><block_end><if_stmt>p<is><not><none><block_start>p=np.array(p dtype=np.double ndmin=1 copy=<false>)<if_stmt>p.ndim<ne>1<block_start><raise>ValueError("p must be 1-dimensional")<block_end><if_stmt>p.size<ne>pop_size<block_start><raise>ValueError("a and p must have same size")<block_end><if_stmt>np.any(p<l>0)<block_start><raise>ValueError("probabilities are not non-negative")<block_end><if_stmt><not>np.allclose(p.sum() 1)<block_start><raise>ValueError("probabilities do not sum to 1")<block_end><block_end>shape=size<if_stmt>shape<is><not><none><block_start>size=np.prod(shape dtype=np.intp)<block_end><else_stmt><block_start>size=1<block_end># Actual sampling <if_stmt>replace<block_start><if_stmt>p<is><not><none><block_start>cdf=p.cumsum()<line_sep>cdf<augdiv>cdf[-1]<line_sep>uniform_samples=random_state.random_sample(shape)<line_sep>idx=cdf.searchsorted(uniform_samples side='right')<line_sep># searchsorted returns a scalar idx=np.array(idx copy=<false>)<block_end><else_stmt><block_start>idx=random_state.randint(0 pop_size size=shape)<block_end><block_end><else_stmt><block_start><if_stmt>size<g>pop_size<block_start><raise>ValueError("Cannot take a larger sample than "<concat>"population when 'replace=False'")<block_end><if_stmt>p<is><not><none><block_start><if_stmt>np.sum(p<g>0)<l>size<block_start><raise>ValueError("Fewer non-zero entries in p than size")<block_end>n_uniq=0<line_sep>p=p.copy()<line_sep>found=np.zeros(shape dtype=np.int)<line_sep>flat_found=found.ravel()<while_stmt>n_uniq<l>size<block_start>x=random_state.rand(size-n_uniq)<if_stmt>n_uniq<g>0<block_start>p[flat_found[0:n_uniq]]=0<block_end>cdf=np.cumsum(p)<line_sep>cdf<augdiv>cdf[-1]<line_sep>new=cdf.searchsorted(x side='right')<line_sep>_,unique_indices=np.unique(new return_index=<true>)<line_sep>unique_indices.sort()<line_sep>new=new.take(unique_indices)<line_sep>flat_found[n_uniq:n_uniq+new.size]=new<line_sep>n_uniq<augadd>new.size<block_end>idx=found<block_end><else_stmt><block_start>idx=random_state.permutation(pop_size)[:size]<if_stmt>shape<is><not><none><block_start>idx.shape=shape<block_end><block_end><block_end><if_stmt>shape<is><none><and>isinstance(idx np.ndarray)# In most cases a scalar will have been made an array <block_start>idx=idx.item(0)<block_end># Use samples as indices for a if a is array-like <if_stmt>a.ndim<eq>0<block_start><return>idx<block_end><if_stmt>shape<is><not><none><and>idx.ndim<eq>0# If size == () then the user requested a 0-d array as opposed to # a scalar object when size is None. However a[idx] is always a # scalar and not an array. So this makes sure the result is an # array, taking into account that np.array(item) may not work # for object arrays. <block_start>res=np.empty(() dtype=a.dtype)<line_sep>res[()]=a[idx]<line_sep><return>res<block_end><return>a[idx]<block_end><def_stmt>random_choice_csc n_samples classes class_probability=<none> random_state=<none><block_start>"""Generate a sparse random matrix given column class distributions Parameters ---------- n_samples : int, Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,) List of classes for each column. class_probability : list of size n_outputs of arrays of size (n_classes,) Optional (default=None). Class distribution of each column. If None the uniform distribution is assumed. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- random_matrix : sparse csc matrix of size (n_samples, n_outputs) """<line_sep>data=array.array('i')<line_sep>indices=array.array('i')<line_sep>indptr=array.array('i' [0])<for_stmt>j range(len(classes))<block_start>classes[j]=np.asarray(classes[j])<if_stmt>classes[j].dtype.kind<ne>'i'<block_start><raise>ValueError("class dtype %s is not supported"%classes[j].dtype)<block_end>classes[j]=astype(classes[j] np.int64 copy=<false>)<line_sep># use uniform distribution if no class_probability is given <if_stmt>class_probability<is><none><block_start>class_prob_j=np.empty(shape=classes[j].shape[0])<line_sep>class_prob_j.fill(1/classes[j].shape[0])<block_end><else_stmt><block_start>class_prob_j=np.asarray(class_probability[j])<block_end><if_stmt>np.sum(class_prob_j)<ne>1.0<block_start><raise>ValueError("Probability array at index {0} does not sum to "<concat>"one".format(j))<block_end><if_stmt>class_prob_j.shape[0]<ne>classes[j].shape[0]<block_start><raise>ValueError("classes[{0}] (length {1}) and "<concat>"class_probability[{0}] (length {2}) have "<concat>"different length.".format(j classes[j].shape[0] class_prob_j.shape[0]))<block_end># If 0 is not present in the classes insert it with a probability 0.0 <if_stmt>0<not><in>classes[j]<block_start>classes[j]=np.insert(classes[j] 0 0)<line_sep>class_prob_j=np.insert(class_prob_j 0 0.0)<block_end># If there are nonzero classes choose randomly using class_probability rng=check_random_state(random_state)<if_stmt>classes[j].shape[0]<g>1<block_start>p_nonzero=1-class_prob_j[classes[j]<eq>0]<line_sep>nnz=int(n_samples<times>p_nonzero)<line_sep>ind_sample=sample_without_replacement(n_population=n_samples n_samples=nnz random_state=random_state)<line_sep>indices.extend(ind_sample)<line_sep># Normalize probabilites for the nonzero elements classes_j_nonzero=classes[j]<ne>0<line_sep>class_probability_nz=class_prob_j[classes_j_nonzero]<line_sep>class_probability_nz_norm=(class_probability_nz/np.sum(class_probability_nz))<line_sep>classes_ind=np.searchsorted(class_probability_nz_norm.cumsum() rng.rand(nnz))<line_sep>data.extend(classes[j][classes_j_nonzero][classes_ind])<block_end>indptr.append(len(indices))<block_end><return>sp.csc_matrix((data indices indptr) (n_samples len(classes)) dtype=int)<block_end>
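# Illustrative sketch of the inverse-CDF sampling trick used in choice() above:
# draw uniforms, then map each one into the cumulative distribution with searchsorted.
import numpy as np

p = np.array([0.1, 0.0, 0.3, 0.6, 0.0])
cdf = p.cumsum()
cdf /= cdf[-1]
rng = np.random.RandomState(0)
idx = cdf.searchsorted(rng.random_sample(3), side='right')
# idx holds indices drawn with probabilities p; indices 1 and 4 (p == 0) can never appear.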
<import_stmt>torch<import_from_stmt>basicsr.models.archs.stylegan2_arch StyleGAN2Discriminator StyleGAN2Generator <def_stmt>convert_net_g ori_net crt_net<block_start>"""Convert network generator."""<for_stmt>crt_k,crt_v crt_net.items()<block_start><if_stmt>'style_mlp'<in>crt_k<block_start>ori_k=crt_k.replace('style_mlp' 'style')<block_end><elif_stmt>'constant_input.weight'<in>crt_k<block_start>ori_k=crt_k.replace('constant_input.weight' 'input.input')<block_end># style conv1 <elif_stmt>'style_conv1.modulated_conv'<in>crt_k<block_start>ori_k=crt_k.replace('style_conv1.modulated_conv' 'conv1.conv')<block_end><elif_stmt>'style_conv1'<in>crt_k<block_start><if_stmt>crt_v.shape<eq>torch.Size([1])<block_start>ori_k=crt_k.replace('style_conv1' 'conv1.noise')<block_end><else_stmt><block_start>ori_k=crt_k.replace('style_conv1' 'conv1')<block_end><block_end># style conv <elif_stmt>'style_convs'<in>crt_k<block_start>ori_k=crt_k.replace('style_convs' 'convs').replace('modulated_conv' 'conv')<if_stmt>crt_v.shape<eq>torch.Size([1])<block_start>ori_k=ori_k.replace('.weight' '.noise.weight')<block_end><block_end># to_rgb1 <elif_stmt>'to_rgb1.modulated_conv'<in>crt_k<block_start>ori_k=crt_k.replace('to_rgb1.modulated_conv' 'to_rgb1.conv')<block_end># to_rgbs <elif_stmt>'to_rgbs'<in>crt_k<block_start>ori_k=crt_k.replace('modulated_conv' 'conv')<block_end><elif_stmt>'noises'<in>crt_k<block_start>ori_k=crt_k.replace('.noise' '.noise_')<block_end><else_stmt><block_start>ori_k=crt_k<block_end># replace <if_stmt>crt_net[crt_k].size()<ne>ori_net[ori_k].size()<block_start><raise>ValueError('Wrong tensor size: \n'<concat>f'crt_net: {crt_net[crt_k].size()}\n'<concat>f'ori_net: {ori_net[ori_k].size()}')<block_end><else_stmt><block_start>crt_net[crt_k]=ori_net[ori_k]<block_end><block_end><return>crt_net<block_end><def_stmt>convert_net_d ori_net crt_net<block_start>"""Convert network discriminator."""<for_stmt>crt_k,crt_v crt_net.items()<block_start><if_stmt>'conv_body'<in>crt_k<block_start>ori_k=crt_k.replace('conv_body' 'convs')<block_end><else_stmt><block_start>ori_k=crt_k<block_end># replace <if_stmt>crt_net[crt_k].size()<ne>ori_net[ori_k].size()<block_start><raise>ValueError('Wrong tensor size: \n'<concat>f'crt_net: {crt_net[crt_k].size()}\n'<concat>f'ori_net: {ori_net[ori_k].size()}')<block_end><else_stmt><block_start>crt_net[crt_k]=ori_net[ori_k]<block_end><block_end><return>crt_net<block_end><if_stmt>__name__<eq>'__main__'<block_start>"""Convert official stylegan2 weights from stylegan2-pytorch."""<line_sep># configuration ori_net=torch.load('experiments/pretrained_models/stylegan2-ffhq.pth')<line_sep>save_path_g='experiments/pretrained_models/stylegan2_ffhq_config_f_1024_official.pth'# noqa: E501 save_path_d='experiments/pretrained_models/stylegan2_ffhq_config_f_1024_discriminator_official.pth'# noqa: E501 out_size=1024<line_sep>channel_multiplier=1<line_sep># convert generator crt_net=StyleGAN2Generator(out_size num_style_feat=512 num_mlp=8 channel_multiplier=channel_multiplier)<line_sep>crt_net=crt_net.state_dict()<line_sep>crt_net_params_ema=convert_net_g(ori_net['g_ema'] crt_net)<line_sep>torch.save(dict(params_ema=crt_net_params_ema latent_avg=ori_net['latent_avg']) save_path_g)<line_sep># convert discriminator crt_net=StyleGAN2Discriminator(out_size channel_multiplier=channel_multiplier)<line_sep>crt_net=crt_net.state_dict()<line_sep>crt_net_params=convert_net_d(ori_net['d'] crt_net)<line_sep>torch.save(dict(params=crt_net_params) save_path_d)<block_end>
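# Illustrative sketch on a toy state dict (not the real checkpoints): the
# key-renaming pattern convert_net_g uses to copy official weights into the new layout.
import torch

ori = {'style.1.weight': torch.ones(2, 2)}
crt = {'style_mlp.1.weight': torch.zeros(2, 2)}
for crt_k in crt:
    ori_k = crt_k.replace('style_mlp', 'style')
    assert crt[crt_k].size() == ori[ori_k].size()
    crt[crt_k] = ori[ori_k]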
# # Copyright (c) 2021 Facebook, Inc. and its affiliates. # # This file is part of NeuralDB. # See https://github.com/facebookresearch/NeuralDB for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>glob<import_stmt>json<import_from_stmt>collections OrderedDict defaultdict<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>neuraldb.evaluation.scoring_functions f1<import_from_stmt>functools reduce<def_stmt>load_experiment path db_sizes<block_start>running_score=defaultdict(<lambda>:defaultdict(int))<line_sep>running_count=defaultdict(<lambda>:defaultdict(int))<line_sep>print(path)<with_stmt>open(path)<as>f<block_start><for_stmt>line f<block_start>instance=json.loads(line)<line_sep>actual=instance["actual"]<line_sep>prediction=instance["prediction"]<if_stmt>"dbsize"<not><in>instance["metadata"]<block_start>db_idx,q_idx=(instance["metadata"]["database_idx"] instance["metadata"]["question_idx"] )<line_sep>dbsize=db_sizes[(db_idx q_idx)]<block_end><else_stmt><block_start>dbsize=instance["metadata"]["dbsize"]<block_end><if_stmt>dbsize<eq>0<block_start>dbsize="0"<block_end><elif_stmt>dbsize<eq>1<block_start>dbsize="1"<block_end><elif_stmt>dbsize<l>5<block_start>dbsize="2-4"<block_end><elif_stmt>dbsize<l>10<block_start>dbsize="5-9"<block_end><elif_stmt>dbsize<l>20<block_start>dbsize="10-19"<block_end><else_stmt><block_start>dbsize="20+"<block_end>local_score=f1(set(actual) set(prediction))<line_sep># relation = instance["metadata"]["relation"] # running_score["relation"][relation] += local_score # running_count["relation"][relation] += 1 qtype=instance["metadata"]["type"]<if_stmt>qtype<in>{"argmin" "argmax" "min" "max"}<block_start>qtype="minmax"<block_end>running_score["type"][qtype]<augadd>local_score<line_sep>running_count["type"][qtype]<augadd>1<line_sep>running_score["size"][dbsize]<augadd>local_score<line_sep>running_count["size"][dbsize]<augadd>1<line_sep>running_score["all"][""]<augadd>local_score<line_sep>running_count["all"][""]<augadd>1<block_end><block_end>scores={}<for_stmt>k,v running_score.items()<block_start><for_stmt>attr,val v.items()<block_start>score=(running_score[k][attr]/running_count[k][attr]<if>running_count[k][attr]<else>0)<line_sep>print(f"Running score: {k}\t{attr}\t\t{score}")<line_sep>scores["_".join([k attr])]=(running_score[k][attr]/running_count[k][attr]<if>running_count[k][attr]<else>0)<block_end><block_end><return>scores<block_end><if_stmt>__name__<eq>"__main__"<block_start>dbs=["v2.4_25" "v2.4_50" "v2.4_100" "v2.4_250" "v2.4_500" "v2.4_1000"]<line_sep>all_dbs={}<for_stmt>file dbs<block_start>master_file=f"resources/{file}/test.jsonl"<line_sep>db_sizes=dict()<with_stmt>open(master_file)<as>f<block_start><for_stmt>db_idx,line enumerate(f)<block_start>database=json.loads(line)<for_stmt>q_idx,query enumerate(database["queries"])<block_start>db_sizes[(db_idx q_idx)]=(len(set(reduce(<lambda>a b:a+b 
query["facts"])))<if>len(query["facts"])<else>0)<block_end><block_end><block_end>all_dbs[file]=db_sizes<block_end>ndb_predictions=glob.glob("consolidated/work/*/**/predictions.jsonl" recursive=<true>)<line_sep>all_experiments=[]<for_stmt>prediction ndb_predictions<block_start>experiment=OrderedDict()<for_stmt>element prediction.split("/")<block_start><if_stmt>","<in>element<block_start><for_stmt>kvp element.split(",")<block_start>k,v=kvp.split("=" maxsplit=1)<line_sep>experiment[k]=v<block_end><block_end><elif_stmt>"-"<in>element<block_start><for_stmt>kvp element.split(",")<block_start>k,v=kvp.split("-" maxsplit=1)<line_sep>experiment[k]=v<block_end><block_end><block_end>experiment["dataset"]=prediction.split("/")[2]<line_sep>experiment["path"]=prediction<if_stmt>experiment["generator"]<eq>"spj_rand"<block_start>experiment["retriever"]="ssg"<block_end><elif_stmt>"retriever"<not><in>experiment<block_start>experiment["retriever"]=""<block_end>all_experiments.append(experiment)<block_end>print("Reading by experiment: \n\n\n")<for_stmt>expt all_experiments<block_start>expt.update(load_experiment(expt["path"] all_dbs[expt["dataset"]]))<del_stmt>expt["path"]<block_end>original_frame=pd.DataFrame(all_experiments)<line_sep># original_frame[original_frame.select_dtypes(include=['number']).columns] *= 100 pd.set_option("display.width" 1000)<line_sep>pd.set_option("display.max_columns" <none>)<line_sep>aggr={"all_":[np.mean np.std]}<line_sep>pt=pd.pivot_table(original_frame index=["dataset" "model" "generator" "retriever" "lr" "steps"] aggfunc=aggr fill_value=0 )<line_sep>frame=pd.DataFrame(pt.to_records())<line_sep>frame.columns=[hdr.replace("('all_', '" "all.").replace("('size_" "size_").replace(", " ".").replace(")" "").replace("'" "")<for>hdr frame.columns]<line_sep>print(pt)<line_sep>final_configs=[["t5" "1e-4" "spj"] ["t5" "1e-4" "spj_rand"] # ["longformer", "1e-4", "perfectir"], # ["t5-fid", "1e-4", "perfectir"], ]<line_sep># ,["t5-fid-max1","1e-4","perfectir"],] <import_stmt>matplotlib.pyplot<as>plt<line_sep>plt.style.use("ggplot")<line_sep>fig,ax=plt.subplots(figsize=(5 3))<line_sep>all_series=[]<line_sep>all_stds=[]<for_stmt>model,lr,gene final_configs<block_start>print(model lr gene)<line_sep>series=[]<line_sep>stds=[]<for_stmt>db dbs<block_start>k="all"<line_sep>series.extend(frame[(frame.model<eq>model)&(frame.lr<eq>lr)&(frame.generator<eq>gene)&(frame.dataset<eq>db)][k+".mean"])<line_sep>stds.extend(frame[(frame.model<eq>model)&(frame.lr<eq>lr)&(frame.generator<eq>gene)&(frame.dataset<eq>db)][k+".std"])<block_end>all_series.append(series)<line_sep>all_stds.append(stds)<block_end>final_configs=[# ["t5", "1e-4", "externalir", "tfidf"], # ["t5", "1e-4", "externalir", "dpr"], ["t5" "1e-4" "externalir2" "tfidf"] ["t5" "1e-4" "externalir2" "dpr"] ]<for_stmt>model,lr,gene,retr final_configs<block_start>print(model lr gene)<line_sep>series=[]<line_sep>stds=[]<for_stmt>db 
dbs<block_start>k="all"<line_sep>print(frame[(frame.model<eq>model)&(frame.lr<eq>lr)&(frame.generator<eq>gene)&(frame.retriever<eq>retr)&(frame.dataset<eq>db)])<line_sep>series.extend(frame[(frame.model<eq>model)&(frame.lr<eq>lr)&(frame.generator<eq>gene)&(frame.retriever<eq>retr)&(frame.dataset<eq>db)][k+".mean"])<line_sep>stds.extend(frame[(frame.model<eq>model)&(frame.lr<eq>lr)&(frame.generator<eq>gene)&(frame.retriever<eq>retr)&(frame.dataset<eq>db)][k+".std"])<block_end><if_stmt>len(series)<g>6<block_start>all_series.append(series[1:])<line_sep>all_stds.append(stds[1:])<block_end><else_stmt><block_start>all_series.append(series)<line_sep>all_stds.append(stds)<block_end><block_end><for_stmt>series,stds zip(all_series all_stds)<block_start>print(series)<line_sep>ax.plot(series)<line_sep>ax.fill_between(range(len(series)) [min(1 s+i)<for>(s i) zip(series stds)] [s-i<for>(s i) zip(series stds)] alpha=0.4 )<block_end>plt.xticks(range(len(dbs)) labels=[k.replace("v2.4_" "")<for>k dbs])<line_sep>plt.xlabel("Number of facts in DB")<line_sep>plt.ylabel("Answer Accuracy")<line_sep>plt.legend(["SPJ PerfectIR" "SSG+SPJ" "T5 + TF-IDF" "T5 + DPR" ] # "T5 FiD", "TF-IDF", "DPR"]) loc="lower left" fontsize="x-small" )<line_sep># plt.tight_layout() # plt.show() plt.savefig("ssg_dbsize.pdf" bbox_inches="tight")<block_end>