# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the rasterization_backend."""

from tensorflow_graphics.rendering import rasterization_backend
from tensorflow_graphics.rendering.tests import rasterization_backend_test_base
from tensorflow_graphics.util import test_case


class CPURasterizationBackendTest(
    rasterization_backend_test_base.RasterizationBackendTestBase):

  def setUp(self):
    super(CPURasterizationBackendTest, self).setUp()
    self._backend = rasterization_backend.RasterizationBackends.CPU


if __name__ == '__main__':
  test_case.main()
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from torch import nn

from .blocks import PeriodicEmbed, Conv2dBlock


class SceneFlowFieldNet(nn.Module):

    def __init__(self, time_dependent=True, N_freq_xyz=0, N_freq_t=0,
                 output_dim=3, net_width=32, n_layers=3,
                 activation='lrelu', norm='none'):
        super().__init__()
        N_input_channel_xyz = 3 + 3 * 2 * N_freq_xyz
        N_input_channel_t = 1 + 1 * 2 * N_freq_t
        N_input_channel = (N_input_channel_xyz + N_input_channel_t
                           if time_dependent else N_input_channel_xyz)

        if N_freq_xyz == 0:
            xyz_embed = nn.Identity()
        else:
            xyz_embed = PeriodicEmbed(max_freq=N_freq_xyz, N_freq=N_freq_xyz)
        if N_freq_t == 0:
            t_embed = nn.Identity()
        else:
            t_embed = PeriodicEmbed(max_freq=N_freq_t, N_freq=N_freq_t)

        convs = [Conv2dBlock(N_input_channel, net_width, 1, 1,
                             norm=norm, activation=activation)]
        for i in range(n_layers):
            convs.append(Conv2dBlock(net_width, net_width, 1, 1,
                                     norm=norm, activation=activation))
        convs.append(Conv2dBlock(net_width, output_dim, 1, 1,
                                 norm='none', activation='none'))

        self.convs = nn.Sequential(*convs)
        self.t_embed = t_embed
        self.xyz_embed = xyz_embed
        self.time_dependent = time_dependent

    def forward(self, x, t=None):
        x = x.contiguous()
        if t is None and self.time_dependent:
            raise ValueError
        xyz_embedded = self.xyz_embed(x)
        if self.time_dependent:
            t_embedded = self.t_embed(t)
            input_feat = torch.cat([t_embedded, xyz_embedded], 1)
        else:
            input_feat = xyz_embedded
        return self.convs(input_feat)
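
# --- Illustrative usage sketch (editor addition, not part of the original file). ---
# Assumptions: the relative import of PeriodicEmbed/Conv2dBlock resolves (i.e. the
# module is executed inside its package), and Conv2dBlock behaves like a standard
# kernel-size-1 convolution block, so the net acts as a per-pixel MLP over (x, y, z)
# maps, optionally concatenated with a time channel.
if __name__ == "__main__":
    net = SceneFlowFieldNet(time_dependent=True, N_freq_xyz=0, N_freq_t=0)
    xyz = torch.randn(2, 3, 64, 64)  # batch of per-pixel 3D point maps
    t = torch.zeros(2, 1, 64, 64)    # matching time channel
    flow = net(xyz, t)
    print(flow.shape)  # expected: torch.Size([2, 3, 64, 64])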
import pytest

import packerlicious.builder as builder


class TestVMwareIsoBuilder(object):

    def test_required_fields_missing(self):
        b = builder.VMwareIso()

        with pytest.raises(ValueError) as excinfo:
            b.to_dict()
        assert 'required' in str(excinfo.value)

    def test_iso_checksum_mutually_exclusive(self):
        b = builder.VMwareIso(
            iso_url="/url/to/iso",
            iso_checksum_type=builder.VirtualboxIso.MD5,
            iso_checksum="my_checksum",
            iso_checksum_url="my_checksum_url",
        )

        with pytest.raises(ValueError) as excinfo:
            b.to_dict()
        assert 'VMwareIso: only one of the following can be specified: iso_checksum, iso_checksum_url' == str(excinfo.value)


class TestVMwareVmxBuilder(object):

    def test_required_fields_missing(self):
        b = builder.VMwareVmx()

        with pytest.raises(ValueError) as excinfo:
            b.to_dict()
        assert 'required' in str(excinfo.value)
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from future import standard_library
standard_library.install_aliases()  # triggers E402, hence noqa below
import h5py  # noqa
from collections import defaultdict  # noqa
import numpy as np  # noqa
import os  # noqa

from neon import logger as neon_logger  # noqa
from neon.data.text_preprocessing import clean_string  # noqa
from neon.util.compat import pickle  # noqa


def build_data_train(path='.', filepath='labeledTrainData.tsv', vocab_file=None,
                     vocab=None, skip_headers=True, train_ratio=0.8):
    """
    Loads the data file and spits out a h5 file with record of
    {y, review_text, review_int}
    Typically two passes over the data.
    1st pass is for vocab and pre-processing. (WARNING: to get phrases, we need
    to go though multiple passes). 2nd pass is converting text into integers.
    We will deal with integers from thereafter.

    WARNING: we use h5 just as proof of concept for handling large datasets
    Datasets may fit entirely in memory as numpy as array
    """
    fname_h5 = filepath + '.h5'
    if vocab_file is None:
        fname_vocab = filepath + '.vocab'
    else:
        fname_vocab = vocab_file

    if not os.path.exists(fname_h5) or not os.path.exists(fname_vocab):
        # create the h5 store - NOTE: hdf5 is row-oriented store and we slice rows
        # reviews_text holds the metadata and processed text file
        # reviews_int holds the ratings, ints
        h5f = h5py.File(fname_h5, 'w')
        shape, maxshape = (2 ** 16,), (None,)
        dt = np.dtype([('y', np.uint8),
                       ('split', np.bool),
                       ('num_words', np.uint16),
                       # WARNING: vlen=bytes in python 3
                       ('text', h5py.special_dtype(vlen=str))])
        reviews_text = h5f.create_dataset('reviews', shape=shape, maxshape=maxshape,
                                          dtype=dt, compression='gzip')
        reviews_train = h5f.create_dataset(
            'train', shape=shape, maxshape=maxshape,
            dtype=h5py.special_dtype(vlen=np.int32), compression='gzip')
        reviews_valid = h5f.create_dataset(
            'valid', shape=shape, maxshape=maxshape,
            dtype=h5py.special_dtype(vlen=np.int32), compression='gzip')

        wdata = np.zeros((1,), dtype=dt)

        # init vocab only for train data
        build_vocab = False
        if vocab is None:
            vocab = defaultdict(int)
            build_vocab = True
        nsamples = 0

        # open the file, skip the headers if needed
        f = open(filepath, 'r')
        if skip_headers:
            f.readline()

        for i, line in enumerate(f):
            _, rating, review = line.strip().split('\t')

            # clean the review
            review = clean_string(review)
            review_words = review.strip().split()
            num_words = len(review_words)
            split = int(np.random.rand() < train_ratio)

            # create record
            wdata['y'] = int(float(rating))
            wdata['text'] = review
            wdata['num_words'] = num_words
            wdata['split'] = split
            reviews_text[i] = wdata

            # update the vocab if needed
            if build_vocab:
                for word in review_words:
                    vocab[word] += 1

            nsamples += 1

        # histogram of class labels, sentence length
        ratings, counts = np.unique(reviews_text['y'][:nsamples],
                                    return_counts=True)
        sen_len, sen_len_counts = np.unique(reviews_text['num_words'][:nsamples],
                                            return_counts=True)
        vocab_size = len(vocab)
        nclass = len(ratings)
        reviews_text.attrs['vocab_size'] = vocab_size
        reviews_text.attrs['nrows'] = nsamples
        reviews_text.attrs['nclass'] = nclass
        reviews_text.attrs['class_distribution'] = counts
        neon_logger.display("vocabulary size - {}".format(vocab_size))
        neon_logger.display("# of samples - {}".format(nsamples))
        neon_logger.display("# of classes {}".format(nclass))
        neon_logger.display("class distribution - {} {}".format(ratings, counts))
        sen_counts = list(zip(sen_len, sen_len_counts))
        sen_counts = sorted(sen_counts, key=lambda kv: kv[1], reverse=True)
        neon_logger.display("sentence length - {} {} {}".format(
            len(sen_len), sen_len, sen_len_counts))

        # WARNING: assume vocab is of order ~4-5 million words.
        # sort the vocab, re-assign ids by its frequency. Useful for downstream tasks
        # only done for train data
        if build_vocab:
            vocab_sorted = sorted(list(vocab.items()), key=lambda kv: kv[1],
                                  reverse=True)
            vocab = {}
            for i, t in enumerate(list(zip(*vocab_sorted))[0]):
                vocab[t] = i

        # map text to integers
        ntrain = 0
        nvalid = 0
        for i in range(nsamples):
            text = reviews_text[i]['text']
            y = int(reviews_text[i]['y'])
            split = reviews_text[i]['split']
            text_int = [y] + [vocab[t] for t in text.strip().split()]
            if split:
                reviews_train[ntrain] = text_int
                ntrain += 1
            else:
                reviews_valid[nvalid] = text_int
                nvalid += 1
        reviews_text.attrs['ntrain'] = ntrain
        reviews_text.attrs['nvalid'] = nvalid
        neon_logger.display(
            "# of train - {0}, # of valid - {1}".format(reviews_text.attrs['ntrain'],
                                                        reviews_text.attrs['nvalid']))

        # close open files
        h5f.close()
        f.close()

    if not os.path.exists(fname_vocab):
        rev_vocab = {}
        for wrd, wrd_id in vocab.items():
            rev_vocab[wrd_id] = wrd
        neon_logger.display("vocabulary from IMDB dataset is saved into {}".format(fname_vocab))
        pickle.dump((vocab, rev_vocab), open(fname_vocab, 'wb'), 2)

    return fname_h5, fname_vocab
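
# --- Illustrative usage sketch (editor addition, not part of the original file). ---
# Assumption: a Kaggle-style labeledTrainData.tsv (id <TAB> sentiment <TAB> review)
# is present in the working directory; the file name matches the function default.
if __name__ == '__main__':
    fname_h5, fname_vocab = build_data_train(filepath='labeledTrainData.tsv',
                                             skip_headers=True, train_ratio=0.8)
    neon_logger.display("h5 file: {}, vocab file: {}".format(fname_h5, fname_vocab))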
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
from datetime import datetime
from typing import Optional

from ....console import echo_info


class Cache:
    """
    Cache data that is expensive to compute. Use JSON format.
    """

    def __init__(self, app_dir: str, cache_name: str, expiration: datetime):
        cache_path = os.path.join(app_dir, '.cache')
        self.__path = os.path.join(cache_path, cache_name)
        try:
            os.mkdir(cache_path)
        except FileExistsError:
            pass
        try:
            creation_time = datetime.utcfromtimestamp(os.path.getctime(self.__path))
            if creation_time < expiration:
                echo_info(f'Cache expired. Removing cache {self.__path}')
                os.remove(self.__path)
        except OSError:
            # file does not exist
            pass

    def get_value(self) -> Optional[object]:
        try:
            with open(self.__path) as f:
                echo_info(f'Load from {self.__path}')
                value_json = f.read()
                return json.loads(value_json)
        except FileNotFoundError:
            return None
        except Exception as e:
            raise Exception(f'Invalid cache object in {self.__path} {type(e)}') from e

    def set_value(self, value: object):
        value_json = json.dumps(value)
        stat = None
        try:
            stat = os.stat(self.__path)
        except FileNotFoundError:
            pass
        with open(self.__path, 'w') as f:
            f.write(value_json)
        if stat:
            # restore file dates for cache expiration
            os.utime(self.__path, (stat.st_atime, stat.st_mtime))
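
# --- Illustrative usage sketch (editor addition, not part of the original file). ---
# Assumptions: the module is importable from its package (echo_info is a
# package-internal helper), and the cache name/payload below are hypothetical.
# Files whose creation time predates `expiration` are removed on construction.
if __name__ == '__main__':
    import tempfile
    from datetime import timedelta

    app_dir = tempfile.mkdtemp()
    cache = Cache(app_dir, 'dependencies.json', datetime.utcnow() - timedelta(days=1))
    if cache.get_value() is None:
        cache.set_value({'requests': '2.25.1'})  # hypothetical payload
    print(cache.get_value())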
from changes.testutils import APITestCase

import datetime
import mock


class CachedSnapshotClusterDetailsAPITestCase(APITestCase):
    def setUp(self):
        super(CachedSnapshotClusterDetailsAPITestCase, self).setUp()
        self.mock_datetime = datetime.datetime.utcnow()

    def get_endpoint_path(self, cluster):
        return '/api/0/snapshots/cache/clusters/{0}/'.format(cluster)

    def test_empty(self):
        resp = self.client.get(self.get_endpoint_path('cluster'))
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert data == []

    @mock.patch('changes.lib.snapshot_garbage_collection.get_current_datetime')
    def test_get_current_datetime(self, get_current_datetime):
        """Metatest that verifies that the time-mock is functional.
        """
        get_current_datetime.return_value = self.mock_datetime
        self.client.get(self.get_endpoint_path('cluster'))
        get_current_datetime.assert_any_call()

    @mock.patch('changes.lib.snapshot_garbage_collection.get_current_datetime')
    def test_multiproject(self, get_current_datetime):
        """
        Integration test (minus mocking out time) on the endpoint, which is
        different from the lib-tests which mock out get_plans in the garbage
        collector.
        """
        project1 = self.create_project()
        project2 = self.create_project()

        plan1_1 = self.create_plan(project1)
        plan1_2 = self.create_plan(project1)
        plan2_1 = self.create_plan(project2)
        plan2_2 = self.create_plan(project2)
        plan2_3 = self.create_plan(project2)

        self.create_step(plan1_1, data={'cluster': 'cluster1'})
        self.create_step(plan1_2, data={'cluster': 'cluster2'})
        self.create_step(plan2_1, data={'cluster': 'cluster2'})
        self.create_step(plan2_2, data={'cluster': 'cluster2'})

        snapshot1 = self.create_snapshot(project1)
        snapshot2 = self.create_snapshot(project2)

        snapshot_image1_1 = self.create_snapshot_image(snapshot1, plan1_1)
        snapshot_image1_2 = self.create_snapshot_image(snapshot1, plan1_2)
        snapshot_image2_1 = self.create_snapshot_image(snapshot2, plan2_1)
        snapshot_image2_2 = self.create_snapshot_image(snapshot2, plan2_2)
        snapshot_image2_3 = self.create_snapshot_image(snapshot2, plan2_3)

        self.create_cached_snapshot_image(snapshot_image1_1)
        self.create_cached_snapshot_image(
            snapshot_image1_2,
            expiration_date=self.mock_datetime + datetime.timedelta(0, 1))
        self.create_cached_snapshot_image(
            snapshot_image2_1,
            expiration_date=self.mock_datetime - datetime.timedelta(0, 1))
        self.create_cached_snapshot_image(snapshot_image2_2)
        self.create_cached_snapshot_image(snapshot_image2_3)

        get_current_datetime.return_value = self.mock_datetime
        resp = self.client.get(self.get_endpoint_path('cluster2'))
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert len(data) == 2
        assert snapshot_image1_2.id.hex in data
        assert snapshot_image2_2.id.hex in data

        # Ensure that nonexisting clusters still give empty even when there
        # is actually some data (unlike test_empty)
        resp = self.client.get(self.get_endpoint_path('cluster3'))
        assert resp.status_code == 200
        data = self.unserialize(resp)
        assert data == []
<import_stmt>numpy<as>np<import_from_stmt>deepneuro.outputs.inference ModelInference<import_from_stmt>deepneuro.utilities.util add_parameter docker_print<class_stmt>PatchesInference(ModelInference)<block_start>""" """<def_stmt>load self kwargs<block_start>""" Parameters ---------- patch_overlaps: int, optional The amount of times a grid of patches is predicted over an entire output volume. Subsequent grids are offset from the original grid by patch_size / patch_overlaps, and the final output is the result of averaging over each grid for each voxel. Default is 1. input_patch_shape: tuple, optional The input dimensions of the predicted patches, not including batch size. If None, DeepNeuro will attempt to extract this value from the given model. Default is None. output_patch_shape: tuple, optional The output dimensions of the predicted patches, not including batch size. If smaller than the input patch size in any dimension, patches will be cropped symmetrically by the difference in size to meet this shape. Default is None. check_empty_patch: bool, optional Do not predict patches if they only contain zeros. Default is True. pad_borders: bool, optional Pads input borders by patch_size / 2 with zeros. This allows patches at the boundary of an image to be successfully predicted, albeit with zero infill values. Default is True. patch_dimensions: tuple or list, optional output_patch_dimensions: tuple or list, optional """<line_sep>super(PatchesInference self).load(kwargs)<line_sep># Patching Parameters add_parameter(self kwargs 'patch_overlaps' 1)<line_sep>add_parameter(self kwargs 'input_patch_shape' <none>)<line_sep>add_parameter(self kwargs 'output_patch_shape' <none>)<line_sep>add_parameter(self kwargs 'check_empty_patch' <true>)<line_sep>add_parameter(self kwargs 'pad_borders' <true>)<line_sep>add_parameter(self kwargs 'keep_channels' <none>)<line_sep>add_parameter(self kwargs 'patch_dimensions' <none>)<line_sep>add_parameter(self kwargs 'output_patch_dimensions' self.patch_dimensions)<line_sep>self.batch_size=1<block_end><def_stmt>process_case self input_data model=<none><block_start>"""Summary Parameters ---------- input_data : TYPE Description model : None, optional Description Returns ------- TYPE Description """<line_sep>input_data=input_data[self.lead_key]<if_stmt>model<is><not><none><block_start>self.model=model<block_end><if_stmt>self.channels_first<block_start>input_data=np.swapaxes(input_data 1 -1)<block_end><if_stmt>self.input_channels<is><not><none><block_start>input_data=np.take(input_data self.input_channels self.channels_dim)<block_end># Determine patch shape. Currently only extends to spatial patching. # This leading dims business has got to have a better solution.. 
<if_stmt>self.input_patch_shape<is><none><block_start>self.input_patch_shape=self.model.model_input_shape<block_end><if_stmt>self.output_patch_shape<is><none><block_start>self.output_patch_shape=self.model.model_output_shape<block_end>self.input_dim=len(self.input_patch_shape)-2<if_stmt>self.patch_dimensions<is><none><block_start><if_stmt>self.channels_first<block_start>self.patch_dimensions=[-1<times>self.input_dim+x<for>x range(self.input_dim)]<block_end><else_stmt><block_start>self.patch_dimensions=[-1<times>self.input_dim+x-1<for>x range(self.input_dim)]<block_end><if_stmt>self.output_patch_dimensions<is><none><block_start>self.output_patch_dimensions=self.patch_dimensions<block_end><block_end>self.output_shape=[1]+list(self.model.model_output_shape)[1:]# Weird <for_stmt>i range(len(self.patch_dimensions))<block_start>self.output_shape[self.output_patch_dimensions[i]]=input_data.shape[self.patch_dimensions[i]]<block_end>output_data=self.predict(input_data)<if_stmt>self.output_channels<is><not><none><block_start>output_data=np.take(output_data self.output_channels self.channels_dim)<block_end># Will fail for time-data. <if_stmt>self.channels_first<block_start>output_data=np.swapaxes(output_data 1 -1)<block_end>self.return_objects.append(output_data)<line_sep><return>output_data<block_end><def_stmt>predict self input_data<block_start>repetition_offsets=[np.linspace(0 self.input_patch_shape[axis]-1 self.patch_overlaps+1 dtype=int)[:-1]<for>axis self.patch_dimensions]<if_stmt>self.pad_borders# TODO -- Clean up this border-padding code and make it more readable. <block_start>input_pad_dimensions=[(0 0)]<times>input_data.ndim<line_sep>repatched_shape=self.output_shape<line_sep>new_input_shape=list(input_data.shape)<for_stmt>idx,dim enumerate(self.patch_dimensions)# Might not work for odd-shaped patches; check. <block_start>input_pad_dimensions[dim]=(int(self.input_patch_shape[dim]<floordiv>2) int(self.input_patch_shape[dim]<floordiv>2))<line_sep>new_input_shape[dim]<augadd>self.input_patch_shape[dim]<block_end><for_stmt>idx,dim enumerate(self.output_patch_dimensions)<block_start>repatched_shape[dim]<augadd>self.input_patch_shape[dim]<block_end>padded_input_data=np.zeros(new_input_shape)<if_stmt>self.channels_first<block_start>input_slice=[slice(<none>)]<times>2+[slice(self.input_patch_shape[dim]<floordiv>2 -self.input_patch_shape[dim]<floordiv>2 <none>)<for>dim self.patch_dimensions]<block_end><else_stmt><block_start>input_slice=[slice(<none>)]+[slice(self.input_patch_shape[dim]<floordiv>2 -self.input_patch_shape[dim]<floordiv>2 <none>)<for>dim self.patch_dimensions]+[slice(<none>)]<block_end>padded_input_data[tuple(input_slice)]=input_data<line_sep>input_data=padded_input_data<block_end><else_stmt><block_start>repatched_shape=self.output_shape<block_end>repatched_image=np.zeros(repatched_shape)<line_sep>corner_data_dims=[input_data.shape[axis]<for>axis self.patch_dimensions]<line_sep>corner_patch_dims=[self.output_patch_shape[axis]<for>axis self.patch_dimensions]<line_sep>all_corners=np.indices(corner_data_dims)<line_sep># There must be a better way to round up to an integer.. 
possible_corners_slice=[slice(<none>)]+[slice(self.input_patch_shape[dim]<floordiv>2 -self.input_patch_shape[dim]<floordiv>2 <none>)<for>dim self.patch_dimensions]<line_sep>all_corners=all_corners[tuple(possible_corners_slice)]<for_stmt>rep_idx range(self.patch_overlaps)<block_start><if_stmt>self.verbose<block_start>docker_print('Predicting patch set' str(rep_idx+1)+'/'+str(self.patch_overlaps)+'...')<block_end>corners_grid_shape=[slice(<none>)]<for_stmt>dim range(all_corners.ndim-1)<block_start>corners_grid_shape<augadd>[slice(repetition_offsets[dim][rep_idx] corner_data_dims[dim] corner_patch_dims[dim])]<block_end>corners_list=all_corners[tuple(corners_grid_shape)]<line_sep>corners_list=np.reshape(corners_list (corners_list.shape[0] -1)).T<if_stmt>self.check_empty_patch<block_start>corners_list=self.remove_empty_patches(input_data corners_list)<block_end><for_stmt>corner_list_idx range(0 corners_list.shape[0] self.batch_size)<block_start>corner_batch=corners_list[corner_list_idx:corner_list_idx+self.batch_size]<line_sep>input_patches=self.grab_patch(input_data corner_batch)<line_sep>prediction=self.run_inference(input_patches)<line_sep>self.insert_patch(repatched_image prediction corner_batch)<block_end><if_stmt>rep_idx<eq>0<block_start>output_data=np.copy(repatched_image)<block_end><else_stmt><block_start>output_data=self.aggregate_predictions(output_data repatched_image rep_idx)<block_end><block_end><if_stmt>self.pad_borders<block_start>output_slice=[slice(<none>)]<times>output_data.ndim# Weird <for_stmt>idx,dim enumerate(self.output_patch_dimensions)# Might not work for odd-shaped patches; check. <block_start>output_slice[dim]=slice(self.input_patch_shape[dim]<floordiv>2 -self.input_patch_shape[dim]<floordiv>2 1)<block_end>output_data=output_data[tuple(output_slice)]<block_end><if_stmt>self.keep_channels<is><not><none><block_start>output_data=np.take(output_data self.keep_channels axis=-1)<block_end><return>output_data<block_end><def_stmt>run_inference self data<block_start><return>self.model.predict(data)<block_end><def_stmt>aggregate_predictions self output_data repatched_image rep_idx<block_start>output_data=output_data+(1.0/(rep_idx))<times>(repatched_image-output_data)# Running Average <return>output_data<block_end><def_stmt>pad_data self data pad_dimensions# Maybe more effecient than np.pad? Created for testing a different purpose. <block_start><for_stmt>idx,width enumerate(pad_dimensions)<block_start>pad_block_1,pad_block_2=list(data.shape) list(data.shape)<line_sep>pad_block_1[idx]=width[0]<line_sep>pad_block_2[idx]=width[1]<line_sep>data=np.concatenate((np.zeros(pad_block_1) data np.zeros(pad_block_2)) axis=idx)<block_end><return>data<block_end><def_stmt>remove_empty_patches self input_data corners_list<block_start>corner_selections=[]<for_stmt>corner_idx,corner enumerate(corners_list)<block_start>output_slice=[slice(<none>)]<times>input_data.ndim# Weird <for_stmt>idx,dim enumerate(self.patch_dimensions)<block_start>output_slice[dim]=slice(corner[idx]-self.input_patch_shape[dim]<floordiv>2 corner[idx]+self.input_patch_shape[dim]<floordiv>2 1)<block_end>corner_selections<augadd>[np.any(input_data[tuple(output_slice)])]<block_end><return>corners_list[corner_selections]<block_end><def_stmt>grab_patch self input_data corner_list<block_start>""" Given a corner coordinate, a patch_shape, and some input_data, returns a patch or array of patches. 
"""<line_sep>output_patches_shape=(corner_list.shape[0] )+self.input_patch_shape[1:]<line_sep>output_patches=np.zeros((output_patches_shape))<for_stmt>corner_idx,corner enumerate(corner_list)<block_start>output_slice=[slice(<none>)]<times>input_data.ndim# Weird <for_stmt>idx,dim enumerate(self.patch_dimensions)<block_start>output_slice[dim]=slice(corner[idx]-self.input_patch_shape[dim]<floordiv>2 corner[idx]+self.input_patch_shape[dim]<floordiv>2 1)<block_end>output_patches[corner_idx <ellipsis>]=input_data[tuple(output_slice)]<block_end><return>output_patches<block_end><def_stmt>insert_patch self input_data patches corner_list# Some ineffeciencies in the function. TODO: come back and rewrite. <block_start><for_stmt>corner_idx,corner enumerate(corner_list)<block_start>insert_slice=[slice(<none>)]<times>input_data.ndim# Weird <for_stmt>idx,dim enumerate(self.output_patch_dimensions)# Might not work for odd-shaped patches; check. <block_start>insert_slice[dim]=slice(corner[idx]-self.output_patch_shape[dim]<floordiv>2 corner[idx]+self.output_patch_shape[dim]<floordiv>2 1)<block_end>insert_patch=patches[corner_idx <ellipsis>]<if_stmt><not>np.array_equal(np.take(self.output_patch_shape self.output_patch_dimensions) np.take(self.input_patch_shape self.patch_dimensions))# Necessary if statement? <block_start>patch_slice=[slice(<none>)]<times>insert_patch.ndim# Weird <for_stmt>idx,dim enumerate(self.output_patch_dimensions)# Might not work for odd-shaped patches; check. <block_start>patch_slice[dim]=slice((self.input_patch_shape[dim]-self.output_patch_shape[dim])<floordiv>2 -(self.input_patch_shape[dim]-self.output_patch_shape[dim])<floordiv>2 1)<block_end>insert_patch=insert_patch[tuple(patch_slice)]<block_end>input_data[tuple(insert_slice)]=insert_patch<block_end><return>input_data<block_end><block_end>
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# Copyright (c) 2015 Juniper Networks, Inc.
# All rights reserved.
#
# Use is subject to license terms.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.db import models


class Topology(models.Model):
    description = models.TextField(default="none", verbose_name="Description")
    name = models.TextField(default="noname", verbose_name="name")
    json = models.TextField(verbose_name="json")
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(verbose_name="modified", auto_now=True)

    class Meta:
        verbose_name = 'Topology'
        verbose_name_plural = 'topologies'


class ConfigSet(models.Model):
    topology = models.ForeignKey('Topology')
    name = models.TextField()
    description = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = 'ConfigSet'
        verbose_name_plural = 'configSets'


class Config(models.Model):
    configSet = models.ForeignKey('ConfigSet')
    name = models.TextField()
    type = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    ip = models.GenericIPAddressField()
    deviceConfig = models.TextField()
    password = models.TextField()

    class Meta:
        verbose_name = 'Config'
        verbose_name_plural = 'configs'
def transform(dataset, rot_center=0, tune_rot_center=True):
    """Reconstruct sinograms using the tomopy gridrec algorithm

    Typically, a data exchange file would be loaded for this reconstruction.
    This operation will attempt to perform flat-field correction of the raw
    data using the dark and white background data found in the data exchange
    file.

    This operator also requires either the tomviz/tomopy-pipeline docker
    image, or a python environment with tomopy installed.
    """
    import numpy as np
    import tomopy

    # Get the current volume as a numpy array.
    array = dataset.active_scalars

    dark = dataset.dark
    white = dataset.white
    angles = dataset.tilt_angles
    tilt_axis = dataset.tilt_axis

    # TomoPy wants the tilt axis to be zero, so ensure that is true
    if tilt_axis == 2:
        order = [2, 1, 0]
        array = np.transpose(array, order)
        if dark is not None and white is not None:
            dark = np.transpose(dark, order)
            white = np.transpose(white, order)

    if angles is not None:
        # tomopy wants radians
        theta = np.radians(angles)
    else:
        # Assume it is equally spaced between 0 and 180 degrees
        theta = tomopy.angles(array.shape[0])

    # Perform flat-field correction of raw data
    if white is not None and dark is not None:
        array = tomopy.normalize(array, white, dark, cutoff=1.4)

    if rot_center == 0:
        # Try to find it automatically
        init = array.shape[2] / 2.0
        rot_center = tomopy.find_center(array, theta, init=init, ind=0, tol=0.5)
    elif tune_rot_center:
        # Tune the center
        rot_center = tomopy.find_center(array, theta, init=rot_center, ind=0, tol=0.5)

    # Calculate -log(array)
    array = tomopy.minus_log(array)

    # Remove nan, neg, and inf values
    array = tomopy.remove_nan(array, val=0.0)
    array = tomopy.remove_neg(array, val=0.00)
    array[np.where(array == np.inf)] = 0.00

    # Perform the reconstruction
    array = tomopy.recon(array, theta, center=rot_center, algorithm='gridrec')

    # Mask each reconstructed slice with a circle.
    array = tomopy.circ_mask(array, axis=0, ratio=0.95)

    # Set the transformed array
    child = dataset.create_child_dataset()
    child.active_scalars = array

    return_values = {}
    return_values['reconstruction'] = child
    return return_values
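
# --- Illustrative usage sketch (editor addition, not part of the original file). ---
# The operator normally runs inside tomviz, which supplies the `dataset` object.
# The duck-typed stand-in below is an assumption made only for illustration: it
# mimics the attributes the transform reads (active_scalars, dark, white,
# tilt_angles, tilt_axis) and the create_child_dataset() call it uses. Requires
# tomopy to be installed.
if __name__ == '__main__':
    import numpy as np

    class _FakeDataset(object):
        def __init__(self, sinogram, angles):
            self.active_scalars = sinogram
            self.dark = None
            self.white = None
            self.tilt_angles = angles
            self.tilt_axis = 0

        def create_child_dataset(self):
            return _FakeDataset(None, None)

    sinogram = np.random.rand(90, 1, 64).astype(np.float32)  # (angles, slices, detector)
    angles = np.linspace(0.0, 180.0, 90)
    result = transform(_FakeDataset(sinogram, angles), rot_center=32,
                       tune_rot_center=False)
    print(result['reconstruction'].active_scalars.shape)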
# first line: 10
@memory.cache
def read_wav():
    wav = dl.data.get_smashing_baby()
    return wavfile.read(wav)
# Copyright 2019 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Namespace support postgresql

Revision ID: 035
Revises: 034
Create Date: 2019-08-01 15:48:34.115639

"""

# revision identifiers, used by Alembic.
revision = '035'
down_revision = '034'

from alembic import op
from sqlalchemy.engine import reflection


def upgrade():
    inspect = reflection.Inspector.from_engine(op.get_bind())
    unique_constraints = [
        unique_constraint['name'] for unique_constraint
        in inspect.get_unique_constraints('workflow_definitions_v2')
    ]
    if 'workflow_definitions_v2_name_project_id_key' in unique_constraints:
        op.drop_constraint(
            'workflow_definitions_v2_name_project_id_key',
            table_name='workflow_definitions_v2'
        )
from tljh.normalize import generate_system_username
from tljh import user
from tljh import configurer
from systemdspawner import SystemdSpawner
from traitlets import Dict, Unicode, List
from jupyterhub_configurator.mixins import ConfiguratorSpawnerMixin


class CustomSpawner(SystemdSpawner):
    """
    SystemdSpawner with user creation on spawn.

    FIXME: Remove this somehow?
    """

    user_groups = Dict(key_trait=Unicode(), value_trait=List(Unicode()), config=True)

    def start(self):
        """
        Perform system user activities before starting server
        """
        # FIXME: Move this elsewhere? Into the Authenticator?
        system_username = generate_system_username('jupyter-' + self.user.name)

        # FIXME: This is a hack. Allow setting username directly instead
        self.username_template = system_username
        user.ensure_user(system_username)
        user.ensure_user_group(system_username, 'jupyterhub-users')
        if self.user.admin:
            user.ensure_user_group(system_username, 'jupyterhub-admins')
        else:
            user.remove_user_group(system_username, 'jupyterhub-admins')
        if self.user_groups:
            for group, users in self.user_groups.items():
                if self.user.name in users:
                    user.ensure_user_group(system_username, group)
        return super().start()


cfg = configurer.load_config()

# Use the jupyterhub-configurator mixin only if configurator is enabled
# otherwise, any bugs in the configurator backend will stop new user spawns!
if cfg['services']['configurator']['enabled']:
    # Dynamically create the Spawner class using `type`
    # (https://docs.python.org/3/library/functions.html?#type),
    # based on whether or not it should inherit from ConfiguratorSpawnerMixin
    UserCreatingSpawner = type('UserCreatingSpawner', (ConfiguratorSpawnerMixin, CustomSpawner), {})
else:
    UserCreatingSpawner = type('UserCreatingSpawner', (CustomSpawner,), {})
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>model.config cfg<import_from_stmt>model.train_val filter_roidb SolverWrapper<import_from_stmt>utils.timer Timer<try_stmt><block_start><import_stmt>cPickle<as>pickle<block_end><except_stmt>ImportError<block_start><import_stmt>pickle<block_end><import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_stmt>glob<import_stmt>time<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python pywrap_tensorflow<class_stmt>MemorySolverWrapper(SolverWrapper)<block_start>""" A wrapper class for the training process of spatial memory """<def_stmt>construct_graph self sess<block_start><with_stmt>sess.graph.as_default()# Set the random seed for tensorflow <block_start>tf.set_random_seed(cfg.RNG_SEED)<line_sep># Build the main computation graph layers=self.net.create_architecture('TRAIN' self.imdb.num_classes tag='default')<line_sep># Define the loss loss=layers['total_loss']<line_sep># Set learning rate and momentum lr=tf.Variable(cfg.TRAIN.RATE trainable=<false>)<line_sep>self.optimizer=tf.train.MomentumOptimizer(lr cfg.TRAIN.MOMENTUM)<line_sep># Compute the gradients with regard to the loss gvs=self.optimizer.compute_gradients(loss)<line_sep>grad_summaries=[]<for_stmt>grad,var gvs<block_start><if_stmt>'SMN'<not><in>var.name<and>'GMN'<not><in>var.name<block_start><continue><block_end>grad_summaries.append(tf.summary.histogram('TRAIN/'+var.name var))<if_stmt>grad<is><not><none><block_start>grad_summaries.append(tf.summary.histogram('GRAD/'+var.name grad))<block_end><block_end># Double the gradient of the bias if set <if_stmt>cfg.TRAIN.DOUBLE_BIAS<block_start>final_gvs=[]<with_stmt>tf.variable_scope('Gradient_Mult')<as>scope<block_start><for_stmt>grad,var gvs<block_start>scale=1.<if_stmt>cfg.TRAIN.DOUBLE_BIAS<and>'/biases:'<in>var.name<block_start>scale<augmul>2.<block_end><if_stmt><not>np.allclose(scale 1.0)<block_start>grad=tf.multiply(grad scale)<block_end>final_gvs.append((grad var))<block_end><block_end>train_op=self.optimizer.apply_gradients(final_gvs)<block_end><else_stmt><block_start>train_op=self.optimizer.apply_gradients(gvs)<block_end>self.summary_grads=tf.summary.merge(grad_summaries)<line_sep># We will handle the snapshots ourselves self.saver=tf.train.Saver(max_to_keep=100000)<line_sep># Write the train and validation information to tensorboard self.writer=tf.summary.FileWriter(self.tbdir sess.graph)<line_sep>self.valwriter=tf.summary.FileWriter(self.tbvaldir)<block_end><return>lr train_op<block_end><def_stmt>train_model self sess max_iters# Build data layers for both training and validation set <block_start>self.data_layer=self.imdb.data_layer(self.roidb self.imdb.num_classes)<line_sep>self.data_layer_val=self.imdb.data_layer(self.valroidb self.imdb.num_classes random=<true>)<line_sep># Construct the computation graph lr,train_op=self.construct_graph(sess)<line_sep># Find previous snapshots if there is any to restore from lsf,nfiles,sfiles=self.find_previous()<line_sep># Initialize the variables or restore them from the last snapshot <if_stmt>lsf<eq>0<block_start>rate,last_snapshot_iter,stepsizes,np_paths,ss_paths=self.initialize(sess)<block_end><else_stmt><block_start>rate,last_snapshot_iter,stepsizes,np_paths,ss_paths=self.restore(sess str(sfiles[-1]) str(nfiles[-1]))<block_end>timer=Timer()<line_sep>iter=last_snapshot_iter+1<line_sep>last_summary_iter=iter<line_sep>last_summary_time=time.time()<line_sep># Make sure the lists are not empty 
stepsizes.append(max_iters)<line_sep>stepsizes.reverse()<line_sep>next_stepsize=stepsizes.pop()<while_stmt>iter<l>max_iters+1# Learning rate <block_start><if_stmt>iter<eq>next_stepsize+1# Add snapshot here before reducing the learning rate <block_start>self.snapshot(sess iter)<line_sep>rate<augmul>cfg.TRAIN.GAMMA<line_sep>sess.run(tf.assign(lr rate))<line_sep>next_stepsize=stepsizes.pop()<block_end>timer.tic()<line_sep># Get training data, one batch at a time blobs=self.data_layer.forward()<line_sep>now=time.time()<if_stmt>iter<eq>1<or>(now-last_summary_time<g>cfg.TRAIN.SUMMARY_INTERVAL<and>iter-last_summary_iter<g>cfg.TRAIN.SUMMARY_ITERS)# Compute the graph with summary <block_start>loss_cls,total_loss,summary,gsummary=self.net.train_step_with_summary(sess blobs train_op self.summary_grads)<line_sep>self.writer.add_summary(summary float(iter))<line_sep>self.writer.add_summary(gsummary float(iter+1))<line_sep># Also check the summary on the validation set blobs_val=self.data_layer_val.forward()<line_sep>summary_val=self.net.get_summary(sess blobs_val)<line_sep>self.valwriter.add_summary(summary_val float(iter))<line_sep>last_summary_iter=iter<line_sep>last_summary_time=now<block_end><else_stmt># Compute the graph without summary <block_start>loss_cls,total_loss=self.net.train_step(sess blobs train_op)<block_end>timer.toc()<line_sep># Display training information <if_stmt>iter%(cfg.TRAIN.DISPLAY)<eq>0<block_start>print('iter: %d / %d, total loss: %.6f\n >>> loss_cls: %.6f\n >>> lr: %f'%(iter max_iters total_loss loss_cls lr.eval()))<line_sep>print('speed: {:.3f}s / iter'.format(timer.average_time))<block_end># Snapshotting <if_stmt>iter%cfg.TRAIN.SNAPSHOT_ITERS<eq>0<block_start>last_snapshot_iter=iter<line_sep>ss_path,np_path=self.snapshot(sess iter)<line_sep>np_paths.append(np_path)<line_sep>ss_paths.append(ss_path)<line_sep># Remove the old snapshots if there are too many <if_stmt>len(np_paths)<g>cfg.TRAIN.SNAPSHOT_KEPT<block_start>self.remove_snapshot(np_paths ss_paths)<block_end><block_end>iter<augadd>1<block_end><if_stmt>last_snapshot_iter<ne>iter-1<block_start>self.snapshot(sess iter-1)<block_end>self.writer.close()<line_sep>self.valwriter.close()<block_end><block_end><def_stmt>train_net network imdb roidb valroidb output_dir tb_dir pretrained_model=<none> max_iters=40000<block_start>"""Train a Faster R-CNN network with memory."""<line_sep>roidb=filter_roidb(roidb)<line_sep>valroidb=filter_roidb(valroidb)<line_sep>tfconfig=tf.ConfigProto(allow_soft_placement=<true>)<line_sep>tfconfig.gpu_options.allow_growth=<true><with_stmt>tf.Session(config=tfconfig)<as>sess<block_start>sw=MemorySolverWrapper(sess network imdb roidb valroidb output_dir tb_dir pretrained_model=pretrained_model)<line_sep>print('Solving...')<line_sep>sw.train_model(sess max_iters)<line_sep>print('done solving')<block_end><block_end>
import pytest
from MDAnalysis import SelectionError
from rdkit import Chem

from prolif.molecule import (Molecule, pdbqt_supplier, mol2_supplier,
                             sdf_supplier)
from prolif.residue import ResidueId
from prolif.datafiles import datapath

from .test_base import TestBaseRDKitMol, rdkit_mol, ligand_rdkit, u


class TestMolecule(TestBaseRDKitMol):
    @pytest.fixture(scope="class")
    def mol(self):
        return Molecule(rdkit_mol)

    def test_mapindex(self, mol):
        for atom in mol.GetAtoms():
            assert atom.GetUnsignedProp("mapindex") == atom.GetIdx()

    def test_from_mda(self):
        rdkit_mol = Molecule(ligand_rdkit)
        mda_mol = Molecule.from_mda(u, "resname LIG")
        assert rdkit_mol[0].resid == mda_mol[0].resid
        assert (rdkit_mol.HasSubstructMatch(mda_mol) and
                mda_mol.HasSubstructMatch(rdkit_mol))

    def test_from_mda_empty_ag(self):
        ag = u.select_atoms("resname FOO")
        with pytest.raises(SelectionError, match="AtomGroup is empty"):
            Molecule.from_mda(ag)

    def test_from_rdkit(self):
        rdkit_mol = Molecule(ligand_rdkit)
        newmol = Molecule.from_rdkit(ligand_rdkit)
        assert rdkit_mol[0].resid == newmol[0].resid

    def test_from_rdkit_default_resid(self):
        mol = Chem.MolFromSmiles("CCO")
        newmol = Molecule.from_rdkit(mol)
        assert newmol[0].resid == ResidueId("UNL", 1)

    def test_from_rdkit_resid_args(self):
        mol = Chem.MolFromSmiles("CCO")
        newmol = Molecule.from_rdkit(mol, "FOO", 42, "A")
        assert newmol[0].resid == ResidueId("FOO", 42, "A")

    @pytest.mark.parametrize("key", [
        0,
        42,
        -1,
        "LYS49.A",
        ResidueId("LYS", 49, "A")
    ])
    def test_getitem(self, mol, key):
        assert mol[key].resid is mol.residues[key].resid

    def test_iter(self, mol):
        for i, r in enumerate(mol):
            assert r.resid == mol[i].resid

    def test_n_residues(self, mol):
        assert mol.n_residues == mol.residues.n_residues


class TestSupplier:
    def test_pdbqt(self):
        path = datapath / "vina"
        pdbqts = sorted(path.glob("*.pdbqt"))
        template = Chem.MolFromSmiles("C[NH+]1CC(C(=O)NC2(C)OC3(O)C4CCCN4C(=O)"
                                      "C(Cc4ccccc4)N3C2=O)C=C2c3cccc4[nH]cc"
                                      "(c34)CC21")
        suppl = pdbqt_supplier(pdbqts, template)
        mols = list(suppl)
        assert isinstance(mols[0], Molecule)
        assert len(mols) == len(pdbqts)

    def test_sdf(self):
        path = str(datapath / "vina" / "vina_output.sdf")
        suppl = sdf_supplier(path)
        mols = list(suppl)
        assert isinstance(mols[0], Molecule)
        assert len(mols) == 9
        mi = mols[0].GetAtomWithIdx(0).GetMonomerInfo()
        assert all([mi.GetResidueName() == "UNL",
                    mi.GetResidueNumber() == 1,
                    mi.GetChainId() == ""])

    def test_mol2(self):
        path = str(datapath / "vina" / "vina_output.mol2")
        suppl = mol2_supplier(path)
        mols = list(suppl)
        assert isinstance(mols[0], Molecule)
        assert len(mols) == 9
        mi = mols[0].GetAtomWithIdx(0).GetMonomerInfo()
        assert all([mi.GetResidueName() == "UNL",
                    mi.GetResidueNumber() == 1,
                    mi.GetChainId() == ""])

    def test_mol2_starting_with_comment(self):
        path = str(datapath / "mol_comment.mol2")
        suppl = mol2_supplier(path)
        mol = next(suppl)
        assert mol is not None
import time
from pyb import UART
from modbus import ModbusRTU

uart = UART(3, 115200, parity=None, stop=2, timeout=1, timeout_char=4)
modbus = ModbusRTU(uart, register_num=9999)

while True:
    if modbus.any():
        modbus.handle(debug=True)
    else:
        time.sleep_ms(100)
        modbus.REGISTER[0] = 1000
        modbus.REGISTER[1] += 1
        modbus.REGISTER[3] += 3
        # print(modbus.REGISTER[10:15])
        # image processing in there
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple invertible tokenizer. Converts from a unicode string to a list of tokens (represented as Unicode strings). This tokenizer has the following desirable properties: - It is invertible. - Alphanumeric characters are broken away from non-alphanumeric characters. - A single space between words does not produce an extra token. - The full Unicode punctuation and separator set is recognized. The tokenization algorithm is as follows: 1. Split the text into a list of tokens, splitting at every boundary of an alphanumeric character and a non-alphanumeric character. This produces a list which alternates between "alphanumeric tokens" (strings of alphanumeric characters) and "non-alphanumeric tokens" (strings of non-alphanumeric characters). 2. Remove every token consisting of a single space, unless it is the very first or very last token in the list. These tokens are now implied by the fact that there are two adjacent alphanumeric tokens. e.g. u"Dude - that's so cool." -> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."] """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>sys<import_stmt>unicodedata<import_stmt>six<import_stmt>logging<import_from_stmt>six.moves range# pylint: disable=redefined-builtin # from tensor2tensor.utils import mlperf_log <import_stmt>time<import_stmt>glob<line_sep># Conversion between Unicode and UTF-8, if required (on Python2) _native_to_unicode=(<lambda>s:s.decode("utf-8"))<if>six.PY2<else>(<lambda>s:s)<line_sep>logger=logging.getLogger(__name__)<line_sep># This set contains all letter and number characters. _ALPHANUMERIC_CHAR_SET=set(six.unichr(i)<for>i range(sys.maxunicode)<if>(unicodedata.category(six.unichr(i)).startswith("L")<or>unicodedata.category(six.unichr(i)).startswith("N")<or>unicodedata.category(six.unichr(i)).startswith("P")))<line_sep># unicodedata.category(six.unichr(i)).startswith("S") <def_stmt>encode text<block_start>"""Encode a unicode string as a list of tokens. 
Args: text: a unicode string Returns: a list of tokens as Unicode strings """<if_stmt><not>text<block_start><return>[]<block_end>ret=[]<line_sep>token_start=0<line_sep># Classify each character in the input string is_alnum=[c<in>_ALPHANUMERIC_CHAR_SET<for>c text]<line_sep>add_remaining=<false><for_stmt>pos range(1 len(text))<block_start>add_remaining=<false><if_stmt>is_alnum[pos]<ne>is_alnum[pos-1]<block_start><if_stmt><not>is_alnum[pos]<block_start>token=text[token_start:pos]<if_stmt>token<ne>u" "<or>token_start<eq>0<block_start>add_remaining=<false><line_sep>ret.append(token)<block_end><block_end><else_stmt><block_start>add_remaining=<true><line_sep>token_start=pos<block_end><block_end><block_end>final_token=text[token_start:]<if>text[-1]<in>_ALPHANUMERIC_CHAR_SET<else>text[token_start:-1]<if_stmt>add_remaining<block_start>ret.append(final_token)<block_end># split on punctuation final_tokens=[]<for_stmt>token ret<block_start>splitted_token=_run_split_on_punc(token)<line_sep>final_tokens.extend(splitted_token)<block_end><return>final_tokens<block_end><def_stmt>_run_split_on_punc text never_split=<none><block_start>"""Splits punctuation on a piece of text."""<if_stmt>never_split<is><not><none><and>text<in>never_split<block_start><return>[text]<block_end>chars=list(text)<line_sep>i=0<line_sep>start_new_word=<true><line_sep>output=[]<while_stmt>i<l>len(chars)<block_start>char=chars[i]<if_stmt>_is_punctuation(char)<block_start>output.append([char])<line_sep>start_new_word=<true><block_end><else_stmt><block_start><if_stmt>start_new_word<block_start>output.append([])<block_end>start_new_word=<false><line_sep>output[-1].append(char)<block_end>i<augadd>1<block_end><return>["".join(x)<for>x output]<block_end><def_stmt>_is_punctuation char<block_start>"""Checks whether `chars` is a punctuation character."""<line_sep>cp=ord(char)<line_sep># We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. <if_stmt>(cp<ge>33<and>cp<le>47)<or>(cp<ge>58<and>cp<le>64)<or>(cp<ge>91<and>cp<le>96)<or>(cp<ge>123<and>cp<le>126)<block_start><return><true><block_end>cat=unicodedata.category(char)<if_stmt>cat.startswith("P")<block_start><return><true><block_end><return><false><block_end><def_stmt>decode tokens<block_start>"""Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string """<line_sep>token_is_alnum=[t[0]<in>_ALPHANUMERIC_CHAR_SET<for>t tokens]<line_sep>ret=[]<for_stmt>i,token enumerate(tokens)<block_start><if_stmt>i<g>0<and>token_is_alnum[i-1]<and>token_is_alnum[i]<block_start>ret.append(u" ")<block_end>ret.append(token)<block_end><return>"".join(ret)<block_end><def_stmt>_read_filepattern filepattern max_lines=<none> split_on_newlines=<true> do_lower_case=<false><block_start>"""Reads files matching a wildcard pattern, yielding the contents. Args: filepattern: A wildcard pattern matching one or more files. max_lines: If set, stop reading after reading this many lines. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. Yields: The contents of the files as lines, if split_on_newlines is True, or the entire contents of each file if False. 
"""<line_sep>filenames=sorted(glob.glob(filepattern))<line_sep>print(filenames 'do lower case:' do_lower_case)<line_sep>lines_read=0<for_stmt>filename filenames<block_start>start=time.time()<with_stmt>open(filename)<as>f<block_start><if_stmt>split_on_newlines<block_start><for_stmt>line f<block_start><if_stmt>do_lower_case<block_start>line=line.lower()<block_end><yield>line.strip()<line_sep>lines_read<augadd>1<if_stmt>max_lines<and>lines_read<ge>max_lines<block_start><return><block_end><if_stmt>lines_read%100000<eq>0<block_start>print("read" lines_read "lines," time.time()-start "secs elapsed")<block_end><block_end><block_end><else_stmt><block_start><if_stmt>max_lines<block_start>doc=[]<for_stmt>line f<block_start><if_stmt>do_lower_case<block_start>line=line.lower()<block_end>doc.append(line)<line_sep>lines_read<augadd>1<if_stmt>max_lines<and>lines_read<ge>max_lines<block_start><yield>"".join(doc)<line_sep><return><block_end><block_end><yield>"".join(doc)<block_end><else_stmt><block_start><yield>f.read()<block_end><block_end><block_end>print(time.time()-start "for reading read file :" filename)<block_end><block_end><def_stmt>corpus_token_counts text_filepattern corpus_max_lines split_on_newlines=<true> additional_chars="" do_lower_case=<false><block_start>"""Read the corpus and compute a dictionary of token counts. Args: text_filepattern: A pattern matching one or more files. corpus_max_lines: An integer; maximum total lines to read. split_on_newlines: A boolean. If true, then split files by lines and strip leading and trailing whitespace from each line. Otherwise, treat each file as a single string. additional_chars: A String. Each consisting characters will be treat as normal alphabets so that they will be included in each vocab. Returns: a dictionary mapping token to count. """<if_stmt>additional_chars<block_start>_ALPHANUMERIC_CHAR_SET.add(additional_chars)<block_end>counts=collections.Counter()<for_stmt>doc _read_filepattern(text_filepattern max_lines=corpus_max_lines split_on_newlines=split_on_newlines do_lower_case=do_lower_case)<block_start>counts.update(encode(_native_to_unicode(doc)))<block_end>print("read all files")<line_sep><return>counts<block_end><def_stmt>vocab_token_counts text_filepattern max_lines do_lower_case=<false><block_start>"""Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Returns: a dictionary mapping token to count. """<line_sep>ret={}<for_stmt>i,line enumerate(_read_filepattern(text_filepattern max_lines=max_lines))<block_start><if_stmt>","<not><in>line<block_start>logger.warning("Malformed vocab line #%d '%s'" i line)<line_sep><continue><block_end><if_stmt>do_lower_case<block_start>line=line.lower()<block_end>token,count=line.rsplit("," 1)<line_sep>ret[_native_to_unicode(token)]=int(count)<block_end><return>ret<block_end>
from typing import Dict, List

SecurityRequirement = Dict[str, List[str]]
"""
Lists the required security schemes to execute this operation.
The name used for each property MUST correspond to a security scheme declared
in the [Security Schemes](#componentsSecuritySchemes) under the
[Components Object](#componentsObject).

Security Requirement Objects that contain multiple schemes require that all
schemes MUST be satisfied for a request to be authorized.
This enables support for scenarios where multiple query parameters or HTTP
headers are required to convey security information.

When a list of Security Requirement Objects is defined on the
[OpenAPI Object](#oasObject) or [Operation Object](#operationObject), only one
of the Security Requirement Objects in the list needs to be satisfied to
authorize the request.

References:
    - https://swagger.io/docs/specification/authentication/
"""
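
# --- Illustrative example (editor addition, not part of the original file). ---
# The scheme names below ("api_key", "petstore_auth") are the conventional OpenAPI
# sample names and are assumptions here; in a real document they must match schemes
# declared under components.securitySchemes. An empty list means no scopes required.
_example_requirements: List[SecurityRequirement] = [
    {"api_key": []},                                  # either an API key ...
    {"petstore_auth": ["write:pets", "read:pets"]},   # ... or OAuth2 with these scopes
]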
<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.keras.losses LossFunctionWrapper<def_stmt>_to_tensor x dtype<block_start>"""Convert the input `x` to a tensor of type `dtype`. OBS: Code implemented by Tensorflow # Arguments x: An object to be converted (numpy array, list, tensors). dtype: The destination type. # Returns A tensor. """<line_sep>x=tf.convert_to_tensor(x)<if_stmt>x.dtype<ne>dtype<block_start>x=tf.cast(x dtype)<block_end><return>x<block_end><def_stmt>_get_shapes_and_one_hot y_true y_pred<block_start>shape=y_pred.get_shape()<line_sep>n_classes=shape[-1]<line_sep># Squeeze dim -1 if it is == 1, otherwise leave it dims=tf.cond(tf.equal(y_true.shape[-1]<or>-1 1) <lambda>:tf.shape(y_true)[:-1] <lambda>:tf.shape(y_true))<line_sep>y_true=tf.reshape(y_true dims)<line_sep>y_true=tf.one_hot(tf.cast(y_true tf.uint8) depth=n_classes)<line_sep><return>y_true shape n_classes<block_end><def_stmt>sparse_jaccard_distance_loss y_true y_pred smooth=1<block_start>""" Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|) = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|)) The jaccard distance loss is usefull for unbalanced datasets. This has been shifted so it converges on 0 and is smoothed to avoid exploding or disapearing gradient. Approximates the class-wise jaccard distance computed per-batch element across spatial image dimensions. Returns the 1 - mean(per_class_distance) for each batch element. :param y_true: :param y_pred: :param smooth: :return: Ref: https://en.wikipedia.org/wiki/Jaccard_index @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96 @author: wassname """<line_sep>y_true,shape,n_classes=_get_shapes_and_one_hot(y_true y_pred)<line_sep>reduction_dims=range(len(shape))[1:-1]<line_sep>intersection=tf.reduce_sum(y_true<times>y_pred axis=reduction_dims)<line_sep>sum_=tf.reduce_sum(y_true+y_pred axis=reduction_dims)<line_sep>jac=(intersection+smooth)/(sum_-intersection+smooth)<line_sep><return>1.0-tf.reduce_mean(jac axis=-1 keepdims=<true>)<block_end><class_stmt>SparseJaccardDistanceLoss(LossFunctionWrapper)<block_start>""" tf reduction wrapper for sparse_jaccard_distance_loss """<def_stmt>__init__ self reduction smooth=1 name='sparse_jaccard_distance_loss' **kwargs<block_start>super(SparseJaccardDistanceLoss self).__init__(sparse_jaccard_distance_loss name=name reduction=reduction smooth=smooth)<block_end><block_end><def_stmt>sparse_dice_loss y_true y_pred smooth=1<block_start>""" Approximates the class-wise dice coefficient computed per-batch element across spatial image dimensions. Returns the 1 - mean(per_class_dice) for each batch element. 
:param y_true: :param y_pred: :param smooth: :return: """<line_sep>y_true,shape,n_classes=_get_shapes_and_one_hot(y_true y_pred)<line_sep>reduction_dims=range(len(shape))[1:-1]<line_sep>intersection=tf.reduce_sum(y_true<times>y_pred axis=reduction_dims)<line_sep>union=tf.reduce_sum(y_true+y_pred axis=reduction_dims)<line_sep>dice=(2<times>intersection+smooth)/(union+smooth)<line_sep><return>1.0-tf.reduce_mean(dice axis=-1 keepdims=<true>)<block_end><class_stmt>SparseDiceLoss(LossFunctionWrapper)<block_start>""" tf reduction wrapper for sparse_dice_loss """<def_stmt>__init__ self reduction smooth=1 name='sparse_dice_loss' **kwargs<block_start>super(SparseDiceLoss self).__init__(sparse_dice_loss name=name reduction=reduction smooth=smooth)<block_end><block_end><def_stmt>sparse_exponential_logarithmic_loss y_true y_pred gamma_dice gamma_cross weight_dice weight_cross<block_start>""" TODO :param y_true: :param y_pred: :param smooth: :return: """<line_sep>y_true,shape,n_classes=_get_shapes_and_one_hot(y_true y_pred)<line_sep>reduction_dims=range(len(shape))[1:-1]<line_sep># Clip for numerical stability _epsilon=_to_tensor(10e-8 y_pred.dtype.base_dtype)<line_sep>y_pred=tf.clip_by_value(y_pred _epsilon 1.-_epsilon)<line_sep># Compute exp log dice intersect=2<times>tf.reduce_sum(y_true<times>y_pred axis=reduction_dims)+1<line_sep>union=tf.reduce_sum(y_true+y_pred axis=reduction_dims)+1<line_sep>exp_log_dice=tf.math.pow(-tf.math.log(intersect/union) gamma_dice)<line_sep>mean_exp_log_dice=tf.reduce_mean(exp_log_dice axis=-1 keepdims=<true>)<line_sep># Compute exp cross entropy entropy=tf.reduce_sum(y_true<times>-tf.math.log(y_pred) axis=-1 keepdims=<true>)<line_sep>exp_entropy=tf.reduce_mean(tf.math.pow(entropy gamma_cross) axis=reduction_dims)<line_sep># Compute output res=weight_dice<times>mean_exp_log_dice+weight_cross<times>exp_entropy<line_sep><return>res<block_end><class_stmt>SparseExponentialLogarithmicLoss(LossFunctionWrapper)<block_start>""" https://link.springer.com/content/pdf/10.1007%2F978-3-030-00931-1_70.pdf """<def_stmt>__init__ self reduction gamma_dice=0.3 gamma_cross=0.3 weight_dice=1 weight_cross=1 name="sparse_exponential_logarithmic_loss"<block_start>super(SparseExponentialLogarithmicLoss self).__init__(sparse_exponential_logarithmic_loss name=name reduction=reduction gamma_dice=gamma_dice gamma_cross=gamma_cross weight_dice=weight_dice weight_cross=weight_cross)<block_end><block_end><def_stmt>sparse_focal_loss y_true y_pred gamma class_weights<block_start>""" TODO :param y_true: :param y_pred: :param smooth: :return: """<line_sep>y_true,shape,n_classes=_get_shapes_and_one_hot(y_true y_pred)<line_sep>reduction_dims=range(len(shape))[1:-1]<line_sep># Clip for numerical stability _epsilon=_to_tensor(10e-8 y_pred.dtype.base_dtype)<line_sep>y_pred=tf.clip_by_value(y_pred _epsilon 1.-_epsilon)<if_stmt>class_weights<is><none><block_start>class_weights=[1]<times>n_classes<block_end># Compute the focal loss entropy=tf.math.log(y_pred)<line_sep>modulator=tf.math.pow((1-y_pred) gamma)<line_sep>loss=-tf.reduce_sum(class_weights<times>y_true<times>modulator<times>entropy axis=-1 keepdims=<true>)<line_sep><return>tf.reduce_mean(loss axis=reduction_dims)<block_end><class_stmt>SparseFocalLoss(LossFunctionWrapper)<block_start>""" https://arxiv.org/pdf/1708.02002.pdf """<def_stmt>__init__ self reduction gamma=2 class_weights=<none> name="sparse_focal_loss"<block_start>super(SparseFocalLoss self).__init__(sparse_focal_loss name=name reduction=reduction gamma=gamma 
class_weights=class_weights)<block_end><block_end><def_stmt>sparse_generalized_dice_loss y_true y_pred type_weight<block_start>""" Function to calculate the Generalised Dice Loss defined in <NAME>. et. al. (2017) Generalised Dice overlap as a deep learning loss function for highly unbalanced segmentations. DLMIA 2017 """<line_sep>y_true,shape,n_classes=_get_shapes_and_one_hot(y_true y_pred)<line_sep>reduction_dims=range(len(shape))[1:-1]<line_sep>ref_vol=tf.reduce_sum(y_true axis=reduction_dims)<line_sep>intersect=tf.reduce_sum(y_true<times>y_pred axis=reduction_dims)<line_sep>seg_vol=tf.reduce_sum(y_pred axis=reduction_dims)<if_stmt>type_weight.lower()<eq>'square'<block_start>weights=tf.math.reciprocal(tf.math.square(ref_vol))<block_end><elif_stmt>type_weight.lower()<eq>'simple'<block_start>weights=tf.math.reciprocal(ref_vol)<block_end><elif_stmt>type_weight.lower()<eq>'uniform'<block_start>weights=tf.ones_like(ref_vol)<block_end><else_stmt><block_start><raise>ValueError("The variable type_weight \"{}\""<concat>"is not defined.".format(type_weight))<block_end># Make array of new weight in which infinite values are replaced by # ones. new_weights=tf.where(tf.math.is_inf(weights) tf.zeros_like(weights) weights)<line_sep># Set final weights as either original weights or highest observed # non-infinite weight weights=tf.where(tf.math.is_inf(weights) tf.ones_like(weights)<times>tf.reduce_max(new_weights) weights)<line_sep># calculate generalized dice score eps=1e-6<line_sep>numerator=2<times>tf.multiply(weights intersect)<line_sep>denom=tf.multiply(weights seg_vol+ref_vol)+eps<line_sep>generalised_dice_score=numerator/denom<line_sep><return>1-tf.reduce_mean(generalised_dice_score axis=-1 keepdims=<true>)<block_end><class_stmt>SparseGeneralizedDiceLoss(LossFunctionWrapper)<block_start>""" Based on implementation in NiftyNet at: http://niftynet.readthedocs.io/en/dev/_modules/niftynet/layer/ loss_segmentation.html#generalised_dice_loss Class based to allow passing of parameters to the function at construction time in keras. """<def_stmt>__init__ self reduction type_weight="Square" name='sparse_generalized_dice_loss'<block_start>super(SparseGeneralizedDiceLoss self).__init__(sparse_generalized_dice_loss name=name reduction=reduction type_weight=type_weight)<block_end><block_end># Aliases SparseExpLogDice=SparseExponentialLogarithmicLoss<line_sep>
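A small sketch of how the sparse losses above could be exercised on dummy tensors; it assumes TensorFlow 2.x and the shapes are purely illustrative:

import tensorflow as tf

# Dummy batch: 2 images of 4x4 pixels, 3 classes.
# y_true carries integer class labels with a trailing singleton channel,
# y_pred carries per-class softmax scores.
y_true = tf.random.uniform((2, 4, 4, 1), maxval=3, dtype=tf.int32)
y_pred = tf.nn.softmax(tf.random.normal((2, 4, 4, 3)), axis=-1)

# Functional form: one loss value per batch element, shape (2, 1).
per_image = sparse_dice_loss(y_true, y_pred, smooth=1)

# Wrapper form, usable directly in model.compile(loss=...).
loss_fn = SparseDiceLoss(reduction=tf.keras.losses.Reduction.AUTO)
print(float(loss_fn(y_true, y_pred)))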
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. <import_stmt>asyncio<import_stmt>datetime<import_stmt>multiprocessing<import_stmt>os<import_stmt>unittest<import_from_stmt>cdm.enums CdmStatusLevel CdmObjectType CdmLogCode<import_from_stmt>cdm.objectmodel CdmCorpusDefinition CdmManifestDefinition<import_from_stmt>cdm.storage LocalAdapter<import_from_stmt>tests.common async_test TestHelper<import_from_stmt>tests.mock_storage_adapter MockStorageAdapter<import_from_stmt>cdm.storage.syms SymsAdapter<import_from_stmt>tests.syms_test_helper SymsTestHelper<class_stmt>PersistenceLayerTest(unittest.TestCase)<block_start>test_subpath=os.path.join('Persistence' 'PersistenceLayer')<line_sep>@async_test<async_keyword><def_stmt>test_invalid_json self<block_start>test_input_path=TestHelper.get_input_folder_path(self.test_subpath 'test_invalid_json')<line_sep>corpus=CdmCorpusDefinition()<line_sep>corpus.storage.mount('local' LocalAdapter(test_input_path))<line_sep>corpus.storage.default_namespace='local'<line_sep>invalid_manifest=<none><try_stmt><block_start>invalid_manifest=<await>corpus.fetch_object_async('local:/invalidManifest.manifest.cdm.json')<block_end><except_stmt>Exception<as>e<block_start>self.fail('Error should not be thrown when input json is invalid.')<block_end>self.assertIsNone(invalid_manifest)<block_end>@async_test<async_keyword><def_stmt>test_loading_invalid_model_json_name self<block_start>test_input_path=TestHelper.get_input_folder_path(self.test_subpath 'test_loading_invalid_model_json_name')<line_sep>corpus=CdmCorpusDefinition()<line_sep>corpus.storage.mount('local' LocalAdapter(test_input_path))<line_sep>corpus.storage.default_namespace='local'<line_sep># We are trying to load a file with an invalid name, so fetch_object_async should just return None. invalid_model_json=<await>corpus.fetch_object_async('test.model.json')<line_sep>self.assertIsNone(invalid_model_json)<block_end>@async_test<async_keyword><def_stmt>test_saving_invalid_model_json_name self<block_start>corpus=CdmCorpusDefinition()<line_sep>corpus.ctx.report_at_level=CdmStatusLevel.WARNING<line_sep>corpus.storage.unmount('cdm')<line_sep>corpus.storage.default_namespace='local'<line_sep>manifest=CdmManifestDefinition(corpus.ctx 'manifest')<line_sep>corpus.storage.fetch_root_folder('local').documents.append(manifest)<line_sep>all_docs={}# type: Dict[str, str] test_adapter=MockStorageAdapter(all_docs)<line_sep>corpus.storage._set_adapter('local' test_adapter)<line_sep>new_manifest_from_model_json_name='my.model.json'<line_sep><await>manifest.save_as_async(new_manifest_from_model_json_name <true>)<line_sep># TODO: because we can load documents properly now, save_as_async returns false. 
Will check the value returned from save_as_async() when the problem is solved self.assertFalse('/'+new_manifest_from_model_json_name<in>all_docs)<block_end>@async_test<async_keyword><def_stmt>test_model_json_type_attribute_persistence self<block_start>corpus=TestHelper.get_local_corpus(self.test_subpath 'TestModelJsonTypeAttributePersistence')<line_sep># we need to create a second adapter to the output folder to fool the OM into thinking it's different # this is because there is a bug currently that prevents us from saving and then loading a model.json corpus.storage.mount('alternateOutput' LocalAdapter(TestHelper.get_actual_output_folder_path(self.test_subpath 'TestModelJsonTypeAttributePersistence')))<line_sep># create manifest entity_name='TestTypeAttributePersistence'<line_sep>local_root=corpus.storage.fetch_root_folder('local')<line_sep>output_root=corpus.storage.fetch_root_folder('output')<line_sep>manifest=corpus.make_object(CdmObjectType.MANIFEST_DEF 'tempAbstract')# type: CdmManifestDefinition manifest.imports.append('cdm:/foundations.cdm.json' <none>)<line_sep>local_root.documents.append(manifest)<line_sep># create entity doc=corpus.make_object(CdmObjectType.DOCUMENT_DEF entity_name+'.cdm.json')# type: CdmManifestDefinition doc.imports.append('cdm:/foundations.cdm.json' <none>)<line_sep>local_root.documents.append(doc doc.name)<line_sep>entity_def=doc.definitions.append(entity_name CdmObjectType.ENTITY_DEF)# type: CdmEntityDeclarationDefinition # create type attribute cdm_type_attribute_definition=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF entity_name <false>)# type: CdmTypeAttributeDefinition cdm_type_attribute_definition.is_read_only=<true><line_sep>entity_def.attributes.append(cdm_type_attribute_definition)<line_sep>manifest.entities.append(entity_def)<line_sep>manifest_resolved=<await>manifest.create_resolved_manifest_async('default' <none>)<line_sep>output_root.documents.append(manifest_resolved)<line_sep>manifest_resolved.imports.append('cdm:/foundations.cdm.json')<line_sep><await>manifest_resolved.save_as_async('model.json' <true>)<line_sep>new_manifest=<await>corpus.fetch_object_async('alternateOutput:/model.json')# type: CdmManifestDefinition new_ent=<await>corpus.fetch_object_async(new_manifest.entities[0].entity_path manifest)# type: CdmEntityDefinition type_attribute=new_ent.attributes[0]<line_sep>self.assertTrue(type_attribute.is_read_only)<block_end>@async_test<async_keyword><def_stmt>test_missing_persistence_format self<block_start>expected_log_codes={CdmLogCode.ERR_PERSIST_CLASS_MISSING}<line_sep>corpus=TestHelper.get_local_corpus(self.test_subpath 'TestMissingPersistenceFormat' expected_codes=expected_log_codes)# type: CdmCorpusDefinition folder=corpus.storage.fetch_root_folder(corpus.storage.default_namespace)# type: CdmFolderDefinition manifest=corpus.make_object(CdmObjectType.MANIFEST_DEF 'someManifest')# type: CdmManifestDefinition folder.documents.append(manifest)<line_sep># trying to save to an unsupported format should return false and not fail succeded=<await>manifest.save_as_async('manifest.unSupportedExtension')<line_sep>self.assertFalse(succeded)<block_end><async_keyword><def_stmt>run_syms_save_manifest self manifest:CdmManifestDefinition<block_start>self.assertTrue(<await>manifest.save_as_async('syms:/{}/{}.manifest.cdm.json'.format(manifest.manifest_name manifest.manifest_name)))<block_end><async_keyword><def_stmt>run_syms_fetch_manifest self corpus:CdmCorpusDefinition manifest_expected:'CdmManifestDefinition' filename:str 
threadnumber:str=''<block_start>manifest_read_databases=<await>corpus.fetch_object_async('syms:/databases.manifest.cdm.json')<line_sep>self.assertIsNotNone(manifest_read_databases)<line_sep>self.assertEqual('databases.manifest.cdm.json' manifest_read_databases.manifest_name)<if_stmt><not>any(db.manifest_name<eq>manifest_expected.manifest_name<for>db manifest_read_databases.sub_manifests)<block_start>self.fail('Database {} does not exist'.format(manifest_expected.manifest_name))<block_end>manifest_actual=<await>corpus.fetch_object_async('syms:/{}/{}.manifest.cdm.json'.format(manifest_expected.manifest_name manifest_expected.manifest_name) manifest_read_databases <none> <true>)<line_sep><await>manifest_actual.save_as_async('localActOutput:/{}{}'.format(filename threadnumber))<line_sep><await>manifest_expected.save_as_async('localExpOutput:/{}{}'.format(filename threadnumber))<line_sep>actual_content=TestHelper.get_actual_output_data(self.test_subpath 'TestSymsSavingAndFetchingDocument' filename)<line_sep>expected_content=TestHelper.get_expected_output_data(self.test_subpath 'TestSymsSavingAndFetchingDocument' filename)<line_sep>ret=TestHelper.compare_same_object(actual_content expected_content)<if_stmt>ret<is><not>''<block_start>self.fail(ret)<block_end><block_end><async_keyword><def_stmt>run_syms_fetch_document self corpus:'CdmCorpusDefinition' manifest_expected:'CdmManifestDefinition'<block_start><for_stmt>ent manifest_expected.entities<block_start>doc=<await>corpus.fetch_object_async('syms:/{}/{}.cdm.json'.format(manifest_expected.manifest_name ent.entity_name))<line_sep>self.assertIsNotNone(doc)<line_sep>self.assertTrue(doc.name<eq>'{}.cdm.json'.format(ent.entity_name))<block_end><block_end><async_keyword><def_stmt>run_syms_smart_adls_adapter_mount_logic self<block_start>syms_adapter=SymsTestHelper.create_adapter_with_clientid()<line_sep>corpus=CdmCorpusDefinition()<line_sep>corpus.storage.mount('syms' syms_adapter)<line_sep>adls_adapter1=SymsTestHelper.create_adapter_clientid_with_shared_key(1)<line_sep>adls_adapter2=SymsTestHelper.create_adapter_clientid_with_shared_key(2)<line_sep>count_adapter_count_before=len(corpus.storage.namespace_adapters)<line_sep>manifest_read_databases=<await>corpus.fetch_object_async('syms:/databases.manifest.cdm.json')<line_sep>manifest=<await>corpus.fetch_object_async('syms:/{}/{}.manifest.cdm.json'.format(manifest_read_databases.sub_manifests[0].manifest_name manifest_read_databases.sub_manifests[0].manifest_name) manifest_read_databases <none> <true>)<line_sep>count_adapter_count_after=len(corpus.storage.namespace_adapters)<line_sep>self.assertEqual(count_adapter_count_before+2 count_adapter_count_after)<line_sep>self.assertIsNotNone(corpus.storage.adapter_path_to_corpus_path('https://{}{}'.format(adls_adapter1.hostname adls_adapter1.root)))<line_sep>self.assertIsNotNone(corpus.storage.adapter_path_to_corpus_path('https://{}{}'.format(adls_adapter2.hostname adls_adapter2.root)))<block_end>@[email protected](SymsTestHelper.if_syms_run_tests_flag_not_set() 'SYMS environment variables not set up')<async_keyword><def_stmt>test_syms_saving_and_fetching_document self<block_start>syms_adapter=SymsTestHelper.create_adapter_with_clientid()<line_sep><await>SymsTestHelper.clean_database(syms_adapter SymsTestHelper.DATABASE_NAME)<line_sep>test_input_path=TestHelper.get_input_folder_path(self.test_subpath 'TestSymsSavingAndFetchingDocument')<line_sep>test_act_output_path=TestHelper.get_actual_output_folder_path(self.test_subpath 
'TestSymsSavingAndFetchingDocument')<line_sep>test_exp_output_path=TestHelper.get_expected_output_folder_path(self.test_subpath 'TestSymsSavingAndFetchingDocument')<line_sep>corpus=CdmCorpusDefinition()<line_sep>adls_adapter1=SymsTestHelper.create_adapter_clientid_with_shared_key(1)<line_sep>adls_adapter2=SymsTestHelper.create_adapter_clientid_with_shared_key(2)<line_sep>local_input_adapter=LocalAdapter(test_input_path)<line_sep>local_act_output_adapter=LocalAdapter(test_act_output_path)<line_sep>local_exp_output_adapter=LocalAdapter(test_exp_output_path)<line_sep>corpus.storage.mount('adls1' adls_adapter1)<line_sep>corpus.storage.mount('adls2' adls_adapter2)<line_sep>corpus.storage.mount('syms' syms_adapter)<line_sep>corpus.storage.mount('localInput' local_input_adapter)<line_sep>corpus.storage.mount('localActOutput' local_act_output_adapter)<line_sep>corpus.storage.mount('localExpOutput' local_exp_output_adapter)<line_sep>corpus.storage.unmount('cdm')<line_sep>corpus.storage.default_namespace='localInput'<line_sep>manifest=<await>corpus.fetch_object_async('default.manifest.cdm.json')<line_sep>manifest.manifest_name=SymsTestHelper.DATABASE_NAME<line_sep><await>self.run_syms_save_manifest(manifest)<line_sep><await>self.run_syms_fetch_manifest(corpus manifest 'default.manifest.cdm.json')<line_sep><await>self.run_syms_fetch_document(corpus manifest)<line_sep>manifest_modified=<await>corpus.fetch_object_async('defaultmodified.manifest.cdm.json')<line_sep>manifest_modified.manifest_name=SymsTestHelper.DATABASE_NAME<line_sep>manifest_modified.entities[0].set_last_file_modified_time(datetime.datetime.now(datetime.timezone.utc))<line_sep><await>self.run_syms_save_manifest(manifest_modified)<line_sep><await>self.run_syms_fetch_manifest(corpus manifest_modified 'defaultmodified.manifest.cdm.json')<line_sep><await>self.run_syms_fetch_document(corpus manifest_modified)<line_sep><await>self.run_syms_smart_adls_adapter_mount_logic()<line_sep><await>SymsTestHelper.clean_database(syms_adapter SymsTestHelper.DATABASE_NAME)<block_end><block_end>
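The tests above repeat a small amount of corpus setup; stripped down to a sketch (the folder path is a placeholder), that pattern looks like this:

import asyncio

from cdm.objectmodel import CdmCorpusDefinition
from cdm.storage import LocalAdapter

async def load_manifest():
    # Mount a local folder as the 'local' namespace and fetch a manifest from it,
    # mirroring the setup the tests above perform.
    corpus = CdmCorpusDefinition()
    corpus.storage.mount('local', LocalAdapter('/path/to/cdm/documents'))  # placeholder path
    corpus.storage.default_namespace = 'local'
    return await corpus.fetch_object_async('local:/default.manifest.cdm.json')

manifest = asyncio.run(load_manifest())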
# ----------------------------------------------------------- # Code adapted from: # https://github.com/akanazawa/cmr/blob/master/nnutils/train_utils.py # # MIT License # # Copyright (c) 2018 akanazawa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ----------------------------------------------------------- # Generic Training Utils. <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>torch<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>time<import_stmt>pdb<import_from_stmt>absl flags<import_from_stmt>..utils.tf_visualizer Visualizer<as>TfVisualizer<import_stmt>torchvision.utils<as>vutils<line_sep>#-------------- flags -------------# #----------------------------------# ## Flags for training curr_path=osp.dirname(osp.abspath(__file__))<line_sep>cache_path=osp.join(curr_path '..' 'cachedir')<line_sep>flags.DEFINE_string('name' 'exp_name' 'Experiment Name')<line_sep>flags.DEFINE_integer('gpu_id' 0 'Which gpu to use')<line_sep>flags.DEFINE_integer('optim_bs' 1 'Perform parameter update every optim_bs iterations')<line_sep>flags.DEFINE_integer('num_epochs' 500 'Number of epochs to train')<line_sep>flags.DEFINE_integer('num_pretrain_epochs' 0 'If >0, we will pretain from an existing saved model.')<line_sep>flags.DEFINE_float('learning_rate' 1e-4 'learning rate')<line_sep>flags.DEFINE_float('beta1' 0.9 'Momentum term of adam')<line_sep>flags.DEFINE_bool('use_sgd' <false> 'if true uses sgd instead of adam, beta1 is used as momentum')<line_sep>flags.DEFINE_bool('multi_gpu' <false> 'if true use multiple GPUs')<line_sep>flags.DEFINE_integer('num_iter' 0 'Number of training iterations. 
0 -> Use epoch_iter')<line_sep>## Flags for logging and snapshotting flags.DEFINE_string('checkpoint_dir' osp.join(cache_path 'snapshots') 'Root directory for output files')<line_sep>flags.DEFINE_string('vis_dir' osp.join(cache_path 'visualization') 'Root directory for visualizations')<line_sep>flags.DEFINE_integer('print_freq' 20 'scalar logging frequency')<line_sep>flags.DEFINE_integer('save_latest_freq' 3000 'save latest model every x iterations')<line_sep>flags.DEFINE_integer('save_epoch_freq' 301 'save model every k epochs')<line_sep>flags.DEFINE_integer('lr_step_epoch_freq' 5 'Reduce LR by factor of 10 every k ephochs')<line_sep>flags.DEFINE_integer('batch_size' 64 'Size of minibatches')<line_sep>flags.DEFINE_integer('workers' 16 'dataloader worker number')<line_sep>## Flags for visualization flags.DEFINE_integer('display_freq' 100 'visuals logging frequency')<line_sep>flags.DEFINE_integer('min_display_iter' 400 'Skip plotting for initial iterations')<line_sep>flags.DEFINE_boolean('display_visuals' <true> 'whether to display images')<line_sep>flags.DEFINE_boolean('print_scalars' <true> 'whether to print scalars')<line_sep>flags.DEFINE_boolean('plot_scalars' <true> 'whether to plot scalars')<line_sep>flags.DEFINE_boolean('is_train' <true> 'Are we training ?')<line_sep>#--------- training class ---------# #----------------------------------# <class_stmt>Trainer()<block_start><def_stmt>__init__ self opts<block_start>self.opts=opts<line_sep>self.gpu_id=opts.gpu_id<line_sep>torch.cuda.set_device(opts.gpu_id)<line_sep>self.Tensor=torch.cuda.FloatTensor<if>(self.gpu_id<is><not><none>)<else>torch.Tensor<line_sep>self.invalid_batch=<false>#the trainer can optionally reset this every iteration during set_input call self.save_dir=os.path.join(opts.checkpoint_dir opts.name)<line_sep>self.vis_dir=os.path.join(opts.vis_dir opts.name)<if_stmt><not>os.path.exists(self.save_dir)<block_start>os.makedirs(self.save_dir)<block_end><if_stmt><not>os.path.exists(self.vis_dir)<block_start>os.makedirs(self.vis_dir)<block_end>log_file=os.path.join(self.save_dir 'opts.log')<line_sep>self.sc_dict={}<with_stmt>open(log_file 'w')<as>f<block_start><for_stmt>k dir(opts)<block_start>f.write('{}: {}\n'.format(k opts.__getattr__(k)))<block_end><block_end><block_end># helper saving function that can be used by subclasses <def_stmt>save_network self network network_label epoch_label gpu_id=<none><block_start>save_filename='{}_net_{}.pth'.format(network_label epoch_label)<line_sep>save_path=os.path.join(self.save_dir save_filename)<if_stmt>(self.opts.multi_gpu)<block_start>torch.save(network.module.cpu().state_dict() save_path)<block_end><else_stmt><block_start>torch.save(network.cpu().state_dict() save_path)<block_end><if_stmt>gpu_id<is><not><none><and>torch.cuda.is_available()<block_start>network.cuda(device=gpu_id)<block_end><return><block_end># helper loading function that can be used by subclasses <def_stmt>load_network self network network_label epoch_label network_dir=<none><block_start>print('Loading model')<line_sep>save_filename='{}_net_{}.pth'.format(network_label epoch_label)<if_stmt>network_dir<is><none><block_start>network_dir=self.save_dir<block_end>save_path=os.path.join(network_dir save_filename)<line_sep>network.load_state_dict(torch.load(save_path))<line_sep><return><block_end><def_stmt>define_model self<block_start>'''Should be implemented by the child class.'''<line_sep><raise>NotImplementedError<block_end><def_stmt>init_dataset self<block_start>'''Should be implemented by the child 
class.'''<line_sep><raise>NotImplementedError<block_end><def_stmt>define_criterion self<block_start>'''Should be implemented by the child class.'''<line_sep><raise>NotImplementedError<block_end><def_stmt>set_input self batch<block_start>'''Should be implemented by the child class.'''<line_sep><raise>NotImplementedError<block_end><def_stmt>forward self<block_start>'''Should compute self.total_loss. To be implemented by the child class.'''<line_sep><raise>NotImplementedError<block_end><def_stmt>save self epoch_prefix<block_start>'''Saves the model.'''<line_sep>self.save_network(self.model 'pred' epoch_prefix gpu_id=self.opts.gpu_id)<line_sep><return><block_end><def_stmt>get_current_visuals self<block_start><return>{}<block_end><def_stmt>get_current_scalars self<block_start><return>self.sc_dict<block_end><def_stmt>register_scalars self sc_dict beta=0.99<block_start>''' Keeps a running smoothed average of some scalars. '''<for_stmt>k sc_dict<block_start><if_stmt>k<not><in>self.sc_dict<block_start>self.sc_dict[k]=sc_dict[k]<block_end><else_stmt><block_start>self.sc_dict[k]=beta<times>self.sc_dict[k]+(1-beta)<times>sc_dict[k]<block_end><block_end><block_end><def_stmt>get_current_points self<block_start><return>{}<block_end><def_stmt>init_training self<block_start>opts=self.opts<line_sep>self.iteration_num=0<line_sep>self.init_dataset()<line_sep>self.define_model()<line_sep>self.define_criterion()<if_stmt>opts.use_sgd<block_start>self.optimizer=torch.optim.SGD(self.model.parameters() lr=opts.learning_rate momentum=opts.beta1)<block_end><else_stmt><block_start>param_list=list(self.model.parameters())<if_stmt>(opts.use_gan)<block_start>param_list=param_list+list(self.discriminator.parameters())<block_end>self.optimizer=torch.optim.Adam(filter(<lambda>p:p.requires_grad param_list) lr=opts.learning_rate betas=(opts.beta1 0.999))<block_end><block_end><def_stmt>adjust_learning_rate self optimizer=<none><block_start>"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""<if_stmt>(optimizer<is><none>)<block_start>optimizer=self.optimizer<block_end><for_stmt>param_group optimizer.param_groups<block_start>param_group['lr']=self.opts.learning_rate/(1+self.iteration_num<times>5e-4)<block_end><block_end><def_stmt>train self<block_start>opts=self.opts<line_sep>self.visualizer=TfVisualizer(opts)<line_sep>self.smoothed_total_loss=0<line_sep>visualizer=self.visualizer<line_sep>total_steps=0<line_sep>optim_steps=0<line_sep>dataset_size=len(self.dataloader)<for_stmt>epoch range(opts.num_pretrain_epochs opts.num_epochs)<block_start>epoch_iter=0<line_sep>self.curr_epoch=epoch<for_stmt>i,batch enumerate(self.dataloader)<block_start>self.iteration_num<augadd>1<line_sep>self.adjust_learning_rate()<line_sep>t_init=time.time()<line_sep>self.set_input(batch)<line_sep>t_batch=time.time()<if_stmt><not>self.invalid_batch<block_start>optim_steps<augadd>1<if_stmt>optim_steps%opts.optim_bs<eq>0<block_start>self.optimizer.zero_grad()<block_end>self.forward()<line_sep>self.smoothed_total_loss=self.smoothed_total_loss<times>0.99+0.01<times>self.total_loss<line_sep>t_forw=time.time()<line_sep>self.total_loss.backward()<line_sep>t_backw=time.time()<if_stmt>optim_steps%opts.optim_bs<eq>0<block_start>self.optimizer.step()<block_end>t_opt=time.time()<block_end>total_steps<augadd>1<line_sep>epoch_iter<augadd>1<if_stmt>opts.display_visuals<and>(total_steps%opts.display_freq<eq>0)<block_start>iter_end_time=time.time()<line_sep>#visualizer.log_images(self.get_current_visuals(), epoch*dataset_size + epoch_iter) 
vis_dict=self.get_current_visuals()<for_stmt>k,v vis_dict.items()<block_start><if_stmt>('mesh'<in>k)<block_start>v.save_obj(os.path.join(self.vis_dir k+'.obj') save_texture=<true>)<block_end><else_stmt><block_start>vutils.save_image(v os.path.join(self.vis_dir k+'.png'))<block_end><block_end><del_stmt>vis_dict<block_end><if_stmt>opts.print_scalars<and>(total_steps%opts.print_freq<eq>0)<block_start>scalars=self.get_current_scalars()<line_sep>visualizer.print_current_scalars(epoch epoch_iter scalars)<block_end><if_stmt>total_steps%opts.save_latest_freq<eq>0<block_start>print('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch total_steps))<line_sep>self.save('latest')<block_end><if_stmt>total_steps<eq>opts.num_iter<block_start><return><block_end><block_end><if_stmt>(epoch+1)%opts.save_epoch_freq<eq>0<block_start>print('saving the model at the end of epoch {:d}, iters {:d}'.format(epoch total_steps))<line_sep>self.save('latest')<line_sep>self.save(epoch+1)<block_end><block_end><block_end><block_end>
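A skeletal subclass showing which hooks the Trainer above expects a concrete experiment to implement; the toy data, model and criterion are stand-ins, and a real run still needs the absl flags above parsed into opts:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

class ToyTrainer(Trainer):
    """Illustrative only: fills the abstract hooks with a tiny regression task."""

    def init_dataset(self):
        xs, ys = torch.randn(256, 10), torch.randn(256, 1)
        self.dataloader = DataLoader(TensorDataset(xs, ys),
                                     batch_size=self.opts.batch_size, shuffle=True)

    def define_model(self):
        self.model = nn.Linear(10, 1).cuda(self.gpu_id)

    def define_criterion(self):
        self.criterion = nn.MSELoss()

    def set_input(self, batch):
        x, y = batch
        self.x, self.y = x.cuda(self.gpu_id), y.cuda(self.gpu_id)
        self.invalid_batch = False

    def forward(self):
        # train() expects self.total_loss to be populated here.
        self.total_loss = self.criterion(self.model(self.x), self.y)
        self.register_scalars({'loss': self.total_loss.item()})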
<import_stmt>os<import_stmt>sys<import_stmt>struct<import_from_stmt>binascii hexlify unhexlify<line_sep># crypto primitive imports <import_from_stmt>cryptography.hazmat.primitives.hmac HMAC<import_from_stmt>cryptography.hazmat.backends default_backend<import_from_stmt>cryptography.hazmat.primitives.ciphers Cipher<import_from_stmt>cryptography.hazmat.primitives.ciphers.modes GCM CBC<import_from_stmt>cryptography.hazmat.primitives.hashes SHA1 SHA256 SHA384 MD5 Hash<import_from_stmt>cryptography.hazmat.primitives.ciphers.algorithms AES ARC4 TripleDES Camellia SEED<line_sep># constants sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')<import_from_stmt>pmercury.utils.tls_constants *<class_stmt>TLS_CRYPTO<block_start><def_stmt>__init__ self<block_start>self.cur_mode=<none><line_sep>self.session_metadata=<none><line_sep>self.tls_sequence=<none><line_sep>self.tls13_handshake=<none><line_sep>self.kdf={}<line_sep>self.kdf['TLS 1.0']=self.kdf_tls10<line_sep>self.kdf['TLS 1.1']=self.kdf_tls11<line_sep>self.kdf['TLS 1.2']=self.kdf_tls12<line_sep>self.kdf['TLS 1.3']=self.kdf_tls13<block_end><def_stmt>kdf_tls10 self cr sr secret cipher_params flow_key<block_start><if_stmt>flow_key+self.cur_mode<not><in>self.session_metadata<block_start>self.session_metadata[flow_key+self.cur_mode]={}<block_end><if_stmt>'cbc_initial_decrypt'<not><in>self.session_metadata[flow_key+self.cur_mode]<block_start>self.session_metadata[flow_key+self.cur_mode]['cbc_initial_decrypt']=1<line_sep>fixed_iv_length=cipher_params['iv_length']<block_end><else_stmt><block_start>fixed_iv_length=cipher_params['fixed_iv_length']<block_end>label=b'key expansion'<line_sep>secret_md5=secret[:len(secret)/2]<line_sep>secret_sha=secret[-len(secret)/2:]<line_sep>md5_material=b''<line_sep>cur_hash=self.hmac(secret_md5 MD5() b'%s%s%s'%(label sr cr))<for_stmt>i range(16)<block_start>md5_material<augadd>self.hmac(secret_md5 MD5() b'%s%s%s%s'%(cur_hash label sr cr))<line_sep>cur_hash=self.hmac(secret_md5 MD5() cur_hash)<block_end>sha_material=b''<line_sep>cur_hash=self.hmac(secret_sha SHA1() b'%s%s%s'%(label sr cr))<for_stmt>i range(16)<block_start>sha_material<augadd>self.hmac(secret_sha SHA1() b'%s%s%s%s'%(cur_hash label sr cr))<line_sep>cur_hash=self.hmac(secret_sha SHA1() cur_hash)<block_end>output=b''<for_stmt>i range(min(len(md5_material) len(sha_material)))<block_start>output<augadd>chr(ord(md5_material[i])^ord(sha_material[i]))<block_end>key_material_lengths=[cipher_params['mac_key_length']]<times>2+[cipher_params['enc_key_length']]<times>2+[fixed_iv_length]<times>2<line_sep>offset=0<line_sep>key_material=[]<for_stmt>l key_material_lengths<block_start>key_material.append(output[offset:offset+l])<line_sep>offset<augadd>l<block_end><return>key_material<block_end><def_stmt>kdf_tls11 self cr sr secret cipher_params flow_key<block_start>label=b'key expansion'<line_sep>secret_md5=secret[:len(secret)/2]<line_sep>secret_sha=secret[-len(secret)/2:]<line_sep>md5_material=b''<line_sep>cur_hash=self.hmac(secret_md5 MD5() b'%s%s%s'%(label sr cr))<for_stmt>i range(16)<block_start>md5_material<augadd>self.hmac(secret_md5 MD5() b'%s%s%s%s'%(cur_hash label sr cr))<line_sep>cur_hash=self.hmac(secret_md5 MD5() cur_hash)<block_end>sha_material=b''<line_sep>cur_hash=self.hmac(secret_sha SHA1() b'%s%s%s'%(label sr cr))<for_stmt>i range(16)<block_start>sha_material<augadd>self.hmac(secret_sha SHA1() b'%s%s%s%s'%(cur_hash label sr cr))<line_sep>cur_hash=self.hmac(secret_sha SHA1() cur_hash)<block_end>output=b''<for_stmt>i range(min(len(md5_material) 
len(sha_material)))<block_start>output<augadd>chr(ord(md5_material[i])^ord(sha_material[i]))<block_end>key_material_lengths=[cipher_params['mac_key_length']]<times>2+[cipher_params['enc_key_length']]<times>2+[cipher_params['fixed_iv_length']]<times>2<line_sep>offset=0<line_sep>key_material=[]<for_stmt>l key_material_lengths<block_start>key_material.append(output[offset:offset+l])<line_sep>offset<augadd>l<block_end><return>key_material<block_end><def_stmt>kdf_tls12 self cr sr secret cipher_params flow_key<block_start>label=b'key expansion'<line_sep>digest_type=cipher_params['prf']()<line_sep>cur_hash=self.hmac(secret digest_type b'%s%s%s'%(label sr cr))<line_sep>output=b''<for_stmt>i range(16)<block_start>output<augadd>self.hmac(secret digest_type b'%s%s%s%s'%(cur_hash label sr cr))<line_sep>cur_hash=self.hmac(secret digest_type cur_hash)<block_end>key_material_lengths=[cipher_params['mac_key_length']]<times>2+[cipher_params['enc_key_length']]<times>2+[cipher_params['fixed_iv_length']]<times>2<line_sep>offset=0<line_sep>key_material=[]<for_stmt>l key_material_lengths<block_start>key_material.append(output[offset:offset+l])<line_sep>offset<augadd>l<block_end><return>key_material<block_end><def_stmt>kdf_tls13 self secret label length cipher_params flow_key<block_start>digest_type=cipher_params['prf']()<line_sep>key=b''<line_sep>block=b''<line_sep>ind=0<while_stmt>len(key)<l>length<block_start>ind<augadd>1<line_sep>block=self.hmac(secret digest_type b'%s%s%s'%(block label struct.pack('B' ind)))<line_sep>key<augadd>block<block_end><return>key[:length]<block_end><def_stmt>hmac self secret digest_type msg<block_start>tmp=HMAC(secret digest_type default_backend())<line_sep>tmp.update(msg)<line_sep><return>tmp.finalize()<block_end><def_stmt>hash_ self digest_type msg<block_start>tmp=Hash(digest_type default_backend())<line_sep>tmp.update(msg)<line_sep><return>tmp.finalize()<block_end><def_stmt>get_secret self client_random secrets cur_flow_key<block_start>secret=<none><if_stmt>client_random<not><in>secrets<block_start><return><none><block_end><if_stmt><not>self.session_metadata['version'].startswith('TLS 1.3')<block_start>secret=unhexlify(secrets[client_random]['master_secret'])<block_end># find appropriate master secret <if_stmt>cur_flow_key<not><in>self.tls13_handshake<block_start>self.tls13_handshake[cur_flow_key]=<true><block_end><if_stmt>self.cur_mode<eq>'client'<and>self.tls13_handshake[cur_flow_key]<eq><true><and>'client_handshake_secret'<in>secrets[client_random]<block_start>secret=unhexlify(secrets[client_random]['client_handshake_secret'])<block_end><elif_stmt>self.cur_mode<eq>'server'<and>self.tls13_handshake[cur_flow_key]<eq><true><and>'server_handshake_secret'<in>secrets[client_random]<block_start>secret=unhexlify(secrets[client_random]['server_handshake_secret'])<block_end><elif_stmt>self.cur_mode<eq>'client'<and>self.tls13_handshake[cur_flow_key]<eq><false><and>'client_traffic_secret'<in>secrets[client_random]<block_start>secret=unhexlify(secrets[client_random]['client_traffic_secret'])<block_end><elif_stmt>self.cur_mode<eq>'server'<and>self.tls13_handshake[cur_flow_key]<eq><false><and>'server_traffic_secret'<in>secrets[client_random]<block_start>secret=unhexlify(secrets[client_random]['server_traffic_secret'])<block_end><return>secret<block_end><def_stmt>get_explicit_material self flow_key data cipher_params<block_start>enc=<none><line_sep>iv=<none><if_stmt>self.session_metadata['version']<eq>'TLS 
1.0'<block_start>enc=data<if_stmt>cipher_params['mode']<eq>CBC<block_start><if_stmt>flow_key+self.cur_mode<not><in>self.session_metadata<or>'cbc_initial_decrypt'<not><in>self.session_metadata[flow_key+self.cur_mode]<or>'cur_iv'<not><in>self.session_metadata[flow_key+self.cur_mode]<block_start>iv=b''<block_end><else_stmt><block_start>iv=self.session_metadata[flow_key+self.cur_mode]['cur_iv']<block_end><block_end><block_end><elif_stmt>self.session_metadata['version']<in>['TLS 1.1' 'TLS 1.2']<block_start>enc=data[cipher_params['iv_length']:]<line_sep>iv=data[:cipher_params['iv_length']]<block_end><elif_stmt>self.session_metadata['version'].startswith('TLS 1.3')<block_start>enc=data<line_sep>iv=b''<block_end><return>enc iv<block_end><def_stmt>get_implicit_material self client_random server_random master_secret cipher_params flow_key explicit_iv<block_start>key=<none><line_sep>iv=<none><if_stmt>self.session_metadata['version']<in>['SSL 3.0' 'TLS 1.0' 'TLS 1.1' 'TLS 1.2']<block_start>c_mac_key,s_mac_key,c_key,s_key,c_iv,s_iv=self.kdf[self.session_metadata['version']](client_random server_random master_secret cipher_params flow_key)<if_stmt>self.cur_mode<eq>'client'<block_start>key=c_key<line_sep>iv=c_iv+explicit_iv<block_end><else_stmt><block_start>key=s_key<line_sep>iv=s_iv+explicit_iv<block_end><block_end><elif_stmt>self.session_metadata['version'].startswith('TLS 1.3')<block_start>cur_flow_key=flow_key+self.cur_mode<line_sep>label_str=b''<if_stmt>self.session_metadata['version']<eq>'TLS 1.3'<or>self.session_metadata['version']<eq>'TLS 1.3 (draft 20)'<block_start>label_str=b'tls13 '<block_end><else_stmt><block_start>label_str=b'TLS 1.3, '<block_end>tmp_label=label_str+b'key'<line_sep>len_=struct.pack(b'!H' cipher_params['enc_key_length'])<line_sep>tmp_label=b'%s%s%s%s'%(len_ struct.pack(b'B' len(tmp_label)) tmp_label b'\x00')<line_sep>key=self.kdf_tls13(master_secret tmp_label cipher_params['enc_key_length'] cipher_params flow_key)<line_sep>tmp_label=label_str+b'iv'<line_sep>len_=struct.pack(b'!H' cipher_params['iv_length'])<line_sep>tmp_label=b'%s%s%s%s'%(len_ struct.pack(b'B' len(tmp_label)) tmp_label b'\x00')<line_sep>implicit_iv=self.kdf_tls13(master_secret tmp_label cipher_params['iv_length'] cipher_params flow_key)<line_sep># calculate nonce iv2=struct.pack(b'!Q' self.tls_sequence[cur_flow_key]).rjust(len(implicit_iv) b'\x00')<line_sep>iv=b''.join([struct.pack(b'B' v^implicit_iv[i])<for>i,v enumerate(iv2)])<block_end><return>key iv<block_end># strip MAC/AEAD/Padding <def_stmt>get_data self result flow_key cipher_params encrypted_data<block_start>padding_length=0<line_sep># strip padding <if_stmt>self.session_metadata['version'].startswith('TLS 1.3')<block_start><for_stmt>i range(len(result)-1 -1 -1)<block_start><if_stmt>result[i]<ne>b'\x00'<block_start><break><block_end>padding_length<augadd>1<block_end>result=result[:-padding_length-1]<block_end><else_stmt><block_start><if_stmt>cipher_params['mode']<eq>CBC<block_start>padding_length=int(hexlify(result[-1:]) 16)<if_stmt>len(result)<l>padding_length+1<block_start>padding_length=0<block_end><else_stmt><block_start><for_stmt>i range(1 padding_length+1)<block_start><if_stmt>int(hexlify(result[-(i+1):-i]) 16)<ne>padding_length<block_start>padding_length=0<line_sep><break><block_end><block_end><block_end><if_stmt>padding_length<ne>0<block_start>padding_length<augadd>1<line_sep>result=result[:-padding_length]<block_end># set up IV for TLS 1.0 <if_stmt>self.session_metadata['version']<eq>'TLS 
1.0'<block_start><if_stmt>flow_key+self.cur_mode<not><in>self.session_metadata<block_start>self.session_metadata[flow_key+self.cur_mode]={}<block_end>self.session_metadata[flow_key+self.cur_mode]['cur_iv']=encrypted_data[-cipher_params['iv_length']:]<block_end><block_end><block_end># strip AEAD/MAC auth_length=0<if_stmt>cipher_params['mode']<eq>GCM<block_start><if_stmt>cipher_params['enc_key_length']<eq>32<block_start>result=result[:-16]<block_end><elif_stmt>cipher_params['enc_key_length']<eq>16<block_start>result=result<block_end>auth_length=cipher_params['enc_key_length']<block_end><elif_stmt>cipher_params['mac_key_length']<g>0<block_start>result=result[:-cipher_params['mac_key_length']]<line_sep>auth_length=cipher_params['mac_key_length']<block_end><return>result padding_length auth_length<block_end># get encrypted data and crypto parameters, output plaintext <def_stmt>get_plaintext self data cipher_params key iv flow_key<block_start><if_stmt>cipher_params['cipher']<eq>AES<block_start><if_stmt>cipher_params['mode']<eq>CBC<block_start>decryptor=Cipher(cipher_params['cipher'](key) cipher_params['mode'](iv) default_backend()).decryptor()<block_end><if_stmt>cipher_params['mode']<eq>GCM<block_start><if_stmt>len(data[-16:])<l>16<block_start><return><none><block_end>decryptor=Cipher(cipher_params['cipher'](key) cipher_params['mode'](iv data[-16:]) default_backend()).decryptor()<block_end><block_end><elif_stmt>cipher_params['cipher']<eq>ARC4<block_start><if_stmt>flow_key+self.cur_mode<not><in>self.session_metadata<block_start>self.session_metadata[flow_key+self.cur_mode]={}<block_end><if_stmt>'decryptor'<not><in>self.session_metadata[flow_key+self.cur_mode]<block_start>self.session_metadata[flow_key+self.cur_mode]['decryptor']=decryptor=Cipher(cipher_params['cipher'](key) <none> default_backend()).decryptor()<block_end>decryptor=self.session_metadata[flow_key+self.cur_mode]['decryptor']<block_end><elif_stmt>cipher_params['cipher']<eq>TripleDES<block_start>decryptor=Cipher(cipher_params['cipher'](key) cipher_params['mode'](iv) default_backend()).decryptor()<block_end><elif_stmt>cipher_params['cipher']<eq>Camellia<block_start>decryptor=Cipher(cipher_params['cipher'](key) cipher_params['mode'](iv) default_backend()).decryptor()<block_end><elif_stmt>cipher_params['cipher']<eq>SEED<block_start>decryptor=Cipher(cipher_params['cipher'](key) cipher_params['mode'](iv) default_backend()).decryptor()<block_end><else_stmt><block_start>print('%s Not Supported'%cipher_params['cipher'])<line_sep><return><none><block_end><return>decryptor.update(data)<block_end># Main decrypt function <def_stmt>decrypt self data flow_key cur_mode session_metadata tls_sequence secrets tls13_handshake<block_start>self.cur_mode=cur_mode<line_sep>self.session_metadata=session_metadata<line_sep>self.tls_sequence=tls_sequence<line_sep>self.tls13_handshake=tls13_handshake<if_stmt>'selected_cipher_suite'<not><in>self.session_metadata<or>'client_random'<not><in>self.session_metadata<or>'server_random'<not><in>self.session_metadata<block_start><return><none> <none> <none><block_end>cur_flow_key=flow_key+self.cur_mode<if_stmt>self.session_metadata['selected_cipher_suite']<not><in>TLS_CIPHER_SUITES<block_start>print('NYI:\t'+self.session_metadata['selected_cipher_suite'])<line_sep><return><none> <none> <none><block_end>cipher_params=TLS_CIPHER_SUITES[self.session_metadata['selected_cipher_suite']]<line_sep>client_random=self.session_metadata['client_random']<line_sep>server_random=self.session_metadata['server_random']<line_sep># set 
initial sequence number for decryption <if_stmt>cur_flow_key<not><in>self.tls_sequence<block_start>self.tls_sequence[cur_flow_key]=0<block_end># get master secret, varies for TLS 1.3 master_secret=self.get_secret(client_random secrets cur_flow_key)<if_stmt>master_secret<eq><none><block_start><return><none> <none> <none><block_end># get encrypted data and (if necessary) explicit iv encrypted_data,explicit_iv=self.get_explicit_material(flow_key data cipher_params)<if_stmt>encrypted_data<eq><none><or>explicit_iv<eq><none><block_start><return><none> <none> <none><block_end># get encryption key and implicit iv key,iv=self.get_implicit_material(unhexlify(client_random) unhexlify(server_random) master_secret cipher_params flow_key explicit_iv)<line_sep># decrypt encrypted text result=self.get_plaintext(encrypted_data cipher_params key iv flow_key)<if_stmt>result<eq><none><block_start><return><none> <none> <none><block_end># determine if padding is used result,padding_length,auth_length=self.get_data(result flow_key cipher_params encrypted_data)<line_sep># update sequence number self.tls_sequence[cur_flow_key]<augadd>1<line_sep><return>result padding_length auth_length<block_end><block_end>
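The kdf_tls12 method above is essentially the TLS 1.2 PRF (P_hash) applied to the key expansion label; a self-contained sketch of that expansion, using the same cryptography primitives, with illustrative inputs:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.hmac import HMAC

def hmac_once(secret, msg):
    h = HMAC(secret, SHA256(), default_backend())
    h.update(msg)
    return h.finalize()

def p_sha256(secret, label, seed, length):
    # TLS 1.2 P_hash: A(i) = HMAC(secret, A(i-1)); output += HMAC(secret, A(i) + label + seed)
    a = hmac_once(secret, label + seed)
    out = b''
    while len(out) < length:
        out += hmac_once(secret, a + label + seed)
        a = hmac_once(secret, a)
    return out[:length]

# Illustrative values only; real inputs come from the handshake's master secret and randoms.
key_block = p_sha256(b'\x00' * 48, b'key expansion', b'server_random' + b'client_random', 104)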
<import_from_stmt>typing List<import_from_stmt>gym.spaces.discrete Discrete<class_stmt>ActionSpace(Discrete)<block_start><def_stmt>__init__ self actions:List[str]<block_start>self.actions=actions<line_sep>self._ix_to_action={ix:action<for>ix,action enumerate(self.actions)}<line_sep>self._action_to_ix={action:ix<for>ix,action enumerate(self.actions)}<line_sep>super().__init__(len(self.actions))<block_end><def_stmt>__post_init__ self<block_start>self._ix_to_action={ix:action<for>ix,action enumerate(self.actions)}<line_sep>self._action_to_ix={action:ix<for>ix,action enumerate(self.actions)}<block_end><def_stmt>action_to_ix self action:str<arrow>int<block_start><return>self._action_to_ix[action]<block_end><def_stmt>ix_to_action self ix:int<arrow>str<block_start><return>self._ix_to_action[ix]<block_end><def_stmt>size self<arrow>int<block_start><return>self.n<block_end><def_stmt>__repr__ self<block_start><return>f"Discrete Action Space with {self.size()} actions: {self.actions}"<block_end><block_end>
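A quick usage sketch for the action space above (the action names are arbitrary):

space = ActionSpace(["buy", "sell", "hold"])

assert space.size() == 3
assert space.action_to_ix("sell") == 1
assert space.ix_to_action(2) == "hold"

print(space)           # Discrete Action Space with 3 actions: ['buy', 'sell', 'hold']
print(space.sample())  # random index from the underlying gym Discrete space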
<import_from_future_stmt> unicode_literals<import_stmt>json<import_stmt>logging<import_from_stmt>django.core mail<import_from_stmt>django.db models<import_from_stmt>django.utils.module_loading import_string<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>.utils split_recipient_list<line_sep>logger=logging.getLogger(__name__)<class_stmt>LogEntry(models.Model)<block_start>datetime=models.DateTimeField(auto_now_add=<true> editable=<false> verbose_name=_('Date time'))<line_sep>message=models.TextField(blank=<true> editable=<false> verbose_name=_('Message'))<class_stmt>Meta<block_start>get_latest_by='datetime'<line_sep>ordering=('-datetime' )<line_sep>verbose_name=_('Log entry')<line_sep>verbose_name_plural=_('Log entries')<block_end><block_end><class_stmt>UserMailer(models.Model)<block_start>label=models.CharField(max_length=128 unique=<true> verbose_name=_('Label'))<line_sep>default=models.BooleanField(default=<true> help_text=_('If default, this mailing profile will be pre-selected on the '<concat>'document mailing form.') verbose_name=_('Default'))<line_sep>enabled=models.BooleanField(default=<true> verbose_name=_('Enabled'))<line_sep>backend_path=models.CharField(max_length=128 help_text=_('The dotted Python path to the backend class.') verbose_name=_('Backend path'))<line_sep>backend_data=models.TextField(blank=<true> verbose_name=_('Backend data'))<class_stmt>Meta<block_start>ordering=('label' )<line_sep>verbose_name=_('User mailer')<line_sep>verbose_name_plural=_('User mailers')<block_end><def_stmt>__str__ self<block_start><return>self.label<block_end><def_stmt>save self *args **kwargs<block_start><if_stmt>self.default<block_start>UserMailer.objects.select_for_update().exclude(pk=self.pk).update(default=<false>)<block_end><return>super(UserMailer self).save(*args **kwargs)<block_end><def_stmt>backend_label self<block_start><return>self.get_backend().label<block_end><def_stmt>get_backend self<block_start><return>import_string(self.backend_path)<block_end><def_stmt>get_connection self<block_start><return>mail.get_connection(backend=self.get_backend().class_path **self.loads())<block_end><def_stmt>loads self<block_start><return>json.loads(self.backend_data)<block_end><def_stmt>dumps self data<block_start>self.backend_data=json.dumps(data)<line_sep>self.save()<block_end><def_stmt>send self subject='' body='' to=<none> document=<none> as_attachment=<false><block_start>recipient_list=split_recipient_list(recipients=[to])<with_stmt>self.get_connection()<as>connection<block_start>email_message=mail.EmailMultiAlternatives(subject=subject body=body to=recipient_list connection=connection)<if_stmt>as_attachment<block_start><with_stmt>document.open()<as>descriptor<block_start>email_message.attach(filename=document.label content=descriptor.read() mimetype=document.file_mimetype)<block_end><block_end><try_stmt><block_start>email_message.send()<block_end><except_stmt>Exception<as>exception<block_start>self.error_log.create(message=exception)<block_end><else_stmt><block_start>self.error_log.all().delete()<block_end><block_end><block_end><def_stmt>test self to<block_start>self.send(to=to subject=_('Test email from Mayan EDMS'))<block_end><block_end><class_stmt>UserMailerLogEntry(models.Model)<block_start>user_mailer=models.ForeignKey(UserMailer on_delete=models.CASCADE related_name='error_log' verbose_name=_('User mailer'))<line_sep>datetime=models.DateTimeField(auto_now_add=<true> editable=<false> verbose_name=_('Date time'))<line_sep>message=models.TextField(blank=<true> 
editable=<false> verbose_name=_('Message'))<class_stmt>Meta<block_start>get_latest_by='datetime'<line_sep>ordering=('-datetime' )<line_sep>verbose_name=_('User mailer log entry')<line_sep>verbose_name_plural=_('User mailer log entries')<block_end><block_end>
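A rough sketch of how the UserMailer model above might be configured and exercised; the backend path and connection settings are placeholders and would need to point at a real backend class exposing label and class_path:

import json

mailer = UserMailer.objects.create(
    label='Default SMTP',
    default=True,
    enabled=True,
    backend_path='myapp.mailers.DjangoSMTP',  # hypothetical dotted path to a backend class
    backend_data=json.dumps({'host': 'smtp.example.com', 'port': 587}),
)

# send() builds an EmailMultiAlternatives over the backend connection;
# failures are recorded in mailer.error_log, successes clear it.
mailer.send(subject='Hello', body='Test body', to='user@example.com')
mailer.test(to='admin@example.com')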
<import_from_stmt>typing List<line_sep># Average of levels in a binary tree <class_stmt>TreeNode<block_start><def_stmt>__init__ self x<block_start>self.val=x<line_sep>self.left=<none><line_sep>self.right=<none><block_end><block_end><class_stmt>Solution# Breadth-first <block_start><def_stmt>averageOfLevels_1 self root:TreeNode<arrow>List[float]<block_start>queue,res=[root] []<while_stmt>len(queue)<g>0<block_start>n,count=len(queue) 0<for_stmt>i range(n)<block_start>node=queue.pop(0)<line_sep>count<augadd>node.val<if_stmt>node.left<block_start>queue.append(node.left)<block_end><if_stmt>node.right<block_start>queue.append(node.right)<block_end><block_end>res.append(count/n)<block_end><return>res<block_end><block_end>
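A small check of the level-average solution above:

#        3
#       / \
#      9  20
#         / \
#        15  7
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)

print(Solution().averageOfLevels_1(root))  # [3.0, 14.5, 11.0]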
<import_stmt>unittest<import_stmt>base<class_stmt>Test(base.BaseScriptTest unittest.TestCase)<block_start>command_line="./scripts/line_select.py ${features}"<line_sep>input_features=base.TestFile("""0 1 1 0 1 0""")<line_sep>input_stdin=base.TestFile("""a b d e f""")<line_sep>output_stdout=base.TestFile("""b e""")<block_end>
<import_from_stmt>.fastpunct FastPunct<line_sep>
<import_from_future_stmt> division print_function absolute_import<import_stmt>time<import_stmt>warnings<import_stmt>numpy<as>np<import_stmt>itertools<as>itr<import_stmt>sys<import_from_stmt>contextlib contextmanager<line_sep>warnings.simplefilter("ignore" np.ComplexWarning)<line_sep>_is_verbose=<false><line_sep>_is_silent=<false><class_stmt>AbortException(Exception)<block_start>""" This exception is used for when the user wants to quit algorithms mid-way. The `AbortException` can for instance be sent by pygame input, and caught by whatever is running the algorithm. """<line_sep><pass><block_end><def_stmt>bytesize arr<block_start>""" Returns the memory byte size of a Numpy array as an integer. """<line_sep>byte_size=np.prod(arr.shape)<times>np.dtype(arr.dtype).itemsize<line_sep><return>byte_size<block_end><def_stmt>humanize_bytesize byte_size<block_start>order=np.log(byte_size)/np.log(1024)<line_sep>orders=[(5 'PB') (4 'TB') (3 'GB') (2 'MB') (1 'KB') (0 'B')]<for_stmt>ex,name orders<block_start><if_stmt>order<ge>ex<block_start><return>'{:.4g} {}'.format(byte_size/1024<power>ex name)<block_end><block_end><block_end><def_stmt>memsize arr<block_start>""" Returns the required memory of a Numpy array as a humanly readable string. """<line_sep><return>humanize_bytesize(bytesize(arr))<block_end><def_stmt>span arr<block_start>""" Calculate and return the mininum and maximum of an array. Parameters ---------- arr : ndarray Numpy array. Returns ------- min : dtype Minimum of array. max : dtype Maximum of array. """<line_sep># TODO: This could be made faster with a custom ufunc <return>(np.min(arr) np.max(arr))<block_end><def_stmt>apply_once func arr axes keepdims=<true><block_start>""" Similar to `numpy.apply_over_axes`, except this performs the operation over a flattened version of all the axes, meaning that the function will only be called once. This only makes a difference for non-linear functions. Parameters ---------- func : callback Function that operates well on Numpy arrays and returns a single value of compatible dtype. arr : ndarray Array to do operation over. axes : int or iterable Specifies the axes to perform the operation. Only one call will be made to `func`, with all values flattened. keepdims : bool By default, this is True, so the collapsed dimensions remain with length 1. This is simlar to `numpy.apply_over_axes` in that regard. If this is set to False, the dimensions are removed, just like when using for instance `numpy.sum` over a single axis. Note that this is safer than subsequently calling squeeze, since this option will preserve length-1 dimensions that were not operated on. Examples -------- >>> import deepdish as dd >>> import numpy as np >>> rs = np.random.RandomState(0) >>> x = rs.uniform(size=(10, 3, 3)) Image that you have ten 3x3 images and you want to calculate each image's intensity standard deviation: >>> np.apply_over_axes(np.std, x, [1, 2]).ravel() array([ 0.06056838, 0.08230712, 0.08135083, 0.09938963, 0.08533604, 0.07830725, 0.066148 , 0.07983019, 0.08134123, 0.01839635]) This is the same as ``x.std(1).std(1)``, which is not the standard deviation of all 9 pixels together. 
To fix this we can flatten the pixels and try again: >>> x.reshape(10, 9).std(axis=1) array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064, 0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717]) This is exactly what this function does for you: >>> dd.apply_once(np.std, x, [1, 2], keepdims=False) array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064, 0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717]) """<line_sep>all_axes=np.arange(arr.ndim)<if_stmt>isinstance(axes int)<block_start>axes={axes}<block_end><else_stmt><block_start>axes=set(axis%arr.ndim<for>axis axes)<block_end>principal_axis=min(axes)<for_stmt>i,axis enumerate(axes)<block_start>axis0=principal_axis+i<if_stmt>axis<ne>axis0<block_start>all_axes[axis0],all_axes[axis]=all_axes[axis] all_axes[axis0]<block_end><block_end>transposed_arr=arr.transpose(all_axes)<line_sep>new_shape=[]<line_sep>new_shape_keepdims=[]<for_stmt>axis,dim enumerate(arr.shape)<block_start><if_stmt>axis<eq>principal_axis<block_start>new_shape.append(-1)<block_end><elif_stmt>axis<not><in>axes<block_start>new_shape.append(dim)<block_end><if_stmt>axis<in>axes<block_start>new_shape_keepdims.append(1)<block_end><else_stmt><block_start>new_shape_keepdims.append(dim)<block_end><block_end>collapsed=np.apply_along_axis(func principal_axis transposed_arr.reshape(new_shape))<if_stmt>keepdims<block_start><return>collapsed.reshape(new_shape_keepdims)<block_end><else_stmt><block_start><return>collapsed<block_end><block_end><def_stmt>tupled_argmax a<block_start>""" Argmax that returns an index tuple. Note that `numpy.argmax` will return a scalar index as if you had flattened the array. Parameters ---------- a : array_like Input array. Returns ------- index : tuple Tuple of index, even if `a` is one-dimensional. Note that this can immediately be used to index `a` as in ``a[index]``. Examples -------- >>> import numpy as np >>> import deepdish as dd >>> a = np.arange(6).reshape(2,3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> dd.tupled_argmax(a) (1, 2) """<line_sep><return>np.unravel_index(np.argmax(a) np.shape(a))<block_end><def_stmt>multi_range *args<block_start><return>itr.product(*[range(a)<for>a args])<block_end>@contextmanager<def_stmt>timed name=<none> file=sys.stdout callback=<none> wall_clock=<true><block_start>""" Context manager to make it easy to time the execution of a piece of code. This timer will never run your code several times and is meant more for simple in-production timing, instead of benchmarking. Reports the wall-clock time (using `time.time`) and not the processor time. Parameters ---------- name : str Name of the timing block, to identify it. file : file handler Which file handler to print the results to. Default is standard output. If a numpy array and size 1 is given, the time in seconds will be stored inside it. Ignored if `callback` is set. callback : callable This offer even more flexibility than `file`. The callable will be called at the end of the execution with a single floating point argument with the elapsed time in seconds. Examples -------- >>> import deepdish as dd >>> import time The `timed` function is a context manager, so everything inside the ``with`` block will be timed. The results will be printed by default to standard output: >>> with dd.timed('Sleep'): # doctest: +SKIP ... time.sleep(1) [timed] Sleep: 1.001035451889038 s Using the `callback` parameter, we can accumulate multiple runs into a list: >>> times = [] >>> for i in range(3): # doctest: +SKIP ... 
with dd.timed(callback=times.append): ... time.sleep(1) >>> times # doctest: +SKIP [1.0035350322723389, 1.0035550594329834, 1.0039470195770264] """<line_sep>start=time.time()<line_sep><yield><line_sep>end=time.time()<line_sep>delta=end-start<if_stmt>callback<is><not><none><block_start>callback(delta)<block_end><elif_stmt>isinstance(file np.ndarray)<and>len(file)<eq>1<block_start>file[0]=delta<block_end><else_stmt><block_start>name_str=' {}'.format(name)<if>name<is><not><none><else>''<line_sep>print(("[timed]{0}: {1} s".format(name_str delta)) file=file)<block_end><block_end><class_stmt>SliceClass(object)<block_start><def_stmt>__getitem__ self index<block_start><return>index<block_end><block_end>aslice=SliceClass()<line_sep>
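A few of the smaller helpers above in action (printed values are indicative; exact formatting depends on the NumPy version):

import numpy as np

a = np.arange(12, dtype=np.float64).reshape(3, 4)

print(span(a))            # (0.0, 11.0)
print(memsize(a))         # '96 B'
print(tupled_argmax(a))   # (2, 3)

elapsed = np.zeros(1)
with timed(file=elapsed):   # store the elapsed seconds in the array instead of printing
    _ = a.sum()
print(elapsed[0])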
<import_stmt>numpy<as>np<try_stmt><block_start><try_stmt><block_start><import_from_stmt>numpy nanmean<block_end><except_stmt>ImportError<block_start><import_from_stmt>scipy.stats nanmean<block_end><block_end><except_stmt>ImportError<as>ex<block_start>print("Image-registration requires either numpy >= 1.8 or scipy.")<line_sep><raise>ex<block_end><def_stmt>downsample myarr factor estimator=nanmean<block_start>""" Downsample a 2D array by averaging over *factor* pixels in each axis. Crops upper edge if the shape is not a multiple of factor. This code is pure np and should be fast. keywords: estimator - default to mean. You can downsample by summing or something else if you want a different estimator (e.g., downsampling error: you want to sum & divide by sqrt(n)) """<line_sep>ys,xs=myarr.shape<line_sep>crarr=myarr[:ys-(ys%int(factor)) :xs-(xs%int(factor))]<line_sep>dsarr=estimator(np.concatenate([[crarr[i::factor j::factor]<for>i range(factor)]<for>j range(factor)]) axis=0)<line_sep><return>dsarr<block_end><def_stmt>downsample_cube myarr factor ignoredim=0<block_start>""" Downsample a 3D array by averaging over *factor* pixels on the last two axes. """<if_stmt>ignoredim<g>0<block_start>myarr=myarr.swapaxes(0 ignoredim)<block_end>zs,ys,xs=myarr.shape<line_sep>crarr=myarr[: :ys-(ys%int(factor)) :xs-(xs%int(factor))]<line_sep># average the stacked strided views with np.mean dsarr=np.mean(np.concatenate([[crarr[: i::factor j::factor]<for>i range(factor)]<for>j range(factor)]) axis=0)<if_stmt>ignoredim<g>0<block_start>dsarr=dsarr.swapaxes(0 ignoredim)<block_end><return>dsarr<block_end><def_stmt>downsample_1d myarr factor estimator=nanmean<block_start>""" Downsample a 1D array by averaging over *factor* pixels. Crops right side if the shape is not a multiple of factor. This code is pure np and should be fast. keywords: estimator - default to mean. You can downsample by summing or something else if you want a different estimator (e.g., downsampling error: you want to sum & divide by sqrt(n)) """<assert_stmt>myarr.ndim<eq>1<line_sep>xs=myarr.size<line_sep>crarr=myarr[:xs-(xs%int(factor))]<line_sep>dsarr=estimator(np.concatenate([[crarr[i::factor]<for>i range(factor)]]) axis=0)<line_sep><return>dsarr<block_end><def_stmt>downsample_axis myarr factor axis estimator=nanmean truncate=<false><block_start>""" Downsample an ND array by averaging over *factor* pixels along an axis. Crops right side if the shape is not a multiple of factor. This code is pure np and should be fast. keywords: estimator - default to mean. You can downsample by summing or something else if you want a different estimator (e.g., downsampling error: you want to sum & divide by sqrt(n)) """<line_sep># size of the dimension of interest xs=myarr.shape[axis]<if_stmt>xs%int(factor)<ne>0<block_start><if_stmt>truncate<block_start>view=[slice(<none>)<for>ii range(myarr.ndim)]<line_sep>view[axis]=slice(<none> xs-(xs%int(factor)))<line_sep>crarr=myarr[tuple(view)]<block_end><else_stmt><block_start>newshape=list(myarr.shape)<line_sep>newshape[axis]=(factor-xs%int(factor))<line_sep>extension=np.empty(newshape)<times>np.nan<line_sep>crarr=np.concatenate((myarr extension) axis=axis)<block_end><block_end><else_stmt><block_start>crarr=myarr<block_end><def_stmt>makeslice startpoint axis=axis step=factor# make empty slices <block_start>view=[slice(<none>)<for>ii range(myarr.ndim)]<line_sep># then fill the appropriate slice view[axis]=slice(startpoint <none> step)<line_sep># return a tuple so it can be used directly as an array index <return>tuple(view)<block_end># The extra braces here are crucial: We're adding an extra dimension so we # can average across it! 
stacked_array=np.concatenate([[crarr[makeslice(ii)]]<for>ii range(factor)])<line_sep>dsarr=estimator(stacked_array axis=0)<line_sep><return>dsarr<block_end>
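A small sketch (not from the original module) of the stacking trick that downsample and downsample_axis rely on: the factor*factor strided views are gathered along a new leading axis and then averaged across it.

import numpy as np

arr = np.arange(16, dtype=float).reshape(4, 4)
factor = 2

# same idea as downsample(): collect every strided view, then average them
stacked = np.concatenate([[arr[i::factor, j::factor] for i in range(factor)]
                          for j in range(factor)])
print(stacked.shape)         # (4, 2, 2) -- one leading entry per (i, j) offset
print(stacked.mean(axis=0))  # [[ 2.5  4.5]
                             #  [10.5 12.5]]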
<import_from_stmt>honssh log<import_stmt>os<import_stmt>struct<import_stmt>hashlib<import_stmt>json<import_stmt>socket<line_sep>BUFSIZ=16384<line_sep>OP_ERROR=0<line_sep>OP_INFO=1<line_sep>OP_AUTH=2<line_sep>OP_PUBLISH=3<line_sep>OP_SUBSCRIBE=4<line_sep>MAXBUF=1024<power>2<line_sep>SIZES={OP_ERROR:5+MAXBUF OP_INFO:5+256+20 OP_AUTH:5+256+20 OP_PUBLISH:5+MAXBUF OP_SUBSCRIBE:5+256<times>2 }<line_sep>HONSSHAUTHCHAN='honssh.auth'<line_sep>HONSSHSESHCHAN='honssh.sessions'<class_stmt>BadClient(Exception)<block_start><pass><block_end># packs a string with 1 byte length field <def_stmt>strpack8 x<block_start><if_stmt>isinstance(x str)<block_start>x=x.encode('latin1')<block_end><return>struct.pack('!B' len(x))+x<block_end># unpacks a string with 1 byte length field <def_stmt>strunpack8 x<block_start>l=x[0]<line_sep><return>x[1:1+l] x[1+l:]<block_end><def_stmt>msghdr op data<block_start><return>struct.pack('!iB' 5+len(data) op)+data<block_end><def_stmt>msgpublish ident chan data<block_start><return>msghdr(OP_PUBLISH strpack8(ident)+strpack8(chan)+data)<block_end><def_stmt>msgsubscribe ident chan<block_start><if_stmt>isinstance(chan str)<block_start>chan=chan.encode('latin1')<block_end><return>msghdr(OP_SUBSCRIBE strpack8(ident)+chan)<block_end><def_stmt>msgauth rand ident secret<block_start>hash=hashlib.sha1(bytes(rand)+secret).digest()<line_sep><return>msghdr(OP_AUTH strpack8(ident)+hash)<block_end><class_stmt>FeedUnpack(object)<block_start><def_stmt>__init__ self<block_start>self.buf=bytearray()<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>next self<block_start><return>self.unpack()<block_end><def_stmt>feed self data<block_start>self.buf.extend(data)<block_end><def_stmt>unpack self<block_start><if_stmt>len(self.buf)<l>5<block_start><raise>StopIteration('No message.')<block_end>ml,opcode=struct.unpack('!iB' buffer(self.buf 0 5))<if_stmt>ml<g>SIZES.get(opcode MAXBUF)<block_start><raise>BadClient('Not respecting MAXBUF.')<block_end><if_stmt>len(self.buf)<l>ml<block_start><raise>StopIteration('No message.')<block_end>data=bytearray(buffer(self.buf 5 ml-5))<del_stmt>self.buf[:ml]<line_sep><return>opcode data<block_end><block_end><class_stmt>hpclient(object)<block_start><def_stmt>__init__ self server port ident secret<block_start>log.msg(log.LCYAN '[PLUGIN][HPFEEDS]' 'hpfeeds client init broker {0}:{1}, identifier {2}'.format(server port ident))<line_sep>self.server,self.port=server int(port)<line_sep>self.ident,self.secret=ident.encode('latin1') secret.encode('latin1')<line_sep>self.unpacker=FeedUnpack()<line_sep>self.state='INIT'<line_sep>self.connect()<line_sep>self.sendfiles=[]<line_sep>self.filehandle=<none><line_sep>self.s=<none><block_end><def_stmt>connect self<block_start>self.s=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>self.s.settimeout(3)<try_stmt><block_start>self.s.connect((self.server self.port))<block_end><except_stmt><block_start>log.msg(log.LCYAN '[PLUGIN][HPFEEDS]' 'hpfeeds client could not connect to broker.')<line_sep>self.s=<none><block_end><else_stmt><block_start>self.s.settimeout(<none>)<line_sep>self.handle_established()<block_end><block_end><def_stmt>send self data<block_start><if_stmt><not>self.s<block_start><return><block_end>self.s.send(data)<block_end><def_stmt>close self<block_start>self.s.close()<line_sep>self.s=<none><block_end><def_stmt>handle_established self<block_start>log.msg(log.LCYAN '[PLUGIN][HPFEEDS]' 'hpclient established')<while_stmt>self.state<ne>'GOTINFO'<block_start>self.read()<block_end># quickly try to see if 
there was an error message self.s.settimeout(0.5)<line_sep>self.read()<line_sep>self.s.settimeout(<none>)<block_end><def_stmt>read self<block_start><if_stmt><not>self.s<block_start><return><block_end><try_stmt><block_start>d=self.s.recv(BUFSIZ)<block_end><except_stmt>socket.timeout<block_start><return><block_end><if_stmt><not>d<block_start>self.close()<line_sep><return><block_end>self.unpacker.feed(d)<try_stmt><block_start><for_stmt>opcode,data self.unpacker<block_start>log.msg(log.LCYAN '[PLUGIN][HPFEEDS]' 'hpclient msg opcode {0} data {1}'.format(opcode data))<if_stmt>opcode<eq>OP_INFO<block_start>name,rand=strunpack8(data)<line_sep>log.msg(log.LCYAN '[PLUGIN][HPFEEDS]' 'hpclient server name {0} rand {1}'.format(name rand))<line_sep>self.send(msgauth(rand self.ident self.secret))<line_sep>self.state='GOTINFO'<block_end><elif_stmt>opcode<eq>OP_PUBLISH<block_start>ident,data=strunpack8(data)<line_sep>chan,data=strunpack8(data)<line_sep>log.msg(log.LCYAN '[PLUGIN][HPFEEDS]' 'publish to {0} by {1}: {2}'.format(chan ident data))<block_end><elif_stmt>opcode<eq>OP_ERROR<block_start>log.err('[PLUGIN][HPFEEDS] - errormessage from server: {0}'.format(data))<block_end><else_stmt><block_start>log.err('[PLUGIN][HPFEEDS] - unknown opcode message: {0}'.format(opcode))<block_end><block_end><block_end><except_stmt>BadClient<block_start>log.err('[PLUGIN][HPFEEDS] - unpacker error, disconnecting.')<line_sep>self.close()<block_end><block_end><def_stmt>publish self channel **kwargs<block_start><try_stmt><block_start>self.send(msgpublish(self.ident channel json.dumps(kwargs).encode('latin1')))<block_end><except_stmt>Exception e<block_start>log.err('[PLUGIN][HPFEEDS] - connection to hpfriends lost: {0}'.format(e))<line_sep>log.err('[PLUGIN][HPFEEDS] - connecting')<line_sep>self.connect()<line_sep>self.send(msgpublish(self.ident channel json.dumps(kwargs).encode('latin1')))<block_end><block_end><def_stmt>sendfile self filepath# does not read complete binary into memory, read and send chunks <block_start><if_stmt><not>self.filehandle# FIXME: Where does 'i' come from?? <block_start>self.sendfileheader(i.file)<line_sep>self.sendfiledata()<block_end><else_stmt><block_start>self.sendfiles.append(filepath)<block_end><block_end><def_stmt>sendfileheader self filepath<block_start>self.filehandle=open(filepath 'rb')<line_sep>fsize=os.stat(filepath).st_size<line_sep># FIXME: Where does 'UNIQUECHAN' come from?? headc=strpack8(self.ident)+strpack8(UNIQUECHAN)<line_sep>headh=struct.pack('!iB' 5+len(headc)+fsize OP_PUBLISH)<line_sep>self.send(headh+headc)<block_end><def_stmt>sendfiledata self<block_start>tmp=self.filehandle.read(BUFSIZ)<if_stmt><not>tmp<block_start><if_stmt>self.sendfiles<block_start>fp=self.sendfiles.pop(0)<line_sep>self.sendfileheader(fp)<block_end><else_stmt><block_start>self.filehandle=<none><line_sep>self.handle_io_in(b'')<block_end><block_end><else_stmt><block_start>self.send(tmp)<block_end><block_end><block_end>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for gfsa.model.model_util."""<import_stmt>functools<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<import_from_stmt>gfsa.model model_util<class_stmt>LossUtilTest(parameterized.TestCase)<block_start>@parameterized.named_parameters({"testcase_name":"min" "minval":1 "maxval":<none> "expected":[1. 1. 2. 3. 4.] } {"testcase_name":"max" "minval":<none> "maxval":3 "expected":[0. 1. 2. 3. 3.] } {"testcase_name":"both" "minval":1 "maxval":3 "expected":[1. 1. 2. 3. 3.] })<def_stmt>test_forward_clip self minval maxval expected<block_start>vals,tangents=jax.jvp(functools.partial(model_util.forward_clip minval=minval maxval=maxval) (jnp.arange(5).astype(jnp.float32) ) (jnp.ones((5 )) ))<line_sep>np.testing.assert_allclose(vals expected)<line_sep>np.testing.assert_allclose(tangents np.ones((5 )))<block_end><def_stmt>test_safe_logit self<block_start>probs=jnp.array([0 1e-20 1e-3 0.9 1])<line_sep>logits=model_util.safe_logit(probs)<line_sep>self.assertTrue(np.all(np.isfinite(logits)))<line_sep>np.testing.assert_allclose(logits[1:3] jax.scipy.special.logit(probs[1:3]))<block_end><def_stmt>test_binary_logit_cross_entropy self<block_start>logits=jnp.array([-10. -5. 0. 5. 10.])<line_sep>true_probs=jax.nn.sigmoid(logits)<line_sep>false_probs=jax.nn.sigmoid(-logits)<line_sep>true_nll=model_util.binary_logit_cross_entropy(logits jnp.ones([5] dtype=bool))<line_sep>false_nll=model_util.binary_logit_cross_entropy(logits jnp.zeros([5] dtype=bool))<line_sep>np.testing.assert_allclose(true_nll -jnp.log(true_probs) atol=1e-7)<line_sep>np.testing.assert_allclose(false_nll -jnp.log(false_probs) atol=1e-7)<block_end><def_stmt>test_linear_cross_entropy self<block_start>probs=jnp.array([0 1e-20 1e-3 0.9 1 1 1-1e-7 1-1e-3 0.1 0])<line_sep>targets=jnp.array([<true>]<times>5+[<false>]<times>5)<line_sep>losses=model_util.linear_cross_entropy(probs targets)<line_sep># Losses are clipped to be finite. self.assertTrue(np.all(np.isfinite(losses)))<line_sep># Loss values make sense. np.testing.assert_allclose(losses[1:5] [-np.log(1e-20) -np.log(1e-3) -np.log(0.9) 0] atol=1e-5)<line_sep>self.assertGreater(losses[0] losses[1])<line_sep># note: losses for false targets have especially low precision due to # rounding errors for small values close to 1. np.testing.assert_allclose(losses[6] -np.log(1e-7) atol=0.2)<line_sep>np.testing.assert_allclose(losses[7:10] [-np.log(1e-3) -np.log(0.9) 0] atol=1e-4)<line_sep>self.assertGreater(losses[5] losses[6])<line_sep># Gradients are finite. gradients=jax.grad(<lambda>x:jnp.sum(model_util.linear_cross_entropy(x targets)))(probs)<line_sep>self.assertTrue(np.all(np.isfinite(gradients)))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>absltest.main()<block_end>
# -*- encoding: utf-8 -*- <import_from_stmt>random uniform<import_from_stmt>time sleep<import_from_stmt>custom.xuexi_chrome XuexiChrome<def_stmt>logout browser:XuexiChrome<block_start>browser.xuexi_get('https://www.xuexi.cn/')<line_sep>sleep(round(uniform(1 2) 2))<line_sep>logoutBtn=browser.find_element_by_class_name('logged-link')<line_sep>logoutBtn.click()<block_end>
<import_from_stmt>a_module.a_file a_func<if_stmt>__name__<eq>'__main__'<block_start>a_func()<block_end>
<import_from_stmt>labml_app.db job<line_sep>SAMPLE_SPECS_DICT={'parameters':[] 'definitions':{} 'response':{}}<line_sep>sync={"parameters":[{"name":"computer_uuid" "in":"query" "type":"string" "required":"true" "description":"0c112ffda506f10f9f793c0fb6d9de4b43595d03" } {"name":"runs" "in":"body" "type":"list" "description":"Runs to be synced with the server" "example":[{'uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' 'size_tensorboard':10.2 'size_checkpoints':15.4}]} ] "responses":{"200":{"description":"Synced server side run_uuid list" "schema":{'type':'object' 'properties':{'runs':{'type':'object' 'example':{'active':['0c112ffda506f10f9f793c0fb6d9de4b43595d03'] 'deleted':['0c112ffda506f10f9f793c0fb6d9de4b43595d03'] 'unknown':['0c112ffda506f10f9f793c0fb6d9de4b43595d03']}} }} }}}<line_sep>polling={"parameters":[{"name":"computer_uuid" "in":"query" "type":"string" "required":"true" "description":"0c112ffda506f10f9f793c0fb6d9de4b43595d03" } {"name":"jobs" "in":"body" "type":"list" "description":"Status of the jobs initiated by UI" "example":[{'uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' 'status':job.JobStatuses.SUCCESS} {'uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' 'status':job.JobStatuses.FAIL}]}] "responses":{"200":{"description":"List of pending jobs" "schema":{'type':'object' 'properties':{'jobs':{'type':'list' 'example':[{'uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' 'status':job.JobStatuses.INITIATED 'created_time':'16234567' 'completed_time':<none> 'method':job.JobMethods.START_TENSORBOARD 'data':{'runs':['0c112ffda506f10f9f793c0fb6d9de4b43595d03']}}]}}} }}}<line_sep>start_tensor_board={"parameters":[{"name":"computer_uuid" "in":"path" "type":"string" "required":"true" "description":"0c112ffda506f10f9f793c0fb6d9de4b43595d03" } {"name":"runs" "in":"body" "type":"list" "description":"Set of runs to start TB. Note that all the runs should be from a same computer" "example":['0c112ffda506f10f9f793c0fb6d9de4b43595d03']} ] "responses":{"200":{"description":"job details with the response" "schema":{'type':'object' 'example':{'uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' 'status':job.JobStatuses.SUCCESS 'created_time':'16234567' 'completed_time':'16234567' 'method':job.JobMethods.START_TENSORBOARD}} }}}<line_sep>clear_checkpoints={"parameters":[{"name":"computer_uuid" "in":"path" "type":"string" "required":"true" "description":"0c112ffda506f10f9f793c0fb6d9de4b43595d03" } {"name":"runs" "in":"body" "type":"list" "description":"Set of runs to clear checkpoints. Note that all the runs should be from same a computer" "example":['0c112ffda506f10f9f793c0fb6d9de4b43595d03']} ] "responses":{"200":{"description":"job details with the response" "schema":{'type':'object' 'example':{'uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' 'status':job.JobStatuses.SUCCESS 'created_time':'16234567' 'completed_time':'16234567' 'method':job.JobMethods.START_TENSORBOARD}} }}}<line_sep>get_computer={"parameters":[{"name":"session_uuid" "in":"path" "type":"string" "required":"true" "description":"0c112ffda506f10f9f793c0fb6d9de4b43595d03" } ] "responses":{"200":{"description":"Synced server side run_uuid list" "schema":{'type':'object' 'example':{'sessions':['0c112ffda506f10f9f793c0fb6d9de4b43595d03' '0c112ffda506f10f9f793c0fb6d9de4b43595d03'] 'session_uuid':'0c112ffda506f10f9f793c0fb6d9de4b43595d03' }} }}}<line_sep>
""" Follow up for "Unique Paths": Now consider if some obstacles are added to the grids. How many unique paths would there be? An obstacle and empty space is marked as 1 and 0 respectively in the grid. For example, There is one obstacle in the middle of a 3x3 grid as illustrated below. [ [0,0,0], [0,1,0], [0,0,0] ] The total number of unique paths is 2. Note: m and n will be at most 100. """<class_stmt>Solution# @param obstacleGrid, a list of lists of integers # @return an integer <block_start><def_stmt>uniquePathsWithObstacles self obstacleGrid<block_start>n=len(obstacleGrid)<line_sep>m=len(obstacleGrid[0])<line_sep>t=[[-1<for>i range(m)]<for>j range(n)]<line_sep><return>self.unique_paths(obstacleGrid m-1 n-1 t)<block_end><def_stmt>unique_paths self grid x y t<block_start><if_stmt>x<eq>0<and>y<eq>0<block_start>t[y][x]=1<if>grid[y][x]<eq>0<else>0<line_sep><return>t[y][x]<block_end><elif_stmt>grid[y][x]<eq>1<block_start>t[y][x]=0<line_sep><return>t[y][x]<block_end><elif_stmt>t[y][x]<ne>-1<block_start><return>t[y][x]<block_end><elif_stmt>x<g>0<and>y<eq>0<block_start>t[y][x]=self.unique_paths(grid x-1 y t)<line_sep><return>t[y][x]<block_end><elif_stmt>y<g>0<and>x<eq>0<block_start>t[y][x]=self.unique_paths(grid x y-1 t)<line_sep><return>t[y][x]<block_end><else_stmt><block_start>a=self.unique_paths(grid x-1 y t)<line_sep>b=self.unique_paths(grid x y-1 t)<line_sep>t[y][x]=a+b<line_sep><return>t[y][x]<block_end><block_end><block_end>
<import_stmt>csv<import_stmt>os<import_from_stmt>datetime datetime<import_stmt>pandas<as>pd<import_from_stmt>cakechat.config PREDICTION_MODE_FOR_TESTS MAX_PREDICTIONS_LENGTH<import_from_stmt>cakechat.dialog_model.inference get_nn_responses<import_from_stmt>cakechat.dialog_model.model_utils transform_context_token_ids_to_sentences<import_from_stmt>cakechat.dialog_model.quality calculate_model_mean_perplexity calculate_response_ngram_distinctness<import_from_stmt>cakechat.utils.files_utils ensure_dir<import_from_stmt>cakechat.utils.logger get_logger<line_sep>_logger=get_logger(__name__)<def_stmt>calculate_and_log_val_metrics nn_model context_sensitive_val context_free_val prediction_mode=PREDICTION_MODE_FOR_TESTS calculate_ngram_distance=<true><block_start>metric_name_to_value={'context_free_perplexity':calculate_model_mean_perplexity(nn_model context_free_val) 'context_sensitive_perplexity':calculate_model_mean_perplexity(nn_model context_sensitive_val)}<if_stmt>calculate_ngram_distance<block_start><for_stmt>metric_name,ngram_len [('unigram_distinctness' 1) ('bigram_distinctness' 2)]<block_start>metric_name_to_value[metric_name]=calculate_response_ngram_distinctness(context_sensitive_val.x nn_model ngram_len=ngram_len mode=prediction_mode condition_ids=context_sensitive_val.condition_ids)<block_end><block_end><for_stmt>metric_name,metric_value metric_name_to_value.items()<block_start>_logger.info('Val set {}: {:.3f}'.format(metric_name metric_value))<block_end><return>metric_name_to_value<block_end><def_stmt>_init_csv_writer predictions_path output_seq_len model_name<block_start><with_stmt>open(predictions_path 'w' encoding='utf-8')<as>fh<block_start>csv_writer=csv.writer(fh delimiter='\t')<line_sep>csv_writer.writerow([model_name])<line_sep>csv_writer.writerow(['date: {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M'))])<line_sep>csv_writer.writerow(['{} maximum tokens in the response'.format(output_seq_len)])<line_sep>csv_writer.writerow([''])<block_end><block_end># empty row for better readability <def_stmt>log_predictions predictions_path contexts_token_ids nn_model prediction_modes output_seq_len=MAX_PREDICTIONS_LENGTH **kwargs<block_start>""" Generate responses for provided contexts and save the results on the disk. For a given context several responses will be generated - one for each mode from the prediction_modes list. 
:param predictions_path: Generated responses will be saved to this file :param contexts_token_ids: contexts token ids, numpy array of shape (batch_size, context_len, INPUT_SEQUENCE_LENGTH) :param nn_model: instance of CakeChatModel class :param prediction_modes: See PREDICTION_MODES for available options :param output_seq_len: Max number of tokens in generated responses """<line_sep>_logger.info('Logging responses for test set')<line_sep># Create all the directories for the prediction path in case they don't exist ensure_dir(os.path.dirname(predictions_path))<line_sep>_init_csv_writer(predictions_path output_seq_len nn_model.model_name)<line_sep>contexts=transform_context_token_ids_to_sentences(contexts_token_ids nn_model.index_to_token)<line_sep>predictions_data=pd.DataFrame()<line_sep>predictions_data['contexts']=contexts<for_stmt>prediction_mode prediction_modes<block_start>predicted_responses=get_nn_responses(contexts_token_ids nn_model prediction_mode **kwargs)<line_sep># list of lists of strings, shape (contexts_num, 1) predicted_responses=[response[0]<for>response predicted_responses]<line_sep># list of strings, shape (contexts_num) predictions_data[prediction_mode]=predicted_responses<block_end>predictions_data.to_csv(predictions_path sep='\t' index=<false> encoding='utf-8' mode='a' float_format='%.2f')<line_sep>_logger.info('Dumped {} predicted responses to {}'.format(len(contexts) predictions_path))<block_end>
# Licensed to Modin Development Team under one or more contributor license agreements. # See the NOTICE file distributed with this work for additional information regarding # copyright ownership. The Modin Development Team licenses this file to you under the # Apache License, Version 2.0 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # FIXME: This whole module is duplicating the logic of `default.py` and should be removed. """Module houses default functions builder class."""<import_from_stmt>.default DefaultMethod<class_stmt>ObjTypeDeterminer<block_start>""" Class that routes work to the frame. Provides an instance which forwards all of the `__getattribute__` calls to an object under which `key` function is applied. """<def_stmt>__getattr__ self key<block_start>""" Build function that executes `key` function over passed frame. Parameters ---------- key : str Returns ------- callable Function that takes DataFrame and executes `key` function on it. """<def_stmt>func df *args **kwargs<block_start>"""Access specified attribute of the passed object and call it if it's callable."""<line_sep>prop=getattr(df key)<if_stmt>callable(prop)<block_start><return>prop(*args **kwargs)<block_end><else_stmt><block_start><return>prop<block_end><block_end><return>func<block_end><block_end><class_stmt>AnyDefault(DefaultMethod)<block_start>"""Builder for default-to-pandas methods which can be executed under any type of object."""<line_sep>@classmethod<def_stmt>register cls func obj_type=<none> **kwargs<block_start>""" Build function that do fallback to default pandas implementation for passed `func`. Parameters ---------- func : callable or str, Function to apply to the casted to pandas frame. obj_type : object, optional If `func` is a string with a function name then `obj_type` provides an object to search function in. If not specified `ObjTypeDeterminer` will be used. **kwargs : kwargs Additional parameters that will be used for building. Returns ------- callable Function that takes query compiler, does fallback to pandas and applies `func` to the casted to pandas frame. """<if_stmt>obj_type<is><none><block_start>obj_type=ObjTypeDeterminer()<block_end><return>cls.call(func obj_type=obj_type **kwargs)<block_end><block_end>
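A rough illustration of how ObjTypeDeterminer forwards attribute access to the frame; it assumes pandas is installed and only shows the forwarding behaviour, not Modin's actual dispatch path:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

# ObjTypeDeterminer().sum builds roughly: lambda frame, *args, **kw: frame.sum(*args, **kw)
summer = ObjTypeDeterminer().sum
print(summer(df))  # a    6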
<import_from_stmt>datetime datetime<import_stmt>os<import_stmt>sys<import_stmt>logging<import_stmt>cPickle<import_stmt>sqlite3<import_stmt>operator<import_stmt>copy<import_stmt>numpy<as>np<import_stmt>theano<import_from_stmt>theano.sandbox.cuda.var CudaNdarraySharedVariable<line_sep>floatX=theano.config.floatX<class_stmt>Log<block_start><def_stmt>__init__ self experiment_name="experiment" description=<none> save_outputs=<false> save_model=<false> save_epoch_error=<false> save_to_database=<none> logger=<none><block_start>self.experiment_name=experiment_name<line_sep>self.description=description<line_sep>self.save_outputs=save_outputs<line_sep>self.save_model=save_model<line_sep>self.save_epoch_error=save_epoch_error<line_sep>self.save_to_database=save_to_database<line_sep>dt=datetime.now()<line_sep>dt=dt.strftime('%Y%m%d_%H%M_%S%f')<line_sep>self.exp_id=experiment_name+'_'+dt<if_stmt>save_outputs<or>save_model<block_start>save_dir=os.environ['MOZI_SAVE_PATH']<if_stmt><not>os.path.exists(save_dir)<block_start>os.mkdir(save_dir)<block_end>self.exp_dir=save_dir+'/'+self.exp_id<if_stmt><not>os.path.exists(self.exp_dir)<block_start>os.mkdir(self.exp_dir)<block_end><block_end>self.logger=logger<if_stmt>self.logger<is><none><block_start>self.logger=logging.getLogger(__name__)<line_sep>self.logger.setLevel(logging.DEBUG)<block_end>self.logger.info('exp_id: '+experiment_name)<if_stmt>save_outputs<block_start>ch=logging.FileHandler(filename=self.exp_dir+'/outputs.log')<line_sep>ch.setLevel(logging.INFO)<line_sep>formatter=logging.Formatter('%(message)s')<line_sep>ch.setFormatter(formatter)<line_sep>self.logger.addHandler(ch)<block_end><if_stmt>save_epoch_error<block_start>self.epoch_error_path=self.exp_dir+'/epoch_error.csv'<with_stmt>open(self.epoch_error_path 'wb')<as>epoch_file<block_start>epoch_file.write('Epoch,Train_Cost,Valid_Cost,Valid_Error\n')<block_end><block_end><if_stmt>description<is><not><none><block_start>self.logger.info('Description: '+self.description)<block_end><if_stmt>save_to_database<block_start>self.first_time_record=<true><if_stmt><not>os.path.exists(os.environ['MOZI_DATABASE_PATH'])<block_start>os.mkdir(os.environ['MOZI_DATABASE_PATH'])<block_end><block_end><block_end><def_stmt>info self msg<block_start>self.logger.info(msg)<block_end><def_stmt>print_records self<block_start>sorted_ls=sorted(self.save_to_database['records'].iteritems() key=operator.itemgetter(0))<for_stmt>key,value sorted_ls<block_start>self.info(key+': '+str(value))<block_end><block_end><def_stmt>_log_outputs self outputs<block_start>dt=datetime.now()<line_sep>dt=dt.strftime('%Y-%m-%d %H:%M')<line_sep>self.logger.info('Time: '+dt)<for_stmt>(name val) outputs<block_start>self.logger.info(name+': '+str(val))<block_end><if_stmt>self.save_outputs<block_start>self.logger.info('[ outputs saved to: %s ]\n'%self.exp_id)<block_end><block_end><def_stmt>_save_model self model<block_start><with_stmt>open(self.exp_dir+'/model.pkl' 'wb')<as>pkl_file<block_start>cPickle.dump(model pkl_file)<block_end><block_end><def_stmt>_save_epoch_error self epoch train_cost valid_cost valid_error<block_start><with_stmt>open(self.epoch_error_path 'ab')<as>epoch_file<block_start>epoch_file.write('{},{},{},{}\n'.format(epoch train_cost valid_cost valid_error))<block_end><block_end><def_stmt>_save_to_database self epoch train_cost valid_error 
best_valid_error<block_start>conn=sqlite3.connect(os.environ['MOZI_DATABASE_PATH']+'/'+self.save_to_database['name'])<line_sep>cur=conn.cursor()<if_stmt>self.first_time_record<block_start>query='CREATE TABLE IF NOT EXISTS '+self.experiment_name+'(exp_id TEXT PRIMARY KEY NOT NULL,'<for_stmt>k,v self.save_to_database['records'].items()<block_start><if_stmt>type(v)<is>str<block_start>query<augadd>k+' TEXT,'<block_end><elif_stmt>type(v)<is>int<block_start>query<augadd>k+' INT,'<block_end><elif_stmt>type(v)<is>float<block_start>query<augadd>k+' REAL,'<block_end><else_stmt><block_start><try_stmt><block_start>self.save_to_database['records'][k]=str(v)<line_sep>query<augadd>str(k)+' TEXT,'<block_end><except_stmt><block_start><raise>Exception("Error: The input types for records '{}' of {}".format(k type(v))+" is not primitive types (str, int, float) and not castable as str.")<block_end><block_end><block_end>query<augadd>'epoch INT, train_cost REAL, valid_error REAL, best_valid_error REAL);'<line_sep>cur.execute(query)<try_stmt><block_start>query='INSERT INTO '+self.experiment_name+' VALUES('<line_sep>ls=[self.exp_id]<for_stmt>k,v self.save_to_database['records'].items()<block_start>query<augadd>'?,'<line_sep>ls.append(v)<block_end>query<augadd>'?,?,?,?,?);'<line_sep>ls.extend([epoch train_cost valid_error best_valid_error])<line_sep>cur.execute(query ls)<line_sep>self.first_time_record=<false><block_end><except_stmt>sqlite3.OperationalError<as>err<block_start>self.logger.error('sqlite3.OperationalError: '+err.message)<line_sep>self.logger.error('Solution: Change the experiment_name in Log() to a new name, '+'or drop the same table name from the database. '+'experiment_name is used as the table name.')<line_sep><raise><block_end><block_end><else_stmt><block_start>cur.execute('UPDATE '+self.experiment_name+' SET '+'epoch = ?, '+'train_cost = ?,'+'valid_error = ?,'+'best_valid_error = ?'+"WHERE exp_id='%s'"%self.exp_id [epoch train_cost valid_error best_valid_error])<block_end>conn.commit()<line_sep>conn.close()<block_end><block_end>
# pylint: disable=unused-import <import_from_stmt>dataclasses field<import_from_stmt>. converters settings<import_from_stmt>.decorators auto datafile<import_from_stmt>.manager Missing<import_from_stmt>.model Model<line_sep>
''' PROBLEM: Length of Last Word Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word (last word means the last appearing word if we loop from left to right) in the string. If the last word does not exist, return 0. Note: A word is defined as a maximal substring consisting of non-space characters only. Example: Input: "<NAME>" Output: 5 Problem link : https://leetcode.com/problems/length-of-last-word/ '''<line_sep>''' APPROACH - We can convert string into list of words and can calculate length using reverse indexing '''<class_stmt>Solution<block_start><def_stmt>lengthOfLastWord self s:str<arrow>int<block_start>a=s.split()<if_stmt>(len(a)<ge>1)<block_start><return>len(a[-1])<block_end><else_stmt><block_start><return>0<block_end><block_end><block_end>
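Quick sanity checks for the split-based approach, assuming the Solution class above:

s = Solution()
print(s.lengthOfLastWord("Hello World"))  # 5
print(s.lengthOfLastWord("   "))          # 0 (no words at all)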
# variables for database and url configuration <import_from_stmt>config Config<import_from_stmt>supabase Client create_client<class_stmt>SupabaseDB<block_start>""" class instance for database connection to supabase :str: url: configuration for database url for data inside supafast project :str: key: configuration for database secret key for authentication :object: supabase: Supabase instance for connection to database environment """<line_sep>url:str=Config.URL<line_sep>key:str=Config.KEY<line_sep>supabase:Client=create_client(url key)<block_end>
<import_from_stmt>enum Enum auto<class_stmt>HTTPMethod(str Enum)<block_start><def_stmt>_generate_next_value_ name start count last_values<block_start><return>name.upper()<block_end><def_stmt>__eq__ self value:object<arrow>bool<block_start>value=str(value).upper()<line_sep><return>super().__eq__(value)<block_end><def_stmt>__hash__ self<arrow>int<block_start><return>hash(self.value)<block_end><def_stmt>__str__ self<arrow>str<block_start><return>self.value<block_end>GET=auto()<line_sep>POST=auto()<line_sep>PUT=auto()<line_sep>HEAD=auto()<line_sep>OPTIONS=auto()<line_sep>PATCH=auto()<line_sep>DELETE=auto()<block_end>HTTP_METHODS=tuple(HTTPMethod.__members__.values())<line_sep>DEFAULT_HTTP_CONTENT_TYPE="application/octet-stream"<line_sep>
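A short sketch of how the case-insensitive __eq__ behaves in practice, assuming the enum above is importable:

print(HTTPMethod.GET == "get")  # True: the other side is upper-cased before comparing
print(str(HTTPMethod.POST))     # "POST"
print("PATCH" in HTTP_METHODS)  # True: members are str subclasses with upper-case values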
imports=""<line_sep>loader=""" //handle := C.MemoryLoadLibrary(unsafe.Pointer(&full_payload[0]),(C.size_t)(len(full_payload))) handle := C.MemoryLoadLibraryEx(unsafe.Pointer(&full_payload[0]), (C.size_t)(len(full_payload)), (*[0]byte)(C.MemoryDefaultLoadLibrary), // loadLibrary func ptr (*[0]byte)(C.MemoryDefaultGetProcAddress), // getProcAddress func ptr (*[0]byte)(C.MemoryDefaultFreeLibrary), // freeLibrary func ptr unsafe.Pointer(nil), // void *userdata (we're not passing any data to the dll or exe) ) if handle == nil { fmt.Println("MemoryLoadLibrary failed") os.Exit(1) } //output := C.MemoryCallEntryPoint(handle) _ = C.MemoryCallEntryPoint(handle) //fmt.Println(output) C.MemoryFreeLibrary(handle) """<line_sep>
<import_stmt>pytest<import_stmt>progressbar<line_sep>@pytest.mark.parametrize('value,expected' [(<none> ' 0.0 B') (1 ' 1.0 B') (2<power>10-1 '1023.0 B') (2<power>10+0 ' 1.0 KiB') (2<power>20 ' 1.0 MiB') (2<power>30 ' 1.0 GiB') (2<power>40 ' 1.0 TiB') (2<power>50 ' 1.0 PiB') (2<power>60 ' 1.0 EiB') (2<power>70 ' 1.0 ZiB') (2<power>80 ' 1.0 YiB') (2<power>90 '1024.0 YiB') ])<def_stmt>test_data_size value expected<block_start>widget=progressbar.DataSize()<assert_stmt>widget(<none> dict(value=value))<eq>expected<block_end>
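A hedged usage sketch, assuming the progressbar2 package that this test exercises: DataSize is typically combined with other widgets when the progress value is a byte count.

import progressbar

widgets = [progressbar.DataSize(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
with progressbar.ProgressBar(widgets=widgets, max_value=2**20) as bar:
    for done in range(0, 2**20 + 1, 2**18):
        bar.update(done)  # renders e.g. "256.0 KiB |###...   | ..."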
"""Models for the database of ahmia."""<import_stmt>logging<import_from_stmt>datetime timedelta<import_from_stmt>django.conf settings<import_from_stmt>django.db models DatabaseError<import_from_stmt>django.utils timezone<import_from_stmt>. utils<import_from_stmt>.validators validate_onion_url validate_status validate_onion<line_sep>logger=logging.getLogger("ahmia")<class_stmt>HiddenWebsite(models.Model)<block_start>"""Hidden service website."""<line_sep># For instance: http://3g2upl4pq6kufc4m.onion/ onion=models.URLField(validators=[validate_onion_url validate_status] unique=<true>)<def_stmt>__str__ self<block_start><return>str(self.onion)<block_end><block_end><class_stmt>PagePopScoreManager(models.Manager)<block_start>""" Manager for PagePopScore Model """<def_stmt>get_or_None self **kwargs<block_start>""" :param kwargs: same that would be given to get() :return: the object found or None """<try_stmt><block_start><return>self.get(**kwargs)<block_end><except_stmt>self.model.DoesNotExist<block_start><return><none><block_end><block_end><def_stmt>get_score self **kwargs<block_start>""" Returns the score but handles the DoesNotExist case returning None instead. :param kwargs: the lookup attributes for get() :rtype: float """<try_stmt><block_start><return>self.get(**kwargs).score<block_end><except_stmt>self.model.DoesNotExist<block_start><return><none><block_end><block_end><block_end><class_stmt>PagePopScore(models.Model)<block_start>""" Note: This will be called by bulk create thus save(), pre_save(), post_save() will not be called """<line_sep>onion=models.URLField(validators=[validate_onion validate_status] unique=<true>)<line_sep>score=models.FloatField(default=0 verbose_name='PagePop score' help_text='Score as returned by PagePop algorithm')<line_sep>objects=PagePopScoreManager()<def_stmt>__str__ self<block_start><return>"{0}: {1}".format(self.onion self.score)<block_end><block_end><class_stmt>PagePopStats(models.Model)<block_start>"""One entry/row is created by rank_pages command"""<line_sep>day=models.DateField(default=utils.timezone_today unique=<true>)<line_sep>num_links=models.IntegerField(null=<true> verbose_name='Number of Links' help_text='Number of links in general')<line_sep>num_edges=models.IntegerField(null=<true> verbose_name='Number of Edges' help_text='Number of Unique inter-domain Links')<line_sep>num_nodes=models.IntegerField(null=<true> verbose_name='Number of nodes' help_text='Number of onion domains (nodes)')<def_stmt>__str__ self<block_start><return>str(self.day)<block_end><block_end># *** Statistics related models and managers following *** # <class_stmt>MetricQuerySet(models.QuerySet)<block_start>"""Custom queryset to be used to filter SearchQueries per time"""<def_stmt>today self<block_start>"""Used to count the daily number so far"""<line_sep>utc=timezone.now()<line_sep>today_start=utc.replace(hour=0 minute=0 second=0 microsecond=0)<line_sep><return>self.filter(updated__gte=today_start)<block_end><def_stmt>month self<block_start>""" Filter the queryset by looking up `settings.USAGE_STATS_DAYS` (default 30) back """<line_sep>utc=timezone.now()<line_sep>oldest_utc=utc-timedelta(days=settings.USAGE_STATS_DAYS)<line_sep><return>self.filter(updated__gte=oldest_utc)<block_end><block_end><class_stmt>MetricManager(models.Manager)<block_start><def_stmt>get_queryset self<block_start><return>MetricQuerySet(self.model using=self._db)<block_end><def_stmt>today self<block_start><return>self.get_queryset().today()<block_end><def_stmt>month 
self<block_start><return>self.get_queryset().month()<block_end><def_stmt>add_or_increment self **kwargs<block_start>""" Handles Metric table updates: If object does not exist create it, else update the counter (occurrences) of same instances in the table :param kwargs: A Dict containing the attributes that identify the obj :return the object that was created or updated """<try_stmt><block_start>obj,created=self.get_or_create(**kwargs)<if_stmt><not>created<block_start>obj.occurrences<augadd>1<block_end>obj.save()<block_end><except_stmt>DatabaseError<as>e<block_start>logger.exception(e)<line_sep>obj=<none><line_sep># stats shouldn't disrupt website functionality <block_end><return>obj<block_end><block_end><class_stmt>Metric(models.Model)<block_start>"""Abstract base class for all Metric models"""<line_sep>NETWORKS=(('T' 'TOR') ('I' 'I2P') )<line_sep>updated=models.DateTimeField(default=timezone.now)<line_sep>network=models.CharField(max_length=1 default='T' choices=NETWORKS)<line_sep>occurrences=models.IntegerField(default=1)<line_sep>objects=MetricManager()<class_stmt>Meta<block_start>abstract=<true><block_end><block_end><class_stmt>SearchQuery(Metric)<block_start>"""Used for search stastistics"""<line_sep>search_term=models.CharField(max_length=64)<def_stmt>__str__ self<block_start><return>self.search_term[:25]<block_end><class_stmt>Meta<block_start>unique_together=('search_term' 'network')<block_end><block_end><class_stmt>SearchResultsClick(Metric)<block_start>"""Used for clicks statistics"""<line_sep>clicked=models.URLField()<line_sep>onion_domain=models.URLField(validators=[validate_onion_url])<line_sep>search_term=models.CharField(max_length=64)<def_stmt>__str__ self<block_start><return>self.clicked[:50]<block_end><class_stmt>Meta<block_start>unique_together=("clicked" "search_term" "onion_domain")<block_end><block_end># todo Reconsider the current workflow: We recalculate Stats for # the current day when `manage.py update_stats` is ran. Thus it # ends up being redundant to keep *Stats tables in the DB? <class_stmt>StatsQuerySet(models.QuerySet)<block_start>"""Custom queryset to be used to filter Stats per time"""<def_stmt>month self<block_start>""" Actually rather than looking into the current month (e.g April) we filter back `settings.USAGE_STATS_DAYS` (default 30) days """<line_sep># todo can we merge with MetricManager.month - DRY ? utc=timezone.now().date()<line_sep>oldest_utc=utc-timedelta(days=settings.USAGE_STATS_DAYS)<line_sep><return>self.filter(day__gte=oldest_utc)<block_end><block_end><class_stmt>Stats(models.Model)<block_start>""" Abstract base class. Subclasses to be used for storing precalculated statistics, computed by update_stats management command (app: stats) """<line_sep># horizontal axis: 30 last days (common for 4 plots) day=models.DateField(unique=<true> default=utils.timezone_today)<line_sep># Vertical axis: Metrics (4 plots) num_queries=models.IntegerField(default=0)<line_sep>num_unique_queries=models.IntegerField(default=0)<line_sep>num_clicks=models.IntegerField(default=0)<line_sep>num_unique_clicks=models.IntegerField(default=0)<line_sep>objects=StatsQuerySet.as_manager()<class_stmt>Meta<block_start>abstract=<true><line_sep>ordering=['day']<block_end><block_end><class_stmt>TorStats(Stats)<block_start><def_stmt>__str__ self<block_start><return>str("Tor stats: %s"%self.day)<block_end><block_end><class_stmt>I2PStats(Stats)<block_start><def_stmt>__str__ self<block_start><return>str("I2P stats: %s"%self.day)<block_end><block_end>
"""The citybikes component."""<line_sep>
# Copyright (c) 2021 by Apex.AI Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 <import_stmt>os<import_stmt>unittest<import_stmt>launch<import_from_stmt>launch_ros.substitutions ExecutableInPackage<import_stmt>launch_testing<import_stmt>launch_testing.actions<import_from_stmt>launch_testing.asserts assertSequentialStdout<import_stmt>pytest<line_sep># @brief Test goal: "Integrationtest for the complexdata example of iceoryx" # @pre setup ROS2 launch executables for RouDi (debug mode) and the example processes # @post check if all applications return exitcode 0 (success) after test run @pytest.mark.launch_test<def_stmt>generate_test_description <block_start>proc_env=os.environ.copy()<line_sep>colcon_prefix_path=os.environ.get('COLCON_PREFIX_PATH' '')<line_sep>executable_list=['iox-cpp-publisher-vector' 'iox-cpp-subscriber-vector' 'iox-cpp-publisher-complexdata' 'iox-cpp-subscriber-complexdata']<line_sep>process_list=[]<for_stmt>exec executable_list<block_start>tmp_exec=os.path.join(colcon_prefix_path 'example_complexdata/bin/' exec)<line_sep>tmp_process=launch.actions.ExecuteProcess(cmd=[tmp_exec] env=proc_env output='screen')<line_sep>process_list.append(tmp_process)<block_end>print("Process list:" process_list)<line_sep>roudi_executable=os.path.join(colcon_prefix_path 'iceoryx_posh/bin/' 'iox-roudi')<line_sep>roudi_process=launch.actions.ExecuteProcess(cmd=[roudi_executable '-l' 'debug'] env=proc_env output='screen' sigterm_timeout='20')<line_sep><return>launch.LaunchDescription([process_list[0] process_list[1] process_list[2] process_list[3] roudi_process launch_testing.actions.ReadyToTest()]) {'iox-cpp-publisher-vector':process_list[0] 'iox-cpp-subscriber-vector':process_list[1] 'iox-cpp-publisher-complexdata':process_list[2] 'iox-cpp-subscriber-complexdata':process_list[3] 'roudi_process':roudi_process}<block_end># These tests will run concurrently with the dut process. 
After this test is done, # the launch system will shut down RouDi <class_stmt>TestComplexDataExample(unittest.TestCase)<block_start><def_stmt>test_roudi_ready self proc_output<block_start>proc_output.assertWaitFor('RouDi is ready for clients' timeout=45 stream='stdout')<block_end><def_stmt>test_publisher_subscriber_data_exchange self proc_output<block_start>proc_output.assertWaitFor('iox-cpp-subscriber-vector got values: 15, 16, 17, 18, 19' timeout=45 stream='stdout')<block_end><def_stmt>test_publisher_subscriber_untyped_data_exchange self proc_output<block_start>proc_output.assertWaitFor('iox-cpp-subscriber-complexdata got values:\nstringForwardList: hello, world\nintegerList: 15, 22, 11\noptionalList: optional is empty, 42\nfloatStack: 44, 33, 22, 11, 0\nsomeString: hello iceoryx\ndoubleVector: 11, 12, 13, 14, 15\nvariantVector: seven, 8, nine' timeout=45 stream='stdout')<block_end><block_end># These tests run after shutdown and examine the stdout log @launch_testing.post_shutdown_test()<class_stmt>TestComplexdataExampleExitCodes(unittest.TestCase)<block_start><def_stmt>test_exit_code self proc_info<block_start>launch_testing.asserts.assertExitCodes(proc_info)<block_end><block_end>
<import_stmt>pytest<import_from_stmt>endpoints.appr.models_cnr _strip_sha256_header<line_sep>@pytest.mark.parametrize("digest,expected" [("sha256:251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a" "251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a" ) ("251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a" "251b6897608fb18b8a91ac9abac686e2e95245d5a041f2d1e78fe7a815e6480a" ) ] )<def_stmt>test_stip_sha256 digest expected<block_start><assert_stmt>_strip_sha256_header(digest)<eq>expected<block_end>
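The real implementation lives in endpoints.appr.models_cnr; purely for illustration, a minimal re-implementation that satisfies both parametrized cases could look like this (an assumption, not the project's actual code):

def _strip_sha256_header(digest):
    # drop a leading "sha256:" prefix if present, otherwise return the digest unchanged
    prefix = "sha256:"
    return digest[len(prefix):] if digest.startswith(prefix) else digest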
# Python (this imports the Python re module for RegEx) <import_stmt>re<import_from_stmt>genie.metaparser MetaParser<import_from_stmt>genie.metaparser.util.schemaengine Any Or Optional<import_from_stmt>genie.libs.parser.utils.common Common<line_sep># ============================== # Schema for 'show fabricpath isis adjacency' # ============================== <class_stmt>ShowFabricpathIsisAdjacencySchema(MetaParser)<block_start>''' Schema for "show fabricpath isis adjacency" '''<line_sep># These are the key-value pairs to add to the parsed dictionary # schema = { # Any(): # {'adj-hold-time-out': str, # 'adj-intf-name-out': str, # 'adj-sys-name-out': str, # 'adj-state-out':str} # } schema={'domain':{Any():{Optional('interfaces'):{Any():{'system_id':str 'snpa':str 'level':int 'state':str 'hold_time':str }}}}}<block_end># ============================== # Parser for 'show fabricpath isis adjacency' # ============================== # The parser class inherits from the schema class <class_stmt>ShowFabricpathIsisAdjacency(ShowFabricpathIsisAdjacencySchema)<block_start>''' Parser for "show fabricpath isis adjacency"'''<line_sep>cli_command='show fabricpath isis adjacency'<line_sep># Defines a function to run the cli_command <def_stmt>cli self output=<none><block_start><if_stmt>output<is><none><block_start>out=self.device.execute(self.cli_command)<block_end><else_stmt><block_start>out=output<block_end># Initializes the Python dictionary variable parsed_dict={}<line_sep># Defines the regex for the first line of device output, which is: # Sessions for VRF default, total: 3, established: 3 p1=re.compile(r'Fabricpath IS-IS domain: +(?P<domain>(\S+)) +Fabricpath IS-IS adjacency database:$')<line_sep># Defines the regex for the next line of device output, which is: # System ID SNPA Level State Hold Time Interface # Switch-A N/A 1 UP 00:00:28 port-channel1 p2=re.compile(r'(?P<system_id>(\S+)) + (?P<snpa>(\S+)) + (?P<level>(\d+)) +(?P<state>(UP|DOWN)) + (?P<hold_time>(\S+)) + (?P<interface>(\S+))$')<for_stmt>line out.splitlines()<block_start>line=line.strip()<line_sep># IS-IS Process: test VRF: default m=p1.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>domain=group['domain']<line_sep>intf_dict=parsed_dict.setdefault('domain' {}).setdefault(domain {})<block_end>m=p2.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>system_id=group['system_id']<line_sep>snpa=group['snpa']<line_sep>level=int(group['level'])<line_sep>state=group['state']<line_sep>hold_time=group['hold_time']<line_sep>interface=Common.convert_intf_name(group['interface'])<line_sep>level_dict=intf_dict.setdefault('interfaces' {}).setdefault(interface {})<line_sep>level_dict.update({'system_id':system_id})<line_sep>level_dict.update({'snpa':snpa})<line_sep>level_dict.update({'level':level})<line_sep>level_dict.update({'state':state})<line_sep>level_dict.update({'hold_time':hold_time})<block_end><block_end><return>parsed_dict<block_end><block_end>
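To make the column spacing that p2 expects concrete, here is a standalone regex check against a made-up output line shaped like the docstring example (the values are illustrative, not real device output):

import re

p2 = re.compile(r'(?P<system_id>(\S+)) + (?P<snpa>(\S+)) + (?P<level>(\d+)) +'
                r'(?P<state>(UP|DOWN)) + (?P<hold_time>(\S+)) + (?P<interface>(\S+))$')

line = 'Switch-A         N/A        1    UP    00:00:28    port-channel1'
match = p2.match(line)
print(match.groupdict())
# {'system_id': 'Switch-A', 'snpa': 'N/A', 'level': '1', 'state': 'UP',
#  'hold_time': '00:00:28', 'interface': 'port-channel1'}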
# -*- coding: utf-8 -*- # Copyright (c) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """INC Bench database manager."""<import_stmt>logging<import_stmt>os<import_from_stmt>typing Any Optional<import_from_stmt>sqlalchemy MetaData create_engine<import_from_stmt>sqlalchemy.engine Engine<import_from_stmt>sqlalchemy.orm declarative_base<import_from_stmt>neural_compressor.ux.utils.consts WORKDIR_LOCATION<import_from_stmt>neural_compressor.ux.utils.logger log<import_from_stmt>neural_compressor.ux.utils.singleton Singleton<line_sep>naming_convention={"ix":"ix_%(column_0_label)s" "uq":"uq_%(table_name)s_%(column_0_name)s" "ck":"ck_%(table_name)s_%(constraint_name)s" "fk":"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s" "pk":"pk_%(table_name)s" }<line_sep>meta=MetaData(naming_convention=naming_convention)<line_sep>Base:Any=declarative_base(metadata=meta)<class_stmt>DBManager(metaclass=Singleton)<block_start>"""Database manager class."""<def_stmt>__init__ self database_location:Optional[str]=<none> log_level:Optional[int]=<none><block_start>"""Initialize database manager."""<line_sep>self._engine:Optional[Engine]=<none><line_sep>self.database_location:str=os.path.join(WORKDIR_LOCATION "bench.db")<line_sep>self.debug:bool=<false><line_sep>self.dialect:str="sqlite"<if_stmt>database_location<is><not><none><block_start>self.database_location=database_location<block_end>self.database_entrypoint=f"{self.dialect}:///{self.database_location}"<if_stmt>log_level<eq>logging.DEBUG<block_start>self.debug=<true><block_end><block_end><def_stmt>initialize_database self<arrow><none><block_start>"""Initialize database by creating engine and session."""<line_sep>self.create_sqlalchemy_engine()<block_end><def_stmt>create_sqlalchemy_engine self<arrow>Engine<block_start>"""Create SQLAlchemy engine."""<line_sep>log.debug(f"Making engine with database: {self.database_entrypoint}")<line_sep><return>create_engine(self.database_entrypoint echo=self.debug)<block_end>@property<def_stmt>engine self<arrow>Engine<block_start>"""Ensure that SQLAlchemy engine is created."""<line_sep>is_engine_instance=isinstance(self._engine Engine)<if_stmt><not>is_engine_instance<block_start>self._engine=self.create_sqlalchemy_engine()<block_end><return>self._engine<block_end># type: ignore <def_stmt>create_all self<arrow><none><block_start>"""Make a call to database to create all tables."""<line_sep>log.debug("Creating connection")<line_sep>connection=self.engine.connect()<try_stmt><block_start>log.debug("Creating all")<line_sep>Base.metadata.create_all(self.engine)<block_end><finally_stmt><block_start>connection.close()<block_end><block_end><block_end>
<import_stmt>torch<import_stmt>torchelie.utils<as>tu<import_stmt>torch.nn<as>nn<import_stmt>torchelie.nn<as>tnn<import_from_stmt>torchelie.models ClassificationHead<import_from_stmt>typing Optional<import_from_stmt>collections OrderedDict<line_sep>Block=tnn.PreactResBlockBottleneck<class_stmt>UBlock(nn.Module)<block_start>inner:Optional[nn.Module]<line_sep>skip:Optional[nn.Module]<line_sep>encode:nn.Module<line_sep>@tu.experimental<def_stmt>__init__ self ch:int inner:Optional[nn.Module] with_skip:bool=<true><arrow><none><block_start>super(UBlock self).__init__()<line_sep>self.inner=inner<if_stmt>with_skip<and>inner<is><not><none><block_start>self.skip=Block(ch ch)<block_end><else_stmt><block_start>self.skip=<none><block_end>self.encode=tnn.CondSeq(nn.MaxPool2d(3 1 1) nn.UpsamplingBilinear2d(scale_factor=0.5) Block(ch ch))<line_sep>self.decode=tnn.CondSeq(Block(ch ch) nn.UpsamplingBilinear2d(scale_factor=2))<block_end><def_stmt>forward self x:torch.Tensor<arrow>torch.Tensor<block_start>e=self.encode(x)<if_stmt>self.inner<is><not><none><block_start>e2=self.inner(e)<block_end><else_stmt><block_start>e2=e<block_end><if_stmt>self.skip<is><not><none><block_start>e2<augadd>self.skip(e)<block_end><return>self.decode(e2)<block_end><block_end><class_stmt>UBlock1(nn.Module)<block_start>@tu.experimental<def_stmt>__init__ self ch<block_start>super(UBlock1 self).__init__()<line_sep>self.inner=tnn.CondSeq(nn.MaxPool2d(3 1 1) nn.UpsamplingBilinear2d(scale_factor=0.5) Block(ch ch) nn.UpsamplingBilinear2d(scale_factor=2))<block_end><def_stmt>forward self x<block_start><return>self.inner(x)<block_end><block_end><class_stmt>AttentionBlock(nn.Module)<block_start>mask:Optional[tnn.CondSeq]<def_stmt>__init__ self ch:int n_down:int n_trunk:int=2 n_post:int=1 n_pre:int=1 n_att_conv:int=2 with_skips:bool=<true><arrow><none><block_start>super(AttentionBlock self).__init__()<line_sep>self.pre=tnn.CondSeq(*[Block(ch ch)<for>_ range(n_pre)])<line_sep>self.post=tnn.CondSeq(*[Block(ch ch)<for>_ range(n_post)])<line_sep>self.trunk=tnn.CondSeq(*[Block(ch ch)<for>_ range(n_trunk)])<line_sep>soft:nn.Module=UBlock1(ch)<for_stmt>_ range(n_down-1)<block_start>soft=UBlock(ch soft with_skip=with_skips)<block_end><if_stmt>n_down<ge>0<block_start>conv1=[soft]<for_stmt>i range(n_att_conv)<block_start>conv1<augadd>[nn.BatchNorm2d(ch) nn.ReLU(<true>) tu.kaiming(tnn.Conv1x1(ch ch bias=(i<ne>n_att_conv-1)))]<block_end>conv1.append(nn.Sigmoid())<line_sep>self.mask=tnn.CondSeq(*conv1)<block_end><else_stmt><block_start>self.mask=<none><block_end><block_end><def_stmt>forward self x:torch.Tensor<arrow>torch.Tensor<block_start>x=self.pre(x)<line_sep>t=self.trunk(x)<if_stmt>self.mask<is><not><none><block_start>t=t<times>(self.mask(x)+1)<block_end><return>self.post(t)<block_end><block_end><class_stmt>Attention56Bone(tnn.CondSeq)<block_start>""" Attention56 bone Args: in_ch (int): number of channels in the images """<line_sep>@tu.experimental<def_stmt>__init__ self num_classes:int<arrow><none><block_start>super(Attention56Bone self).__init__(OrderedDict([('head' tnn.CondSeq(tu.kaiming(tnn.Conv2d(3 64 7 stride=2)) nn.ReLU(<true>) nn.MaxPool2d(3 2 1))) ('pre1' Block(64 256)) ('attn1' AttentionBlock(256 3)) ('pre2' Block(256 512 stride=2)) ('attn2' AttentionBlock(512 2)) ('pre3' Block(512 1024 stride=2)) ('attn3' AttentionBlock(1024 1)) ('pre4' tnn.CondSeq(Block(1024 2048 stride=2) Block(2048 2048) Block(2048 2048) )) ('classifier' ClassificationHead(2048 num_classes))]))<block_end><block_end>@tu.experimental<def_stmt>attention56 
num_classes<block_start>""" Build an attention56 network Args: num_classes (int): number of classes """<line_sep><return>Attention56Bone(num_classes)<block_end>
''' Code for reading and managing USGS spectral library data. References: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>., 2017, USGS Spectral Library Version 7: U.S. Geological Survey Data Series 1035, 61 p., https://doi.org/10.3133/ds1035. '''<import_from_future_stmt> absolute_import division print_function unicode_literals<import_from_stmt>spectral.utilities.python23 IS_PYTHON3 tobytes frombytes<import_from_stmt>.spectral_database SpectralDatabase<import_stmt>re<import_stmt>logging<import_stmt>sqlite3<import_stmt>array<if_stmt>IS_PYTHON3<block_start><def_stmt>readline fin<block_start><return>fin.readline()<block_end><def_stmt>open_file filename<block_start><return>open(filename encoding='iso-8859-1')<block_end><block_end><else_stmt><block_start><def_stmt>readline fin<block_start><return>fin.readline().decode('iso-8859-1')<block_end><def_stmt>open_file filename<block_start><return>open(filename)<block_end><block_end>table_schemas=['CREATE TABLE Samples (SampleID INTEGER PRIMARY KEY, LibName TEXT, Record INTEGER, '<concat>'Description TEXT, Spectrometer TEXT, Purity TEXT, MeasurementType TEXT, Chapter TEXT, FileName TEXT, '<concat>'AssumedWLSpmeterDataID INTEGER, '<concat>'NumValues INTEGER, MinValue FLOAT, MaxValue FLOAT, ValuesArray BLOB)' 'CREATE TABLE SpectrometerData (SpectrometerDataID INTEGER PRIMARY KEY, LibName TEXT, '<concat>'Record INTEGER, MeasurementType TEXT, Unit TEXT, Name TEXT, Description TEXT, FileName TEXT, '<concat>'NumValues INTEGER, MinValue FLOAT, MaxValue FLOAT, ValuesArray BLOB)']<line_sep>arraytypecode=chr(ord('f'))<def_stmt>array_from_blob blob<block_start>a=array.array(arraytypecode)<line_sep>frombytes(a blob)<line_sep><return>a<block_end><def_stmt>array_to_blob arr<block_start><return>sqlite3.Binary(tobytes(array.array(arraytypecode arr)))<block_end># Actually these are not all spectrometer names, but kind of it. _spectrometer_names={'ASD':['ASD'] 'ASTER':['ASTER'] 'AVIRIS':['AVIRIS' 'aviris'] 'BECK':['BECK'] 'CRISM JOINED MTR3':['CRISM Bandpass(FWHM) JOINED MTR3' 'CRISM Waves JOINED MTR3' 'CRISM Bandpass JOINED MTR3' 'CRISM JOINED MTR3'] 'CRISM GLOBAL':['CRISM Bandpass(FWHM) GLOBAL' 'CRISM Wavelengths GLOBAL' 'CRISM Waves GLOBAL' 'CRISM GLOBAL'] 'Hyperion':['Hyperion'] 'HyMap2':['HyMap2'] 'Landsat8':['Landsat8'] 'M3':['M3'] 'NIC4':['NIC4'] 'Sentinel2':['Sentinel2' 'Sentinel-2'] 'VIMS':['VIMS'] 'WorldView3':['WorldView3']}<class_stmt>SpectrometerData<block_start>''' Holds data for spectrometer, from USGS spectral library. '''<def_stmt>__init__ self libname record measurement_type unit spectrometer_name description file_name values<block_start>self.libname=libname<line_sep>self.record=record<line_sep>self.measurement_type=measurement_type<line_sep>self.unit=unit<line_sep>self.spectrometer_name=spectrometer_name<line_sep>self.description=description<line_sep>self.file_name=file_name<line_sep>self.values=values<block_end><def_stmt>header self<block_start>''' Returns: String representation of basic meta data. '''<line_sep><return>'{0} Record={1}: {2} {3} {4}'.format(self.libname self.record self.measurement self.description)<block_end>@classmethod<def_stmt>read_from_file cls filename<block_start>''' Constructs SpectrometerData from file. Arguments: `filename` (str): Path to file containing data. Returns: A `SpectrometerData` constructed from data parsed from file. 
'''<import_stmt>os<line_sep>logger=logging.getLogger('spectral')<with_stmt>open_file(filename)<as>f<block_start>header_line=readline(f)<if_stmt><not>header_line<block_start><raise>Exception('{0} has empty header line or no lines at all.'.format(filename))<block_end>libname,record,measurement_type,unit,spectrometer_name,description=SpectrometerData._parse_header(header_line.strip())<line_sep>values=[]<for_stmt>line f<block_start><if_stmt><not>line<block_start><break><block_end><try_stmt><block_start>values.append(float(line.strip()))<block_end><except_stmt><block_start>logger.error('In file %s found unparsable line.' filename)<block_end><block_end>file_name=os.path.basename(filename)<line_sep><return>cls(libname record measurement_type unit spectrometer_name description file_name values)<block_end><block_end>@staticmethod<def_stmt>_find_spectrometer_name header_line<block_start><for_stmt>sname,alt_names _spectrometer_names.items()<block_start><for_stmt>alt_name alt_names<block_start><if_stmt>alt_name<in>header_line<block_start><return>sname<block_end><block_end><block_end><raise>Exception('Could not find spectrometer for header {0}'.format(header_line))<block_end>@staticmethod<def_stmt>_assume_measurement_type header_line<block_start>header_line=header_line.lower()<line_sep># The order of checking these things is important. <if_stmt>'wavelength'<in>header_line<or>'waves'<in>header_line<block_start><return>'Wavelengths'<block_end><if_stmt>'bandpass'<in>header_line<or>'fwhm'<in>header_line<or>'bandwidths'<in>header_line<block_start><return>'Bandpass'<block_end><if_stmt>'resolution'<in>header_line<block_start><return>'Resolution'<block_end><if_stmt>'wavenumber'<in>header_line<block_start><return>'Wavenumber'<block_end><if_stmt>'srf'<in>header_line<block_start><return>'SRF'<block_end><raise>Exception('Could not assume measurement type for header line {0}'.format(header_line))<block_end>@staticmethod<def_stmt>_assume_unit header_line measurement_type<block_start><if_stmt>measurement_type<eq>'Wavelengths'<or>measurement_type<eq>'Bandpass'<or>measurement_type<eq>'Resolution'<block_start><if_stmt>re.search(r'\bnm\b' header_line)<is><not><none><block_start><return>'nanometer'<block_end><if_stmt>'nanometer'<in>header_line<block_start><return>'nanometer'<block_end># 'um', 'microns' are usually found in these files, but this is default # anyway. <return>'micrometer'<block_end><elif_stmt>measurement_type<eq>'Wavenumber'<block_start><return>'cm^-1'<block_end><elif_stmt>measurement_type<eq>'SRF'<block_start><return>'none'<block_end><else_stmt><block_start><return>'unknown'<block_end><block_end>@staticmethod<def_stmt>_parse_header header_line# It is difficult to parse this data, # things are separated by spaces, but inside of what should be single datum, # there are spaces, so only human can get it right. <block_start>elements=header_line.split()<line_sep>libname=elements[0]<line_sep># From 'Record=1234:' extract 1234. record=int(elements[1].split('=')[1][:-1])<line_sep># Join everything after record into description. description=' '.join(elements[2:])<line_sep>measurement_type=SpectrometerData._assume_measurement_type(header_line)<line_sep>unit=SpectrometerData._assume_unit(header_line measurement_type)<line_sep>spectrometer_name=SpectrometerData._find_spectrometer_name(header_line)<line_sep><return>libname record measurement_type unit spectrometer_name description<block_end><block_end><class_stmt>SampleData<block_start>''' Holds parsed data for single sample from USGS spectral library. 
'''<def_stmt>__init__ self libname=<none> record=<none> description=<none> spectrometer=<none> purity=<none> measurement_type=<none> chapter=<none> file_name=<none> values=<none><block_start>self.libname=libname<line_sep>self.record=record<line_sep>self.description=description<line_sep>self.spectrometer=spectrometer<line_sep>self.purity=purity<line_sep>self.measurement_type=measurement_type<line_sep>self.chapter=chapter<line_sep>self.file_name=file_name<line_sep>self.values=values<block_end><def_stmt>header self<block_start>''' Returns: String representation of basic meta data. '''<line_sep><return>'{0} Record={1}: {2} {3}{4} {5}'.format(self.libname self.record self.description self.spectrometer self.purity self.measurement_type)<block_end>@staticmethod<def_stmt>_parse_header header_line<block_start>elements=header_line.split()<line_sep>libname=elements[0]<line_sep># From 'Record=1234:' extract 1234. record=int(elements[1].split('=')[1][:-1])<line_sep># Join everything after record into description. description=' '.join(elements[2:])<line_sep># Split 'AVIRIS13aa' into ['', 'AVIRIS13', 'aa', '']. smpurity=re.split('([A-Z0-9]+)([a-z]+)' elements[-2])<line_sep># There is case with capital leters like 'NIC4AA' <if_stmt>len(smpurity)<eq>1<block_start>smpurity=re.split('([A-Z]+[0-9])([A-Z]+)' elements[-2])<line_sep>smpurity[2]=smpurity[2].lower()<block_end>spectrometer=smpurity[1]<line_sep>purity=smpurity[2]<line_sep>measurement_type=elements[-1]<line_sep><return>libname record description spectrometer purity measurement_type<block_end>@classmethod<def_stmt>read_from_file cls filename chapter=<none><block_start>''' Constructs SampleData from file. Arguments: `filename` (str): Path to file containing data. Returns: A `SampleData` constructed from data parsed from file. '''<import_stmt>os<line_sep>logger=logging.getLogger('spectral')<with_stmt>open(filename)<as>f<block_start>header_line=f.readline()<if_stmt><not>header_line<block_start><raise>Exception('{0} has empty header line or no lines at all.'.format(filename))<block_end>libname,record,description,spectrometer,purity,measurement_type=SampleData._parse_header(header_line.strip())<line_sep>values=[]<for_stmt>line f<block_start><if_stmt><not>line<block_start><break><block_end><try_stmt><block_start>values.append(float(line.strip()))<block_end><except_stmt><block_start>logger.error('In file %s found unparsable line.' filename)<block_end><block_end>file_name=os.path.basename(filename)<line_sep><return>cls(libname record description spectrometer purity measurement_type chapter file_name values)<block_end><block_end><block_end><class_stmt>USGSDatabase(SpectralDatabase)<block_start>'''A relational database to manage USGS spectral library data.'''<line_sep>schemas=table_schemas<def_stmt>_assume_wavelength_spectrometer_data_id self sampleData# We can't know this for sure, but these heuristics haven't failed so far. # Prepare paramters. # These parameters are mandatory to match. <block_start>libname=sampleData.libname<line_sep>num_values=len(sampleData.values)<line_sep># Spectrometer might not match in subdirectories where data is convolved # or resampled. In other directories, without spectrometer there is # few possible choices, so spectrometer isolates the one we need. spectrometer=sampleData.spectrometer<line_sep>logger=logging.getLogger('spectral')<line_sep># Start with the most specific. query='''SELECT SpectrometerDataID FROM SpectrometerData WHERE MeasurementType = 'Wavelengths' AND LibName = ? AND NumValues = ? 
AND Name = ?'''<line_sep>result=self.cursor.execute(query (libname num_values spectrometer))<line_sep>rows=result.fetchall()<if_stmt>len(rows)<eq>0<block_start>query='''SELECT SpectrometerDataID FROM SpectrometerData WHERE MeasurementType = 'Wavelengths' AND LibName = ? AND NumValues = ? AND Name LIKE ?'''<line_sep>result=self.cursor.execute(# ASDFR -> ASD, and '%' just to be sure. query (libname num_values spectrometer[:3]+'%'))<line_sep>rows=result.fetchall()<block_end><if_stmt>len(rows)<ge>1<block_start><if_stmt>len(rows)<g>1<block_start>logger.warning('Found multiple spectrometers with measurement_type Wavelengths, '<concat>' LibName %s, NumValues %d and Name %s' libname num_values spectrometer)<block_end><return>rows[0][0]<block_end># Try to be less specific without spectrometer name. query='''SELECT SpectrometerDataID FROM SpectrometerData WHERE MeasurementType = 'Wavelengths' AND LibName = ? AND NumValues = ?'''<line_sep>result=self.cursor.execute(query (libname num_values))<line_sep>rows=result.fetchall()<if_stmt>len(rows)<l>1<block_start><raise>Exception('Wavelengths for spectrometer not found, for LibName = {0} and NumValues = {1}, from file {2}'.format(libname num_values sampleData.file_name))<block_end><if_stmt>len(rows)<g>1<block_start>logger.warning('Found multiple spectrometers with measurement_type Wavelengths, '<concat>' LibName %s and NumValues %d, from file %s' libname num_values sampleData.file_name)<block_end><return>rows[0][0]<block_end><def_stmt>_add_sample_data self spdata<block_start>sql='''INSERT INTO Samples (LibName, Record, Description, Spectrometer, Purity, MeasurementType, Chapter, FileName, AssumedWLSpmeterDataID, NumValues, MinValue, MaxValue, ValuesArray) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''<line_sep>values=array_to_blob(spdata.values)<line_sep>num_values=len(spdata.values)<line_sep>min_value=min(spdata.values)<line_sep>max_value=max(spdata.values)<line_sep>assumedWLSpmeterDataID=self._assume_wavelength_spectrometer_data_id(spdata)<line_sep>self.cursor.execute(sql (spdata.libname spdata.record spdata.description spdata.spectrometer spdata.purity spdata.measurement_type spdata.chapter spdata.file_name assumedWLSpmeterDataID num_values min_value max_value values))<line_sep>rowId=self.cursor.lastrowid<line_sep>self.db.commit()<line_sep><return>rowId<block_end><def_stmt>_add_spectrometer_data self spdata<block_start>sql='''INSERT INTO SpectrometerData (LibName, Record, MeasurementType, Unit, Name, Description, FileName, NumValues, MinValue, MaxValue, ValuesArray) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''<line_sep>values=array_to_blob(spdata.values)<line_sep>num_values=len(spdata.values)<line_sep>min_value=min(spdata.values)<line_sep>max_value=max(spdata.values)<line_sep>self.cursor.execute(sql (spdata.libname spdata.record spdata.measurement_type spdata.unit spdata.spectrometer_name spdata.description spdata.file_name num_values min_value max_value values))<line_sep>rowId=self.cursor.lastrowid<line_sep>self.db.commit()<line_sep><return>rowId<block_end>@classmethod<def_stmt>create cls filename usgs_data_dir=<none><block_start>'''Creates an USGS relational database by parsing USGS data files. Arguments: `filename` (str): Name of the new sqlite database file to create. `usgs_data_dir` (str): Path to the USGS ASCII data directory. This directory should contain subdirectories, which containes chapter directories. E.g. 
if provided `usgs_data_dir` is '/home/user/usgs/ASCIIdata', then relative path to single sample could be 'ASCIIdata_splib07b/ChapterL_Liquids/splib07b_H2O-Ice_GDS136_77K_BECKa_AREF.txt' If this argument is not provided, no data will be imported. Returns: An :class:`~spectral.database.USGSDatabase` object. Example:: >>> USGSDatabase.create("usgs_lib.db", "/home/user/usgs/ASCIIdata") This is a class method (it does not require instantiating an USGSDatabase object) that creates a new database by parsing files in the USGS library ASCIIdata directory. Normally, this should only need to be called once. Subsequently, a corresponding database object can be created by instantiating a new USGSDatabase object with the path the database file as its argument. For example:: >>> from spectral.database.usgs import USGSDatabase >>> db = USGSDatabase("usgs_lib.db") '''<import_stmt>os<if_stmt>os.path.isfile(filename)<block_start><raise>Exception('Error: Specified file already exists.')<block_end>db=cls()<line_sep>db._connect(filename)<for_stmt>schema cls.schemas<block_start>db.cursor.execute(schema)<block_end><if_stmt>usgs_data_dir<block_start>db._import_files(usgs_data_dir)<block_end><return>db<block_end><def_stmt>__init__ self sqlite_filename=<none><block_start>'''Creates a database object to interface an existing database. Arguments: `sqlite_filename` (str): Name of the database file. If this argument is not provided, an interface to a database file will not be established. Returns: An :class:`~spectral.USGSDatabase` connected to the database. '''<import_from_stmt>spectral.io.spyfile find_file_path<if_stmt>sqlite_filename<block_start>self._connect(find_file_path(sqlite_filename))<block_end><else_stmt><block_start>self.db=<none><line_sep>self.cursor=<none><block_end><block_end><def_stmt>_import_files self data_dir<block_start><import_from_stmt>glob glob<import_stmt>numpy<import_stmt>os<line_sep>logger=logging.getLogger('spectral')<if_stmt><not>os.path.isdir(data_dir)<block_start><raise>Exception('Error: Invalid directory name specified.')<block_end>num_sample_files=0<line_sep>num_spectrometer_files=0<line_sep>num_failed_sample_files=0<line_sep>num_failed_spectromter_files=0<for_stmt>sublib os.listdir(data_dir)<block_start>sublib_dir=os.path.join(data_dir sublib)<if_stmt><not>os.path.isdir(sublib_dir)<block_start><continue><block_end># Process instrument data one by one. <for_stmt>f glob(sublib_dir+'/*.txt')<block_start>logger.info('Importing spectrometer file %s' f)<try_stmt><block_start>spdata=SpectrometerData.read_from_file(f)<line_sep>self._add_spectrometer_data(spdata)<line_sep>num_spectrometer_files<augadd>1<block_end><except_stmt>Exception<as>e<block_start>logger.error('Failed to import spectrometer file %s' f)<line_sep>logger.error(e)<line_sep>num_failed_spectromter_files<augadd>1<block_end><block_end># Go into each chapter directory and process individual samples. <for_stmt>chapter os.listdir(sublib_dir)# Skip errorbars directory. Maybe add support for parsing it later. 
<block_start><if_stmt>chapter<eq>'errorbars'<block_start><continue><block_end>chapter_dir=os.path.join(sublib_dir chapter)<if_stmt><not>os.path.isdir(chapter_dir)<block_start><continue><block_end><for_stmt>f glob(chapter_dir+'/*.txt')<block_start>logger.info('Importing sample file %s' f)<try_stmt><block_start>spdata=SampleData.read_from_file(f chapter)<line_sep>self._add_sample_data(spdata)<line_sep>num_sample_files<augadd>1<block_end><except_stmt>Exception<as>e<block_start>logger.error('Failed to import sample file %s' f)<line_sep>logger.error(e)<line_sep>num_failed_sample_files<augadd>1<block_end><block_end><block_end><block_end>logger.info('Imported %d sample files and %d spectrometer files. '<concat>'%d failed sample files, and %d failed spectrometer files.' num_sample_files num_spectrometer_files num_failed_sample_files num_failed_spectromter_files)<block_end><def_stmt>get_spectrum self sampleID<block_start>'''Returns a spectrum from the database. Usage: (x, y) = usgs.get_spectrum(sampleID) Arguments: `sampleID` (int): The **SampleID** value for the desired spectrum from the **Samples** table in the database. Returns: `x` (list): Band centers for the spectrum. This is extraced from assumed spectrometer for given sample. `y` (list): Spectrum data values for each band. Returns a pair of vectors containing the wavelengths and measured values values of a measurment. '''<import_stmt>array<line_sep>query='''SELECT ValuesArray, AssumedWLSpmeterDataID FROM Samples WHERE SampleID = ?'''<line_sep>result=self.cursor.execute(query (sampleID ))<line_sep>rows=result.fetchall()<if_stmt>len(rows)<l>1<block_start><raise>Exception('Measurement record not found.')<block_end>y=array_from_blob(rows[0][0])<line_sep>assumedWLSpmeterDataID=rows[0][1]<line_sep>query='''SELECT ValuesArray FROM SpectrometerData WHERE SpectrometerDataID = ?'''<line_sep>result=self.cursor.execute(query (assumedWLSpmeterDataID ))<line_sep>rows=result.fetchall()<if_stmt>len(rows)<l>1<block_start><raise>Exception('Measurement (wavelengths) record not found.')<block_end>x=array_from_blob(rows[0][0])<line_sep><return>(list(x) list(y))<block_end><def_stmt>create_envi_spectral_library self spectrumIDs bandInfo<block_start>'''Creates an ENVI-formatted spectral library for a list of spectra. Arguments: `spectrumIDs` (list of ints): List of **SampleID** values for of spectra in the "Samples" table of the USGS database. `bandInfo` (:class:`~spectral.BandInfo`): The spectral bands to which the original USGS library spectra will be resampled. Returns: A :class:`~spectral.io.envi.SpectralLibrary` object. The IDs passed to the method should correspond to the SampleID field of the USGS database "Samples" table. All specified spectra will be resampled to the same discretization specified by the bandInfo parameter. See :class:`spectral.BandResampler` for details on the resampling method used. Note that expected units for bands are micrometers. 
'''<import_from_stmt>spectral.algorithms.resampling BandResampler<import_from_stmt>spectral.io.envi SpectralLibrary<import_stmt>numpy<import_stmt>unicodedata<line_sep>spectra=numpy.empty((len(spectrumIDs) len(bandInfo.centers)))<line_sep>cursor=self.cursor.execute(''' SELECT a.ValuesArray, b.ValuesArray, a.Description, b.Unit FROM Samples AS a INNER JOIN SpectrometerData AS b ON a.AssumedWLSpmeterDataID = b.SpectrometerDataID WHERE a.SampleID IN ({0})'''.format(','.join(['?']<times>len(spectrumIDs))) spectrumIDs)<line_sep>names=[]<for_stmt>i,s enumerate(cursor)<block_start>y=array_from_blob(s[0])<line_sep>x=array_from_blob(s[1])<line_sep>name=s[2]<line_sep>unit=s[3]<if_stmt>unit<eq>'nanometers'<block_start>x<augdiv>1000<block_end>resample=BandResampler(x bandInfo.centers <none> bandInfo.bandwidths)<line_sep>spectra[i]=resample(y)<line_sep>names.append(unicodedata.normalize('NFKD' name).encode('ascii' 'ignore'))<block_end>header={}<line_sep>header['wavelength units']='um'<line_sep>header['spectra names']=names<line_sep>header['wavelength']=bandInfo.centers<line_sep>header['fwhm']=bandInfo.bandwidths<line_sep><return>SpectralLibrary(spectra header {})<block_end><block_end>
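Pulling the pieces of the USGSDatabase class together, a typical session looks like the sketch below. The import path and the create() call are taken from the docstrings above; the sqlite file name, data directory and SampleID value are placeholders, not values from the source.

from spectral.database.usgs import USGSDatabase

# One-time import of the USGS ASCIIdata tree into a sqlite file.
db = USGSDatabase.create("usgs_lib.db", "/data/usgs/ASCIIdata")

# Subsequent sessions simply open the existing database.
db = USGSDatabase("usgs_lib.db")

# Fetch one sample's values together with its assumed wavelength axis.
x, y = db.get_spectrum(1)        # SampleID=1 is an arbitrary example value
print(len(x), len(y))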
<import_stmt>responses<import_stmt>unittest<import_from_stmt>tests.support with_resource with_fixture characters<import_from_stmt>twitter_ads.account Account<import_from_stmt>twitter_ads.client Client<import_from_stmt>twitter_ads.targeting AudienceSummary<import_from_stmt>twitter_ads API_VERSION<line_sep>@responses.activate<def_stmt>test_audience_summary <block_start>responses.add(responses.GET with_resource('/'+API_VERSION+'/accounts/2iqph') body=with_fixture('accounts_load') content_type='application/json')<line_sep>responses.add(responses.POST with_resource('/'+API_VERSION+'/accounts/2iqph/audience_summary') body=with_fixture('audience_summary') content_type='application/json')<line_sep>client=Client(characters(40) characters(40) characters(40) characters(40))<line_sep>account=Account.load(client '2iqph')<line_sep>params={"targeting_criteria":[{"targeting_type":"LOCATION" "targeting_value":"96683cc9126741d1"} {"targeting_type":"BROAD_KEYWORD" "targeting_value":"cats"} {"targeting_type":"SIMILAR_TO_FOLLOWERS_OF_USER" "targeting_value":"14230524"} {"targeting_type":"SIMILAR_TO_FOLLOWERS_OF_USER" "targeting_value":"90420314"}]}<line_sep>audience_summary=AudienceSummary.load(account=account params=params)<line_sep>print(audience_summary)<assert_stmt>audience_summary<is><not><none><assert_stmt>audience_summary.audience_size<is><not><none><assert_stmt>audience_summary.audience_size['min']<eq>41133600<assert_stmt>audience_summary.audience_size['max']<eq>50274400<block_end>
""" Jacobian of a general hyperelliptic curve """<line_sep># **************************************************************************** # Copyright (C) 2006 <NAME> <<EMAIL>> # Distributed under the terms of the GNU General Public License (GPL) # http://www.gnu.org/licenses/ # **************************************************************************** <import_from_stmt>sage.rings.all Integer QQ<import_from_stmt>sage.misc.lazy_attribute lazy_attribute<import_from_stmt>sage.schemes.jacobians.abstract_jacobian Jacobian_generic<import_from_stmt>. jacobian_homset<import_from_stmt>. jacobian_morphism<import_from_stmt>sage.misc.lazy_import lazy_import<import_from_stmt>.jacobian_endomorphism_utils get_is_geom_field is_geom_trivial_when_field<line_sep>lazy_import('sage.interfaces.genus2reduction' ['genus2reduction' 'Genus2reduction'])<class_stmt>HyperellipticJacobian_generic(Jacobian_generic)<block_start>""" EXAMPLES:: sage: FF = FiniteField(2003) sage: R.<x> = PolynomialRing(FF) sage: f = x**5 + 1184*x**3 + 1846*x**2 + 956*x + 560 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: a = x**2 + 376*x + 245; b = 1015*x + 1368 sage: X = J(FF) sage: D = X([a,b]) sage: D (x^2 + 376*x + 245, y + 988*x + 635) sage: J(0) (1) sage: D == J([a,b]) True sage: D == D + J(0) True An more extended example, demonstrating arithmetic in J(QQ) and J(K) for a number field K/QQ. :: sage: P.<x> = PolynomialRing(QQ) sage: f = x^5 - x + 1; h = x sage: C = HyperellipticCurve(f,h,'u,v') sage: C Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1 sage: PP = C.ambient_space() sage: PP Projective Space of dimension 2 over Rational Field sage: C.defining_polynomial() -x0^5 + x0*x1*x2^3 + x1^2*x2^3 + x0*x2^4 - x2^5 sage: C(QQ) Set of rational points of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1 sage: K.<t> = NumberField(x^2-2) sage: C(K) Set of rational points of Hyperelliptic Curve over Number Field in t with defining polynomial x^2 - 2 defined by v^2 + u*v = u^5 - u + 1 sage: P = C(QQ)(0,1,1); P (0 : 1 : 1) sage: P == C(0,1,1) True sage: C(0,1,1).parent() Set of rational points of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1 sage: P1 = C(K)(P) sage: P2 = C(K)([2,4*t-1,1]) sage: P3 = C(K)([-1/2,1/8*(7*t+2),1]) sage: P1, P2, P3 ((0 : 1 : 1), (2 : 4*t - 1 : 1), (-1/2 : 7/8*t + 1/4 : 1)) sage: J = C.jacobian() sage: J Jacobian of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1 sage: Q = J(QQ)(P); Q (u, v - 1) sage: for i in range(6): Q*i (1) (u, v - 1) (u^2, v + u - 1) (u^2, v + 1) (u, v + 1) (1) sage: Q1 = J(K)(P1); print("%s -> %s"%( P1, Q1 )) (0 : 1 : 1) -> (u, v - 1) sage: Q2 = J(K)(P2); print("%s -> %s"%( P2, Q2 )) (2 : 4*t - 1 : 1) -> (u - 2, v - 4*t + 1) sage: Q3 = J(K)(P3); print("%s -> %s"%( P3, Q3 )) (-1/2 : 7/8*t + 1/4 : 1) -> (u + 1/2, v - 7/8*t - 1/4) sage: R.<x> = PolynomialRing(K) sage: Q4 = J(K)([x^2-t,R(1)]) sage: for i in range(4): Q4*i (1) (u^2 - t, v - 1) (u^2 + (-3/4*t - 9/16)*u + 1/2*t + 1/4, v + (-1/32*t - 57/64)*u + 1/2*t + 9/16) (u^2 + (1352416/247009*t - 1636930/247009)*u - 1156544/247009*t + 1900544/247009, v + (-2326345442/122763473*t + 3233153137/122763473)*u + 2439343104/122763473*t - 3350862929/122763473) sage: R2 = Q2*5; R2 (u^2 - 3789465233/116983808*u - 267915823/58491904, v + (-233827256513849/1789384327168*t + 1/2)*u - 15782925357447/894692163584*t) sage: R3 = Q3*5; R3 (u^2 + 5663300808399913890623/14426454798950909645952*u - 26531814176395676231273/28852909597901819291904, 
v + (253155440321645614070860868199103/2450498420175733688903836378159104*t + 1/2)*u + 2427708505064902611513563431764311/4900996840351467377807672756318208*t) sage: R4 = Q4*5; R4 (u^2 - 3789465233/116983808*u - 267915823/58491904, v + (233827256513849/1789384327168*t + 1/2)*u + 15782925357447/894692163584*t) Thus we find the following identity:: sage: 5*Q2 + 5*Q4 (1) Moreover the following relation holds in the 5-torsion subgroup:: sage: Q2 + Q4 == 2*Q1 True TESTS:: sage: k.<a> = GF(9); R.<x> = k[] sage: J1 = HyperellipticCurve(x^3 + x - 1, x+a).jacobian() sage: FF = FiniteField(2003) sage: R.<x> = PolynomialRing(FF) sage: f = x**5 + 1184*x**3 + 1846*x**2 + 956*x + 560 sage: J2 = HyperellipticCurve(f).jacobian() sage: J1 == J1 True sage: J1 == J2 False """<def_stmt>dimension self<block_start>""" Return the dimension of this Jacobian. OUTPUT: Integer EXAMPLES:: sage: k.<a> = GF(9); R.<x> = k[] sage: HyperellipticCurve(x^3 + x - 1, x+a).jacobian().dimension() 1 sage: g = HyperellipticCurve(x^6 + x - 1, x+a).jacobian().dimension(); g 2 sage: type(g) <... 'sage.rings.integer.Integer'> """<line_sep><return>Integer(self.curve().genus())<block_end><def_stmt>point self mumford check=<true><block_start><try_stmt><block_start><return>self(self.base_ring())(mumford)<block_end><except_stmt>AttributeError<block_start><raise>ValueError("Arguments must determine a valid Mumford divisor.")<block_end><block_end><def_stmt>_point_homset self *args **kwds<block_start><return>jacobian_homset.JacobianHomset_divisor_classes(*args **kwds)<block_end><def_stmt>_point self *args **kwds<block_start><return>jacobian_morphism.JacobianMorphism_divisor_class_field(*args **kwds)<block_end>#################################################################### # Some properties of geometric Endomorphism ring and algebra #################################################################### @lazy_attribute<def_stmt>_have_established_geometrically_trivial self<block_start>r""" Initialize the flag which determines whether or not we have already established if the geometric endomorphism ring is trivial. This is related to the warning at the top of the `jacobian_endomorphism_utils.py` module. INPUT: - ``self`` -- The Jacobian. OUTPUT: The boolean ``False``; this will be updated by other methods. EXAMPLES: This is LMFDB curve 262144.d.524288.2:: sage: R.<x> = QQ[] sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J._have_established_geometrically_trivial False """<line_sep><return><false><block_end>@lazy_attribute<def_stmt>_have_established_geometrically_field self<block_start>r""" Initialize the flag which determines whether or not we have already established if the geometric endomorphism ring is trivial. This is related to the warning at the top of the `jacobian_endomorphism_utils.py` module. INPUT: - ``self`` -- The Jacobian. OUTPUT: The boolean ``False``; this will be updated by other methods. EXAMPLES: This is LMFDB curve 262144.d.524288.2:: sage: R.<x> = QQ[] sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J._have_established_geometrically_field False """<line_sep><return><false><block_end><def_stmt>geometric_endomorphism_algebra_is_field self B=200 proof=<false><block_start>r""" Return whether the geometric endomorphism algebra is a field. This implies that the Jacobian of the curve is geometrically simple. 
It is based on Algorithm 4.10 from from [Lom2019]_ INPUT: - ``B`` -- (default: 200) the bound which appears in the statement of the algorithm from [Lom2019]_ - ``proof`` -- (default: False) whether or not to insist on a provably correct answer. This is related to the warning in the docstring of this module: if this function returns ``False``, then strictly speaking this has not been proven to be ``False`` until one has exhibited a non-trivial endomorphism, which these methods are not designed to carry out. If one is convinced that this method should return ``True``, but it is returning ``False``, then this can be exhibited by increasing `B`. OUTPUT: Boolean indicating whether or not the geometric endomorphism algebra is a field. EXAMPLES: This is LMFDB curve 262144.d.524288.2 which has QM. Although its Jacobian is geometrically simple, the geometric endomorphism algebra is not a field:: sage: R.<x> = QQ[] sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_algebra_is_field() False This is LMFDB curve 50000.a.200000.1:: sage: f = 8*x^5 + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_algebra_is_field() True """<if_stmt>self._have_established_geometrically_field<block_start><return><true><block_end>C=self.curve()<if_stmt>C.genus()<ne>2<block_start><raise>NotImplementedError("Current implementation requires the curve to be of genus 2")<block_end><if_stmt>C.base_ring()<ne>QQ<block_start><raise>NotImplementedError("Current implementation requires the curve to be defined over the rationals")<block_end>f,h=C.hyperelliptic_polynomials()<if_stmt>h<ne>0<block_start><raise>NotImplementedError("Current implementation requires the curve to be in the form y^2 = f(x)")<block_end>red_data=genus2reduction(0 f)<line_sep>cond_C=red_data.conductor# WARNING: this is only the prime_to_2 conductor. bad_primes=cond_C.prime_divisors()<line_sep>self._bad_primes=bad_primes<line_sep>is_abs_simp,is_def_geom_trivial=get_is_geom_field(f C bad_primes B)<if_stmt>is_def_geom_trivial<block_start>self._have_established_geometrically_trivial=<true><block_end><if_stmt>is_abs_simp<block_start>self._have_established_geometrically_field=<true><line_sep><return><true><block_end><if_stmt>proof<block_start><raise>NotImplementedError("Rigorous computation of lower bounds of endomorphism algebras has not yet been implemented.")<block_end><return><false><block_end><def_stmt>geometric_endomorphism_ring_is_ZZ self B=200 proof=<false><block_start>r""" Return whether the geometric endomorphism ring of ``self`` is the integer ring `\ZZ`. INPUT: - ``B`` -- (default: 200) the bound which appears in the statement of the algorithm from [Lom2019]_ - ``proof`` -- (default: False) whether or not to insist on a provably correct answer. This is related to the warning in the module docstring of `jacobian_endomorphisms.py`: if this function returns ``False``, then strictly speaking this has not been proven to be ``False`` until one has exhibited a non-trivial endomorphism, which the methods in that module are not designed to carry out. If one is convinced that this method should return ``True``, but it is returning ``False``, then this can be exhibited by increasing `B`. OUTPUT: Boolean indicating whether or not the geometric endomorphism ring is isomorphic to the integer ring. 
EXAMPLES: This is LMFDB curve 603.a.603.2:: sage: R.<x> = QQ[] sage: f = 4*x^5 + x^4 - 4*x^3 + 2*x^2 + 4*x + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() True This is LMFDB curve 1152.a.147456.1 whose geometric endomorphism ring is isomorphic to the group of 2x2 matrices over `\QQ`:: sage: f = x^6 - 2*x^4 + 2*x^2 - 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() False This is LMFDB curve 20736.k.373248.1 whose geometric endomorphism ring is isomorphic to the group of 2x2 matrices over a CM field:: sage: f = x^6 + 8 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() False This is LMFDB curve 708.a.181248.1:: sage: R.<x> = QQ[] sage: f = -3*x^6 - 16*x^5 + 36*x^4 + 194*x^3 - 164*x^2 - 392*x - 143 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() True This is LMFDB curve 10609.a.10609.1 whose geometric endomorphism ring is an order in a real quadratic field:: sage: f = x^6 + 2*x^4 + 2*x^3 + 5*x^2 + 6*x + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() False This is LMFDB curve 160000.c.800000.1 whose geometric endomorphism ring is an order in a CM field:: sage: f = x^5 - 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() False This is LMFDB curve 262144.d.524288.2 whose geometric endomorphism ring is an order in a quaternion algebra:: sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1 sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() False This is LMFDB curve 578.a.2312.1 whose geometric endomorphism ring is `\QQ \times \QQ`:: sage: f = 4*x^5 - 7*x^4 + 10*x^3 - 7*x^2 + 4*x sage: C = HyperellipticCurve(f) sage: J = C.jacobian() sage: J.geometric_endomorphism_ring_is_ZZ() False """<if_stmt>self._have_established_geometrically_trivial<block_start><return><true><block_end>is_abs_simple=self.geometric_endomorphism_algebra_is_field(B=B proof=proof)<if_stmt>self._have_established_geometrically_trivial<block_start><return><true><block_end><if_stmt>is_abs_simple<and>is_geom_trivial_when_field(self.curve() self._bad_primes)<block_start><return><true><block_end><if_stmt>proof<block_start><raise>NotImplementedError("Rigorous computation of lower bounds of endomorphism rings has not yet been implemented.")<block_end><return><false><block_end><block_end>
<import_stmt>pytest<line_sep>pytestmark=pytest.mark.network<line_sep>@pytest.fixture(scope='module')<def_stmt>firewall <block_start><import_from_stmt>fabtools.require.shorewall firewall<import_stmt>fabtools.shorewall<line_sep>firewall(rules=[fabtools.shorewall.Ping() fabtools.shorewall.SSH() fabtools.shorewall.HTTP() fabtools.shorewall.HTTPS() fabtools.shorewall.SMTP() fabtools.shorewall.rule(port=1234 source=fabtools.shorewall.hosts(['example.com']) ) ])<block_end><def_stmt>test_require_firewall_started firewall<block_start><import_from_stmt>fabtools.require.shorewall started<import_from_stmt>fabtools.shorewall is_started<line_sep>started()<assert_stmt>is_started()<block_end><def_stmt>test_require_firewall_stopped firewall<block_start><import_from_stmt>fabtools.require.shorewall stopped<import_from_stmt>fabtools.shorewall is_stopped<line_sep>stopped()<assert_stmt>is_stopped()<block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import unicode_literals<import_stmt>logging<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>django forms<import_from_stmt>ckeditor.widgets CKEditorWidget<import_from_stmt>system.models Configuration<import_from_stmt>.. models<line_sep>log=logging.getLogger(__name__)<class_stmt>PlanForm(forms.ModelForm)<block_start>description=forms.CharField(widget=CKEditorWidget() required=<false>)<line_sep>replication_topology=forms.ModelChoiceField(queryset=models.ReplicationTopology.objects.all())<class_stmt>Meta<block_start>model=models.Plan<block_end><def_stmt>clean_has_persistence self<block_start>engine=self.cleaned_data['engine']<if_stmt><not>engine.engine_type.is_in_memory<block_start><return><true><block_end><return>self.cleaned_data['has_persistence']<block_end><def_stmt>clean self<block_start>cleaned_data=super(PlanForm self).clean()<line_sep>engine=cleaned_data.get("engine")<if_stmt><not>engine<block_start>msg=_("Please select a Engine Type")<line_sep>log.warning(u"%s"%msg)<line_sep><raise>forms.ValidationError(msg)<block_end><return>cleaned_data<block_end><block_end><class_stmt>PlanAttrInlineFormset(forms.models.BaseInlineFormSet)<block_start><def_stmt>clean self<block_start><if_stmt>self.instance.is_pre_provisioned<block_start><return><block_end><if_stmt><not>self.instance.is_ha<block_start><return><block_end><if_stmt><not>self.is_valid()<block_start><return><block_end>bundles=self.cleaned_data[0].get('bundle_group')<if_stmt><not>bundles<block_start><raise>forms.ValidationError("Please select the bundle's")<block_end><block_end><block_end>
#! /usr/bin/env python3 # Boolector: Satisfiablity Modulo Theories (SMT) solver. # # Copyright (C) 2007-2021 by the authors listed in the AUTHORS file. # # This file is part of Boolector. # See COPYING for more information on using this software. # <import_from_stmt>argparse ArgumentParser<def_stmt>sexpr l<block_start>l=[str(i)<for>i l]<line_sep><return>"({})".format(" ".join(l))<block_end><def_stmt>cmd tag string=""<block_start><if_stmt>string<eq>""<block_start>print(sexpr([tag]))<block_end><else_stmt><block_start>print(sexpr([tag string]))<block_end><block_end><def_stmt>arsort index_bw elem_bw<block_start><return>sexpr(["Array" bvsort(index_bw) bvsort(elem_bw)])<block_end><def_stmt>bvsort bw<block_start><return>sexpr(["_" "BitVec" bw])<block_end><def_stmt>var sym sort<block_start>print("(declare-fun {} () {})".format(sym sort))<block_end><def_stmt>bvconst val bw<block_start><return>"(_ bv{} {})".format(int(val) bw)<block_end><def_stmt>fun sym params sort term<block_start>s_params=" ".join(["({} {})".format(p s)<for>[p s] params])<line_sep>print("(define-fun {} ({}) {} {})".format(sym s_params sort term))<block_end><def_stmt>funapp sym terms<block_start>l=[sym]<line_sep>l.extend(terms)<line_sep><return>sexpr(l)<block_end><if_stmt>__name__<eq>"__main__"<block_start>aparser=ArgumentParser()<line_sep>aparser.add_argument("index_bw" type=int help="index bit width")<line_sep>args=aparser.parse_args()<line_sep>args.elem_bw=args.index_bw<line_sep>max_idx=2<power>args.index_bw-1<line_sep>cmd("set-logic" "QF_AUFBV")<line_sep>var("k" bvsort(args.index_bw))<line_sep>var("a" arsort(args.index_bw args.elem_bw))<for_stmt>i range(0 max_idx+1)<block_start>var("j{}".format(i) bvsort(args.index_bw))<block_end><for_stmt>i range(0 max_idx+1)<block_start>fun("w{}".format(i) [("p{}".format(i) bvsort(args.index_bw))] bvsort(args.elem_bw) "(ite (= p{} j{}) j{} ({} p{}))".format(i i i "select a"<if><not>i<else>"w{}".format(i-1) i))<block_end><for_stmt>i range(0 max_idx+1)<block_start>fun("rw{}".format(i) [("p{}".format(i) bvsort(args.index_bw))] bvsort(args.elem_bw) "(ite (= p{} (bvsub (_ bv{} {}) j{})) p{} ({} p{}))".format(i max_idx args.index_bw i i "select a"<if><not>i<else>"rw{}".format(i-1) i))<block_end>print("(assert (distinct" end='')<for_stmt>i range(0 max_idx+1)<block_start>print(" j{}".format(i) end='')<block_end>print("))")<line_sep>cmd("assert" "(not (= (w{} k) (rw{} k)))".format(max_idx max_idx))<line_sep>cmd("check-sat")<line_sep>cmd("exit")<block_end>
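Because the script above is driven by a single positional argument, trying it out is a one-liner. The file name below is a placeholder (the source does not show it), and the printed SMT-LIB2 prefix is paraphrased from the cmd()/var() calls in the script.

# $ python3 gen_extensionality_bench.py 2
#
# With index_bw = 2, max_idx = 2**2 - 1 = 3, so the script declares j0..j3,
# builds the write chains w0..w3 and rw0..rw3, and emits output starting like:
#
#   (set-logic QF_AUFBV)
#   (declare-fun k () (_ BitVec 2))
#   (declare-fun a () (Array (_ BitVec 2) (_ BitVec 2)))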
# -*- coding: utf-8 -*- <import_from_stmt>verta._protos.public.registry StageService_pb2<import_from_stmt>. _stage_change<class_stmt>Production(_stage_change._StageChange)<block_start>"""The model version is in production. Parameters ---------- comment : str, optional Comment associated with this stage change. Attributes ---------- comment : str or None Comment associated with this stage change. Examples -------- .. code-block:: python from verta.registry.stage_change import Production model_ver.change_stage(Production("Rolling out to prod.")) model_ver.stage # "production" """<line_sep>_STAGE=StageService_pb2.StageEnum.PRODUCTION<block_end>
<import_stmt>json<import_stmt>pytest<import_stmt>re<import_from_stmt>model_mommy mommy<import_from_stmt>rest_framework status<import_from_stmt>usaspending_api.download.lookups JOB_STATUS<import_from_stmt>usaspending_api.etl.award_helpers update_awards<import_from_stmt>usaspending_api.references.models DisasterEmergencyFundCode<import_from_stmt>usaspending_api.search.tests.data.utilities setup_elasticsearch_test<def_stmt>_post client def_codes=<none> query=<none> award_type_codes=<none> file_format=<none><block_start>request_body={}<line_sep>filters={}<if_stmt>def_codes<block_start>filters["def_codes"]=def_codes<block_end><if_stmt>query<block_start>filters["query"]=query<block_end><if_stmt>award_type_codes<block_start>filters["award_type_codes"]=award_type_codes<block_end>request_body["filters"]=filters<if_stmt>file_format<block_start>request_body["file_format"]=file_format<block_end>resp=client.post("/api/v2/download/disaster/" content_type="application/json" data=json.dumps(request_body))<line_sep><return>resp<block_end>@pytest.fixture<def_stmt>awards_and_transactions transactional_db# Populate job status lookup table <block_start><for_stmt>js JOB_STATUS<block_start>mommy.make("download.JobStatus" job_status_id=js.id name=js.name description=js.desc)<block_end># Awards award1=mommy.make("awards.Award" type="07" total_loan_value=3 generated_unique_award_id="ASST_NEW_1")<line_sep>award2=mommy.make("awards.Award" type="07" total_loan_value=30 generated_unique_award_id="ASST_NEW_2")<line_sep>award3=mommy.make("awards.Award" type="08" total_loan_value=300 generated_unique_award_id="ASST_NEW_3")<line_sep>award4=mommy.make("awards.Award" type="B" total_loan_value=0 generated_unique_award_id="CONT_NEW_1")<line_sep>award5=mommy.make("awards.Award" type="A" total_loan_value=0 generated_unique_award_id="CONT_NEW_2")<line_sep>award6=mommy.make("awards.Award" type="C" total_loan_value=0 generated_unique_award_id="CONT_NEW_3")<line_sep>award7=mommy.make("awards.Award" type="D" total_loan_value=0 generated_unique_award_id="CONT_NEW_4")<line_sep># Disaster Emergency Fund Code defc1=mommy.make("references.DisasterEmergencyFundCode" code="L" public_law="PUBLIC LAW FOR CODE L" title="TITLE FOR CODE L" group_name="covid_19" )<line_sep>defc2=mommy.make("references.DisasterEmergencyFundCode" code="M" public_law="PUBLIC LAW FOR CODE M" title="TITLE FOR CODE M" group_name="covid_19" )<line_sep>mommy.make("references.DisasterEmergencyFundCode" code="N" public_law="PUBLIC LAW FOR CODE N" title="TITLE FOR CODE N" group_name="covid_19" )<line_sep># Submission Attributes sub1=mommy.make("submissions.SubmissionAttributes" reporting_fiscal_year=2022 reporting_fiscal_period=8 quarter_format_flag=<false> reporting_period_start="2022-05-01" )<line_sep>sub2=mommy.make("submissions.SubmissionAttributes" reporting_fiscal_year=2022 reporting_fiscal_period=8 quarter_format_flag=<false> reporting_period_start="2022-05-01" )<line_sep>sub3=mommy.make("submissions.SubmissionAttributes" reporting_fiscal_year=2022 reporting_fiscal_period=8 quarter_format_flag=<false> reporting_period_start="2022-05-01" )<line_sep># Financial Accounts by Awards mommy.make("awards.FinancialAccountsByAwards" pk=1 award=award1 submission=sub1 disaster_emergency_fund=defc1 gross_outlay_amount_by_award_cpe=1 transaction_obligated_amount=2 )<line_sep>mommy.make("awards.FinancialAccountsByAwards" pk=2 award=award2 submission=sub1 disaster_emergency_fund=defc1 gross_outlay_amount_by_award_cpe=10 transaction_obligated_amount=20 
)<line_sep>mommy.make("awards.FinancialAccountsByAwards" pk=3 award=award3 submission=sub2 disaster_emergency_fund=defc1 gross_outlay_amount_by_award_cpe=100 transaction_obligated_amount=200 )<line_sep>mommy.make("awards.FinancialAccountsByAwards" pk=4 award=award4 submission=sub2 disaster_emergency_fund=defc1 gross_outlay_amount_by_award_cpe=1000 transaction_obligated_amount=2000 )<line_sep>mommy.make("awards.FinancialAccountsByAwards" pk=5 award=award5 submission=sub3 disaster_emergency_fund=defc2 gross_outlay_amount_by_award_cpe=10000 transaction_obligated_amount=20000 )<line_sep>mommy.make("awards.FinancialAccountsByAwards" pk=6 award=award6 submission=sub3 disaster_emergency_fund=defc2 gross_outlay_amount_by_award_cpe=100000 transaction_obligated_amount=200000 )<line_sep>mommy.make("awards.FinancialAccountsByAwards" pk=7 award=award7 submission=sub3 disaster_emergency_fund=defc2 gross_outlay_amount_by_award_cpe=1000000 transaction_obligated_amount=2000000 )<line_sep># DABS Submission Window Schedule mommy.make("submissions.DABSSubmissionWindowSchedule" id="2022081" is_quarter=<false> period_start_date="2022-05-01" period_end_date="2022-05-30" submission_fiscal_year=2022 submission_fiscal_quarter=3 submission_fiscal_month=8 submission_reveal_date="2020-5-15" )<line_sep>mommy.make("submissions.DABSSubmissionWindowSchedule" id="2022080" is_quarter=<true> period_start_date="2022-05-01" period_end_date="2022-05-30" submission_fiscal_year=2022 submission_fiscal_quarter=3 submission_fiscal_month=8 submission_reveal_date="2020-5-15" )<line_sep># Transaction Normalized mommy.make("awards.TransactionNormalized" id=10 award=award1 federal_action_obligation=5 action_date="2022-01-01" is_fpds=<false> unique_award_key="ASST_NEW_1" )<line_sep>mommy.make("awards.TransactionNormalized" id=20 award=award2 federal_action_obligation=50 action_date="2022-01-02" is_fpds=<false> unique_award_key="ASST_NEW_2" )<line_sep>mommy.make("awards.TransactionNormalized" id=30 award=award3 federal_action_obligation=500 action_date="2022-01-03" is_fpds=<false> unique_award_key="ASST_NEW_3" )<line_sep>mommy.make("awards.TransactionNormalized" id=40 award=award4 federal_action_obligation=5000 action_date="2022-01-04" is_fpds=<true> unique_award_key="CONT_NEW_1" )<line_sep>mommy.make("awards.TransactionNormalized" id=50 award=award5 federal_action_obligation=50000 action_date="2022-01-05" is_fpds=<true> unique_award_key="CONT_NEW_2" )<line_sep>mommy.make("awards.TransactionNormalized" id=60 award=award6 federal_action_obligation=500000 action_date="2022-01-06" is_fpds=<true> unique_award_key="CONT_NEW_3" )<line_sep>mommy.make("awards.TransactionNormalized" id=70 award=award7 federal_action_obligation=5000000 action_date="2022-01-07" is_fpds=<true> unique_award_key="CONT_NEW_4" )<line_sep># Transaction FABS mommy.make("awards.TransactionFABS" transaction_id=10 cfda_number="10.100" legal_entity_country_code="USA" legal_entity_state_code=<none> legal_entity_county_code=<none> legal_entity_county_name=<none> legal_entity_congressional=<none> awardee_or_recipient_legal="RECIPIENT 1" awardee_or_recipient_uniqu=<none> )<line_sep>mommy.make("awards.TransactionFABS" transaction_id=20 cfda_number="20.200" legal_entity_country_code="USA" legal_entity_state_code="SC" legal_entity_county_code="001" legal_entity_county_name="CHARLESTON" legal_entity_congressional="90" awardee_or_recipient_legal="RECIPIENT 2" awardee_or_recipient_uniqu="456789123" )<line_sep>mommy.make("awards.TransactionFABS" transaction_id=30 cfda_number="20.200" 
legal_entity_country_code="USA" legal_entity_state_code="SC" legal_entity_county_code="001" legal_entity_county_name="CHARLESTON" legal_entity_congressional="50" awardee_or_recipient_legal="RECIPIENT 3" awardee_or_recipient_uniqu="987654321" )<line_sep># Transaction FPDS mommy.make("awards.TransactionFPDS" transaction_id=40 legal_entity_country_code="USA" legal_entity_state_code="WA" legal_entity_county_code="005" legal_entity_county_name="TEST NAME" legal_entity_congressional="50" awardee_or_recipient_legal="MULTIPLE RECIPIENTS" awardee_or_recipient_uniqu="096354360" )<line_sep>mommy.make("awards.TransactionFPDS" transaction_id=50 legal_entity_country_code="USA" legal_entity_state_code="WA" legal_entity_county_code="005" legal_entity_county_name="TEST NAME" legal_entity_congressional="50" awardee_or_recipient_legal=<none> awardee_or_recipient_uniqu="987654321" )<line_sep>mommy.make("awards.TransactionFPDS" transaction_id=60 legal_entity_country_code="USA" legal_entity_state_code="SC" legal_entity_county_code="005" legal_entity_county_name="TEST NAME" legal_entity_congressional="50" awardee_or_recipient_legal=<none> awardee_or_recipient_uniqu="987654321" )<line_sep>mommy.make("awards.TransactionFPDS" transaction_id=70 legal_entity_country_code="USA" legal_entity_state_code="SC" legal_entity_county_code="01" legal_entity_county_name="CHARLESTON" legal_entity_congressional="10" awardee_or_recipient_legal="MULTIPLE RECIPIENTS" awardee_or_recipient_uniqu=<none> )<line_sep>def_codes=list(DisasterEmergencyFundCode.objects.filter(group_name="covid_19").order_by("code").values_list("code" flat=<true>))<line_sep>mommy.make("download.DownloadJob" job_status_id=1 file_name="COVID-19_Profile_2021-09-20_H20M11S49647843.zip" error_message=<none> json_request=json.dumps({"filters":{"def_codes":def_codes}}) )<line_sep># Set latest_award for each award update_awards()<block_end><def_stmt>test_csv_download_success client monkeypatch awards_and_transactions elasticsearch_award_index<block_start>setup_elasticsearch_test(monkeypatch elasticsearch_award_index)<line_sep>resp=_post(client def_codes=["L"])<line_sep>resp_json=resp.json()<assert_stmt>resp.status_code<eq>status.HTTP_200_OK<assert_stmt>re.match(r".*COVID-19_Profile_.*\.zip" resp_json["file_url"])<assert_stmt>resp_json["download_request"]["file_format"]<eq>"csv"<line_sep># "def_codes" intentionally out of order to test that the order doesn't matter resp=_post(client def_codes=["M" "N" "L"])<line_sep>resp_json=resp.json()<assert_stmt>resp.status_code<eq>status.HTTP_200_OK<assert_stmt>re.match(r".*COVID-19_Profile_2021-09-20_H20M11S49647843.zip" resp_json["file_url"])<line_sep>resp=_post(client)<line_sep>resp_json=resp.json()<assert_stmt>resp.status_code<eq>status.HTTP_200_OK<assert_stmt>re.match(r".*COVID-19_Profile_2021-09-20_H20M11S49647843.zip" resp_json["file_url"])<block_end><def_stmt>test_tsv_download_success client monkeypatch awards_and_transactions elasticsearch_award_index<block_start>setup_elasticsearch_test(monkeypatch elasticsearch_award_index)<line_sep>resp=_post(client def_codes=["L"] file_format="tsv")<line_sep>resp_json=resp.json()<assert_stmt>resp.status_code<eq>status.HTTP_200_OK<assert_stmt>re.match(r".*COVID-19_Profile_.*\.zip" resp_json["file_url"])<assert_stmt>resp_json["download_request"]["file_format"]<eq>"tsv"<block_end><def_stmt>test_pstxt_download_success client monkeypatch awards_and_transactions elasticsearch_award_index<block_start>setup_elasticsearch_test(monkeypatch elasticsearch_award_index)<line_sep>resp=_post(client 
def_codes=["L"] file_format="pstxt")<line_sep>resp_json=resp.json()<assert_stmt>resp.status_code<eq>status.HTTP_200_OK<assert_stmt>re.match(r".*COVID-19_Profile_.*\.zip" resp_json["file_url"])<assert_stmt>resp_json["download_request"]["file_format"]<eq>"pstxt"<block_end><def_stmt>test_download_failure_with_two_defc client monkeypatch awards_and_transactions elasticsearch_award_index<block_start>setup_elasticsearch_test(monkeypatch elasticsearch_award_index)<line_sep>resp=_post(client def_codes=["L" "M"])<assert_stmt>resp.status_code<eq>status.HTTP_400_BAD_REQUEST<assert_stmt>(resp.json()["detail"]<eq>"The Disaster Download is currently limited to either all COVID-19 DEFC or a single COVID-19 DEFC.")<block_end>
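For reference, the request body assembled by the _post helper for a call like _post(client, def_codes=["L"], file_format="csv") is the dict sketched below; the endpoint and payload shape come straight from the helper at the top of this test module.

payload = {
    "filters": {"def_codes": ["L"]},
    "file_format": "csv",
}
# client.post("/api/v2/download/disaster/", content_type="application/json",
#             data=json.dumps(payload))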
<import_from_stmt>.component DcmipInitialConditions<line_sep>__all__=['DcmipInitialConditions']<line_sep>
# # Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Module for abstract base class for checkpoint object and checkpoint collection """<import_from_stmt>typing Any Dict List<class_stmt>Saver(object)<block_start>""" ABC for saver objects that implement saving/restoring to/from path, and merging two savers. """<line_sep>@property<def_stmt>path self<block_start>""" Relative path for save/load. If two saver objects return the same path, they must be merge-able. """<line_sep><raise>NotImplementedError<block_end><def_stmt>save self sess:Any save_path:str<arrow>List[str]<block_start>""" Save to save_path :param sess: active session for session-based frameworks (e.g. TF) :param save_path: full path to save checkpoint (typically directory plus self.path plus checkpoint count). :return: list of all saved paths """<line_sep><raise>NotImplementedError<block_end><def_stmt>restore self sess:Any restore_path:str<arrow><none><block_start>""" Restore from restore_path :param sess: active session for session-based frameworks (e.g. TF) :param restore_path: full path to load checkpoint from. """<line_sep><raise>NotImplementedError<block_end><def_stmt>merge self other:'Saver'<arrow><none><block_start>""" Merge other saver into this saver :param other: saver to be merged into self """<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>SaverCollection(object)<block_start>""" Object for storing a collection of saver objects. It takes care of ensuring uniqueness of saver paths and merging savers if they have the same path. For example, if a saver handles saving a generic key/value file for all networks in a single file, it can use a more generic path and all savers of all networks would be merged into a single saver that saves/restores parameters for all networks. NOTE: If two savers have the same path, the respective saver class must support merging them into a single saver that saves/restores all merged parameters. """<def_stmt>__init__ self saver:Saver=<none><block_start>""" :param saver: optional initial saver for the collection """<line_sep>self._saver_dict=dict()# type: Dict[str, Saver] <if_stmt>saver<is><not><none><block_start>self._saver_dict[saver.path]=saver<block_end><block_end><def_stmt>add self saver:Saver<block_start>""" Add a new saver to the collection. If saver.path is already in the collection, merge the new saver with the existing saver. :param saver: new saver to be added to collection """<if_stmt>saver.path<in>self._saver_dict<block_start>self._saver_dict[saver.path].merge(saver)<block_end><else_stmt><block_start>self._saver_dict[saver.path]=saver<block_end><block_end><def_stmt>update self other:'SaverCollection'<block_start>""" Merge savers from other collection into self :param other: saver collection to update self with. 
"""<for_stmt>c other<block_start>self.add(c)<block_end><block_end><def_stmt>save self sess:Any save_path:str<arrow>List[str]<block_start>""" Call save on all savers in the collection :param sess: active session for session-based frameworks (e.g. TF) :param save_path: path for saving checkpoints using savers. All saved file paths must start with this path in their full path. For example if save_path is '/home/checkpoints/checkpoint-01', then saved file paths can be '/home/checkpoints/checkpoint-01.main-network' but not '/home/checkpoints/main-network' :return: list of all saved paths """<line_sep>paths=list()<for_stmt>saver self<block_start>paths.extend(saver.save(sess self._full_path(save_path saver)))<block_end><return>paths<block_end><def_stmt>restore self sess:Any restore_path:str<arrow><none><block_start>""" Call restore on all savers in the collection :param sess: active session for session-based frameworks (e.g. TF) :param restore_path: path for restoring checkpoint using savers. """<for_stmt>saver self<block_start>saver.restore(sess self._full_path(restore_path saver))<block_end><block_end><def_stmt>__iter__ self<block_start>""" Return an iterator for savers in the collection :return: saver iterator """<line_sep><return>(v<for>v self._saver_dict.values())<block_end>@staticmethod<def_stmt>_full_path path_prefix:str saver:Saver<arrow>str<block_start>""" Concatenates path of the saver to parent prefix to create full save path :param path_prefix: prefix of the path :param saver: saver object to get unique path extension from :return: full path """<if_stmt>saver.path<eq>""<block_start><return>path_prefix<block_end><return>"{}.{}".format(path_prefix saver.path)<block_end><block_end>
""" Define the ISONet model for the CIFAR datasets. """<import_stmt>kale.predict.isonet<as>isonet<def_stmt>get_config cfg<block_start>""" Sets the hypermeters (architecture) for ISONet using the config file Args: cfg: A YACS config object. """<line_sep>config_params={"net_params":{"use_dirac":cfg.ISON.DIRAC_INIT "use_dropout":cfg.ISON.DROPOUT "dropout_rate":cfg.ISON.DROPOUT_RATE "nc":cfg.DATASET.NUM_CLASSES "depths":cfg.ISON.DEPTH "has_bn":cfg.ISON.HAS_BN "use_srelu":cfg.ISON.SReLU "transfun":cfg.ISON.TRANS_FUN "has_st":cfg.ISON.HAS_ST }}<line_sep><return>config_params<block_end># Inherite and override <class_stmt>CifarIsoNet(isonet.ISONet)<block_start>"""Constructs the ISONet for CIFAR datasets Args: cfg: A YACS config object. """<def_stmt>__init__ self net_params<block_start>super(CifarIsoNet self).__init__(net_params)<line_sep># define network structures (override) self._construct(net_params)<line_sep># initialization self._network_init(net_params["use_dirac"])<block_end><def_stmt>_construct self net_params<block_start><assert_stmt>(net_params["depths"]-2)%6<eq>0 "Model depth should be of the format 6n + 2 for cifar"<line_sep># Seems because this is a ResNet # Each stage has the same number of blocks for cifar d=int((net_params["depths"]-2)/6)<line_sep># Stem: (N, 3, 32, 32) -> (N, 16, 32, 32) self.stem=isonet.ResStem(w_in=3 w_out=16 net_params=net_params kernelsize=3 stride=1 padding=1)<line_sep># Stage 1: (N, 16, 32, 32) -> (N, 16, 32, 32) self.s1=isonet.ResStage(w_in=16 w_out=16 stride=1 net_params=net_params d=d)<line_sep># Stage 2: (N, 16, 32, 32) -> (N, 32, 16, 16) self.s2=isonet.ResStage(w_in=16 w_out=32 stride=2 net_params=net_params d=d)<line_sep># Stage 3: (N, 32, 16, 16) -> (N, 64, 8, 8) self.s3=isonet.ResStage(w_in=32 w_out=64 stride=2 net_params=net_params d=d)<line_sep># Head: (N, 64, 8, 8) -> (N, num_classes) self.head=isonet.ResHead(w_in=64 net_params=net_params)<block_end><block_end><def_stmt>get_model cfg<block_start>""" Builds and returns an ISONet model for CIFAR datasets according to the config object passed. Args: cfg: A YACS config object. """<line_sep>config_params=get_config(cfg)<line_sep>net_params=config_params["net_params"]<line_sep>net=CifarIsoNet(net_params)<line_sep><return>net<block_end>
<import_from_future_stmt> unicode_literals<import_from_future_stmt> absolute_import<import_stmt>copy<import_stmt>datetime<import_stmt>json<import_stmt>time<import_stmt>django<import_stmt>django.utils.timezone<as>timezone<import_from_stmt>django.test TestCase TransactionTestCase<import_stmt>error.test.utils<as>error_test_utils<import_stmt>job.test.utils<as>job_test_utils<import_stmt>storage.test.utils<as>storage_test_utils<import_stmt>trigger.test.utils<as>trigger_test_utils<import_from_stmt>data.data.data Data<import_from_stmt>data.data.json.data_v6 convert_data_to_v6_json<import_from_stmt>error.models Error<import_from_stmt>job.configuration.data.exceptions InvalidConnection<import_from_stmt>job.configuration.data.job_data JobData<import_from_stmt>job.configuration.interface.job_interface JobInterface<import_from_stmt>job.configuration.results.job_results JobResults<import_from_stmt>job.seed.results.job_results JobResults<as>SeedJobResults<import_from_stmt>job.models Job JobExecution JobExecutionOutput JobInputFile JobType JobTypeRevision JobTypeTag<import_from_stmt>node.resources.json.resources Resources<class_stmt>TestJobManager(TransactionTestCase)<block_start><def_stmt>test_process_job_input self<block_start>"""Tests calling JobManager.process_job_input()"""<line_sep>date_1=timezone.now()<line_sep>min_src_started_job_1=date_1-datetime.timedelta(days=200)<line_sep>max_src_ended_job_1=date_1+datetime.timedelta(days=200)<line_sep>date_2=date_1+datetime.timedelta(minutes=30)<line_sep>date_3=date_1+datetime.timedelta(minutes=40)<line_sep>date_4=date_1+datetime.timedelta(minutes=50)<line_sep>min_src_started_job_2=date_1-datetime.timedelta(days=500)<line_sep>max_src_ended_job_2=date_1+datetime.timedelta(days=500)<line_sep>s_class='A'<line_sep>s_sensor='1'<line_sep>collection='12345'<line_sep>task='abcd'<line_sep>workspace=storage_test_utils.create_workspace()<line_sep>file_1=storage_test_utils.create_file(workspace=workspace file_size=10485760.0 source_sensor_class=s_class source_sensor=s_sensor source_collection=collection source_task=task)<line_sep>file_2=storage_test_utils.create_file(workspace=workspace file_size=104857600.0 source_started=date_2 source_ended=date_3 source_sensor_class=s_class source_sensor=s_sensor source_collection=collection source_task=task)<line_sep>file_3=storage_test_utils.create_file(workspace=workspace file_size=987654321.0 source_started=min_src_started_job_1 source_ended=date_4)<line_sep>file_4=storage_test_utils.create_file(workspace=workspace file_size=46546.0 source_ended=max_src_ended_job_1)<line_sep>file_5=storage_test_utils.create_file(workspace=workspace file_size=83457.0 source_started=date_2)<line_sep>file_6=storage_test_utils.create_file(workspace=workspace file_size=42126588636633.0 source_ended=date_4)<line_sep>file_7=storage_test_utils.create_file(workspace=workspace file_size=76645464662354.0)<line_sep>file_8=storage_test_utils.create_file(workspace=workspace file_size=4654.0 source_started=min_src_started_job_2)<line_sep>file_9=storage_test_utils.create_file(workspace=workspace file_size=545.0 source_started=date_3 source_ended=max_src_ended_job_2)<line_sep>file_10=storage_test_utils.create_file(workspace=workspace file_size=0.154 source_ended=date_4 source_sensor_class=s_class source_sensor=s_sensor source_collection=collection source_task=task)<line_sep>interface={'command':'my_command' 'inputs':{'files':[{'name':'Input 1' 'mediaTypes':['text/plain'] } {'name':'Input 2' 'mediaTypes':['text/plain'] }]} 'outputs':{'files':[{'name':'Output 1' 
'mediaType':'image/png' }]}}<line_sep>job_type=job_test_utils.create_seed_job_type(interface=interface)<line_sep>data_1={'version':'1.0' 'input_data':[{'name':'Input 1' 'file_id':file_1.id} {'name':'Input 2' 'file_ids':[file_2.id file_3.id file_4.id file_5.id]}] 'output_data':[{'name':'Output 1' 'workspace_id':workspace.id}]}<line_sep>data_2={'version':'1.0' 'input_data':[{'name':'Input 1' 'file_id':file_6.id} {'name':'Input 2' 'file_ids':[file_7.id file_8.id file_9.id file_10.id]}] 'output_data':[{'name':'Output 1' 'workspace_id':workspace.id}]}<line_sep>job_1=job_test_utils.create_job(job_type=job_type num_exes=0 status='PENDING' input_file_size=<none> input=data_1)<line_sep>job_2=job_test_utils.create_job(job_type=job_type num_exes=0 status='PENDING' input_file_size=<none> input=data_2)<line_sep># Execute method Job.objects.process_job_input(job_1)<line_sep>Job.objects.process_job_input(job_2)<line_sep># Retrieve updated job models jobs=Job.objects.filter(id__in=[job_1.id job_2.id]).order_by('id')<line_sep>job_1=jobs[0]<line_sep>job_2=jobs[1]<line_sep># Check jobs for expected fields self.assertEqual(job_1.input_file_size 1053.0)<line_sep>self.assertEqual(job_1.source_started min_src_started_job_1)<line_sep>self.assertEqual(job_1.source_ended max_src_ended_job_1)<line_sep>self.assertEqual(job_1.source_sensor_class s_class)<line_sep>self.assertEqual(job_1.source_sensor s_sensor)<line_sep>self.assertEqual(job_1.source_collection collection)<line_sep>self.assertEqual(job_1.source_task task)<line_sep>self.assertEqual(job_2.input_file_size 113269857.0)<line_sep>self.assertEqual(job_2.source_started min_src_started_job_2)<line_sep>self.assertEqual(job_2.source_ended max_src_ended_job_2)<line_sep>self.assertEqual(job_2.source_sensor_class s_class)<line_sep>self.assertEqual(job_2.source_sensor s_sensor)<line_sep>self.assertEqual(job_2.source_collection collection)<line_sep>self.assertEqual(job_2.source_task task)<line_sep># Make sure job input file models are created job_input_files=JobInputFile.objects.filter(job_id=job_1.id)<line_sep>self.assertEqual(len(job_input_files) 5)<line_sep>input_files_dict={'Input 1':set() 'Input 2':set()}<for_stmt>job_input_file job_input_files<block_start>input_files_dict[job_input_file.job_input].add(job_input_file.input_file_id)<block_end>self.assertDictEqual(input_files_dict {'Input 1':{file_1.id} 'Input 2':{file_2.id file_3.id file_4.id file_5.id}})<line_sep>job_input_files=JobInputFile.objects.filter(job_id=job_2.id)<line_sep>self.assertEqual(len(job_input_files) 5)<line_sep>input_files_dict={'Input 1':set() 'Input 2':set()}<for_stmt>job_input_file job_input_files<block_start>input_files_dict[job_input_file.job_input].add(job_input_file.input_file_id)<block_end>self.assertDictEqual(input_files_dict {'Input 1':{file_6.id} 'Input 2':{file_7.id file_8.id file_9.id file_10.id}})<block_end><def_stmt>test_process_job_output self<block_start>"""Tests calling JobManager.process_job_output()"""<line_sep>output_1=JobResults()<line_sep>output_1.add_file_parameter('foo' 1)<line_sep>output_2=JobResults()<line_sep>output_2.add_file_parameter('foo' 2)<line_sep># These jobs have completed and have their execution results job_exe_1=job_test_utils.create_job_exe(status='COMPLETED' output=output_1)<line_sep>job_exe_2=job_test_utils.create_job_exe(status='COMPLETED' output=output_2)<line_sep># These jobs have their execution results, but have not completed 
job_exe_3=job_test_utils.create_job_exe(status='RUNNING')<line_sep>job_exe_4=job_test_utils.create_job_exe(status='RUNNING')<for_stmt>job_exe [job_exe_3 job_exe_4]<block_start>job_exe_output=JobExecutionOutput()<line_sep>job_exe_output.job_exe_id=job_exe.id<line_sep>job_exe_output.job_id=job_exe.job_id<line_sep>job_exe_output.job_type_id=job_exe.job.job_type_id<line_sep>job_exe_output.exe_num=job_exe.exe_num<line_sep>job_exe_output.output=JobResults().get_dict()<line_sep>job_exe_output.save()<block_end># These jobs have completed, but do not have their execution results job_exe_5=job_test_utils.create_job_exe(status='RUNNING')<line_sep>job_exe_6=job_test_utils.create_job_exe(status='RUNNING')<for_stmt>job [job_exe_5.job job_exe_6.job]<block_start>job.status='COMPLETED'<line_sep>job.save()<block_end># Test method job_ids=[job_exe.job_id<for>job_exe [job_exe_1 job_exe_2 job_exe_3 job_exe_4 job_exe_5 job_exe_6]]<line_sep>result_ids=Job.objects.process_job_output(job_ids timezone.now())<line_sep>self.assertEqual(set(result_ids) {job_exe_1.job_id job_exe_2.job_id})<line_sep># Jobs 1 and 2 should have output populated, jobs 3 through 6 should not jobs=list(Job.objects.filter(id__in=job_ids).order_by('id'))<line_sep>self.assertEqual(len(jobs) 6)<line_sep>self.assertTrue(jobs[0].has_output())<line_sep>self.assertDictEqual(jobs[0].output output_1.get_dict())<line_sep>self.assertTrue(jobs[1].has_output())<line_sep>self.assertDictEqual(jobs[1].output output_2.get_dict())<line_sep>self.assertFalse(jobs[2].has_output())<line_sep>self.assertFalse(jobs[3].has_output())<line_sep>self.assertFalse(jobs[4].has_output())<line_sep>self.assertFalse(jobs[5].has_output())<block_end><def_stmt>test_queue_job_timestamps self<block_start>"""Tests that job attributes are updated when a job is queued."""<line_sep>data_dict=convert_data_to_v6_json(Data()).get_dict()<line_sep>job=job_test_utils.create_job(num_exes=1 status='CANCELED' input=data_dict started=timezone.now() ended=timezone.now())<line_sep>Job.objects.update_jobs_to_queued([job] timezone.now() requeue=<true>)<line_sep>job=Job.objects.get(pk=job.id)<line_sep>self.assertEqual(job.status 'QUEUED')<line_sep>self.assertIsNotNone(job.queued)<line_sep>self.assertIsNone(job.started)<line_sep>self.assertIsNone(job.ended)<block_end><def_stmt>test_queue_superseded_jobs self<block_start>"""Tests that JobManager.update_jobs_to_queued() does not queue superseded jobs"""<line_sep>job=job_test_utils.create_job(status='FAILED')<line_sep>Job.objects.supersede_jobs([job.id] timezone.now())<line_sep>job_ids=Job.objects.update_jobs_to_queued([job] timezone.now())<line_sep>job=Job.objects.get(pk=job.id)<line_sep>self.assertListEqual(job_ids [])<line_sep>self.assertEqual(job.status 'FAILED')<line_sep>self.assertTrue(job.is_superseded)<block_end><def_stmt>test_superseded_job self<block_start>"""Tests creating a job that supersedes another job"""<line_sep>old_job=job_test_utils.create_job()<line_sep>event=trigger_test_utils.create_trigger_event()<line_sep>new_job=Job.objects.create_job_v6(old_job.job_type_rev event.id superseded_job=old_job)<line_sep>new_job.save()<line_sep>when=timezone.now()<line_sep>Job.objects.supersede_jobs([old_job.id] when)<line_sep>new_job=Job.objects.get(pk=new_job.id)<line_sep>self.assertEqual(new_job.status 'PENDING')<line_sep>self.assertFalse(new_job.is_superseded)<line_sep>self.assertEqual(new_job.root_superseded_job_id old_job.id)<line_sep>self.assertEqual(new_job.superseded_job_id 
old_job.id)<line_sep>self.assertIsNone(new_job.superseded)<line_sep>old_job=Job.objects.get(pk=old_job.id)<line_sep>self.assertTrue(old_job.is_superseded)<line_sep>self.assertEqual(old_job.superseded when)<block_end><block_end><class_stmt>TestJob(TestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<block_end><def_stmt>test_get_seed_job_results self<block_start>"""Test retrieving job results from a Seed job type"""<line_sep>job_type=job_test_utils.create_seed_job_type()<line_sep>input={"version":"1.0" "input_data":{} "output_data":{}}<line_sep>job=job_test_utils.create_job(job_type input=input)<line_sep>self.assertIsInstance(job.get_job_results() SeedJobResults)<block_end><block_end><class_stmt>TestJobType(TransactionTestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<line_sep>seed_interface_str=""" { "seedVersion": "1.0.0", "job": { "name": "test", "jobVersion": "1.0.0", "packageVersion": "1.0.0", "title": "Test job to exercise Seed functionality", "description": "Reads input file and ", "tags": [ "testing", "seed" ], "maintainer": { "name": "<NAME>", "organization": "E-corp", "email": "<EMAIL>", "url": "http://www.example.com", "phone": "666-555-4321" }, "timeout": 3600, "interface": { "command": "${INPUT_TEXT} ${INPUT_FILES} ${READ_LENGTH}", "inputs": { "files": [ { "name": "INPUT_TEXT", "mediaTypes": [ "text/plain" ], "partial": true }, { "name": "INPUT_FILES", "multiple": true } ], "json": [ { "name": "READ_LENGTH", "type": "integer" }, { "name": "OUTPUT_COUNT", "type": "integer" } ] }, "outputs": { "files": [ { "name": "OUTPUT_FILES", "mediaType": "text/plain", "multiple": true, "pattern": "output_files*.txt" }, { "name": "OUTPUT_TEXT", "mediaType": "text/plain", "pattern": "output_text.txt" } ], "json": [ { "name": "cell_count", "key": "cellCount", "type": "integer" } ] }, "mounts": [ { "name": "MOUNT_PATH", "path": "/the/container/path", "mode": "ro" } ], "settings": [ { "name": "DB_HOST", "secret": false }, { "name": "DB_PASS", "secret": true } ] }, "resources": { "scalar": [ { "name": "cpus", "value": 1.5 }, { "name": "mem", "value": 244.0 }, { "name": "sharedMem", "value": 1.0 }, { "name": "disk", "value": 11.0, "inputMultiplier": 4.0 } ] }, "errors": [ { "code": 1, "name": "data-issue", "title": "Data Issue discovered", "description": "There was a problem with input data", "category": "data" }, { "code": 2, "name": "missing-mount", "title": "Missing mount", "description": "Expected mount point not available at run time", "category": "job" }, { "code": 3, "name": "missing-setting", "title": "Missing setting", "description": "Expected setting not defined in environment variable", "category": "job" }, { "code": 4, "name": "missing-env", "title": "Missing environment", "description": "Expected environment not provided", "category": "job" } ] } } """<line_sep>self.seed_job_type=job_test_utils.create_seed_job_type(manifest=json.loads(seed_interface_str))<block_end><def_stmt>test_get_seed_cpu_resource_from_seed_interface self<block_start>job_type=self.seed_job_type<line_sep>value=job_type.get_resources().get_json().get_dict()<line_sep>self.assertEqual(1.5 value['resources']['cpus'])<block_end><def_stmt>test_get_seed_mem_resource_from_seed_interface self<block_start>job_type=self.seed_job_type<line_sep>value=job_type.get_resources().get_json().get_dict()<line_sep>self.assertEqual(244.0 value['resources']['mem'])<block_end><def_stmt>test_get_seed_sharedmem_resource_from_seed_interface 
self<block_start>job_type=self.seed_job_type<line_sep>value=job_type.get_resources().get_json().get_dict()<line_sep>self.assertEqual(1.0 value['resources']['sharedmem'])<block_end><def_stmt>test_get_seed_disk_resource_from_seed_interface self<block_start>job_type=self.seed_job_type<line_sep>value=job_type.get_resources().get_json().get_dict()<line_sep>self.assertEqual(11.0 value['resources']['disk'])<block_end><def_stmt>test_get_job_version_array self<block_start>job_type=self.seed_job_type<line_sep>version='1.0.0'<line_sep>value=job_type.get_job_version_array(version)<line_sep>self.assertEqual([1 0 0 <none>] value)<line_sep>version='1.0.0-0'<line_sep>value=job_type.get_job_version_array(version)<line_sep>self.assertEqual([1 0 0 0] value)<line_sep>version='1.0.0-alpha'<line_sep>value=job_type.get_job_version_array(version)<line_sep>self.assertEqual([1 0 0 97] value)<line_sep>version='1.0'<line_sep>value=job_type.get_job_version_array(version)<line_sep>self.assertEqual([0 0 0 0] value)<block_end><block_end><class_stmt>TestJobTypeRevision(TransactionTestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<line_sep>self.seed_job_type=job_test_utils.create_seed_job_type()<line_sep>self.seed_job_type_rev=JobTypeRevision.objects.get_revision(self.seed_job_type.name self.seed_job_type.version self.seed_job_type.revision_num)<block_end><def_stmt>test_revision_get_input_interface self<block_start>self.assertEqual(self.seed_job_type_rev.get_input_interface().parameters['INPUT_IMAGE'].PARAM_TYPE 'file')<block_end><def_stmt>test_revision_get_output_interface self<block_start>self.assertEqual(self.seed_job_type_rev.get_output_interface().parameters['OUTPUT_IMAGE'].PARAM_TYPE 'file')<block_end><block_end><class_stmt>TestJobTypeRunningStatus(TestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<line_sep>manifest1=job_test_utils.create_seed_manifest(name='type-1' jobVersion='1.0.0')<line_sep>self.job_type_1=job_test_utils.create_seed_job_type(manifest=manifest1)<line_sep>manifest2=job_test_utils.create_seed_manifest(name='type-2' jobVersion='2.0.0')<line_sep>self.job_type_2=job_test_utils.create_seed_job_type(manifest=manifest2)<line_sep>manifest3=job_test_utils.create_seed_manifest(name='type-1' jobVersion='2.0.0')<line_sep>self.job_type_3=job_test_utils.create_seed_job_type(manifest=manifest3)<line_sep>self.entry_1_longest=datetime.datetime.utcfromtimestamp(500000).replace(tzinfo=timezone.utc)<line_sep>self.entry_1_shortest=datetime.datetime.utcfromtimestamp(650000).replace(tzinfo=timezone.utc)<line_sep>self.entry_2_longest=datetime.datetime.utcfromtimestamp(600000).replace(tzinfo=timezone.utc)<line_sep>self.entry_2_shortest=datetime.datetime.utcfromtimestamp(750000).replace(tzinfo=timezone.utc)<line_sep>self.entry_3_longest=datetime.datetime.utcfromtimestamp(700000).replace(tzinfo=timezone.utc)<line_sep>self.entry_3_shortest=datetime.datetime.utcfromtimestamp(800000).replace(tzinfo=timezone.utc)<line_sep>job_test_utils.create_job(job_type=self.job_type_1 status='RUNNING' last_status_change=self.entry_1_longest)<line_sep>job_test_utils.create_job(job_type=self.job_type_1 status='RUNNING' last_status_change=self.entry_1_shortest)<line_sep>job_test_utils.create_job(job_type=self.job_type_2 status='RUNNING' last_status_change=self.entry_2_shortest)<line_sep>job_test_utils.create_job(job_type=self.job_type_2 status='RUNNING' last_status_change=self.entry_2_longest)<line_sep>job_test_utils.create_job(job_type=self.job_type_2 status='RUNNING' 
last_status_change=self.entry_2_shortest)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 status='RUNNING' last_status_change=self.entry_3_shortest)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 status='RUNNING' last_status_change=self.entry_3_longest)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 status='RUNNING' last_status_change=self.entry_3_longest)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 status='RUNNING' last_status_change=self.entry_3_shortest)<block_end><def_stmt>test_successful self<block_start>"""Tests calling the get_running_job_status method on JobExecutionManager."""<line_sep>status=JobType.objects.get_running_status()<line_sep>self.assertEqual(len(status) 3)<line_sep># Check entry 1 self.assertEqual(status[0].job_type.id self.job_type_1.id)<line_sep>self.assertEqual(status[0].job_type.name 'type-1')<line_sep>self.assertEqual(status[0].job_type.version '1.0.0')<line_sep>self.assertEqual(status[0].count 2)<line_sep>self.assertEqual(status[0].longest_running self.entry_1_longest)<line_sep># Check entry 2 self.assertEqual(status[1].job_type.id self.job_type_2.id)<line_sep>self.assertEqual(status[1].job_type.name 'type-2')<line_sep>self.assertEqual(status[1].job_type.version '2.0.0')<line_sep>self.assertEqual(status[1].count 3)<line_sep>self.assertEqual(status[1].longest_running self.entry_2_longest)<line_sep># Check entry 3 self.assertEqual(status[2].job_type.id self.job_type_3.id)<line_sep>self.assertEqual(status[2].job_type.name 'type-1')<line_sep>self.assertEqual(status[2].job_type.version '2.0.0')<line_sep>self.assertEqual(status[2].count 4)<line_sep>self.assertEqual(status[2].longest_running self.entry_3_longest)<block_end><block_end><class_stmt>TestJobTypeFailedStatus(TestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<line_sep>self.job_type_1=job_test_utils.create_seed_job_type(job_version='1.0')<line_sep>self.job_type_2=job_test_utils.create_seed_job_type(job_version='2.0')<line_sep>self.job_type_3=job_test_utils.create_seed_job_type(job_version='2.0')<line_sep>self.error_1=Error.objects.create(name='Error 1' description='Test' category='SYSTEM')<line_sep>self.error_2=Error.objects.create(name='Error 2' description='Test' category='SYSTEM')<line_sep>self.error_3=Error.objects.create(name='Error 3' description='Test' category='DATA')<line_sep># Date stamps for errors self.entry_1_last_time=datetime.datetime.utcfromtimestamp(590000).replace(tzinfo=timezone.utc)<line_sep>self.entry_1_first_time=datetime.datetime.utcfromtimestamp(580000).replace(tzinfo=timezone.utc)<line_sep>self.entry_2_time=datetime.datetime.utcfromtimestamp(585000).replace(tzinfo=timezone.utc)<line_sep>self.entry_3_last_time=datetime.datetime.utcfromtimestamp(490000).replace(tzinfo=timezone.utc)<line_sep>self.entry_3_mid_time=datetime.datetime.utcfromtimestamp(480000).replace(tzinfo=timezone.utc)<line_sep>self.entry_3_first_time=datetime.datetime.utcfromtimestamp(470000).replace(tzinfo=timezone.utc)<line_sep>self.entry_4_time=datetime.datetime.utcfromtimestamp(385000).replace(tzinfo=timezone.utc)<line_sep># Create jobs job_test_utils.create_job(job_type=self.job_type_1 status='RUNNING' last_status_change=timezone.now())<line_sep>job_test_utils.create_job(job_type=self.job_type_1 error=self.error_1 status='FAILED' last_status_change=self.entry_2_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_2 error=self.error_1 status='FAILED' 
last_status_change=self.entry_4_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_2 error=self.error_2 status='FAILED' last_status_change=self.entry_1_last_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_2 error=self.error_2 status='FAILED' last_status_change=self.entry_1_first_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 error=self.error_2 status='FAILED' last_status_change=self.entry_3_mid_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 error=self.error_2 status='FAILED' last_status_change=self.entry_3_last_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 error=self.error_2 status='FAILED' last_status_change=self.entry_3_first_time)<line_sep>job_test_utils.create_job(job_type=self.job_type_3 error=self.error_3 status='FAILED' last_status_change=timezone.now())<block_end><def_stmt>test_successful self<block_start>"""Tests calling the get_failed_jobs_with_system_errors method on JobManager."""<line_sep>status=JobType.objects.get_failed_status()<line_sep>self.assertEqual(len(status) 4)<line_sep># Check entry 1 self.assertEqual(status[0].job_type.id self.job_type_2.id)<line_sep>self.assertEqual(status[0].job_type.version '2.0')<line_sep>self.assertEqual(status[0].error.name 'Error 2')<line_sep>self.assertEqual(status[0].count 2)<line_sep>self.assertEqual(status[0].first_error self.entry_1_first_time)<line_sep>self.assertEqual(status[0].last_error self.entry_1_last_time)<line_sep># Check entry 2 self.assertEqual(status[1].job_type.id self.job_type_1.id)<line_sep>self.assertEqual(status[1].job_type.version '1.0')<line_sep>self.assertEqual(status[1].error.name 'Error 1')<line_sep>self.assertEqual(status[1].count 1)<line_sep>self.assertEqual(status[1].first_error self.entry_2_time)<line_sep>self.assertEqual(status[1].last_error self.entry_2_time)<line_sep># Check entry 3 self.assertEqual(status[2].job_type.id self.job_type_3.id)<line_sep>self.assertEqual(status[2].job_type.version '2.0')<line_sep>self.assertEqual(status[2].error.name 'Error 2')<line_sep>self.assertEqual(status[2].count 3)<line_sep>self.assertEqual(status[2].first_error self.entry_3_first_time)<line_sep>self.assertEqual(status[2].last_error self.entry_3_last_time)<line_sep># Check entry 4 self.assertEqual(status[3].job_type.id self.job_type_2.id)<line_sep>self.assertEqual(status[3].job_type.version '2.0')<line_sep>self.assertEqual(status[3].error.name 'Error 1')<line_sep>self.assertEqual(status[3].count 1)<line_sep>self.assertEqual(status[3].first_error self.entry_4_time)<line_sep>self.assertEqual(status[3].last_error self.entry_4_time)<block_end><block_end><class_stmt>TestJobTypeTagManager(TransactionTestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<line_sep>self.job_type1=job_test_utils.create_seed_job_type()<line_sep>self.tag_set1=["tag1" "tag2" "oneandfour"]<line_sep>self.job_type2=job_test_utils.create_seed_job_type()<line_sep>self.tag_set2=["tag3" "tag4"]<line_sep>self.job_type3=job_test_utils.create_seed_job_type()<line_sep>self.tag_set3=["tag5" "tag6"]<line_sep>self.job_type4=job_test_utils.create_seed_job_type()<line_sep>self.tag_set4=["tag7" "tag8" "oneandfour"]<line_sep>JobTypeTag.objects.create_job_type_tags(self.job_type1 self.tag_set1)<line_sep>JobTypeTag.objects.create_job_type_tags(self.job_type3 self.tag_set3)<line_sep>JobTypeTag.objects.create_job_type_tags(self.job_type4 self.tag_set4)<block_end><def_stmt>test_create_job_type_tags self<block_start>"""Tests calling 
JobTypeManager.create_job_type_tags()"""<line_sep>result=JobTypeTag.objects.create_job_type_tags(self.job_type2 self.tag_set2)<line_sep>self.assertEqual(len(result) 2)<block_end><def_stmt>test_clear_job_type_tags self<block_start>"""Tests calling JobTypeManager.clear_job_type_tags()"""<line_sep>tags=[jt_tag.tag<for>jt_tag JobTypeTag.objects.filter(job_type_id=self.job_type3.id)]<line_sep>self.assertListEqual(tags self.tag_set3)<line_sep>JobTypeTag.objects.clear_job_type_tags(self.job_type3.id)<line_sep>tags=[jt_tag.tag<for>jt_tag JobTypeTag.objects.filter(job_type_id=self.job_type3.id)]<line_sep>self.assertEqual(len(tags) 0)<block_end><block_end>
# Copyright 2016-2021, Pulumi Corporation. All rights reserved. <import_stmt>azure.functions<as>func<def_stmt>main req:func.HttpRequest<arrow>func.HttpResponse<block_start>body='Hello there {}'.format(req.params.get('name'))<line_sep><return>func.HttpResponse(body status_code=200)<block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<line_sep># noqa <import_from_future_stmt> unicode_literals<import_from_stmt>django.conf.urls include url<import_from_stmt>common.constants SAAS_CODE_REGEX<import_from_stmt>saas views<line_sep>urlpatterns=[# Application list url(r'^list/' include([url(r'^$' views.SaaSListPageView.as_view() name="saas_list") url(r'^query/$' views.SaaSListView.as_view()) ])) # Application basic info url(r'^(?P<app_code>'+SAAS_CODE_REGEX+')/' include([url(r'^info/$' views.InfoView.as_view()) # FIXME: change to restful-like api if more action on saas # Delete SaaS application url(r'^delete/$' views.DeleteSaaSView.as_view()) url(r'^logo/$' views.ModifyAppLogoView.as_view()) # Upload SaaS application url(r'^upload/$' views.UploadView.as_view()) # Release related # Release and deploy page url(r'^release/' include([url(r'^$' views.ReleasePageView.as_view()) # Release record page url(r'^record/$' views.RecordView.as_view()) # Offline (take-down) page url(r'^offline/$' views.OfflinePageView.as_view()) # Execute release url(r'^online/(?P<saas_app_version_id>\d+)/$' views.OnlineView.as_view()) ])) ])) url(r'^0/release/$' views.ReleasePageView.as_view() {'app_code':0}) # for legacy system, keep below # saas/release/online, # saas/upload, url(r'^release/online/(?P<saas_app_version_id>\d+)/$' views.OnlineView.as_view()) url(r'^upload/(?P<app_code>'+SAAS_CODE_REGEX+')/$' views.UploadView.as_view()) ]<line_sep>
# coding=utf-8 KEYBOARD_URL_MAPS={'default':[['Site wide shortcuts' # keyboard category [# ('keyboard shortcut', 'keyboard info') ('s' 'Focus search bar') ('g n' 'Go to Notifications') ('g h' 'Go to personal page') ('?' 'Bring up this help dialog') ] ] ['Registration and login' [('l r' 'Open register window') ('l o' 'Open login window') ('l t' 'Logout') ('l c' 'Close register/login window') ] ] ['Notifications' [('e / I / y' 'Mark as read') ] ] ['Personal page' [('g s' 'Go to personal settings page') ('g t' 'Go to personal topic page') ]]] '/':[['Topic list shortcuts' [('j' 'Move selection down') ('k' 'Move selection up') ('o' 'Open selection') ] ] ['Create Topic' [('t o' 'Open create topic window') ('t q' 'Close create topic window') ('t s' 'Submit create topic') ] ]] '/post':[['Reply Topic' [('p o' 'Open reply topic window') ('p q' 'Close reply topic window') ('p s' 'Submit reply topic') ] ] ]}<line_sep># http://clrs.cc/ CATEGORY_COLORS=('#001f3f' # Navy '#0074D9' # Blue '#7FDBFF' # Aqua '#39CCCC' # Teal '#3D9970' # Olive '#2ECC40' # Green '#01FF70' # Lime '#FFDC00' # Yellow '#FF851B' # Orange '#FF4136' # Red '#85144b' # Maroon '#F012BE' # Fuchsia '#b10dc9' # Purple '#111111' # black '#aaaaaa' # Gray '#dddddd' # Silver )<line_sep>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_stmt>pickle<import_stmt>re<import_stmt>os<import_stmt>job<import_from_stmt>collections defaultdict<class_stmt>Report(object)<block_start>'''Contains information about a completed job, such as the number of crashes and stack traces from every crash. The report is usually displayed on a web page. '''<def_stmt>__init__ self job_id<block_start>self.num_queries=0<line_sep>self.run_time=0<line_sep>self.run_date=0<line_sep>self.job_name=''<line_sep>self.num_crashes=0<line_sep>self.num_row_count_mismatch=0<line_sep>self.num_mismatch=0<line_sep>self.job_id=job_id<line_sep>self.git_hash=''<line_sep>self.grouped_results=<none><line_sep>self.parent_job_name=''<line_sep>self.num_queries_returned_correct_data=0<line_sep>self.get_results()<block_end>@property<def_stmt>run_time_str self<block_start>'''Return the running time of the job as a string in human readable format.'''<line_sep>m,s=divmod(self.run_time 60)<line_sep>h,m=divmod(m 60)<line_sep><return>'{0:02d}:{1:02d}:{2:02d}'.format(int(h) int(m) int(s))<block_end><def_stmt>classify_error self error<block_start>d={ur'LINE \d+:':'Postgres_error' ur'Permission denied':'permission_denied' ur'^AnalysisException':'AnalysisException' ur'^Column \d+ in row \d+ does not match':'mismatch' ur'^Could not connect':'could_not_connect' ur'^IllegalStateException':'IllegalStateException' ur'^Invalid query handle: ':'invalid_query_handle' ur'^Known issue:':'known_issue' ur'^Operation is in ERROR_STATE':'error_state' ur'^Query timed out after \d+ seconds':'timeout' ur'^Row counts do not match':'row_counts' ur'^Too much data':'too_much_data' ur'^Unknown expr node type: \d+':'unkown_node' ur'^Year is out of valid range':'year_range' ur'^[A-Za-z]+ out of range':'out_of_range' ur'^division by zero':'division_by_zero'}<for_stmt>r d<block_start><if_stmt>re.search(r error)<block_start><return>d[r]<block_end><block_end><return>'unrecognized'<block_end><def_stmt>group_queries self all_queries group_func<block_start>'''General function that returns a dictionary with keys that are generated by group_func. all_queries is a list of queries. group_func should take query as a parameter and return a string containing an interesting property of the query which will be used as key in the dictionary. '''<line_sep>grouped_queries=defaultdict(list)<for_stmt>query all_queries<block_start>grouped_queries[group_func(query)].append(query)<block_end><return>grouped_queries<block_end><def_stmt>__str__ self<block_start>'''TODO: Render report as text. '''<line_sep><return>''<block_end><def_stmt>get_first_impala_frame self query_result<block_start>'''Extracts the first impala frame in the stack trace. 
'''<line_sep>stack=query_result['formatted_stack']<if_stmt>stack<block_start><for_stmt>line stack.split('\n')<block_start>match=re.search(ur'(impala::.*) \(' line)<if_stmt>match<block_start><return>match.group(1)<block_end><block_end><block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>_format_stack self stack<block_start>'''Cleans up the stack trace. '''<def_stmt>clean_frame frame#remove memory address from each frame <block_start>reg=re.match(ur'#\d+ *0x[0123456789abcdef]* in (.*)' frame)<if_stmt>reg<block_start><return>reg.group(1)<block_end># this is for matching lines like "#7 SLL_Next (this=0x9046780, src=0x90467c8... reg=re.match(ur'#\d+ *(\S.*)' frame)<if_stmt>reg<block_start><return>reg.group(1)<block_end><return>frame<block_end><def_stmt>stack_gen <block_start>'''Generator that yields impala stack trace lines line by line. '''<if_stmt>stack<block_start>active=<false><for_stmt>line stack.split('\n')<block_start><if_stmt>active<or>line.startswith('#0')<block_start>active=<true><line_sep><yield>line<block_end><block_end><block_end><block_end><return>'\n'.join(clean_frame(l)<for>l stack_gen())<block_end><def_stmt>get_results self<block_start>'''Analyses the completed job and extracts important results into self. This method should be called as soon as the object is created. '''<import_from_stmt>controller PATH_TO_FINISHED_JOBS<def_stmt>group_outer_func query<block_start><if_stmt>'stack'<in>query<block_start><return>'stack'<block_end><return>self.classify_error(query['error'])<block_end><def_stmt>stack_group_func query<block_start><return>self.get_first_impala_frame(query['stack'])<block_end><with_stmt>open(os.path.join(PATH_TO_FINISHED_JOBS self.job_id))<as>f<block_start>job=pickle.load(f)<line_sep>self.grouped_results=self.group_queries(job.result_list group_outer_func)<block_end># Format the stack for queries that have a stack <for_stmt>query self.grouped_results['stack']<block_start>query['formatted_stack']=self._format_stack(query['stack'])<block_end>self.num_crashes=len(self.grouped_results['stack'])<line_sep>self.num_row_count_mismatch=len(self.grouped_results['row_counts'])<line_sep>self.num_mismatch=len(self.grouped_results['mismatch'])<line_sep>self.grouped_stacks=self.group_queries(self.grouped_results['stack'] self.get_first_impala_frame)<line_sep>self.run_time=job.stop_time-job.start_time<line_sep>self.run_date=job.start_time<line_sep>self.job_name=job.job_name<line_sep>self.git_hash=job.git_hash<line_sep>self.num_queries_executed=job.num_queries_executed<line_sep>self.num_queries_returned_correct_data=job.num_queries_returned_correct_data<if_stmt>job.parent_job<block_start><with_stmt>open(os.path.join(PATH_TO_FINISHED_JOBS job.parent_job))<as>f<block_start>parent_job=pickle.load(f)<line_sep>self.parent_job_name=parent_job.job_name<block_end><block_end><block_end><def_stmt>save_pickle self<block_start><import_from_stmt>controller PATH_TO_REPORTS<with_stmt>open(os.path.join(PATH_TO_REPORTS self.job_id) 'w')<as>f<block_start>pickle.dump(self f)<block_end><block_end><block_end>
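# Hedged usage sketch (not from the source): how classify_error and
# group_queries bucket fuzz-test failures. The error strings are invented
# examples; Report.__new__ is used to skip __init__, which normally loads a
# pickled job from PATH_TO_FINISHED_JOBS.
if __name__ == '__main__':
    demo_report = Report.__new__(Report)
    demo_queries = [
        {'error': 'Row counts do not match'},
        {'error': 'Query timed out after 600 seconds'},
        {'error': 'division by zero'},
    ]
    buckets = demo_report.group_queries(
        demo_queries, lambda q: demo_report.classify_error(q['error']))
    # buckets -> {'row_counts': [...], 'timeout': [...], 'division_by_zero': [...]}
    print(sorted(buckets.keys()))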
""" Stacked Density Estimates ------------------------- To plot a stacked graph of estimates, use a shared ``extent`` and a fixed number of subdivision ``steps`` to ensure that the points for each area align well. Density estimates of measurements for each iris flower feature are plot in a stacked method. In addition, setting ``counts`` to true multiplies the densities by the number of data points in each group, preserving proportional differences. """<line_sep># category: area charts <import_stmt>altair<as>alt<import_from_stmt>vega_datasets data<line_sep>source=data.iris()<line_sep>alt.Chart(source).transform_fold(['petalWidth' 'petalLength' 'sepalWidth' 'sepalLength'] as_=['Measurement_type' 'value']).transform_density(density='value' bandwidth=0.3 groupby=['Measurement_type'] extent=[0 8] counts=<true> steps=200).mark_area().encode(alt.X('value:Q') alt.Y('density:Q' stack='zero') alt.Color('Measurement_type:N')).properties(width=400 height=100)<line_sep>
<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>torch.autograd Variable<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<import_stmt>json<import_stmt>torch.utils.data.sampler<import_stmt>os<import_stmt>glob<import_stmt>random<import_stmt>time<import_stmt>configs<import_stmt>backbone<import_stmt>data.feature_loader<as>feat_loader<import_from_stmt>data.datamgr SetDataManager<import_from_stmt>methods.baselinetrain BaselineTrain<import_from_stmt>methods.baselinefinetune BaselineFinetune<import_from_stmt>methods.protonet ProtoNet<import_from_stmt>io_utils model_dict parse_args get_resume_file get_best_file get_assigned_file<import_from_stmt>datasets ISIC_few_shot EuroSAT_few_shot CropDisease_few_shot Chest_few_shot<def_stmt>feature_evaluation cl_data_file model n_way=5 n_support=5 n_query=15 adaptation=<false><block_start>class_list=cl_data_file.keys()<line_sep>select_class=random.sample(class_list n_way)<line_sep>z_all=[]<for_stmt>cl select_class<block_start>img_feat=cl_data_file[cl]<line_sep>perm_ids=np.random.permutation(len(img_feat)).tolist()<line_sep>z_all.append([np.squeeze(img_feat[perm_ids[i]])<for>i range(n_support+n_query)])<block_end># stack each batch z_all=torch.from_numpy(np.array(z_all))<line_sep>model.n_query=n_query<if_stmt>adaptation<block_start>scores=model.set_forward_adaptation(z_all is_feature=<true>)<block_end><else_stmt><block_start>scores=model.set_forward(z_all is_feature=<true>)<block_end>pred=scores.data.cpu().numpy().argmax(axis=1)<line_sep>y=np.repeat(range(n_way) n_query)<line_sep>acc=np.mean(pred<eq>y)<times>100<line_sep><return>acc<block_end><if_stmt>__name__<eq>'__main__'<block_start>params=parse_args('test')<line_sep>acc_all=[]<line_sep>iter_num=600<line_sep>few_shot_params=dict(n_way=params.test_n_way n_support=params.n_shot)<if_stmt>params.method<eq>'baseline'<block_start>model=BaselineFinetune(model_dict[params.model] **few_shot_params)<block_end><elif_stmt>params.method<eq>'protonet'<block_start>model=ProtoNet(model_dict[params.model] **few_shot_params)<block_end><else_stmt><block_start><raise>ValueError('Unknown method')<block_end>model=model.cuda()<line_sep>checkpoint_dir='%s/checkpoints/%s/%s_%s'%(configs.save_dir 'miniImageNet' params.model params.method)<if_stmt>params.train_aug<block_start>checkpoint_dir<augadd>'_aug'<block_end><if_stmt><not>params.method<in>['baseline']<block_start>checkpoint_dir<augadd>'_%dway_%dshot'%(params.train_n_way params.n_shot)<block_end><if_stmt><not>params.method<in>['baseline']<block_start><if_stmt>params.save_iter<ne>-1<block_start>modelfile=get_assigned_file(checkpoint_dir params.save_iter)<block_end><else_stmt><block_start>modelfile=get_best_file(checkpoint_dir)<block_end><if_stmt>modelfile<is><not><none><block_start>tmp=torch.load(modelfile)<line_sep>model.load_state_dict(tmp['state'])<block_end><block_end>#params.save_iter = 399 <if_stmt>params.save_iter<ne>-1<block_start>novel_file=os.path.join(checkpoint_dir.replace("checkpoints" "features") params.dataset+"_"+str(params.save_iter)+".hdf5")#defaut split = novel, but you can also test base or val classes <block_end><else_stmt><block_start>novel_file=os.path.join(checkpoint_dir.replace("checkpoints" "features") params.dataset+".hdf5")<block_end>#defaut split = novel, but you can also test base or val classes cl_data_file=feat_loader.init_loader(novel_file)<for_stmt>i range(iter_num)<block_start>print(i)<line_sep>acc=feature_evaluation(cl_data_file model n_query=15 adaptation=params.adaptation 
**few_shot_params)<line_sep>print(acc)<line_sep>acc_all.append(acc)<block_end>acc_all=np.asarray(acc_all)<line_sep>acc_mean=np.mean(acc_all)<line_sep>acc_std=np.std(acc_all)<line_sep>print('%d Test Acc = %4.2f%% +- %4.2f%%'%(iter_num acc_mean 1.96<times>acc_std/np.sqrt(iter_num)))<block_end>
# Copyright 2012 Viewfinder Inc. All Rights Reserved. """Customization for display of database tables. Default: default display of a table row *: customized versions by table """<line_sep>__author__='<EMAIL> (<NAME>)'<import_stmt>logging<import_stmt>pprint<import_stmt>time<import_from_stmt>tornado.escape url_escape xhtml_escape<import_from_stmt>viewfinder.backend.base.util ConvertToString<import_from_stmt>viewfinder.backend.db.contact Contact<import_from_stmt>viewfinder.backend.db.db_client DBKey<import_from_stmt>viewfinder.backend.db.follower Follower<import_from_stmt>viewfinder.backend.db.schema UnpackLocation UnpackPlacemark<import_from_stmt>viewfinder.backend.db.episode Episode<import_from_stmt>viewfinder.backend.db.photo Photo<import_from_stmt>viewfinder.backend.db.viewpoint Viewpoint<class_stmt>FmtDefault(object)<block_start><def_stmt>__init__ self table<block_start>self._table=table<block_end><def_stmt>FormatItemAttributes self item<block_start>"""Returns an array of item attributes, one per column in the table definition, formatted for display in HTML table. """<line_sep>attributes=self._FormatAllAttributes(item)<line_sep>rows=[pretty<for>_,_,_,pretty attributes]<line_sep><return>rows<block_end><def_stmt>FormatItemAttributesForView self item<block_start>"""Return an array of rows. Each row consists of "column name", "key", "value". """<line_sep>attributes=self._FormatAllAttributes(item)<line_sep>rows=[(name key pretty)<for>name,key,_,pretty attributes]<line_sep>rows.extend(self._GetExtraViewFields(item))<line_sep><return>rows<block_end><def_stmt>_GetExtraViewFields self item<block_start>"""Class used to append new fields in per-object view. Nothing by default. Must be a list of (name, key, pretty)."""<line_sep><return>[]<block_end>@staticmethod<def_stmt>_Escape val# Need to cast to string for int-valued columns (eg: user_id). <block_start><return>url_escape(ConvertToString(val))<block_end>@staticmethod<def_stmt>_XEscape val# Need to cast to string for int-valued columns (eg: user_id). <block_start><return>xhtml_escape(ConvertToString(val))<block_end>@staticmethod<def_stmt>_HashQueryLink table key name=<none><block_start><return>'<a href="/admin/db?table=%s&type=query&hash_key=%s">%s</a>'%(FmtDefault._Escape(table) FmtDefault._Escape(key) FmtDefault._XEscape(name<if>name<is><not><none><else>key))<block_end>@staticmethod<def_stmt>_SortQueryLink table hash_key sort_key name=<none><block_start>"""Builds a query link for a hash_key and sort_key. Sort key operator is 'EQ'."""<line_sep><return>'<a href="/admin/db?table=%s&type=query&hash_key=%s&sort_key=%s&sort_desc=EQ">%s</a>'%(FmtDefault._Escape(table) FmtDefault._Escape(hash_key) FmtDefault._Escape(sort_key) FmtDefault._XEscape(name<if>name<is><not><none><else>'%s:%s'%(hash_key sort_key)))<block_end>@staticmethod<def_stmt>_EpisodeLink vp name=<none><block_start><return>FmtDefault._HashQueryLink('Episode' vp name)<block_end>@staticmethod<def_stmt>_PhotoLink vp name=<none><block_start><return>FmtDefault._HashQueryLink('Photo' vp name)<block_end>@staticmethod<def_stmt>_UserLink vp name=<none><block_start><return>FmtDefault._HashQueryLink('User' vp name)<block_end>@staticmethod<def_stmt>_ViewpointLink vp name=<none><block_start><return>FmtDefault._HashQueryLink('Viewpoint' vp name)<block_end><def_stmt>_FormatAllAttributes self item<block_start>"""Build list of (column, key, value, pretty_value). 
We need a list to keep the columns ordered."""<line_sep>attrs=[]<for_stmt>name self._table.GetColumnNames()<block_start>c=self._table.GetColumn(name)<line_sep>value=item.get(c.key <none>)<line_sep>pretty=self._FormatAttribute(name value)<if>value<is><not><none><else>'-'<line_sep>attrs.append((name c.key value pretty))<block_end><return>attrs<block_end><def_stmt>_FormatAttribute self name value<block_start>"""Returns the attribute value; If none, returns '-'. Formats by default the following fields: 'viewpoint_id', 'episode_id', 'photo_id', 'timestamp', 'Location', 'Placemark'. """<if_stmt>name<eq>'viewpoint_id'<or>name<eq>'private_vp_id'<block_start>did,(vid sid)=Viewpoint.DeconstructViewpointId(value)<line_sep>pretty='%s/%d/%d'%(value did vid)<line_sep><return>FmtDefault._ViewpointLink(value pretty)<block_end><elif_stmt>name<eq>'user_id'<or>name<eq>'sender_id'<block_start><return>self._UserLink(value)<block_end><elif_stmt>name<eq>'episode_id'<or>name<eq>'parent_ep_id'<block_start>ts,did,(eid sid)=Episode.DeconstructEpisodeId(value)<line_sep>pretty='%s/%d/%d'%(value did eid)<line_sep><return>self._EpisodeLink(value pretty)<block_end><elif_stmt>name<eq>'photo_id'<or>name<eq>'parent_id'<block_start>ts,did,(pid sid)=Photo.DeconstructPhotoId(value)<line_sep>pretty='%s/%d/%d'%(value did pid)<line_sep><return>self._PhotoLink(value pretty)<block_end><elif_stmt>name<eq>'timestamp'<or>name<eq>'last_updated'<or>name<eq>'expires'<or>name<eq>'last_fetch'<block_start><return>self._FormatTimestamp(value)<block_end><elif_stmt>name<eq>'location'<block_start><return>self._XEscape(', '.join(['%s: %s'%(k v)<for>k,v UnpackLocation(value)._asdict().items()]))<block_end><elif_stmt>name<eq>'placemark'<block_start><return>self._XEscape(', '.join(['%s: %s'%(k v)<for>k,v UnpackPlacemark(value)._asdict().items()]))<block_end><else_stmt><block_start><return>self._XEscape('%s'%value)<block_end><block_end><def_stmt>_FormatTimestamp self timestamp<block_start>"""Formats a timestamp (in UTC) via default format."""<line_sep><return>self._XEscape(time.asctime(time.gmtime(timestamp)))<block_end><def_stmt>_GetQueryURL self table hash_key<block_start>"""Returns a URL to display a DB query of the table using hash key 'hash_key'. 
"""<line_sep><return>'/admin/db?table=%s&type=query&hash_key=%s'%(self._Escape(table) self._Escape(repr(hash_key)))<block_end><block_end><class_stmt>FmtAccounting(FmtDefault)<block_start>_names={'vs':'viewpoint_size' 'us':'user_size' 'ow':'owned_by' 'sb':'shared_by' 'vt':'visible_to'}<def_stmt>_FormatAttribute self name value<block_start><if_stmt>name<eq>'hash_key'<block_start>split=value.split(':')<line_sep>prefix=split[0]<line_sep>prefix_name=self._names[prefix]<if_stmt>prefix<eq>'vs'<block_start><return>'%s:%s'%(self._XEscape(prefix_name) self._ViewpointLink(split[1]))<block_end><elif_stmt>prefix<eq>'us'<block_start><return>'%s:%s'%(self._XEscape(prefix_name) self._UserLink(split[1]))<block_end><block_end><elif_stmt>name<eq>'sort_key'<block_start>split=value.split(':')<line_sep>prefix=split[0]<line_sep>prefix_name=self._names[prefix]<if_stmt>len(split)<eq>1<block_start><return>prefix_name<block_end><elif_stmt>prefix<eq>'ow'<or>prefix<eq>'sb'<block_start><return>'%s:%s'%(self._XEscape(prefix_name) self._UserLink(split[1]))<block_end><block_end><return>FmtDefault._FormatAttribute(self name value)<block_end><block_end><class_stmt>FmtEpisode(FmtDefault)<block_start><def_stmt>_GetExtraViewFields self item<block_start>ep_id=item.get('ei')<line_sep>extras=[]<line_sep>extras.append(self._HashQueryLink('Index' 'ev:pa:%s'%ep_id 'Children'))<line_sep>extras.append(self._HashQueryLink('Post' ep_id 'Posts'))<line_sep><return>[('Extras' '' ' &middot '.join(extras))]<block_end><block_end><class_stmt>FmtIdentity(FmtDefault)<block_start><def_stmt>_GetExtraViewFields self item<block_start>id_id=item.get('ke')<line_sep>extras=[]<line_sep>extras.append(self._HashQueryLink('Index' 'co:id:%s'%id_id 'In-contacts'))<line_sep><return>[('Extras' '' ' &middot '.join(extras))]<block_end><block_end><class_stmt>FmtIndex(FmtDefault)<block_start><def_stmt>_FormatAllAttributes self item<block_start>"""Build list of (column, key, value, pretty_value). We need a list to keep the columns ordered. The interpretation of the 'key' column depends on the beginning of the 'term' column."""<line_sep>attrs=[]<line_sep>term=item.get('t' <none>)<line_sep>key=item.get('k' <none>)<line_sep>data=item.get('d' <none>)<line_sep>split=term.split(':')<line_sep>table=split[0]<line_sep>key_pretty=key<if_stmt>table<eq>'co'<block_start>db_key=Contact._ParseIndexKey(key)<line_sep>key_pretty=self._SortQueryLink('Contact' db_key.hash_key db_key.range_key)<block_end><elif_stmt>table<eq>'ev'<block_start>key_pretty=self._EpisodeLink(key)<block_end><elif_stmt>table<eq>'fo'<block_start>db_key=Follower._ParseIndexKey(key)<line_sep>key_pretty=self._SortQueryLink('Follower' db_key.hash_key db_key.range_key)<block_end><elif_stmt>table<eq>'id'<block_start>key_pretty=self._HashQueryLink('Identity' key)<block_end><elif_stmt>table<eq>'vp'<block_start>key_pretty=self._ViewpointLink(key)<block_end>attrs.append(('term' 't' term term))<line_sep>attrs.append(('key' 'k' key key_pretty))<line_sep>attrs.append(('data' 't' data data))<line_sep>attrs.append(('_version' '_ve' data data))<line_sep><return>attrs<block_end><block_end><class_stmt>FmtLock(FmtDefault)<block_start><def_stmt>_FormatAttribute self name value<block_start>"""Formats 'expiration' as human readable date/times. 
"""<if_stmt>name<eq>'expiration'<block_start><if_stmt>value<l>time.time()<block_start><return>'<i>Expired</i>'<block_end><else_stmt><block_start><return>self._FormatTimestamp(value)<block_end><block_end><else_stmt><block_start><return>FmtDefault._FormatAttribute(self name value)<block_end><block_end><block_end><class_stmt>FmtOperation(FmtDefault)<block_start><def_stmt>_FormatAttribute self name value<block_start>"""Formats 'timestamp' as human readable date/time, {'json', 'first_exception', 'last_exception'} as <pre/> blocks for readability. """<if_stmt>name<in>('json' 'first_exception' 'last_exception')<block_start><return>'<pre>%s</pre>'%self._XEscape(value)<block_end><elif_stmt>name<eq>'backoff'<block_start><if_stmt>value<l>time.time()<block_start><return>'<i>Expired</i>'<block_end><else_stmt><block_start><return>self._FormatTimestamp(value)<block_end><block_end><else_stmt><block_start><return>FmtDefault._FormatAttribute(self name value)<block_end><block_end><block_end><class_stmt>FmtUser(FmtDefault)<block_start><def_stmt>_GetExtraViewFields self item<block_start>user_id=item.get('ui')<line_sep>extras=[]<line_sep>extras.append(self._HashQueryLink('Accounting' 'us:%s'%user_id 'Accounting'))<line_sep>extras.append(self._HashQueryLink('Contact' user_id 'Contacts'))<line_sep>extras.append(self._HashQueryLink('Device' user_id 'Devices'))<line_sep>extras.append(self._HashQueryLink('Index' 'ev:ui:%s'%user_id 'Episodes'))<line_sep>extras.append(self._HashQueryLink('Followed' user_id 'Followed'))<line_sep>extras.append(self._HashQueryLink('Follower' user_id 'Follower'))<line_sep>extras.append(self._HashQueryLink('Friend' user_id 'Friends'))<line_sep>extras.append(self._HashQueryLink('Index' 'id:ui:%s'%user_id 'Identities'))<line_sep>extras.append(self._HashQueryLink('Notification' user_id 'Notifications'))<line_sep>extras.append(self._HashQueryLink('Settings' 'us:%s'%user_id 'Settings'))<line_sep>extras.append(self._HashQueryLink('Subscription' user_id 'Subscriptions'))<line_sep>extras.append(self._HashQueryLink('Index' 'vp:ui:%s'%user_id 'Viewpoints'))<line_sep><return>[('Extras' '' ' &middot '.join(extras))]<block_end><block_end><class_stmt>FmtViewpoint(FmtDefault)<block_start><def_stmt>_GetExtraViewFields self item<block_start>vp_id=item.get('vi')<line_sep>extras=[]<line_sep>extras.append(self._HashQueryLink('Accounting' 'vs:%s'%vp_id 'Accounting'))<line_sep>extras.append(self._HashQueryLink('Activity' vp_id 'Activities'))<line_sep>extras.append(self._HashQueryLink('Comment' vp_id 'Comments'))<line_sep>extras.append(self._HashQueryLink('Index' 'ev:vi:%s'%vp_id 'Episodes'))<line_sep>extras.append(self._HashQueryLink('Index' 'fo:vi:%s'%vp_id 'Followers'))<line_sep><return>[('Extras' '' ' &middot '.join(extras))]<block_end><block_end>
<import_from_stmt>django.contrib admin<import_from_stmt>django.db models<import_from_stmt>django forms<import_stmt>nested_admin<import_from_stmt>.models FreeText Poll Question MultipleChoiceGroup MultipleChoice Survey Text Textarea <class_stmt>TextInline(nested_admin.NestedTabularInline)<block_start>model=Text<line_sep>extra=1<line_sep>min_num=1<line_sep>max_num=1<line_sep>sortable_field_name="position"<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>TextareaInline(nested_admin.NestedTabularInline)<block_start>model=Textarea<line_sep>extra=1<line_sep>min_num=1<line_sep>max_num=1<line_sep>sortable_field_name="position"<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>RadioInline(nested_admin.NestedTabularInline)<block_start>model=MultipleChoice<line_sep>sortable_field_name="position"<line_sep>extra=0<line_sep>min_num=1<line_sep>max_num=8<line_sep>radio_fields={'style':admin.HORIZONTAL}<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>RadioGroupInline(nested_admin.NestedTabularInline)<block_start>model=MultipleChoiceGroup<line_sep>inlines=(RadioInline )<line_sep>extra=0<line_sep>min_num=1<line_sep>max_num=1<line_sep>sortable_field_name="position"<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>DropDownInline(nested_admin.NestedTabularInline)<block_start>model=MultipleChoice<line_sep>sortable_field_name="position"<line_sep>extra=0<line_sep>min_num=1<line_sep>max_num=8<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>DropDownGroupInline(nested_admin.NestedTabularInline)<block_start>model=MultipleChoiceGroup<line_sep>inlines=(DropDownInline )<line_sep>extra=0<line_sep>min_num=1<line_sep>max_num=1<line_sep>sortable_field_name="position"<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>QuestionInline(nested_admin.NestedStackedPolymorphicInline)<block_start><class_stmt>FreeTextInline(nested_admin.NestedStackedPolymorphicInline.Child)<block_start>model=FreeText<line_sep>inlines=(TextInline TextareaInline DropDownGroupInline)<line_sep>sortable_field_name="position"<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end><class_stmt>PollInline(nested_admin.NestedStackedPolymorphicInline.Child)<block_start>model=Poll<line_sep>inlines=(TextInline RadioGroupInline )<line_sep>sortable_field_name="position"<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end>model=Question<line_sep>extra=0<line_sep>sortable_field_name="position"<line_sep>child_inlines=(FreeTextInline PollInline )<line_sep>formfield_overrides={models.PositiveSmallIntegerField:{'widget':forms.HiddenInput} }<block_end>@admin.register(Survey)<class_stmt>SurveyAdmin(nested_admin.NestedPolymorphicModelAdmin)<block_start>inlines=(QuestionInline )<block_end>
# -*- coding: utf-8 -*- <import_from_stmt>functools lru_cache<import_stmt>numpy<as>np<import_from_stmt>..base Property<import_from_stmt>..types.prediction GaussianMeasurementPrediction<import_from_stmt>..types.update Update<import_from_stmt>..models.measurement.linear LinearGaussian<import_from_stmt>..updater.kalman KalmanUpdater<class_stmt>InformationKalmanUpdater(KalmanUpdater)<block_start>r"""A class which implements the update of information form of the Kalman filter. This is conceptually very simple. The update proceeds as: .. math:: Y_{k|k} = Y_{k|k-1} + H^{T}_k R^{-1}_k H_k \mathbf{y}_{k|k} = \mathbf{y}_{k|k-1} + H^{T}_k R^{-1}_k \mathbf{z}_{k} where :math:`\mathbf{y}_{k|k-1}` is the predicted information state and :math:`Y_{k|k-1}` the predicted information matrix which form the :class:`~.InformationStatePrediction` object. The measurement matrix :math:`H_k` and measurement covariance :math:`R_k` are those in the Kalman filter (see tutorial 1). An :class:`~.InformationStateUpdate` object is returned. Note ---- Analogously with the :class:`~.InformationKalmanPredictor`, the measurement model is queried for the existence of an :meth:`inverse_covar()` property. If absent, the :meth:`covar()` is inverted. """<line_sep>measurement_model:LinearGaussian=Property(default=<none> doc="A linear Gaussian measurement model. This need not be defined if "<concat>"a measurement model is provided in the measurement. If no model "<concat>"specified on construction, or in the measurement, then error "<concat>"will be thrown.")<def_stmt>_inverse_measurement_covar self measurement_model **kwargs<block_start>"""Return the inverse of the measurement covariance (or calculate it) Parameters ---------- measurement_model The measurement model to be queried **kwargs : various, optional These are passed to :meth:`~.LinearGaussian.covar()` Returns ------- : :class:`numpy.ndarray` The inverse of the measurement covariance, :math:`R_k^{-1}` """<if_stmt>hasattr(measurement_model 'inverse_covar')<block_start>inv_measurement_covar=measurement_model.inverse_covar(**kwargs)<block_end><else_stmt><block_start>inv_measurement_covar=np.linalg.inv(measurement_model.covar(**kwargs))<block_end><return>inv_measurement_covar<block_end>@lru_cache()<def_stmt>predict_measurement self predicted_state measurement_model=<none> **kwargs<block_start>r"""There's no direct analogue of a predicted measurement in the information form. This method is therefore provided to return the predicted measurement as would the standard Kalman updater. This is mainly for compatibility as it's not anticipated that it would be used in the usual operation of the information filter. Parameters ---------- predicted_information_state : :class:`~.State` The predicted state in information form :math:`\mathbf{y}_{k|k-1}` measurement_model : :class:`~.MeasurementModel` The measurement model. 
If omitted, the model in the updater object is used **kwargs : various These are passed to :meth:`~.MeasurementModel.matrix()` Returns ------- : :class:`~.GaussianMeasurementPrediction` The measurement prediction, :math:`H \mathbf{x}_{k|k-1}` """<line_sep># If a measurement model is not specified then use the one that's # native to the updater measurement_model=self._check_measurement_model(measurement_model)<line_sep>hh=self._measurement_matrix(predicted_state=predicted_state measurement_model=measurement_model **kwargs)<line_sep>predicted_covariance=np.linalg.inv(predicted_state.precision)<line_sep>predicted_state_mean=predicted_covariance@predicted_state.state_vector<line_sep>predicted_measurement=hh@predicted_state_mean<line_sep>innovation_covariance=hh@[email protected]+measurement_model.covar()<line_sep><return>GaussianMeasurementPrediction(predicted_measurement innovation_covariance predicted_state.timestamp [email protected])<block_end><def_stmt>update self hypothesis **kwargs<block_start>r"""The Information filter update (corrector) method. Given a hypothesised association between a predicted information state and an actual measurement, calculate the posterior information state. Parameters ---------- hypothesis : :class:`~.SingleHypothesis` the prediction-measurement association hypothesis. This hypothesis carries a predicted information state. **kwargs : various These are passed to :meth:`predict_measurement` Returns ------- : :class:`~.InformationStateUpdate` The posterior information state with information state :math:`\mathbf{y}_{k|k}` and precision :math:`Y_{k|k}` """<line_sep>measurement_model=hypothesis.measurement.measurement_model<line_sep>measurement_model=self._check_measurement_model(measurement_model)<line_sep>pred_info_mean=hypothesis.prediction.state_vector<line_sep>hh=measurement_model.matrix()<line_sep>invr=self._inverse_measurement_covar(measurement_model)<line_sep>posterior_precision=hypothesis.prediction.precision+hh.T@invr@hh<line_sep>posterior_information_mean=pred_info_mean+hh.T@[email protected]_vector<if_stmt>self.force_symmetric_covariance<block_start>posterior_precision=(posterior_precision+posterior_precision.T)/2<block_end><return>Update.from_state(hypothesis.prediction posterior_information_mean posterior_precision timestamp=hypothesis.measurement.timestamp hypothesis=hypothesis)<block_end><block_end>
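# Standalone numerical sketch (plain NumPy, not the Stone Soup API) of the
# information-form update quoted in the class docstring:
#   Y_{k|k} = Y_{k|k-1} + H^T R^{-1} H,   y_{k|k} = y_{k|k-1} + H^T R^{-1} z
# All matrix values below are illustrative assumptions, not from the source.
def _information_update_sketch():
    import numpy as np

    hh = np.array([[1.0, 0.0]])                # measure position only
    inv_r = np.linalg.inv(np.array([[0.25]]))  # R^{-1}

    y_matrix_pred = np.diag([2.0, 1.0])        # predicted precision Y_{k|k-1}
    y_vec_pred = y_matrix_pred @ np.array([[1.0], [0.5]])  # y = Y x

    z = np.array([[1.2]])                      # measurement

    y_matrix_post = y_matrix_pred + hh.T @ inv_r @ hh
    y_vec_post = y_vec_pred + hh.T @ inv_r @ z

    # Recover mean/covariance form for comparison with a standard Kalman update.
    covar_post = np.linalg.inv(y_matrix_post)
    mean_post = covar_post @ y_vec_post
    return mean_post, covar_post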
<import_from_stmt>dagster daily_schedule<line_sep># start_preset_helper <def_stmt>daily_schedule_definition_from_pipeline_preset pipeline preset_name start_date<block_start>preset=pipeline.get_preset(preset_name)<if_stmt><not>preset<block_start><raise>Exception("Preset {preset_name} was not found "<concat>"on pipeline {pipeline_name}".format(preset_name=preset_name pipeline_name=pipeline.name))<block_end>@daily_schedule(start_date=start_date pipeline_name=pipeline.name solid_selection=preset.solid_selection mode=preset.mode tags_fn_for_date=<lambda>_:preset.tags )<def_stmt>my_schedule _date<block_start><return>preset.run_config<block_end><return>my_schedule<block_end># end_preset_helper
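# Hedged usage sketch (names and values are assumptions, not from the source):
# derive a daily schedule from a pipeline preset using the helper above. This
# targets the legacy @pipeline / PresetDefinition API that the helper expects.
from datetime import datetime

from dagster import PresetDefinition, pipeline, repository, solid


@solid
def say_hello(context):
    context.log.info("hello")


@pipeline(preset_defs=[PresetDefinition(name="dev", run_config={}, mode="default")])
def my_pipeline():
    say_hello()


my_daily_schedule = daily_schedule_definition_from_pipeline_preset(
    my_pipeline, "dev", start_date=datetime(2020, 1, 1))


@repository
def my_repo():
    return [my_pipeline, my_daily_schedule]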
<import_stmt>argparse<import_from_stmt>data *<import_from_stmt>unet *<def_stmt>test args# Data Load <block_start>testset=dataset(args mode='test')<line_sep># Model Load model=unet(args)<line_sep>model.load_weights(args.ckpt_path)<line_sep># Model Test results=model.predict_generator(testset steps=1 verbose=1)<line_sep># Save predictions save_result(args results)<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description=__doc__)<line_sep>parser.add_argument('--batch_size' type=int default=155 help='batch size.')<line_sep>parser.add_argument('--data' type=str default='complete' help='MRI Label data to train')<line_sep>parser.add_argument('--image_root' type=str default='../data/train/image_FLAIR' help='the root directory containing the image dataset.')<line_sep>parser.add_argument('--label_root' type=str default='../data/train/label' help='the root directory containing the label dataset')<line_sep>parser.add_argument('--image_folder1' type=str default='BRATS_074' help='the directory containing the image dataset.')<line_sep>parser.add_argument('--label_folder1' type=str default='BRATS_074' help='the directory containing the label dataset.')<line_sep>parser.add_argument('--output_root' type=str default='./output' help='the directory to save results')<line_sep>parser.add_argument('--ckpt_path' type=str default='./checkpoint/unet.hdf5' help='The directory containing the segmentation model checkpoint.')<line_sep>args=parser.parse_args()<line_sep>test(args)<block_end>
""" Plugin for Rackspace Cloud Orchestration mock. """<import_from_stmt>mimic.rest.heat_api HeatApi<line_sep>heat=HeatApi()<line_sep>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<import_from_stmt>. outputs<line_sep>__all__=['GetConnectionResult' 'AwaitableGetConnectionResult' 'get_connection' 'get_connection_output' ]<line_sep>@pulumi.output_type<class_stmt>GetConnectionResult<block_start>""" A collection of values returned by getConnection. """<def_stmt>__init__ __self__ arn=<none> catalog_id=<none> connection_properties=<none> connection_type=<none> description=<none> id=<none> match_criterias=<none> name=<none> physical_connection_requirements=<none> tags=<none><block_start><if_stmt>arn<and><not>isinstance(arn str)<block_start><raise>TypeError("Expected argument 'arn' to be a str")<block_end>pulumi.set(__self__ "arn" arn)<if_stmt>catalog_id<and><not>isinstance(catalog_id str)<block_start><raise>TypeError("Expected argument 'catalog_id' to be a str")<block_end>pulumi.set(__self__ "catalog_id" catalog_id)<if_stmt>connection_properties<and><not>isinstance(connection_properties dict)<block_start><raise>TypeError("Expected argument 'connection_properties' to be a dict")<block_end>pulumi.set(__self__ "connection_properties" connection_properties)<if_stmt>connection_type<and><not>isinstance(connection_type str)<block_start><raise>TypeError("Expected argument 'connection_type' to be a str")<block_end>pulumi.set(__self__ "connection_type" connection_type)<if_stmt>description<and><not>isinstance(description str)<block_start><raise>TypeError("Expected argument 'description' to be a str")<block_end>pulumi.set(__self__ "description" description)<if_stmt>id<and><not>isinstance(id str)<block_start><raise>TypeError("Expected argument 'id' to be a str")<block_end>pulumi.set(__self__ "id" id)<if_stmt>match_criterias<and><not>isinstance(match_criterias list)<block_start><raise>TypeError("Expected argument 'match_criterias' to be a list")<block_end>pulumi.set(__self__ "match_criterias" match_criterias)<if_stmt>name<and><not>isinstance(name str)<block_start><raise>TypeError("Expected argument 'name' to be a str")<block_end>pulumi.set(__self__ "name" name)<if_stmt>physical_connection_requirements<and><not>isinstance(physical_connection_requirements list)<block_start><raise>TypeError("Expected argument 'physical_connection_requirements' to be a list")<block_end>pulumi.set(__self__ "physical_connection_requirements" physical_connection_requirements)<if_stmt>tags<and><not>isinstance(tags dict)<block_start><raise>TypeError("Expected argument 'tags' to be a dict")<block_end>pulumi.set(__self__ "tags" tags)<block_end>@[email protected]<def_stmt>arn self<arrow>str<block_start>""" The ARN of the Glue Connection. """<line_sep><return>pulumi.get(self "arn")<block_end>@[email protected](name="catalogId")<def_stmt>catalog_id self<arrow>str<block_start>""" The catalog ID of the Glue Connection. """<line_sep><return>pulumi.get(self "catalog_id")<block_end>@[email protected](name="connectionProperties")<def_stmt>connection_properties self<arrow>Mapping[str str]<block_start><return>pulumi.get(self "connection_properties")<block_end>@[email protected](name="connectionType")<def_stmt>connection_type self<arrow>str<block_start>""" The type of Glue Connection. 
"""<line_sep><return>pulumi.get(self "connection_type")<block_end>@[email protected]<def_stmt>description self<arrow>str<block_start>""" Description of the connection. """<line_sep><return>pulumi.get(self "description")<block_end>@[email protected]<def_stmt>id self<arrow>str<block_start><return>pulumi.get(self "id")<block_end>@[email protected](name="matchCriterias")<def_stmt>match_criterias self<arrow>Sequence[str]<block_start>""" A list of criteria that can be used in selecting this connection. """<line_sep><return>pulumi.get(self "match_criterias")<block_end>@[email protected]<def_stmt>name self<arrow>str<block_start>""" The name of the Glue Connection. """<line_sep><return>pulumi.get(self "name")<block_end>@[email protected](name="physicalConnectionRequirements")<def_stmt>physical_connection_requirements self<arrow>Sequence['outputs.GetConnectionPhysicalConnectionRequirementResult']<block_start>""" A map of physical connection requirements, such as VPC and SecurityGroup. """<line_sep><return>pulumi.get(self "physical_connection_requirements")<block_end>@[email protected]<def_stmt>tags self<arrow>Mapping[str str]<block_start>""" The tags assigned to the resource """<line_sep><return>pulumi.get(self "tags")<block_end><block_end><class_stmt>AwaitableGetConnectionResult(GetConnectionResult)# pylint: disable=using-constant-test <block_start><def_stmt>__await__ self<block_start><if_stmt><false><block_start><yield>self<block_end><return>GetConnectionResult(arn=self.arn catalog_id=self.catalog_id connection_properties=self.connection_properties connection_type=self.connection_type description=self.description id=self.id match_criterias=self.match_criterias name=self.name physical_connection_requirements=self.physical_connection_requirements tags=self.tags)<block_end><block_end><def_stmt>get_connection id:Optional[str]=<none> tags:Optional[Mapping[str str]]=<none> opts:Optional[pulumi.InvokeOptions]=<none><arrow>AwaitableGetConnectionResult<block_start>""" This data source can be used to fetch information about a specific Glue Connection. ## Example Usage ```python import pulumi import pulumi_aws as aws example = aws.glue.get_connection(id="123456789123:connection") ``` :param str id: A concatenation of the catalog ID and connection name. For example, if your account ID is `123456789123` and the connection name is `conn` then the ID is `123456789123:conn`. 
:param Mapping[str, str] tags: The tags assigned to the resource """<line_sep>__args__=dict()<line_sep>__args__['id']=id<line_sep>__args__['tags']=tags<if_stmt>opts<is><none><block_start>opts=pulumi.InvokeOptions()<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end>__ret__=pulumi.runtime.invoke('aws:glue/getConnection:getConnection' __args__ opts=opts typ=GetConnectionResult).value<line_sep><return>AwaitableGetConnectionResult(arn=__ret__.arn catalog_id=__ret__.catalog_id connection_properties=__ret__.connection_properties connection_type=__ret__.connection_type description=__ret__.description id=__ret__.id match_criterias=__ret__.match_criterias name=__ret__.name physical_connection_requirements=__ret__.physical_connection_requirements tags=__ret__.tags)<block_end>@_utilities.lift_output_func(get_connection)<def_stmt>get_connection_output id:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Optional[Mapping[str str]]]]=<none> opts:Optional[pulumi.InvokeOptions]=<none><arrow>pulumi.Output[GetConnectionResult]<block_start>""" This data source can be used to fetch information about a specific Glue Connection. ## Example Usage ```python import pulumi import pulumi_aws as aws example = aws.glue.get_connection(id="123456789123:connection") ``` :param str id: A concatenation of the catalog ID and connection name. For example, if your account ID is `123456789123` and the connection name is `conn` then the ID is `123456789123:conn`. :param Mapping[str, str] tags: The tags assigned to the resource """<line_sep><ellipsis><block_end>
<def_stmt>extractLilBlissNovels item<block_start>""" """<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol)<or>'preview'<in>item['title'].lower()<block_start><return><none><block_end><if_stmt>':'<in>item['title']<and>'Side Story'<in>item['title']<and><not>postfix<block_start>postfix=item['title'].split(':')[-1]<block_end><if_stmt>'<NAME>'<in>item['tags']<block_start><return>buildReleaseMessageWithType(item '<NAME>' vol chp frag=frag postfix=postfix)<block_end><if_stmt>'Memory Lost'<in>item['tags']<block_start><return>buildReleaseMessageWithType(item 'Memory Lost' vol chp frag=frag postfix=postfix)<block_end><return><false><block_end>
# Column/Label Types
NULL = 'null'
CATEGORICAL = 'categorical'
TEXT = 'text'
NUMERICAL = 'numerical'
ENTITY = 'entity'

# Feature Types
ARRAY = 'array'
# type: ignore[override]

import torch
import torch.nn as nn
from torch import Tensor

from flambe.nn.mlp import MLPEncoder
from flambe.nn.module import Module


class MixtureOfSoftmax(Module):
    """Implement the MixtureOfSoftmax output layer.

    Attributes
    ----------
    pi_w: MLPEncoder
        projection producing the mixture weights over the k components
    layers: list of MLPEncoder
        the k component output layers

    """

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 k: int = 1,
                 take_log: bool = True) -> None:
        """Initialize the MOS layer.

        Parameters
        ----------
        input_size: int
            input dimension
        output_size: int
            output dimension
        k: int (Default: 1)
            number of softmax in the mixture

        """
        super().__init__()

        self.pi_w = MLPEncoder(input_size, k)
        self.softmax = nn.Softmax()
        self.layers = [MLPEncoder(input_size, output_size) for _ in range(k)]
        self.tanh = nn.Tanh()
        self.activation = nn.LogSoftmax() if take_log else nn.Softmax()

    def forward(self, data: Tensor) -> Tensor:
        """Implement mixture of softmax for language modeling.

        Parameters
        ----------
        data: torch.Tensor
            seq_len x batch_size x hidden_size

        Returns
        -------
        out: Tensor
            output matrix of shape seq_len x batch_size x out_size

        """
        w = self.softmax(self.pi_w(data))
        # Compute k softmax, and combine using above weights
        out = [w[:, :, i] * self.tanh(W(data)) for i, W in enumerate(self.layers)]
        out = torch.cat(out, dim=0).sum(dim=0)
        return self.activation(out)
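# Standalone sketch of the mixture-of-softmax idea implemented above (pure torch,
# independent of flambe; shapes and weights below are made up): mix k component
# distributions with learned weights so the result is still a valid distribution.
if __name__ == "__main__":
    import torch.nn.functional as F

    batch, hidden, vocab, k = 4, 16, 10, 3
    h = torch.randn(batch, hidden)
    W_pi = torch.randn(hidden, k)           # mixture-weight projection
    W_out = torch.randn(k, hidden, vocab)   # one output projection per component

    pi = F.softmax(h @ W_pi, dim=-1)                                   # (batch, k)
    comps = F.softmax(torch.einsum('bh,khv->bkv', h, W_out), dim=-1)   # (batch, k, vocab)
    probs = (pi.unsqueeze(-1) * comps).sum(dim=1)                      # (batch, vocab)

    # Each mixture is a convex combination of distributions, so rows sum to one.
    assert torch.allclose(probs.sum(dim=-1), torch.ones(batch))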
<import_stmt>os<import_stmt>shutil<import_stmt>sys<import_from_stmt>osgeo gdal<import_from_stmt>unittest TestCase<import_from_stmt>py4j.java_gateway java_import<import_stmt>gdaltest<import_from_stmt>pymrgeo.instance is_instance_of<import_from_stmt>pymrgeo.mrgeo MrGeo<class_stmt>MrGeoTests(TestCase)<block_start>GENERATE_BASELINE_DATA=<false><line_sep>classname=<none><line_sep>mrgeo=<none><line_sep>gateway=<none><line_sep>_CWD=os.getcwd()<line_sep>_OUTPUT="output"<line_sep>_OUTPUT_HDFS=<none><line_sep>_OUTPUT_BASE="/mrgeo/test-files/output/"<line_sep>_INPUT="testFiles"<line_sep>_INPUT_HDFS=<none><line_sep>_INPUT_BASE="/mrgeo/test-files/"<line_sep>inputdir=<none><line_sep>inputhdfs=<none><line_sep>outputdir=<none><line_sep>outputhdfs=<none><def_stmt>compareraster self raster testname nodata=-9999<block_start><if_stmt>self.GENERATE_BASELINE_DATA<block_start>self.saveraster(raster testname nodata)<block_end><else_stmt># jvm = self.gateway.jvm # test = raster.mapop.toDataset(False) <block_start>testimage=self.outputdir+testname<line_sep>raster.export(testimage singleFile=<true> format="tiff" overridenodata=nodata)<line_sep>testimage<augadd>".tif"<line_sep>test=gdal.Open(testimage)<line_sep>golden=gdal.Open(self.inputdir+testname+".tif")<line_sep># compare as GDAL Datasets. gdaltest.compare_db(self golden test)<line_sep>os.remove(testimage)<block_end><block_end><def_stmt>comparelocalraster self testname<block_start><if_stmt><not>self.GENERATE_BASELINE_DATA<block_start>golden=gdal.Open(self.inputdir+testname+".tif")<line_sep>test=gdal.Open(self.outputdir+testname+".tif")<line_sep># compare as GDAL Datasets. gdaltest.compare_db(self golden test)<block_end><block_end><def_stmt>saveraster self raster testname nodata=-9999<block_start>name=self.inputdir+testname<line_sep>raster.export(name singleFile=<true> format="tiff" overridenodata=nodata)<block_end><def_stmt>savevector self vector testname<block_start>name=self.inputdir+testname+".tsv"<line_sep>vector.save(name)<block_end><def_stmt>comparevector self vector testname<block_start><if_stmt>self.GENERATE_BASELINE_DATA<block_start>self.savevector(vector str(testname))<block_end><else_stmt><block_start>jvm=self.mrgeo._get_jvm()<line_sep># test = raster.mapop.toDataset(False) java_import(jvm "org.mrgeo.hdfs.vector.DelimitedVectorReader")<line_sep>testvector=str(self.outputhdfs+testname+".tsv")<line_sep>vector.ssave(testvector)<line_sep>expectedvector=str(self.inputdir+testname+".tsv")<line_sep>vdp_expected=jvm.DataProviderFactory.getVectorDataProvider(expectedvector jvm.DataProviderFactory.AccessMode.READ jvm.HadoopUtils.createConfiguration())<line_sep>expected_geom_reader=vdp_expected.getVectorReader().get()<line_sep>vdp=jvm.DataProviderFactory.getVectorDataProvider(testvector jvm.DataProviderFactory.AccessMode.READ jvm.HadoopUtils.createConfiguration())<line_sep>self.assertTrue(vdp<is><not><none>)<line_sep>vector_reader=vdp.getVectorReader()<line_sep>self.assertTrue(vector_reader<is><not><none>)<line_sep>self.assertTrue(is_instance_of(self.mrgeo.gateway vector_reader jvm.DelimitedVectorReader))<line_sep>self.assertEquals(vdp_expected.getVectorReader().count() vector_reader.count())<line_sep>geom_reader=vector_reader.get()<line_sep>self.assertTrue(geom_reader<is><not><none>)<while_stmt>expected_geom_reader.hasNext()<block_start>expected_geom=expected_geom_reader.next()<line_sep>geom=geom_reader.next()<line_sep>self.assertTrue(geom<is><not><none>)<line_sep>self.assertEquals(expected_geom.type() 
geom.type())<line_sep>self.assertAlmostEquals(float(expected_geom.getAttribute("COST_S")) float(geom.getAttribute("COST_S")) delta=0.001)<line_sep>self.assertAlmostEquals(float(expected_geom.getAttribute("DISTANCE_M")) float(geom.getAttribute("DISTANCE_M")) delta=0.001)<line_sep>self.assertAlmostEquals(float(expected_geom.getAttribute("MINSPEED_MPS")) float(geom.getAttribute("MINSPEED_MPS")) delta=0.001)<line_sep>self.assertAlmostEquals(float(expected_geom.getAttribute("MAXSPEED_MPS")) float(geom.getAttribute("MAXSPEED_MPS")) delta=0.001)<line_sep>self.assertAlmostEquals(float(expected_geom.getAttribute("AVGSPEED_MPS")) float(geom.getAttribute("AVGSPEED_MPS")) delta=0.001)<block_end># Should not be any more geometries in the actual output self.assertFalse(geom_reader.hasNext())<line_sep>jvm.HadoopFileUtils.delete(testvector)<block_end><block_end>@classmethod<def_stmt>copy cls srcfile srcpath=<none> dstpath=<none> dstfile=<none><block_start>jvm=cls.mrgeo._get_jvm()<line_sep>java_import(jvm "org.mrgeo.hdfs.utils.HadoopFileUtils")<line_sep>java_import(jvm "org.apache.hadoop.fs.Path")<if_stmt>srcpath<is><not><none><block_start>src=srcpath<if_stmt><not>src.endswith('/')<block_start>src<augadd>'/'<block_end>src<augadd>srcfile<block_end><else_stmt><block_start>src=srcfile<block_end><if_stmt><not>os.path.exists(src)<block_start><if_stmt>os.path.exists(cls.inputdir+src)<block_start>src=cls.inputdir+src<block_end><block_end><if_stmt><not>os.path.exists(src)<block_start><raise>Exception("Source ("+src+") is not a file or directory")<block_end><if_stmt>dstfile<is><not><none><block_start>dst=dstfile<if_stmt><not>dst.endswith('/')<block_start>dst<augadd>'/'<block_end>dst<augadd>dstfile<if_stmt><not>os.path.isfile(src)<block_start><raise>Exception("Source ("+src+") is must be a file")<block_end><if_stmt>jvm.HadoopFileUtils.exists(dst)<block_start>jvm.HadoopFileUtils.delete(dst)<block_end>jvm.HadoopFileUtils.copyFileToHdfs(src dst)<line_sep><return>dst<block_end><elif_stmt>dstpath<is><not><none><block_start>dst=dstpath<block_end><else_stmt><block_start>dst=cls.inputhdfs<block_end>basefile=os.path.basename(src)<line_sep>dstfile=dst+basefile<if_stmt>jvm.HadoopFileUtils.exists(dstfile)<block_start>jvm.HadoopFileUtils.delete(dstfile)<block_end>jvm.HadoopFileUtils.copyToHdfs(src dst)<line_sep><return>dstfile<block_end>@classmethod<def_stmt>setUpClass cls<block_start>cls.classname=cls.__name__<line_sep># print(cls.classname + " setup") cls.mrgeo=MrGeo()<line_sep># cls.mrgeo = MrGeo(host="localhost", port=12345) # already running, remote mrgeo jvm=cls.mrgeo._get_jvm()<line_sep>java_import(jvm "org.apache.hadoop.conf.Configuration")<line_sep>java_import(jvm "org.apache.hadoop.fs.Path")<line_sep>java_import(jvm "org.mrgeo.data.DataProviderFactory")<line_sep>java_import(jvm "org.mrgeo.data.vector.VectorDataProvider")<line_sep>java_import(jvm "org.mrgeo.data.vector.VectorReader")<line_sep>java_import(jvm "org.mrgeo.hdfs.vector.DelimitedVectorReader")<line_sep>fs=jvm.HadoopFileUtils.getFileSystem()<line_sep>p=jvm.Path(cls._INPUT_BASE).makeQualified(fs)<line_sep>cls._INPUT_HDFS=p<line_sep>p=jvm.Path(cls._OUTPUT_BASE).makeQualified(fs)<line_sep>cls._OUTPUT_HDFS=p<line_sep>basedir=os.getenv('BASEDIR' '.')<line_sep>dirname=os.path.abspath(basedir)<try_stmt><block_start><while_stmt><true><block_start>names=os.listdir(dirname)<if_stmt>cls._INPUT<in>names<block_start><break><block_end>dirname=os.path.abspath(os.path.join(dirname 
os.pardir))<block_end><block_end><except_stmt>OSError<block_start><pass><block_end>basedir=os.path.abspath(dirname)<line_sep>cls.inputdir=os.path.abspath(basedir+'/'+cls._INPUT+"/"+cls.classname)+'/'<line_sep>cls.outputdir=os.path.abspath(basedir+'/'+cls._INPUT+'/'+cls._OUTPUT+"/"+cls.classname)+'/'<line_sep>cls.inputhdfs=jvm.Path(cls._INPUT_HDFS "python/"+cls.classname).makeQualified(fs).toString()+'/'<line_sep>cls.outputhdfs=jvm.Path(cls._OUTPUT_HDFS "python/"+cls.classname).makeQualified(fs).toString()+'/'<if_stmt><not>os.path.exists(cls.inputdir)<block_start>os.makedirs(cls.inputdir)<block_end><if_stmt>os.path.exists(cls.outputdir)<block_start>shutil.rmtree(cls.outputdir ignore_errors=<true>)<block_end><if_stmt><not>os.path.exists(cls.outputdir)<block_start>os.makedirs(cls.outputdir)<block_end>jvm.HadoopFileUtils.create(cls.inputhdfs)<if_stmt>jvm.HadoopFileUtils.exists(cls.outputhdfs)<block_start>jvm.HadoopFileUtils.cleanDirectory(cls.outputhdfs)<block_end>jvm.HadoopFileUtils.create(cls.outputhdfs)<line_sep>jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_IMAGE cls.inputhdfs)<line_sep>jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_VECTOR cls.inputhdfs)<line_sep>jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.ERROR)<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>cls.mrgeo.disconnect()<block_end><def_stmt>setUp self<block_start>self.name=self._testMethodName<line_sep>self._doublebox("Starting" self.classname+":"+self.name)<line_sep>self.mrgeo.usedebug()<line_sep>self.mrgeo.start()<line_sep>jvm=self.mrgeo._get_jvm()<line_sep>jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_IMAGE self.inputhdfs)<line_sep>jvm.MrGeoProperties.getInstance().setProperty(jvm.MrGeoConstants.MRGEO_HDFS_VECTOR self.inputhdfs)<block_end><def_stmt>tearDown self<block_start>self.mrgeo.stop()<line_sep>self._doublebox("Test Finished" self.classname+":"+self.name)<block_end><def_stmt>debug_logging self<block_start>jvm=self.mrgeo._get_jvm()<line_sep>jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.DEBUG)<block_end><def_stmt>info_logging self<block_start>jvm=self.mrgeo._get_jvm()<line_sep>jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.INFO)<block_end><def_stmt>warn_logging self<block_start>jvm=self.mrgeo._get_jvm()<line_sep>jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.WARN)<block_end><def_stmt>error_logging self<block_start>jvm=self.mrgeo._get_jvm()<line_sep>jvm.LoggingUtils.setDefaultLogLevel(jvm.LoggingUtils.ERROR)<block_end>@staticmethod<def_stmt>_doublebox text name<block_start>sys.stdout.flush()<line_sep>width=len(name)<if_stmt>width<l>len(text)<block_start>width=len(text)<block_end>fmt="{:*<"+str(width+4)+"}"<line_sep>print(fmt.format(""))<line_sep>fmt="{:<"+str(width+2)+"}"<line_sep>print(fmt.format("*")+" *")<line_sep>fmt="{:<"+str(width)+"}"<line_sep>print("* "+fmt.format(text)+" *")<line_sep>fmt="{:<"+str(width+2)+"}"<line_sep>print(fmt.format("*")+" *")<line_sep>fmt="{:*<"+str(width+4)+"}"<line_sep>print(fmt.format(""))<line_sep>fmt="{:<"+str(width)+"}"<line_sep>print("* "+fmt.format(name)+" *")<line_sep>fmt="{:*<"+str(width+4)+"}"<line_sep>print(fmt.format(""))<line_sep>print("")<line_sep>sys.stdout.flush()<block_end><block_end><class_stmt>VectorTestExpectation<block_start><def_stmt>__init__ self cost distance minSpeed maxSpeed 
avgSpeed<block_start>self.cost=cost<line_sep>self.distance=distance<line_sep>self.minSpeed=minSpeed<line_sep>self.maxSpeed=maxSpeed<line_sep>self.avgSpeed=avgSpeed<block_end><block_end>
<import_from_stmt>django.apps AppConfig<class_stmt>FastrunnerConfig(AppConfig)<block_start>name='fastrunner'<block_end>
<import_from_stmt>typing Dict<import_from_stmt>botocore.paginate Paginator<class_stmt>ListApps(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>ListBranches(Paginator)<block_start><def_stmt>paginate self appId:str PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>ListDomainAssociations(Paginator)<block_start><def_stmt>paginate self appId:str PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end><class_stmt>ListJobs(Paginator)<block_start><def_stmt>paginate self appId:str branchName:str PaginationConfig:Dict=<none><arrow>Dict<block_start><pass><block_end><block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_stmt>struct<import_stmt>json<import_from_stmt>oci.encryption.internal.utils convert_to_bytes convert_to_str convert_bytes_to_base64_encoded_string convert_base64_encoded_string_to_bytes <import_from_stmt>oci.encryption.internal.models EncryptedDataHeader EncryptedDataHeaderDataEncryptionKey <line_sep>SERIALIZATION_FORMAT_VERSION=0x0001<line_sep>METADATA_KEY_ENCRYPTED_CONTENT_FORMAT="encryptedContentFormat"<line_sep>METADATA_KEY_ENCRYPTED_DATA_KEYS="encryptedDataKeys"<line_sep>METADATA_KEY_IV="iv"<line_sep>METADATA_KEY_ALGORITHM_ID="algorithmId"<line_sep>METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA="additionalAuthenticatedData"<line_sep>ENCRYPTED_DATA_KEY_MASTER_KEY_ID="masterKeyId"<line_sep>ENCRYPTED_DATA_KEY_VAULT_ID="vaultId"<line_sep>ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY="encryptedDataKey"<line_sep>ENCRYPTED_DATA_KEY_REGION="region"<line_sep># docs: https://docs.python.org/3.8/library/struct.html STRUCT_HEADER_FORMAT=(">"# big endian <concat>"H"# serialization format version ID <concat>"I"# JSON metadata length <concat>"{json_metadata_length}s"# JSON metadata )<def_stmt>serialize_header encrypted_data_header<block_start>encrypted_data_keys=[]<for_stmt>encrypted_data_key encrypted_data_header.encrypted_data_keys<block_start>encrypted_data_keys.append({ENCRYPTED_DATA_KEY_MASTER_KEY_ID:encrypted_data_key.master_key_id ENCRYPTED_DATA_KEY_VAULT_ID:encrypted_data_key.vault_id ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY:convert_bytes_to_base64_encoded_string(encrypted_data_key.encrypted_data_key_bytes) ENCRYPTED_DATA_KEY_REGION:encrypted_data_key.region })<block_end>metadata={METADATA_KEY_ENCRYPTED_CONTENT_FORMAT:encrypted_data_header.encrypted_content_format METADATA_KEY_ENCRYPTED_DATA_KEYS:encrypted_data_keys METADATA_KEY_IV:convert_bytes_to_base64_encoded_string(encrypted_data_header.iv_bytes) METADATA_KEY_ALGORITHM_ID:encrypted_data_header.algorithm_id METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA:convert_to_str(encrypted_data_header.additional_authenticated_data_bytes) }<line_sep>json_header_as_string=json.dumps(metadata)<line_sep>header_format=STRUCT_HEADER_FORMAT.format(json_metadata_length=len(json_header_as_string))<line_sep>packed_header=struct.pack(header_format SERIALIZATION_FORMAT_VERSION len(json_header_as_string) convert_to_bytes(json_header_as_string) )<line_sep><return>packed_header<block_end><def_stmt>deserialize_header_from_stream ciphertext_stream<block_start>short_format=">H"<line_sep>short_size_offset=struct.calcsize(short_format)<line_sep>unsigned_int_format=">I"<line_sep>unsigned_int_size_offset=struct.calcsize(unsigned_int_format)<line_sep>offset=0<line_sep># get serialization format version next_content=ciphertext_stream.read(short_size_offset)<line_sep>(serialization_format_version )=struct.unpack_from(short_format next_content offset)<line_sep>offset=offset+short_size_offset<if_stmt>serialization_format_version<ne>SERIALIZATION_FORMAT_VERSION<block_start><raise>ValueError("Could not deserialize header with unrecognized serialization format version: {}".format(serialization_format_version))<block_end># get json metadata length next_content=ciphertext_stream.read(unsigned_int_size_offset)<line_sep>(json_metadata_length 
)=struct.unpack_from(unsigned_int_format next_content)<line_sep>offset=offset+short_size_offset<line_sep># get json metadata chunk_format="{}s".format(json_metadata_length)<line_sep>next_content=ciphertext_stream.read(struct.calcsize(chunk_format))<line_sep>(json_metadata_bytes )=struct.unpack_from(chunk_format next_content)<line_sep>offset=offset+struct.calcsize(chunk_format)<line_sep>json_metadata=convert_to_str(json_metadata_bytes)<try_stmt><block_start>metadata=json.loads(json_metadata)<block_end><except_stmt>ValueError<as>e<block_start><raise>ValueError("Could not parse metadata inside header. Error: {}".format(str(e)))<block_end>required_top_level_keys=[METADATA_KEY_IV METADATA_KEY_ALGORITHM_ID METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA ]<line_sep>required_encrypted_data_key_keys=[ENCRYPTED_DATA_KEY_MASTER_KEY_ID ENCRYPTED_DATA_KEY_VAULT_ID ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY ENCRYPTED_DATA_KEY_REGION ]<line_sep>missing_or_none_top_level_keys=[required_key<for>required_key required_top_level_keys<if>(required_key<not><in>metadata)<or>(metadata.get(required_key <none>)<is><none>)<or>(isinstance(metadata.get(required_key) list)<and>len(metadata.get(required_key))<eq>0)]<if_stmt>missing_or_none_top_level_keys<block_start><raise>ValueError("Invalid header. The following metadata keys must be present and not null: {}.".format(", ".join(missing_or_none_top_level_keys)))<block_end>encrypted_data_keys_raw=metadata.get(METADATA_KEY_ENCRYPTED_DATA_KEYS)<line_sep>encrypted_data_keys=[]<for_stmt>encrypted_data_key_raw encrypted_data_keys_raw<block_start>missing_or_none_dek_keys=[required_key<for>required_key required_encrypted_data_key_keys<if>(required_key<not><in>encrypted_data_key_raw)<or>(encrypted_data_key_raw.get(required_key <none>)<is><none>)]<if_stmt>missing_or_none_dek_keys<block_start><raise>ValueError("Invalid header. The following metadata keys must be present and not null in each encrypted data key: {}.".format(", ".join(missing_or_none_dek_keys)))<block_end>encrypted_data_keys.append(EncryptedDataHeaderDataEncryptionKey(master_key_id=encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_MASTER_KEY_ID) vault_id=encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_VAULT_ID) encrypted_data_key_bytes=convert_base64_encoded_string_to_bytes(encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_ENCRYPTED_DATA_KEY)) region=encrypted_data_key_raw.get(ENCRYPTED_DATA_KEY_REGION) ))<block_end>header=EncryptedDataHeader(encrypted_data_keys=encrypted_data_keys iv_bytes=convert_base64_encoded_string_to_bytes(metadata.get(METADATA_KEY_IV)) algorithm_id=metadata.get(METADATA_KEY_ALGORITHM_ID) additional_authenticated_data_bytes=convert_to_bytes(metadata.get(METADATA_KEY_ADDITIONAL_AUTHENTICATED_DATA)) )<line_sep><return>header<block_end>
<import_from_stmt>functools partial<def_stmt>run_in_executor executor loop func *args **kwargs<block_start><if_stmt>kwargs<block_start>func=partial(func **kwargs)<block_end><return>loop.run_in_executor(executor func *args)<block_end>
<import_stmt>scipy.io<import_stmt>numpy<as>np<import_stmt>os sys pdb pickle<line_sep>######## Mask-RCNN keypoint order ######## # % 1: nose # % 2: left eye # % 3: right eye # % 4: left ear # % 5: right ear # % 6: left shoulder # % 7: right shoulder # % 8: left elbow # % 9: right elbow # % 10: left wrist # % 11: right wrist # % 12: left hip # % 13: right hip # % 14: left knee # % 15: right knee # % 16: left ankle # % 17: right ankle ######## OpenPose keypoint order ######## # MSCOCO Pose part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19] keyNum=18<line_sep>openPose_maskRCNN_trans_dic={0:0 1:<none> 2:6 3:8 4:10 5:5 6:7 7:9 8:12 9:14 10:16 11:11 12:13 13:15 14:1 15:2 16:3 17:4}<def_stmt>mat2dic img_dir pose_mat_path<block_start>pose_mat=scipy.io.loadmat(pose_mat_path)['joint2d']<line_sep>N,_,_=pose_mat.shape<line_sep>img_name_list=sorted(os.listdir(img_dir))<assert_stmt>N<eq>len(img_name_list) 'number of pose and img are different'<line_sep>pose_dic={}<for_stmt>idx,img_name enumerate(img_name_list)<block_start>crs=pose_mat[idx : :]<line_sep>RCV=np.zeros([keyNum 3])<for_stmt>k range(keyNum)<block_start>k_idx=openPose_maskRCNN_trans_dic[k]<if_stmt>k_idx<is><not><none><block_start>c,r=crs[: k_idx]<if_stmt><not>(0<eq>c<and>0<eq>r)<block_start>RCV[k 0]=r<line_sep>RCV[k 1]=c<line_sep>RCV[k 2]=1## 1 means visible, 0 means invisible <block_end><block_end>## Makeup neck keypoint with leftShoulder and rightShoulder r0,c0,v0=RCV[2 :]<line_sep>r1,c1,v1=RCV[5 :]<if_stmt>v0<and>v1<block_start>RCV[1 0]=(r0+r1)/2<line_sep>RCV[1 1]=(c0+c1)/2<line_sep>RCV[1 2]=1<block_end><block_end>pose_dic[img_name]=RCV<block_end>save_path=os.path.join(os.path.dirname(pose_mat_path) os.path.basename(pose_mat_path).split('_')[-1].replace('.mat' '.pickle'))<with_stmt>open(save_path 'w')<as>f<block_start>pickle.dump(pose_dic f)<block_end><block_end>img_dir=''<line_sep>pose_mat_path=''<line_sep>mat2dic(img_dir pose_mat_path)<line_sep>
<def_stmt>next0 A n x<block_start><while_stmt>x<l>n<and>A[x]<ne>0<block_start>x<augadd>1<block_end><return>x<block_end>n=int(input())<line_sep>A=[int(j)<for>j input().split()]<line_sep>b=0<for_stmt>i range(n)<block_start><if_stmt>A[i]<eq>1<block_start>b=next0(A n max(b i))<if_stmt>b<eq>n<block_start><break><block_end>A[i],A[b]=A[b] A[i]<block_end><block_end><for_stmt>i A<block_start>print(i end=" ")<block_end>
<import_stmt>os<import_stmt>numpy<as>np<def_stmt>initialize_pyrngs <block_start><import_from_stmt>gslrandom PyRNG get_omp_num_threads<if_stmt>"OMP_NUM_THREADS"<in>os.environ<block_start>num_threads=os.environ["OMP_NUM_THREADS"]<block_end><else_stmt><block_start>num_threads=get_omp_num_threads()<block_end><assert_stmt>num_threads<g>0<line_sep># Choose random seeds seeds=np.random.randint(2<power>16 size=num_threads)<line_sep><return>[PyRNG(seed)<for>seed seeds]<block_end><def_stmt>convert_discrete_to_continuous S dt# Convert S to continuous time <block_start><import_from_stmt>pybasicbayes.util.general ibincount<line_sep>T=S.shape[0]<times>dt<line_sep>S_ct=dt<times>np.concatenate([ibincount(Sk)<for>Sk S.T]).astype(float)<line_sep>S_ct<augadd>dt<times>np.random.rand(*S_ct.shape)<assert_stmt>np.all(S_ct<l>T)<line_sep>C_ct=np.concatenate([k<times>np.ones(Sk.sum())<for>k,Sk enumerate(S.T)]).astype(int)<line_sep># Sort the data perm=np.argsort(S_ct)<line_sep>S_ct=S_ct[perm]<line_sep>C_ct=C_ct[perm]<line_sep><return>S_ct C_ct T<block_end><def_stmt>convert_continuous_to_discrete S C dt T_min T_max<block_start>bins=np.arange(T_min T_max dt)<if_stmt>bins[-1]<ne>T_max<block_start>bins=np.hstack((bins [T_max]))<block_end>T=bins.size-1<line_sep>K=C.max()+1<line_sep>S_dt=np.zeros((T K))<for_stmt>k range(K)<block_start>S_dt[: k]=np.histogram(S[C<eq>k] bins)[0]<block_end><assert_stmt>S_dt.sum()<eq>len(S)<line_sep><return>S_dt.astype(np.int)<block_end><def_stmt>get_unique_file_name filedir filename<block_start>""" Get a unique filename by appending filename with .x, where x is the next untaken number """<import_stmt>fnmatch<line_sep># Get the number of conflicting log files fnames=os.listdir(filedir)<line_sep>conflicts=fnmatch.filter(fnames "%s*"%filename)<line_sep>nconflicts=len(conflicts)<if_stmt>nconflicts<g>0<block_start>unique_name="%s.%d"%(filename nconflicts+1)<block_end><else_stmt><block_start>unique_name=filename<block_end><return>unique_name<block_end><def_stmt>logistic x lam_max=1.0<block_start><return>lam_max<times>1.0/(1.0+np.exp(-x))<block_end><def_stmt>logit x lam_max=1.0<block_start><return>np.log(x/lam_max)-np.log(1-(x/lam_max))<block_end><def_stmt>sample_nig mu0 lmbda0 alpha0 beta0<block_start>mu0,lmbda0,alpha0,beta0=np.broadcast_arrays(mu0 lmbda0 alpha0 beta0)<line_sep>shp=mu0.shape<assert_stmt>lmbda0.shape<eq>alpha0.shape<eq>beta0.shape<eq>shp<line_sep>tau=np.array(np.random.gamma(alpha0 1./beta0)).reshape(shp)<line_sep>mu=np.array(np.random.normal(mu0 np.sqrt(1./(lmbda0<times>tau)))).reshape(shp)<line_sep><return>mu tau<block_end>
<import_from_stmt>dataviva db<import_from_stmt>dataviva.utils.auto_serialize AutoSerialize<import_from_stmt>dataviva.api.attrs.models Bra Course_sc School<class_stmt>Sc(db.Model AutoSerialize)<block_start>__abstract__=<true><line_sep>year=db.Column(db.Integer(4) primary_key=<true>)<line_sep>age=db.Column(db.Float())<line_sep>classes=db.Column(db.Integer(11))<line_sep>enrolled=db.Column(db.Integer(11))<line_sep>enrolled_growth=db.Column(db.Float())<line_sep>enrolled_growth_5=db.Column(db.Float())<block_end><class_stmt>Yb_sc(Sc)<block_start>__tablename__='sc_yb'<line_sep>bra_id=db.Column(db.String(9) db.ForeignKey(Bra.id) primary_key=<true>)<line_sep>num_schools=db.Column(db.Integer(11))<line_sep>bra_id_len=db.Column(db.Integer(1))<def_stmt>__repr__ self<block_start><return>'<Yb {0}.{1}>'.format(self.year self.bra_id)<block_end><block_end><class_stmt>Ys(Sc)<block_start>__tablename__='sc_ys'<line_sep>school_id=db.Column(db.String(8) db.ForeignKey(School.id) primary_key=<true>)<def_stmt>__repr__ self<block_start><return>'<Ys %d.%s>'%(self.year self.school_id)<block_end><block_end><class_stmt>Ybs(Sc)<block_start>__tablename__='sc_ybs'<line_sep>bra_id=db.Column(db.String(9) db.ForeignKey(Bra.id) primary_key=<true>)<line_sep>school_id=db.Column(db.String(8) db.ForeignKey(School.id) primary_key=<true>)<line_sep>bra_id_len=db.Column(db.Integer(1))<def_stmt>__repr__ self<block_start><return>'<Ybs %d.%s.%s>'%(self.year self.bra_id self.school_id)<block_end><block_end><class_stmt>Ybc_sc(Sc)<block_start>__tablename__='sc_ybc'<line_sep>bra_id=db.Column(db.String(9) db.ForeignKey(Bra.id) primary_key=<true>)<line_sep>course_sc_id=db.Column(db.String(5) db.ForeignKey(Course_sc.id) primary_key=<true>)<line_sep>bra_id_len=db.Column(db.Integer(1))<line_sep>course_sc_id_len=db.Column(db.Integer(1))<def_stmt>__repr__ self<block_start><return>'<Ybc %d.%s.%s>'%(self.year self.bra_id self.course_sc_id)<block_end><block_end><class_stmt>Yc_sc(Sc)<block_start>__tablename__='sc_yc'<line_sep>course_sc_id=db.Column(db.String(5) db.ForeignKey(Course_sc.id) primary_key=<true>)<line_sep>course_sc_id_len=db.Column(db.Integer(1))<def_stmt>__repr__ self<block_start><return>'<Ybc %d.%s>'%(self.year self.course_sc_id)<block_end><block_end><class_stmt>Ysc(Sc)<block_start>__tablename__='sc_ysc'<line_sep>school_id=db.Column(db.String(8) db.ForeignKey(School.id) primary_key=<true>)<line_sep>course_sc_id=db.Column(db.String(5) db.ForeignKey(Course_sc.id) primary_key=<true>)<line_sep>course_sc_id_len=db.Column(db.Integer(1))<def_stmt>__repr__ self<block_start><return>'<Ysc %d.%s>'%(self.year self.school_id)<block_end><block_end><class_stmt>Ybsc(Sc)<block_start>__tablename__='sc_ybsc'<line_sep>bra_id=db.Column(db.String(9) db.ForeignKey(Bra.id) primary_key=<true>)<line_sep>school_id=db.Column(db.String(8) db.ForeignKey(School.id) primary_key=<true>)<line_sep>course_sc_id=db.Column(db.String(5) db.ForeignKey(Course_sc.id) primary_key=<true>)<line_sep>course_sc_id_len=db.Column(db.Integer(1))<line_sep>bra_id_len=db.Column(db.Integer(1))<def_stmt>__repr__ self<block_start><return>'<Ybsc %d.%s.%s.%s>'%(self.year self.bra_id self.school_id self.course_sc_id)<block_end><block_end>
<import_stmt>time random<line_sep>MAX_N=200010<def_stmt>naiveMatching T P<block_start>n=len(T)<line_sep>m=len(P)<line_sep>freq=0<for_stmt>i range(n)<block_start>found=<true><for_stmt>j range(m)<block_start><if_stmt><not>found<block_start><break><block_end><if_stmt>i+j<ge>n<or>P[j]<ne>T[i+j]<block_start>found=<false><block_end><block_end><if_stmt>found<block_start>freq<augadd>1<block_end><block_end><return>freq<block_end>b=[0]<times>MAX_N<def_stmt>kmpPreprocess P<block_start><global>b<line_sep>m=len(P)<line_sep>i,j=0 -1<line_sep>b[0]=-1<while_stmt>i<l>m<block_start><while_stmt>j<ge>0<and>P[i]<ne>P[j]<block_start>j=b[j]<block_end>i<augadd>1<line_sep>j<augadd>1<line_sep>b[i]=j<block_end><block_end><def_stmt>kmpSearch T P<block_start><global>b<line_sep>n=len(T)<line_sep>m=len(P)<line_sep>freq=0<line_sep>i,j=0 0<while_stmt>i<l>n<block_start><while_stmt>j<ge>0<and>T[i]<ne>P[j]<block_start>j=b[j]<block_end>i<augadd>1<line_sep>j<augadd>1<if_stmt>j<eq>m<block_start>freq<augadd>1<line_sep>j=b[j]<block_end><block_end><return>freq<block_end>p=131<line_sep>M=10<power>9+7<line_sep>Pow=[0]<times>MAX_N<line_sep>h=[0]<times>MAX_N<def_stmt>computeRollingHash T<block_start>n=len(T)<line_sep>Pow[0]=1<for_stmt>i range(1 n)<block_start>Pow[i]=(Pow[i-1]<times>p)%M<block_end>h[0]=0<for_stmt>i range(n)<block_start><if_stmt>i<ne>0<block_start>h[i]=h[i-1]<line_sep><block_end>h[i]=(h[i]+(ord(T[i])<times>Pow[i])%M)%M<block_end><block_end><def_stmt>extEuclid a b<block_start>xx,yy=0 1<line_sep>x,y=1 0<while_stmt>b<ne>0<block_start>q=a<floordiv>b<line_sep>a,b=b a%b<line_sep>x,xx=xx x-q<times>xx<line_sep>y,yy=yy y-q<times>yy<block_end><return>a x y<block_end><def_stmt>modInverse b m<block_start>d,x,y=extEuclid(b m)<if_stmt>d<ne>1<block_start><return>-1<block_end><return>(x+m)%m<block_end><def_stmt>hash_fast L R<block_start><if_stmt>L<eq>0<block_start><return>h[R]<block_end>ans=((h[R]-h[L-1])%M+M)%M<line_sep>ans=(ans<times>modInverse(Pow[L] M))%M<line_sep><return>ans<block_end><def_stmt>main <block_start>extreme_limit=100000<line_sep>letters=['A' 'B']<line_sep>T=''.join([random.choice(letters)<for>_ range(extreme_limit-1)])+'B'<line_sep>P=''.join([random.choice(letters)<for>_ range(10)])<line_sep>n=len(T)<line_sep>m=len(P)<line_sep>time.clock()<line_sep>freq=0<line_sep>pos=T.find(P 0)<while_stmt>pos<ne>-1<block_start>freq<augadd>1<line_sep>pos=T.find(P pos+1)<block_end>print('String Library, #match = %d'%freq)<line_sep>print('Runtime =' time.clock() 's')<line_sep>time.clock()<line_sep>print('Naive Matching, #match = %d'%naiveMatching(T P))<line_sep>print('Runtime =' time.clock() 's')<line_sep>time.clock()<line_sep>computeRollingHash(T)<line_sep>hP=0<for_stmt>i range(m)<block_start>hP=(hP+ord(P[i])<times>Pow[i])%M<block_end>freq=0<for_stmt>i range(n-m+1)<block_start><if_stmt>hash_fast(i i+m-1)<eq>hP<block_start>freq<augadd>1<block_end><block_end>print('Rabin-Karp, #match = %d'%freq)<line_sep>print('Runtime =' time.clock() 's')<line_sep>time.clock()<line_sep>kmpPreprocess(P)<line_sep>print('Knuth-Morris-Pratt, #match = %d'%kmpSearch(T P))<line_sep>print('Runtime =' time.clock() 's')<block_end>main()<line_sep>
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_optimizer
from .build import make_lr_scheduler
from .lr_scheduler import WarmupMultiStepLR
<import_stmt>struct<import_stmt>sys<line_sep>f_max=sys.float_info.max<line_sep>print(f_max)<line_sep># 1.7976931348623157e+308 print(struct.pack('>d' f_max))<line_sep># b'\x7f\xef\xff\xff\xff\xff\xff\xff' print(type(struct.pack('>d' f_max)))<line_sep># <class 'bytes'> print(struct.pack('<d' f_max))<line_sep># b'\xff\xff\xff\xff\xff\xff\xef\x7f' print(struct.unpack('>Q' struct.pack('>d' f_max)))<line_sep># (9218868437227405311,) print(type(struct.unpack('>Q' struct.pack('>d' f_max))))<line_sep># <class 'tuple'> print(struct.unpack('>Q' struct.pack('>d' f_max))[0])<line_sep># 9218868437227405311 print(type(struct.unpack('>Q' struct.pack('>d' f_max))[0]))<line_sep># <class 'int'> print(struct.unpack('>d' struct.pack('>d' f_max))[0])<line_sep># 1.7976931348623157e+308 print(hex(struct.unpack('>Q' struct.pack('>d' f_max))[0]))<line_sep># 0x7fefffffffffffff print(type(hex(struct.unpack('>Q' struct.pack('>d' f_max))[0])))<line_sep># <class 'str'> <def_stmt>double_to_hex f<block_start><return>hex(struct.unpack('>Q' struct.pack('>d' f))[0])<block_end>print(double_to_hex(f_max))<line_sep># 0x7fefffffffffffff print(double_to_hex(42.195))<line_sep># 0x404518f5c28f5c29 print(double_to_hex(1e500))<line_sep># 0x7ff0000000000000 print(double_to_hex(1e-500))<line_sep># 0x0 print(int(double_to_hex(f_max) 16))<line_sep># 9218868437227405311 print(bin(int(double_to_hex(f_max) 16)))<line_sep># 0b111111111101111111111111111111111111111111111111111111111111111 print(oct(int(double_to_hex(f_max) 16)))<line_sep># 0o777577777777777777777 <def_stmt>double_to_bin f<block_start><return>bin(struct.unpack('>Q' struct.pack('>d' f))[0])<block_end><def_stmt>double_to_oct f<block_start><return>oct(struct.unpack('>Q' struct.pack('>d' f))[0])<block_end>print(double_to_bin(f_max))<line_sep># 0b111111111101111111111111111111111111111111111111111111111111111 print(double_to_oct(f_max))<line_sep># 0o777577777777777777777 <def_stmt>float_to_hex f<block_start><return>hex(struct.unpack('>I' struct.pack('>f' f))[0])<block_end>print(float_to_hex(42.195))<line_sep># 0x4228c7ae
<import_stmt>os<import_stmt>sys<import_stmt>errno<line_sep>sys.path.append('../../common')<import_from_stmt>env_indigo *<line_sep>indigo=Indigo()<line_sep>indigo.setOption("molfile-saving-skip-date" "1")<if_stmt><not>os.path.exists(joinPathPy("out" __file__))<block_start><try_stmt><block_start>os.makedirs(joinPathPy("out" __file__))<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<ne>errno.EEXIST<block_start><raise><block_end><block_end><block_end>saver=indigo.createFileSaver(joinPathPy("out/rsite.sdf" __file__) "sdf")<line_sep>mol=indigo.loadMolecule("CCNNCN")<line_sep>mol.addRSite("R")<line_sep>mol.addRSite("R")<line_sep>mol.addRSite("R1")<line_sep>mol.addRSite("")<line_sep>a3=mol.addRSite("R3")<line_sep>print(mol.molfile())<line_sep>saver.append(mol)<line_sep>mol.addRSite("R1, R3")<line_sep>print(mol.molfile())<line_sep>saver.append(mol)<line_sep>a3.resetAtom("N")<line_sep>print(mol.molfile())<line_sep>saver.append(mol)<line_sep>a0=mol.getAtom(0)<line_sep>a0.setRSite("R4")<line_sep>print(mol.molfile())<line_sep>saver.append(mol)<line_sep>a1=mol.getAtom(1)<line_sep>a1.resetAtom("O")<line_sep>print(mol.molfile())<line_sep>saver.append(mol)<line_sep>a1.setRSite("R4")<line_sep>a1.highlight()<line_sep>print(mol.molfile())<line_sep>saver.append(mol)<line_sep>mol=indigo.loadMolecule("CCNNCN")<line_sep>print(mol.checkRGroups())<line_sep>mol.addRSite("R1")<line_sep>print(mol.checkRGroups())<line_sep>mol=indigo.loadMolecule(''' Ketcher 12091616232D 1 1.00000 0.00000 0 2 1 0 0 0 999 V2000 13.6750 -5.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 14.5410 -6.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 M APO 1 2 1 M END ''')<line_sep>print(mol.checkRGroups())<line_sep>mol=indigo.loadMolecule('''$MDL REV 1 $MOL $HDR $END HDR $CTAB 2 1 0 0 0 999 V2000 13.6750 -5.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 14.5410 -6.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 M END $END CTAB $RGP 1 $CTAB 2 1 0 0 0 999 V2000 13.3500 -9.9750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 14.2160 -10.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 M END $END CTAB $END RGP $END MOL ''')<line_sep>print(mol.checkRGroups())<line_sep>mol=indigo.loadMolecule('''$MDL REV 1 0209181741 $MOL $HDR Mrv0541 02091817412D $END HDR $CTAB 6 6 0 0 0 0 999 V2000 0.0000 0.8250 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.7145 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 0.7145 -0.4125 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0 -0.0000 -0.8250 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.7145 -0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -0.7145 0.4125 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 3 4 1 0 0 0 0 4 5 1 0 0 0 0 5 6 1 0 0 0 0 1 6 1 0 0 0 0 M LOG 1 1 0 0 M LOG 1 2 0 0 M RGP 2 3 2 6 1 M END $END CTAB $RGP 1 $CTAB 1 0 0 0 0 0 999 V2000 3.8966 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 M END $END CTAB $CTAB 1 0 0 0 0 0 999 V2000 6.2538 -2.4750 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 M END $END CTAB $END RGP $RGP 2 $CTAB 1 0 0 0 0 0 999 V2000 3.8966 -4.9500 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 M END $END CTAB $CTAB 1 0 0 0 0 0 999 V2000 6.2538 -4.9500 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 M END $END CTAB $END RGP $END MOL ''')<line_sep>print(mol.molfile())<line_sep>print(mol.smiles())<line_sep>mol=indigo.loadMolecule('''$MDL REV 1 $MOL $HDR $END HDR $CTAB 8 8 0 0 0 999 V2000 0.1786 1.3406 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0 0.1786 0.5156 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.8931 0.1031 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.8931 -0.7219 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.1786 -1.1344 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.5359 -0.7219 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.5359 0.1031 
0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.2503 0.5156 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 3 4 1 0 0 0 0 4 5 1 0 0 0 0 5 6 1 0 0 0 0 6 7 1 0 0 0 0 2 7 1 0 0 0 0 7 8 1 0 0 0 0 M RGP 2 1 1 8 2 M LOG 1 2 1 1 0,1 M END $END CTAB $RGP 2 $CTAB 1 0 0 0 0 999 V2000 4.0752 -5.2594 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 M END $END CTAB $END RGP $RGP 1 $CTAB 3 2 0 0 0 999 V2000 4.0752 -2.3719 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 4.7897 -2.7844 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 5.5042 -2.3719 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 M END $END CTAB $END RGP $END MOL ''')<line_sep>print(mol.smiles())<line_sep>mol=indigo.loadMolecule('''$MDL REV 1 0212181244 $MOL $HDR Mrv0541 02121812442D $END HDR $CTAB 4 3 0 0 0 0 999 V2000 0.4125 0.7145 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 0.0000 -0.0000 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0 0.4125 -0.7145 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 -0.8250 -0.0000 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 2 4 1 0 0 0 0 M LOG 1 1 0 0 M RGP 1 2 1 M END $END CTAB $RGP 1 $CTAB 7 6 0 0 0 0 999 V2000 3.8304 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 4.5448 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 5.2593 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 5.9738 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 5.9738 -3.7125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 6.6882 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 7.4027 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 3 4 1 0 0 0 0 4 5 1 0 0 0 0 4 6 1 0 0 0 0 6 7 1 0 0 0 0 M APO 2 5 2 7 1 M END $END CTAB $CTAB 7 6 0 0 0 0 999 V2000 10.7100 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 11.4245 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 12.1390 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 12.8535 -2.8875 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 12.8535 -3.7125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 13.5679 -2.4750 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 14.2824 -2.8875 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 3 4 1 0 0 0 0 4 5 1 0 0 0 0 4 6 1 0 0 0 0 6 7 1 0 0 0 0 M APO 2 5 2 7 1 M END $END CTAB $END RGP $END MOL ''')<line_sep>print(mol.smiles())<line_sep>m=indigo.loadMolecule("C1O[*]CO[*]1 |$;;_R2;;;_R1$,RG:_R1={C},{N},_R2={C},{N}|")<line_sep>print(m.molfile())<line_sep>m=indigo.loadMolecule("[*]C1CCCCC1[*] |$_R1;;;;;;;_R2$,RG:_R1={CCC},_R2={N},LOG={_R1:;;>0._R2:_R1;H;0,1}|")<line_sep>print(m.molfile())<line_sep>m=indigo.loadMolecule("|RG:_R1={CCCCCC}|")<line_sep>print(m.molfile())<line_sep>
""" Testing for (Normalized) DCG metric. """<import_from_stmt>. helpers<import_stmt>itertools<import_stmt>numpy<as>np<import_stmt>pyltr<class_stmt>TestDCG(helpers.TestMetric)<block_start><def_stmt>get_metric self<block_start><return>pyltr.metrics.DCG(k=3)<block_end><def_stmt>get_queries_with_values self<block_start><yield>[] 0.0<line_sep><yield>[0] 0.0<line_sep><yield>[1] 1.0<line_sep><yield>[2] 3.0<line_sep><yield>[2 1 0] 3.6309297535714578<line_sep><yield>[0 0 0] 0.0<line_sep><yield>[2 5 1] 23.058822360715183<line_sep><yield>[2 5 1 9] 23.058822360715183<block_end><def_stmt>get_queries self<block_start><for_stmt>i range(0 5)<block_start><for_stmt>tup itertools.product(*([(0 1 2.5)]<times>i))<block_start><yield>np.array(tup)<block_end><block_end><block_end><block_end><class_stmt>TestNDCG(helpers.TestMetric)<block_start><def_stmt>get_metric self<block_start><return>pyltr.metrics.NDCG(k=3)<block_end><def_stmt>get_queries_with_values self<block_start><yield>[] 0.0<line_sep><yield>[0] 0.0<line_sep><yield>[1] 1.0<line_sep><yield>[2] 1.0<line_sep><yield>[2 1 0] 1.0<line_sep><yield>[1 2 0] 0.7967075809905066<line_sep><yield>[0 0 0] 0.0<line_sep><yield>[2 5 1] 0.6905329824556825<line_sep><yield>[2 5 1 9] 0.04333885914794999<line_sep><yield>[3 2 1 1] 1.0<block_end><def_stmt>get_queries self<block_start><for_stmt>i range(0 5)<block_start><for_stmt>tup itertools.product(*([(0 1 2.5)]<times>i))<block_start><yield>np.array(tup)<block_end><block_end><block_end><block_end>
# theme.py """ """<import_stmt>os re<import_stmt>pygame<import_from_stmt>.const *<import_from_stmt>. widget<import_from_stmt>. surface<import_from_stmt>.basic parse_color is_color<line_sep>__file__=os.path.abspath(__file__)<def_stmt>_list_themes dir<block_start>d={}<for_stmt>entry os.listdir(dir)<block_start><if_stmt>os.path.exists(os.path.join(dir entry 'config.txt'))<block_start>d[entry]=os.path.join(dir entry)<block_end><block_end><return>d<block_end><class_stmt>Theme<block_start>"""Theme interface. If you wish to create your own theme, create a class with this interface, and pass it to gui.App via gui.App(theme=MyTheme()). """<def_stmt>__init__ self dirs='default'<block_start>"""Theme constructor. Keyword arguments: dirs -- Name of the theme dir to load a theme from. May be an absolute path to a theme, if pgu is not installed, or if you created your own theme. May include several dirs in a list if data is spread across several themes. Example: theme = gui.Theme("default") theme = gui.Theme(["mytheme","mytheme2"]) """<line_sep>self.config={}<line_sep>self._loaded=[]<line_sep>self.cache={}<line_sep>self._preload(dirs)<line_sep>pygame.font.init()<block_end><def_stmt>_preload self ds<block_start><if_stmt><not>isinstance(ds list)<block_start>ds=[ds]<block_end><for_stmt>d ds<block_start><if_stmt>d<not><in>self._loaded<block_start>self._load(d)<block_end>self._loaded.append(d)<block_end><block_end><def_stmt>_load self name#theme_dir = themes[name] #try to load the local dir, or absolute path <block_start>dnames=[name]<line_sep>#if the package isn't installed and people are just #trying out the scripts or examples dnames.append(os.path.join(os.path.dirname(__file__) ".." ".." "data" "themes" name))<line_sep>#if the package is installed, and the package is installed #in /usr/lib/python2.3/site-packages/pgu/ #or c:\python23\lib\site-packages\pgu\ #the data is in ... lib/../share/ ... dnames.append(os.path.join(os.path.dirname(__file__) ".." ".." ".." ".." "share" "pgu" "themes" name))<line_sep>dnames.append(os.path.join(os.path.dirname(__file__) ".." ".." ".." ".." ".." "share" "pgu" "themes" name))<line_sep>dnames.append(os.path.join(os.path.dirname(__file__) ".." ".." 
"share" "pgu" "themes" name))<for_stmt>dname dnames<block_start><if_stmt>os.path.isdir(dname)<block_start><break><block_end><block_end><if_stmt><not>os.path.isdir(dname)<block_start><raise>Exception('could not find theme '+name)<block_end>fname=os.path.join(dname "config.txt")<if_stmt>os.path.isfile(fname)<block_start><try_stmt><block_start>f=open(fname)<for_stmt>line f.readlines()<block_start>args=line.strip().split()<if_stmt>len(args)<l>3<block_start><continue><block_end>pcls=""<line_sep>(cls attr vals)=(args[0] args[1] args[2:])<if_stmt>(":"<in>cls)<block_start>(cls pcls)=cls.split(":")<block_end>self.config[cls pcls attr]=(dname vals)<block_end><block_end><finally_stmt><block_start>f.close()<block_end><block_end>fname=os.path.join(dname "style.ini")<if_stmt>os.path.isfile(fname)<block_start><import_stmt>ConfigParser<line_sep>cfg=ConfigParser.ConfigParser()<line_sep>f=open(fname 'r')<line_sep>cfg.readfp(f)<for_stmt>section cfg.sections()<block_start>cls=section<line_sep>pcls=''<if_stmt>cls.find(":")<ge>0<block_start>cls,pcls=cls.split(":")<block_end><for_stmt>attr cfg.options(section)<block_start>vals=cfg.get(section attr).strip().split()<line_sep>self.config[cls pcls attr]=(dname vals)<block_end><block_end><block_end><block_end>image_extensions=(".gif" ".jpg" ".bmp" ".png" ".tga")<def_stmt>_get self cls pcls attr<block_start>key=(cls pcls attr)<if_stmt><not>key<in>self.config<block_start><return><block_end><if_stmt>key<in>self.cache# This property is already in the cache <block_start><return>self.cache[key]<block_end>(dname vals)=self.config[key]<if_stmt>(os.path.splitext(vals[0].lower())[1]<in>self.image_extensions)# This is an image attribute <block_start>v=pygame.image.load(os.path.join(dname vals[0]))<block_end><elif_stmt>(attr<eq>"color"<or>attr<eq>"background")# This is a color value <block_start>v=parse_color(vals[0])<block_end><elif_stmt>(attr<eq>"font")# This is a font value <block_start>name=vals[0]<line_sep>size=int(vals[1])<if_stmt>(name.endswith(".ttf"))# Load the font from a file <block_start>v=pygame.font.Font(os.path.join(dname name) size)<block_end><else_stmt># Must be a system font <block_start>v=pygame.font.SysFont(name size)<block_end><block_end><else_stmt><block_start><try_stmt><block_start>v=int(vals[0])<block_end><except_stmt><block_start>v=vals[0]<block_end><block_end>self.cache[key]=v<line_sep><return>v<block_end><def_stmt>get self cls pcls attr<block_start>"""Interface method -- get the value of a style attribute. Arguments: cls -- class, for example "checkbox", "button", etc. pcls -- pseudo class, for example "hover", "down", etc. attr -- attribute, for example "image", "background", "font", "color", etc. 
This method is called from [[gui-style]] """<if_stmt><not>self._loaded# Load the default theme <block_start>self._preload("default")<block_end>o=(cls pcls attr)<line_sep>#if o in self.cache: # return self.cache[o] v=self._get(cls pcls attr)<if_stmt>v#self.cache[o] = v <block_start><return>v<block_end>v=self._get(cls "" attr)<if_stmt>v#self.cache[o] = v <block_start><return>v<block_end>v=self._get("default" "" attr)<if_stmt>v#self.cache[o] = v <block_start><return>v<block_end>self.cache[o]=0<line_sep><return>0<block_end><def_stmt>box self w s<block_start>style=w.style<line_sep>c=(0 0 0)<if_stmt>style.border_color<ne>0<block_start>c=style.border_color<block_end>w,h=s.get_width() s.get_height()<line_sep>s.fill(c (0 0 w style.border_top))<line_sep>s.fill(c (0 h-style.border_bottom w style.border_bottom))<line_sep>s.fill(c (0 0 style.border_left h))<line_sep>s.fill(c (w-style.border_right 0 style.border_right h))<block_end><def_stmt>getspacing self w# return the top, right, bottom, left spacing around the widget <block_start><if_stmt><not>hasattr(w '_spacing')#HACK: assume spacing doesn't change re pcls <block_start>s=w.style<line_sep>xt=s.margin_top+s.border_top+s.padding_top<line_sep>xr=s.padding_right+s.border_right+s.margin_right<line_sep>xb=s.padding_bottom+s.border_bottom+s.margin_bottom<line_sep>xl=s.margin_left+s.border_left+s.padding_left<line_sep>w._spacing=xt xr xb xl<block_end><return>w._spacing<block_end><def_stmt>resize self w m# Returns the rectangle expanded in each direction <block_start><def_stmt>expand_rect rect left top right bottom<block_start><return>pygame.Rect(rect.x-left rect.y-top rect.w+left+right rect.h+top+bottom)<block_end><def_stmt>func width=<none> height=<none><block_start>s=w.style<line_sep>pt,pr,pb,pl=(s.padding_top s.padding_right s.padding_bottom s.padding_left)<line_sep>bt,br,bb,bl=(s.border_top s.border_right s.border_bottom s.border_left)<line_sep>mt,mr,mb,ml=(s.margin_top s.margin_right s.margin_bottom s.margin_left)<line_sep># Calculate the total space on each side top=pt+bt+mt<line_sep>right=pr+br+mr<line_sep>bottom=pb+bb+mb<line_sep>left=pl+bl+ml<line_sep>ttw=left+right<line_sep>tth=top+bottom<line_sep>ww,hh=<none> <none><if_stmt>width<ne><none><block_start>ww=width-ttw<block_end><if_stmt>height<ne><none><block_start>hh=height-tth<block_end>ww,hh=m(ww hh)<if_stmt>width<eq><none><block_start>width=ww<block_end><if_stmt>height<eq><none><block_start>height=hh<block_end>#if the widget hasn't respected the style.width, #style height, we'll add in the space for it... width=max(width-ttw ww w.style.width)<line_sep>height=max(height-tth hh w.style.height)<line_sep>#width = max(ww,w.style.width-tw) #height = max(hh,w.style.height-th) r=pygame.Rect(left top width height)<line_sep>w._rect_padding=expand_rect(r pl pt pr pb)<line_sep>w._rect_border=expand_rect(w._rect_padding bl bt br bb)<line_sep>w._rect_margin=expand_rect(w._rect_border ml mt mr mb)<line_sep># align it within it's zone of power. 
rect=pygame.Rect(left top ww hh)<line_sep>dx=width-rect.w<line_sep>dy=height-rect.h<line_sep>rect.x<augadd>(w.style.align+1)<times>dx/2<line_sep>rect.y<augadd>(w.style.valign+1)<times>dy/2<line_sep>w._rect_content=rect<line_sep><return>(w._rect_margin.w w._rect_margin.h)<block_end><return>func<block_end><def_stmt>paint self w m<block_start><def_stmt>func s# if w.disabled: # if not hasattr(w,'_disabled_bkgr'): # w._disabled_bkgr = s.convert() # orig = s # s = w._disabled_bkgr.convert() # if not hasattr(w,'_theme_paint_bkgr'): # w._theme_paint_bkgr = s.convert() # else: # s.blit(w._theme_paint_bkgr,(0,0)) # # if w.disabled: # orig = s # s = w._theme_paint_bkgr.convert() <block_start><if_stmt>w.disabled<block_start><if_stmt>(<not>(hasattr(w '_theme_bkgr')<and>w._theme_bkgr.get_width()<eq>s.get_width()<and>w._theme_bkgr.get_height()<eq>s.get_height()))<block_start>w._theme_bkgr=s.copy()<block_end>orig=s<line_sep>s=w._theme_bkgr<line_sep>s.fill((0 0 0 0))<line_sep>s.blit(orig (0 0))<block_end><if_stmt>w.background<block_start>w.background.paint(surface.subsurface(s w._rect_border))<block_end>self.box(w surface.subsurface(s w._rect_border))<line_sep>r=m(surface.subsurface(s w._rect_content))<if_stmt>w.disabled<block_start>s.set_alpha(128)<line_sep>orig.blit(s (0 0))<block_end># if w.disabled: # orig.blit(w._disabled_bkgr,(0,0)) # s.set_alpha(128) # orig.blit(s,(0,0)) w._painted=<true><line_sep><return>r<block_end><return>func<block_end><def_stmt>event self w m<block_start><def_stmt>func e<block_start>rect=w._rect_content<if_stmt>(<not>rect)# This should never be the case, but it sometimes happens that _rect_content isn't # set before a mouse event is received. In this case we'll ignore the event. <block_start><return>m(e)<block_end><if_stmt>e.type<eq>MOUSEBUTTONUP<or>e.type<eq>MOUSEBUTTONDOWN<block_start>sub=pygame.event.Event(e.type {'button':e.button 'pos':(e.pos[0]-rect.x e.pos[1]-rect.y)})<block_end><elif_stmt>e.type<eq>CLICK<block_start>sub=pygame.event.Event(e.type {'button':e.button 'pos':(e.pos[0]-rect.x e.pos[1]-rect.y)})<block_end><elif_stmt>e.type<eq>MOUSEMOTION<block_start>sub=pygame.event.Event(e.type {'buttons':e.buttons 'pos':(e.pos[0]-rect.x e.pos[1]-rect.y) 'rel':e.rel})<block_end><else_stmt><block_start>sub=e<block_end><return>m(sub)<block_end><return>func<block_end><def_stmt>update self w m<block_start><def_stmt>func s<block_start><if_stmt>w.disabled<block_start><return>[]<block_end>r=m(surface.subsurface(s w._rect_content))<if_stmt>type(r)<eq>list<block_start>dx,dy=w._rect_content.topleft<for_stmt>rr r<block_start>rr.x,rr.y=rr.x+dx rr.y+dy<block_end><block_end><return>r<block_end><return>func<block_end><def_stmt>open self w m<block_start><def_stmt>func widget=<none> x=<none> y=<none><block_start><if_stmt><not>hasattr(w '_rect_content')# HACK: so that container.open won't resize again! <block_start>w.rect.w,w.rect.h=w.resize()<block_end>rect=w._rect_content<line_sep>##print w.__class__.__name__, rect <if_stmt>x<ne><none><block_start>x<augadd>rect.x<block_end><if_stmt>y<ne><none><block_start>y<augadd>rect.y<block_end><return>m(widget x y)<block_end><return>func<block_end>#def open(self,w,m): # def func(widget=None): # return m(widget) # return func <def_stmt>decorate self widget level<block_start>"""Interface method -- decorate a widget. The theme system is given the opportunity to decorate a widget methods at the end of the Widget initializer. 
Arguments: widget -- the widget to be decorated level -- the amount of decoration to do, False for none, True for normal amount, 'app' for special treatment of App objects. """<line_sep>w=widget<if_stmt>level<eq><false><block_start><return><block_end><if_stmt>type(w.style.background)<ne>int<block_start>w.background=Background(w self)<block_end><if_stmt>level<eq>'app'<block_start><return><block_end><for_stmt>k,v list(w.style.__dict__.items())<block_start><if_stmt>k<in>('border' 'margin' 'padding')<block_start><for_stmt>kk ('top' 'bottom' 'left' 'right')<block_start>setattr(w.style '%s_%s'%(k kk) v)<block_end><block_end><block_end>w.paint=self.paint(w w.paint)<line_sep>w.event=self.event(w w.event)<line_sep>w.update=self.update(w w.update)<line_sep>w.resize=self.resize(w w.resize)<line_sep>w.open=self.open(w w.open)<block_end><def_stmt>render self s box r<block_start>"""Interface method - render a special widget feature. Arguments: s -- a pygame surface box -- box data, a value returned from Theme.get, typically a surface r -- pygame.Rect with the size that the box data should be rendered """<if_stmt>box<eq>0<block_start><return><block_end><if_stmt>is_color(box)<block_start>s.fill(box r)<line_sep><return><block_end>x,y,w,h=r.x r.y r.w r.h<line_sep>ww,hh=int(box.get_width()/3) int(box.get_height()/3)<line_sep>xx,yy=x+w y+h<line_sep>src=pygame.rect.Rect(0 0 ww hh)<line_sep>dest=pygame.rect.Rect(0 0 ww hh)<line_sep>s.set_clip(pygame.Rect(x+ww y+hh w-ww<times>2 h-hh<times>2))<line_sep>src.x,src.y=ww hh<for_stmt>dest.y range(y+hh yy-hh hh)<block_start><for_stmt>dest.x range(x+ww xx-ww ww)<block_start>s.blit(box dest src)<block_end><block_end>s.set_clip(pygame.Rect(x+ww y w-ww<times>3 hh))<line_sep>src.x,src.y,dest.y=ww 0 y<for_stmt>dest.x range(x+ww xx-ww<times>2 ww)<block_start>s.blit(box dest src)<block_end>dest.x=xx-ww<times>2<line_sep>s.set_clip(pygame.Rect(x+ww y w-ww<times>2 hh))<line_sep>s.blit(box dest src)<line_sep>s.set_clip(pygame.Rect(x+ww yy-hh w-ww<times>3 hh))<line_sep>src.x,src.y,dest.y=ww hh<times>2 yy-hh<for_stmt>dest.x range(x+ww xx-ww<times>2 ww)<block_start>s.blit(box dest src)<block_end>dest.x=xx-ww<times>2<line_sep>s.set_clip(pygame.Rect(x+ww yy-hh w-ww<times>2 hh))<line_sep>s.blit(box dest src)<line_sep>s.set_clip(pygame.Rect(x y+hh xx h-hh<times>3))<line_sep>src.y,src.x,dest.x=hh 0 x<for_stmt>dest.y range(y+hh yy-hh<times>2 hh)<block_start>s.blit(box dest src)<block_end>dest.y=yy-hh<times>2<line_sep>s.set_clip(pygame.Rect(x y+hh xx h-hh<times>2))<line_sep>s.blit(box dest src)<line_sep>s.set_clip(pygame.Rect(xx-ww y+hh xx h-hh<times>3))<line_sep>src.y,src.x,dest.x=hh ww<times>2 xx-ww<for_stmt>dest.y range(y+hh yy-hh<times>2 hh)<block_start>s.blit(box dest src)<block_end>dest.y=yy-hh<times>2<line_sep>s.set_clip(pygame.Rect(xx-ww y+hh xx h-hh<times>2))<line_sep>s.blit(box dest src)<line_sep>s.set_clip(s.get_rect())<line_sep>src.x,src.y,dest.x,dest.y=0 0 x y<line_sep>s.blit(box dest src)<line_sep>src.x,src.y,dest.x,dest.y=ww<times>2 0 xx-ww y<line_sep>s.blit(box dest src)<line_sep>src.x,src.y,dest.x,dest.y=0 hh<times>2 x yy-hh<line_sep>s.blit(box dest src)<line_sep>src.x,src.y,dest.x,dest.y=ww<times>2 hh<times>2 xx-ww yy-hh<line_sep>s.blit(box dest src)<block_end><block_end><class_stmt>Background(widget.Widget)<block_start><def_stmt>__init__ self value theme **params<block_start>params['decorate']=<false><line_sep>widget.Widget.__init__(self **params)<line_sep>self.value=value<line_sep>self.theme=theme<block_end><def_stmt>paint self s<block_start>r=pygame.Rect(0 0 
s.get_width() s.get_height())<line_sep>v=self.value.style.background<if_stmt>is_color(v)<block_start>s.fill(v)<block_end><else_stmt><block_start>self.theme.render(s v r)<block_end><block_end><block_end>
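# --- Illustrative sketch (not part of the original pgu module) ---
# Theme.get above resolves a style attribute by cascading from the most
# specific key to the least specific one:
#   (cls, pcls, attr) -> (cls, '', attr) -> ('default', '', attr)
# A minimal, self-contained model of that fallback order, assuming a plain
# dict of (cls, pcls, attr) -> value pairs, looks like this:

def lookup_style(config, cls, pcls, attr, default=0):
    # Return the first matching value, mimicking Theme.get's fallback chain.
    for key in ((cls, pcls, attr), (cls, "", attr), ("default", "", attr)):
        if key in config:
            return config[key]
    return default

# Example: a hovered button falls back to the plain "button" color when no
# hover-specific color is configured; unknown widgets fall back to "default".
styles = {("button", "", "color"): (255, 255, 255),
          ("default", "", "font"): ("Vera.ttf", 12)}
assert lookup_style(styles, "button", "hover", "color") == (255, 255, 255)
assert lookup_style(styles, "checkbox", "", "font") == ("Vera.ttf", 12)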
<import_from_future_stmt> absolute_import division unicode_literals<import_from_stmt>itertools product<import_stmt>numpy<as>np<import_stmt>param<import_from_stmt>matplotlib.patches Wedge Circle<import_from_stmt>matplotlib.collections LineCollection PatchCollection<import_from_stmt>...core.data GridInterface<import_from_stmt>...core.util dimension_sanitizer is_nan<import_from_stmt>...core.spaces HoloMap<import_from_stmt>..mixins HeatMapMixin<import_from_stmt>.element ColorbarPlot<import_from_stmt>.raster QuadMeshPlot<import_from_stmt>.util filter_styles<class_stmt>HeatMapPlot(HeatMapMixin QuadMeshPlot)<block_start>clipping_colors=param.Dict(default={'NaN':'white'} doc=""" Dictionary to specify colors for clipped values, allows setting color for NaN values and for values above and below the min and max value. The min, max or NaN color may specify an RGB(A) color as a color hex string of the form #FFFFFF or #FFFFFFFF or a length 3 or length 4 tuple specifying values in the range 0-1 or a named HTML color.""")<line_sep>padding=param.ClassSelector(default=0 class_=(int float tuple))<line_sep>radial=param.Boolean(default=<false> doc=""" Whether the HeatMap should be radial""")<line_sep>show_values=param.Boolean(default=<false> doc=""" Whether to annotate each pixel with its value.""")<line_sep>xmarks=param.Parameter(default=<none> doc=""" Add separation lines to the heatmap for better readability. By default, does not show any separation lines. If parameter is of type integer, draws the given amount of separations lines spread across heatmap. If parameter is of type list containing integers, show separation lines at given indices. If parameter is of type tuple, draw separation lines at given categorical values. If parameter is of type function, draw separation lines where function returns True for passed heatmap category.""")<line_sep>ymarks=param.Parameter(default=<none> doc=""" Add separation lines to the heatmap for better readability. By default, does not show any separation lines. If parameter is of type integer, draws the given amount of separations lines spread across heatmap. If parameter is of type list containing integers, show separation lines at given indices. If parameter is of type tuple, draw separation lines at given categorical values. If parameter is of type function, draw separation lines where function returns True for passed heatmap category.""")<line_sep>xticks=param.Parameter(default=20 doc=""" Ticks along x-axis/segments specified as an integer, explicit list of ticks or function. If `None`, no ticks are shown.""")<line_sep>yticks=param.Parameter(default=20 doc=""" Ticks along y-axis/annulars specified as an integer, explicit list of ticks or function. 
If `None`, no ticks are shown.""")<line_sep>@classmethod<def_stmt>is_radial cls heatmap<block_start>heatmap=heatmap.last<if>isinstance(heatmap HoloMap)<else>heatmap<line_sep>opts=cls.lookup_options(heatmap 'plot').options<line_sep><return>((any(o<in>opts<for>o ('start_angle' 'radius_inner' 'radius_outer'))<and><not>(opts.get('radial')<eq><false>))<or>opts.get('radial' <false>))<block_end><def_stmt>_annotate_plot self ax annotations<block_start><for_stmt>a self.handles.get('annotations' {}).values()<block_start>a.remove()<block_end>handles={}<for_stmt>plot_coord,text annotations.items()<block_start>handles[plot_coord]=ax.annotate(text xy=plot_coord xycoords='data' horizontalalignment='center' verticalalignment='center')<block_end><return>handles<block_end><def_stmt>_annotate_values self element xvals yvals<block_start>val_dim=element.vdims[0]<line_sep>vals=element.dimension_values(val_dim).flatten()<line_sep>xpos=xvals[:-1]+np.diff(xvals)/2.<line_sep>ypos=yvals[:-1]+np.diff(yvals)/2.<line_sep>plot_coords=product(xpos ypos)<line_sep>annotations={}<for_stmt>plot_coord,v zip(plot_coords vals)<block_start>text='-'<if>is_nan(v)<else>val_dim.pprint_value(v)<line_sep>annotations[plot_coord]=text<block_end><return>annotations<block_end><def_stmt>_compute_ticks self element xvals yvals xfactors yfactors<block_start>xdim,ydim=element.kdims<if_stmt>self.invert_axes<block_start>xdim,ydim=ydim xdim<block_end>opts=self.lookup_options(element 'plot').options<line_sep>xticks=opts.get('xticks')<if_stmt>xticks<is><none><block_start>xpos=xvals[:-1]+np.diff(xvals)/2.<if_stmt><not>xfactors<block_start>xfactors=element.gridded.dimension_values(xdim <false>)<block_end>xlabels=[xdim.pprint_value(k)<for>k xfactors]<line_sep>xticks=list(zip(xpos xlabels))<block_end>yticks=opts.get('yticks')<if_stmt>yticks<is><none><block_start>ypos=yvals[:-1]+np.diff(yvals)/2.<if_stmt><not>yfactors<block_start>yfactors=element.gridded.dimension_values(ydim <false>)<block_end>ylabels=[ydim.pprint_value(k)<for>k yfactors]<line_sep>yticks=list(zip(ypos ylabels))<block_end><return>xticks yticks<block_end><def_stmt>_draw_markers self ax element marks values factors axis='x'<block_start><if_stmt>marks<is><none><or>self.radial<block_start><return><block_end>self.param.warning('Only radial HeatMaps supports marks, to make the'<concat>'HeatMap quads more distinguishable set linewidths'<concat>'to a non-zero value.')<block_end><def_stmt>init_artists self ax plot_args plot_kwargs<block_start>xfactors=plot_kwargs.pop('xfactors')<line_sep>yfactors=plot_kwargs.pop('yfactors')<line_sep>annotations=plot_kwargs.pop('annotations' <none>)<line_sep>prefixes=['annular' 'xmarks' 'ymarks']<line_sep>plot_kwargs={k:v<for>k,v plot_kwargs.items()<if><not>any(p<in>k<for>p prefixes)}<line_sep>artist=ax.pcolormesh(*plot_args **plot_kwargs)<if_stmt>self.show_values<and>annotations<block_start>self.handles['annotations']=self._annotate_plot(ax annotations)<block_end>self._draw_markers(ax self.current_frame self.xmarks plot_args[0] xfactors axis='x')<line_sep>self._draw_markers(ax self.current_frame self.ymarks plot_args[1] yfactors axis='y')<line_sep><return>{'artist':artist}<block_end><def_stmt>get_data self element ranges style<block_start>xdim,ydim=element.kdims<line_sep>aggregate=element.gridded<if_stmt><not>element._unique<block_start>self.param.warning('HeatMap element index is not unique, ensure you '<concat>'aggregate the data before displaying it, e.g. '<concat>'using heatmap.aggregate(function=np.mean). 
'<concat>'Duplicate index values have been dropped.')<block_end>data=aggregate.dimension_values(2 flat=<false>)<line_sep>data=np.ma.array(data mask=np.logical_not(np.isfinite(data)))<if_stmt>self.invert_axes<block_start>xdim,ydim=ydim xdim<line_sep>data=data.T[::-1 ::-1]<block_end>xtype=aggregate.interface.dtype(aggregate xdim)<if_stmt>xtype.kind<in>'SUO'<block_start>xvals=np.arange(data.shape[1]+1)-0.5<block_end><else_stmt><block_start>xvals=aggregate.dimension_values(xdim expanded=<false>)<line_sep>xvals=GridInterface._infer_interval_breaks(xvals)<block_end>ytype=aggregate.interface.dtype(aggregate ydim)<if_stmt>ytype.kind<in>'SUO'<block_start>yvals=np.arange(data.shape[0]+1)-0.5<block_end><else_stmt><block_start>yvals=aggregate.dimension_values(ydim expanded=<false>)<line_sep>yvals=GridInterface._infer_interval_breaks(yvals)<block_end>xfactors=list(ranges.get(xdim.name {}).get('factors' []))<line_sep>yfactors=list(ranges.get(ydim.name {}).get('factors' []))<line_sep>xticks,yticks=self._compute_ticks(element xvals yvals xfactors yfactors)<line_sep>style['xfactors']=xfactors<line_sep>style['yfactors']=yfactors<if_stmt>self.show_values<block_start>style['annotations']=self._annotate_values(element.gridded xvals yvals)<block_end>vdim=element.vdims[0]<line_sep>self._norm_kwargs(element ranges style vdim)<if_stmt>'vmin'<in>style<block_start>style['clim']=style.pop('vmin') style.pop('vmax')<block_end><return>(xvals yvals data) style {'xticks':xticks 'yticks':yticks}<block_end><block_end><class_stmt>RadialHeatMapPlot(ColorbarPlot)<block_start>start_angle=param.Number(default=np.pi/2 doc=""" Define starting angle of the first annulars. By default, beings at 12 o clock.""")<line_sep>max_radius=param.Number(default=0.5 doc=""" Define the maximum radius which is used for the x and y range extents. """)<line_sep>radius_inner=param.Number(default=0.1 bounds=(0 0.5) doc=""" Define the radius fraction of inner, empty space.""")<line_sep>radius_outer=param.Number(default=0.05 bounds=(0 1) doc=""" Define the radius fraction of outer space including the labels.""")<line_sep>radial=param.Boolean(default=<true> doc=""" Whether the HeatMap should be radial""")<line_sep>show_values=param.Boolean(default=<false> doc=""" Whether to annotate each pixel with its value.""")<line_sep>xmarks=param.Parameter(default=<none> doc=""" Add separation lines between segments for better readability. By default, does not show any separation lines. If parameter is of type integer, draws the given amount of separations lines spread across radial heatmap. If parameter is of type list containing integers, show separation lines at given indices. If parameter is of type tuple, draw separation lines at given segment values. If parameter is of type function, draw separation lines where function returns True for passed segment value.""")<line_sep>ymarks=param.Parameter(default=<none> doc=""" Add separation lines between annulars for better readability. By default, does not show any separation lines. If parameter is of type integer, draws the given amount of separations lines spread across radial heatmap. If parameter is of type list containing integers, show separation lines at given indices. If parameter is of type tuple, draw separation lines at given annular values. If parameter is of type function, draw separation lines where function returns True for passed annular value.""")<line_sep>xticks=param.Parameter(default=4 doc=""" Ticks along x-axis/segments specified as an integer, explicit list of ticks or function. 
If `None`, no ticks are shown.""")<line_sep>yticks=param.Parameter(default=4 doc=""" Ticks along y-axis/annulars specified as an integer, explicit list of ticks or function. If `None`, no ticks are shown.""")<line_sep>projection=param.ObjectSelector(default='polar' objects=['polar'])<line_sep>_style_groups=['annular' 'xmarks' 'ymarks']<line_sep>style_opts=['annular_edgecolors' 'annular_linewidth' 'xmarks_linewidth' 'xmarks_edgecolor' 'cmap' 'ymarks_linewidth' 'ymarks_edgecolor']<line_sep>@staticmethod<def_stmt>_map_order_to_ticks start end order reverse=<false><block_start>"""Map elements from given `order` array to bins ranging from `start` to `end`. """<line_sep>size=len(order)<line_sep>bounds=np.linspace(start end size+1)<if_stmt>reverse<block_start>bounds=bounds[::-1]<block_end>mapping=list(zip(bounds[:-1]%(np.pi<times>2) order))<line_sep><return>mapping<block_end>@staticmethod<def_stmt>_compute_separations inner outer angles<block_start>"""Compute x and y positions for separation lines for given angles. """<line_sep><return>[np.array([[a inner] [a outer]])<for>a angles]<block_end>@staticmethod<def_stmt>_get_markers ticks marker<block_start><if_stmt>callable(marker)<block_start>marks=[v<for>v,l ticks<if>marker(l)]<block_end><elif_stmt>isinstance(marker int)<and>marker<block_start>nth_mark=max([np.ceil(len(ticks)/marker).astype(int) 1])<line_sep>marks=[v<for>v,l ticks[::nth_mark]]<block_end><elif_stmt>isinstance(marker tuple)<block_start>marks=[v<for>v,l ticks<if>l<in>marker]<block_end><else_stmt><block_start>marks=[]<block_end><return>marks<block_end>@staticmethod<def_stmt>_get_ticks ticks ticker<block_start><if_stmt>callable(ticker)<block_start>ticks=[(v l)<for>v,l ticks<if>ticker(l)]<block_end><elif_stmt>isinstance(ticker int)<block_start>nth_mark=max([np.ceil(len(ticks)/ticker).astype(int) 1])<line_sep>ticks=ticks[::nth_mark]<block_end><elif_stmt>isinstance(ticker (tuple list))<block_start>nth_mark=max([np.ceil(len(ticks)/len(ticker)).astype(int) 1])<line_sep>ticks=[(v tl)<for>(v l),tl zip(ticks[::nth_mark] ticker)]<block_end><elif_stmt>ticker<block_start>ticks=list(ticker)<block_end><else_stmt><block_start>ticks=[]<block_end><return>ticks<block_end><def_stmt>get_extents self view ranges range_type='combined'<block_start><if_stmt>range_type<eq>'hard'<block_start><return>(np.nan )<times>4<block_end><return>(0 0 np.pi<times>2 self.max_radius+self.radius_outer)<block_end><def_stmt>get_data self element ranges style# dimension labels <block_start>dim_labels=element.dimensions(label=<true>)[:3]<line_sep>x,y,z=[dimension_sanitizer(d)<for>d dim_labels]<if_stmt>self.invert_axes<block_start>x,y=y x<block_end># get raw values aggregate=element.gridded<line_sep>xvals=aggregate.dimension_values(x expanded=<false>)<line_sep>yvals=aggregate.dimension_values(y expanded=<false>)<line_sep>zvals=aggregate.dimension_values(2 flat=<false>)<line_sep># pretty print x and y dimension values if necessary <def_stmt>_pprint dim_label vals<block_start><if_stmt>vals.dtype.kind<not><in>'SU'<block_start>dim=aggregate.get_dimension(dim_label)<line_sep><return>[dim.pprint_value(v)<for>v vals]<block_end><return>vals<block_end>xvals=_pprint(x xvals)<line_sep>yvals=_pprint(y yvals)<line_sep># annular wedges start_angle=self.start_angle<line_sep>end_angle=self.start_angle+2<times>np.pi<line_sep>bins_segment=np.linspace(start_angle end_angle len(xvals)+1)<line_sep>segment_ticks=self._map_order_to_ticks(start_angle end_angle xvals 
<true>)<line_sep>radius_max=0.5<line_sep>radius_min=radius_max<times>self.radius_inner<line_sep>bins_annular=np.linspace(radius_min radius_max len(yvals)+1)<line_sep>radius_ticks=self._map_order_to_ticks(radius_min radius_max yvals)<line_sep>patches=[]<for_stmt>j range(len(yvals))<block_start>ybin=bins_annular[j:j+2]<for_stmt>i range(len(xvals))[::-1]<block_start>xbin=np.rad2deg(bins_segment[i:i+2])<line_sep>width=ybin[1]-ybin[0]<line_sep>wedge=Wedge((0.5 0.5) ybin[1] xbin[0] xbin[1] width)<line_sep>patches.append(wedge)<block_end><block_end>angles=self._get_markers(segment_ticks self.xmarks)<line_sep>xmarks=self._compute_separations(radius_min radius_max angles)<line_sep>radii=self._get_markers(radius_ticks self.ymarks)<line_sep>ymarks=[Circle((0.5 0.5) r)<for>r radii]<line_sep>style['array']=zvals.flatten()<line_sep>self._norm_kwargs(element ranges style element.vdims[0])<if_stmt>'vmin'<in>style<block_start>style['clim']=style.pop('vmin') style.pop('vmax')<block_end>data={'annular':patches 'xseparator':xmarks 'yseparator':ymarks}<line_sep>xticks=self._get_ticks(segment_ticks self.xticks)<if_stmt><not>isinstance(self.xticks int)<block_start>xticks=[(v-((np.pi)/len(xticks)) l)<for>v,l xticks]<block_end>yticks=self._get_ticks(radius_ticks self.yticks)<line_sep>ticks={'xticks':xticks 'yticks':yticks}<line_sep><return>data style ticks<block_end><def_stmt>init_artists self ax plot_args plot_kwargs# Draw edges <block_start>color_opts=['c' 'cmap' 'vmin' 'vmax' 'norm' 'array']<line_sep>groups=[g<for>g self._style_groups<if>g<ne>'annular']<line_sep>edge_opts=filter_styles(plot_kwargs 'annular' groups)<line_sep>annuli=plot_args['annular']<line_sep>edge_opts.pop('interpolation' <none>)<line_sep>annuli=PatchCollection(annuli transform=ax.transAxes **edge_opts)<line_sep>ax.add_collection(annuli)<line_sep>artists={'artist':annuli}<line_sep>paths=plot_args['xseparator']<if_stmt>paths<block_start>groups=[g<for>g self._style_groups<if>g<ne>'xmarks']<line_sep>xmark_opts=filter_styles(plot_kwargs 'xmarks' groups color_opts)<line_sep>xmark_opts.pop('edgecolors' <none>)<line_sep>xseparators=LineCollection(paths **xmark_opts)<line_sep>ax.add_collection(xseparators)<line_sep>artists['xseparator']=xseparators<block_end>paths=plot_args['yseparator']<if_stmt>paths<block_start>groups=[g<for>g self._style_groups<if>g<ne>'ymarks']<line_sep>ymark_opts=filter_styles(plot_kwargs 'ymarks' groups color_opts)<line_sep>ymark_opts.pop('edgecolors' <none>)<line_sep>yseparators=PatchCollection(paths facecolor='none' transform=ax.transAxes **ymark_opts)<line_sep>ax.add_collection(yseparators)<line_sep>artists['yseparator']=yseparators<block_end><return>artists<block_end><block_end>
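# --- Illustrative usage sketch (not part of the original module) ---
# The plot classes above are normally reached through HoloViews' public API
# rather than instantiated directly: an hv.HeatMap element is declared and the
# plot options defined in this file (show_values, xmarks, radial, start_angle,
# xticks, yticks, ...) are set via .opts() on the matplotlib backend. The
# element construction and .opts() calls below are the standard HoloViews API;
# the sample data is made up.

import numpy as np
import holoviews as hv

hv.extension('matplotlib')

days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
data = [(str(hour), day, np.random.rand()) for hour in range(24) for day in days]

# Regular heatmap with in-cell value annotations and a colorbar.
regular = hv.HeatMap(data, kdims=['hour', 'day'], vdims=['value']).opts(
    show_values=True, colorbar=True)

# Radial variant: setting 'radial' (or one of the radial-only options such as
# start_angle) routes the element to RadialHeatMapPlot, per is_radial above.
radial = hv.HeatMap(data, kdims=['hour', 'day'], vdims=['value']).opts(
    radial=True, start_angle=np.pi / 2, xticks=4, yticks=4)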
#! /usr/bin/env python3 # This file is part of the Astrometry.net suite. # Licensed under a 3-clause BSD style license - see LICENSE <import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_stmt>astrometry.util.fits *<import_from_stmt>astrometry.util.starutil_numpy *<import_from_stmt>astrometry.util.find_data_file *<import_from_stmt>os.path basename dirname<import_stmt>numpy<as>np<def_stmt>get_photoobj_filename photoobjdir rr run camcol field<block_start>fn=os.path.join(photoobjdir rr '%i'%run '%i'%camcol 'photoObj-%06i-%i-%04i.fits'%(run camcol field))<line_sep><return>fn<block_end><def_stmt>read_photoobjs_in_wcs wcs margin cols=<none> cutToPrimary=<true> wfn=<none> sdss=<none><block_start>''' Read photoObjs that are inside the given 'wcs', plus 'margin' in degrees. '''<import_stmt>logging<line_sep>log=logging.getLogger('read_photoobjs_in_wcs')<line_sep>ra,dec=wcs.radec_center()<line_sep>rad=wcs.radius()<line_sep>rad<augadd>np.hypot(14. 10.)/2/60.<line_sep># a little extra margin rad<augadd>margin<if_stmt>sdss<is><none><block_start><import_from_stmt>astrometry.sdss DR9<line_sep>sdss=DR9()<block_end><if_stmt>wfn<is><none><block_start>wfn=sdss.filenames.get('window_flist' <none>)<if_stmt>wfn<is><none><block_start>wfn='window_flist.fits'<block_end><block_end><if_stmt><not>os.path.exists(wfn)<block_start>print('File does not exist:' wfn '; downloading...')<line_sep>wfn=sdss.retrieve('window_flist' <none> <none> <none> rerun='xxx')<line_sep>print('Retrieved' wfn)<block_end><else_stmt><block_start>print('Using' wfn)<block_end>print('Searching for run,camcol,fields with radius' rad 'deg')<line_sep>RCF=radec_to_sdss_rcf(ra dec radius=rad<times>60. tablefn=wfn)<line_sep>log.debug('Found %i fields possibly in range'%len(RCF))<line_sep>pixmargin=margin<times>3600./wcs.pixel_scale()<line_sep>W,H=wcs.get_width() wcs.get_height()<line_sep>TT=[]<for_stmt>run,camcol,field,r,d RCF<block_start>log.debug('RCF %i/%i/%i'%(run camcol field))<line_sep>rr=sdss.get_rerun(run field=field)<if_stmt>rr<in>[<none> '157']<block_start>log.debug('Rerun 157')<line_sep><continue><block_end>fn=sdss.retrieve('photoObj' run camcol field rerun=rr)<line_sep>#fn = get_photoobj_filename(rr, run, camcol, field) T=fits_table(fn columns=cols)<if_stmt>T<is><none><block_start>log.debug('read 0 from %s'%fn)<line_sep><continue><block_end>log.debug('read %i from %s'%(len(T) fn))<line_sep># while we're reading it, record its length for later... 
#get_photoobj_length(rr, run, camcol, field) ok,x,y=wcs.radec2pixelxy(T.ra T.dec)<line_sep>x<augsub>1<line_sep>y<augsub>1<line_sep>T.cut((x<g>-pixmargin)<times>(x<l>(W+pixmargin))<times>(y<g>-pixmargin)<times>(y<l>(H+pixmargin)))<if_stmt>cutToPrimary<block_start>T.cut((T.resolve_status&256)<g>0)<line_sep>log.debug('cut to %i within target area and PRIMARY.'%len(T))<block_end><else_stmt><block_start>log.debug('cut to %i within target area.'%len(T))<block_end><if_stmt>len(T)<eq>0<block_start><continue><block_end>TT.append(T)<block_end><if_stmt><not>len(TT)<block_start><return><none><block_end>T=merge_tables(TT)<line_sep><return>T<block_end><class_stmt>RaDecToRcf(object)<block_start><def_stmt>__init__ self tablefn=<none><block_start>self.kd=<none><line_sep>self.sdssxyz=<none><if_stmt>tablefn<is><none><block_start>tablefn=find_data_file('dr7fields.fits')<block_end>self.tab=fits_table(tablefn)<if_stmt>self.tab<is><none><block_start><raise>Exception('Failed to read table of SDSS fields from file: "'+str(tablefn)+'"')<block_end><block_end><def_stmt>__call__ self ra dec spherematch=<true> radius=0 contains=<false><block_start>T=self.tab<line_sep># HACK - magic 13x9 +1 arcmin. <if_stmt>radius<eq>0<block_start>radius=sqrt(14.<power>2+10.<power>2)/2.<block_end>d2=arcmin2distsq(radius)<if_stmt>self.sdssxyz<is><none><block_start>self.sdssxyz=radectoxyz(T.ra T.dec)<block_end><if_stmt><not>spherematch<block_start>rcfs=[]<for_stmt>r,d broadcast(ra dec)<block_start>xyz=radectoxyz(r d)<line_sep>dist2s=sum((xyz-self.sdssxyz)<power>2 axis=1)<line_sep>I=flatnonzero(dist2s<l>d2)<line_sep>rcfs.append(zip(T[I].run T[I].camcol T[I].field T[I].ra T[I].dec))<block_end><block_end><else_stmt><block_start><import_from_stmt>astrometry.libkd spherematch<if_stmt>self.kd<is><none><block_start>self.kd=spherematch.tree_build(self.sdssxyz)<block_end>rds=array([x<for>x broadcast(ra dec)])<line_sep>xyz=radectoxyz(rds[: 0] rds[: 1]).astype(double)<line_sep>kd2=spherematch.tree_build(xyz)<line_sep>notself=<false><line_sep>inds,D=spherematch.trees_match(self.kd kd2 np.sqrt(d2) notself=notself permuted=<true>)<if_stmt>len(inds)<eq>0<block_start><return>[]<block_end>I=np.argsort(D[: 0])<line_sep>inds=inds[I]<line_sep>rcfs=[[]<for>i range(len(rds))]<line_sep>cols=T.columns()<line_sep>gotem=<false><if_stmt>contains<block_start><if_stmt>('ramin'<in>cols<and>'ramax'<in>cols<and>'decmin'<in>cols<and>'decmax'<in>cols)<block_start>gotem=<true><for_stmt>j,i inds<block_start>(r d)=rds[i]<if_stmt>(r<ge>T.ramin[j]<and>r<le>T.ramax[j]<and>d<ge>T.decmin[j]<and>d<le>T.decmax[j])<block_start>rcfs[i].append((T.run[j] T.camcol[j] T.field[j] T.ra[j] T.dec[j]))<block_end>#print '%i fields contain the first query RA,Dec' % len(rcfs[0]) <block_end><block_end><else_stmt><block_start>print('you requested fields *containing* the query RA,Dec,')<line_sep>print('but the fields list file \"%s\" doesn\'t contain RAMIN,RAMAX,DECMIN, and DECMAX columns'%tablefn)<block_end><block_end><if_stmt><not>gotem<block_start><for_stmt>j,i inds<block_start>rcfs[i].append((T.run[j] T.camcol[j] T.field[j] T.ra[j] T.dec[j]))<block_end><block_end><block_end><if_stmt>isscalar(ra)<and>isscalar(dec)<block_start><return>rcfs[0]<block_end><return>rcfs<block_end><block_end># RA,Dec are either scalars or iterables. # Radius is in *arcmin*. sigh. # If scalars, returns a list of (run, camcol, field, ra, dec) tuples, one for each matching field. # If iterable, returns a list containing one list per query (ra,dec) of the same tuple. 
<def_stmt>radec_to_sdss_rcf ra dec spherematch=<true> radius=0 tablefn=<none> contains=<false><block_start>RD=RaDecToRcf(tablefn=tablefn)<line_sep><return>RD(ra dec spherematch=spherematch radius=radius contains=contains)<block_end><def_stmt>OLD_radec_to_sdss_rcf ra dec spherematch=<true> radius=0 tablefn=<none> contains=<false># This file is generated by merging the files "dr7_e.fits", "dr7_g.fits", and "dr7_a.fits", # whose construction is described in http://trac.astrometry.net/browser/trunk/projects/sdss-tests/README # (and in comments below that I didn't notice before writing this) <block_start><if_stmt>tablefn<is><none><block_start>tablefn=find_data_file('dr7fields.fits')<block_end>sdss=table_fields(tablefn)<if_stmt>sdss<is><none><block_start>print('Failed to read table of SDSS fields from file' tablefn)<line_sep><raise>Exception('Failed to read table of SDSS fields from file: "'+str(tablefn)+'"')<block_end>sdssxyz=radectoxyz(sdss.ra sdss.dec)<line_sep>## HACK - magic 13x9 arcmin. <if_stmt>radius<eq>0<block_start>radius=sqrt(13.<power>2+9.<power>2)/2.<block_end>radius2=arcmin2distsq(radius)<if_stmt><not>spherematch<block_start>rcfs=[]<for_stmt>r,d broadcast(ra dec)<block_start>xyz=radectoxyz(r d)<line_sep>dist2s=sum((xyz-sdssxyz)<power>2 axis=1)<line_sep>I=flatnonzero(dist2s<l>radius2)<if_stmt><false><block_start>print('I:' I)<line_sep>print('fields:' sdss[I].run sdss[I].field sdss[I].camcol)<line_sep>print('RA' sdss[I].ra)<line_sep>print('Dec' sdss[I].dec)<block_end>rcfs.append(zip(sdss[I].run sdss[I].camcol sdss[I].field sdss[I].ra sdss[I].dec))<block_end><block_end><else_stmt><block_start><import_from_stmt>astrometry.libkd spherematch<line_sep>rds=array([x<for>x broadcast(ra dec)])<line_sep>xyz=radectoxyz(rds[: 0] rds[: 1]).astype(double)<line_sep>(inds dists)=spherematch.match(xyz sdssxyz sqrt(radius2))<line_sep>#print 'found %i matches' % len(inds) <if_stmt>len(inds)<eq>0<block_start><return>[]<block_end>#print 'inds:', inds.shape I=np.argsort(dists[: 0])<line_sep>#print 'dists:', dists.shape inds=inds[I :]<line_sep>rcfs=[[]<for>i range(len(rds))]<line_sep>cols=sdss.columns()<line_sep>gotem=<false><if_stmt>contains<block_start><if_stmt>'ramin'<in>cols<and>'ramax'<in>cols<and>'decmin'<in>cols<and>'decmax'<in>cols<block_start>gotem=<true><for_stmt>i,j inds<block_start>(r d)=rds[i]<if_stmt>r<ge>sdss.ramin[j]<and>r<le>sdss.ramax[j]<and>d<ge>sdss.decmin[j]<and>d<le>sdss.decmax[j]<block_start>rcfs[i].append((sdss.run[j] sdss.camcol[j] sdss.field[j] sdss.ra[j] sdss.dec[j]))<block_end><block_end>print('%i fields contain the first query RA,Dec'%len(rcfs[0]))<block_end><else_stmt><block_start>print('you requested fields *containing* the query RA,Dec,')<line_sep>print('but the fields list file \"%s\" doesn\'t contain RAMIN,RAMAX,DECMIN, and DECMAX columns'%tablefn)<block_end><block_end><if_stmt><not>gotem<block_start><for_stmt>i,j inds<block_start>rcfs[i].append((sdss.run[j] sdss.camcol[j] sdss.field[j] sdss.ra[j] sdss.dec[j]))<block_end><block_end><block_end><if_stmt>isscalar(ra)<and>isscalar(dec)<block_start><return>rcfs[0]<block_end><return>rcfs<block_end># The field list was created starting with dstn's list of fields in DR7: # fitscopy dr7_e.fits"[col RUN;FIELD;CAMCOL;RA=(RAMIN+RAMAX)/2;DEC=(DECMIN+DECMAX)/2]" e.fits # fitscopy dr7_g.fits"[col RUN;FIELD;CAMCOL;RA=(RAMIN+RAMAX)/2;DEC=(DECMIN+DECMAX)/2]" g.fits # fitscopy dr7_a.fits"[col RUN;FIELD;CAMCOL;RA=(RAMIN+RAMAX)/2;DEC=(DECMIN+DECMAX)/2]" a.fits # tabmerge g.fits e.fits # tabmerge g.fits+1 e.fits+1 # tabmerge a.fits+1 e.fits+1 # 
mv e.fits dr7fields.fits # rm g.fits a.fits ''' cd ~/sdss-tests casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @dr7_ngood.sql casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @dr7_ngood2.sql casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS outputdownloaddelete mydb.goodfields2 /tmp/dr7.fits fitscopy /tmp/dr7.fits"[col RA=(ramin+ramax)/2;DEC=(decmin+decmax)/2;run;field;camcol;ngood;ramin;ramax;decmin;decmax]" dr7fields.fits casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @s82_ngood.sql # Stripe82 has no RunQA table. casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS querywait @s82_ngood2.sql casjobs.py $SDSS_CAS_USER $SDSS_CAS_PASS outputdownloaddelete mydb.s82goodfields2 s82.fits fitscopy s82.fits"[col RA=(ramin+ramax)/2;DEC=(decmin+decmax)/2;run;field;camcol;ngood;ramin;ramax;decmin;decmax]" s82fields.fits '''<def_stmt>main <block_start><import_stmt>sys<import_from_stmt>optparse OptionParser<line_sep>parser=OptionParser(usage='%prog [options] <ra> <dec>')<line_sep>parser.add_option('-f' dest='fields' help='FITS table of fields to use; default is astrometry/data/dr7fields.fits')<line_sep>parser.add_option('-c' dest='contains' action='store_true' help='Print only fields that *contain* the given point; requires RAMIN,RAMAX,DECMIN,DECMAX fields.')<line_sep>parser.add_option('-b' '--bands' dest='bands' help='Retrieve fpCs of the given bands; default "ugriz"')<line_sep>parser.add_option('-t' dest='filetypes' help='Retrieve this file type (fpC, fpM, psField, tsField, tsObj, etc)' action='append' default=['fpC'])<line_sep>parser.add_option('-r' dest='radius' type=float default=15. help='Search radius (arcmin)')<line_sep>parser.set_defaults(fields=<none> contains=<false> bands='ugriz')<line_sep>(opt args)=parser.parse_args()<if_stmt>len(args)<ne>2<block_start>parser.print_help()<line_sep>print()<line_sep>print('Got extra arguments:' args)<line_sep>sys.exit(-1)<block_end># parse RA,Dec. 
<try_stmt><block_start>ra=float(args[0])<block_end><except_stmt>ValueError<block_start>ra=hmsstring2ra(args[0])<block_end><try_stmt><block_start>dec=float(args[1])<block_end><except_stmt>ValueError<block_start>dec=dmsstring2dec(args[1])<block_end>tablefn=<none><if_stmt>opt.fields<is><not><none><block_start><if_stmt>os.path.exists(opt.fields)<block_start>tablefn=opt.fields<block_end><else_stmt><block_start>tablefn=find_data_file(opt.fields)<block_end><if_stmt>tablefn<is><none><block_start>print('Failed to find list of fields:' opt.fields)<line_sep>sys.exit(-1)<block_end><block_end># arcmin radius=opt.radius<line_sep>rcfs=radec_to_sdss_rcf(ra dec radius=radius tablefn=tablefn contains=opt.contains)<line_sep>print('ra,dec' ra dec)<line_sep>print('rcfs:' rcfs)<line_sep>print()<for_stmt>(r c f ra1 dec1) rcfs<block_start>print('%i %i %i (dist: %g arcmin)'%(r c f deg2arcmin(degrees_between(ra dec ra1 dec1))))<block_end>print()<for_stmt>(r c f ra1 dec1) rcfs<block_start>print('http://cas.sdss.org/dr7/en/get/frameByRCFZ.asp?R=%i&C=%i&F=%i&Z=0&submit1=Get+Image'%(r c f))<block_end>print()<for_stmt>(r c f ra1 dec1) rcfs<block_start>print('wget "http://cas.sdss.org/dr7/en/get/frameByRCFZ.asp?R=%i&C=%i&F=%i&Z=0&submit1=Get+Image" -O sdss-%04i-%i-%04i.jpg'%(r c f r c f))<block_end><import_from_stmt>.sdss_das sdss_das_get<for_stmt>(r c f ra1 dec1) rcfs<block_start><for_stmt>t opt.filetypes<block_start><for_stmt>b opt.bands<block_start>R=sdss_das_get(t <none> r c f b)<if_stmt>R<is><false><block_start><continue><block_end><if_stmt>t<eq>'fpC'<block_start>fpc=sdss_filename('fpC' r c f b)<line_sep>os.system('gunzip -cd %s.gz > %s'%(fpc fpc))<line_sep>wcs=Tan(filename=fpc)<line_sep>x,y=wcs.radec2pixelxy(ra dec)<line_sep>x,y=int(x) int(y)<line_sep>os.system('imcopy %s"[%i:%i,%i:%i]" !/tmp/cut-%s'%(fpc max(0 x-100) x+100 max(0 y-100) y+100 fpc))<line_sep>os.system('an-fitstopnm -i /tmp/cut-%s -N 1150 -X 1400 | pnmtopng > cut-%s.png'%(fpc fpc))<line_sep>print('R,C,F' r c f)<line_sep>print('x,y' x y)<block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
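# --- Illustrative usage sketch (not part of the original module) ---
# Besides the command-line front end in main(), the function most callers want
# is radec_to_sdss_rcf(): given an RA,Dec in degrees it returns
# (run, camcol, field, ra, dec) tuples for nearby SDSS fields. The coordinates
# and radius below are arbitrary examples, and the call assumes the bundled
# 'dr7fields.fits' table can be located through find_data_file(), as in main().

example_ra, example_dec = 150.0, 2.2                              # degrees
fields = radec_to_sdss_rcf(example_ra, example_dec, radius=15.)   # radius in arcmin
for run, camcol, field, fra, fdec in fields:
    print('run %i camcol %i field %i at RA,Dec = (%.4f, %.4f)' %
          (run, camcol, field, fra, fdec))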
"""component/menu Dropdown menu which appears on the navigation bar at the top of the screen refactor incoming """<import_from_stmt>typing List<import_from_stmt>dash.development.base_component ComponentMeta<import_stmt>dash_bootstrap_components<as>dbc<import_from_stmt>chime_dash.app.components.base Component<class_stmt>Menu(Component)<block_start>""" """<def_stmt>get_html self<arrow>List[ComponentMeta]<block_start>menu=dbc.DropdownMenu(children=[dbc.DropdownMenuItem("Penn Medicine" header=<true>) dbc.DropdownMenuItem("Predictive Healthcare" href="http://predictivehealthcare.pennmedicine.org/" external_link=<true> target="_blank" ) dbc.DropdownMenuItem("How to Use CHIME" href="https://code-for-philly.gitbook.io/chime/" external_link=<true> target="_blank" ) ] in_navbar=<true> label="Learn More" color="light" right=<true>)<line_sep><return>[menu]<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>## produce ttGenEvent <import_from_stmt>TopQuarkAnalysis.TopEventProducers.sequences.ttGenEvent_cff *<line_sep>## produce event solution <import_from_stmt>TopQuarkAnalysis.TopEventProducers.producers.TtSemiEvtSolProducer_cfi *<line_sep>## make tqaf layer2 tqafLayer2_ttSemiLeptonic_old=cms.Sequence(makeGenEvt<times>solutions)<line_sep>## produce ttGenEvent <import_from_stmt>TopQuarkAnalysis.TopEventProducers.sequences.ttGenEvent_cff *<line_sep>## produce event solution <import_from_stmt>TopQuarkAnalysis.TopEventProducers.producers.TtDilepEvtSolProducer_cfi *<line_sep>## make tqaf layer2 tqafLayer2_ttFullLeptonic_old=cms.Sequence(makeGenEvt<times>solutions)<line_sep>
<import_from_stmt>logging getLogger<import_from_stmt>requests Session Request<import_from_stmt>datetime datetime timezone date timedelta<import_from_stmt>varken.structures LidarrQueue LidarrAlbum<import_from_stmt>varken.helpers hashit connection_handler<class_stmt>LidarrAPI(object)<block_start><def_stmt>__init__ self server dbmanager<block_start>self.dbmanager=dbmanager<line_sep>self.server=server<line_sep># Create session to reduce server web thread load, and globally define pageSize for all requests self.session=Session()<line_sep>self.session.headers={'X-Api-Key':self.server.api_key}<line_sep>self.logger=getLogger()<block_end><def_stmt>__repr__ self<block_start><return>f"<lidarr-{self.server.id}>"<block_end><def_stmt>get_calendar self query="Missing"<block_start>endpoint='/api/v1/calendar'<line_sep>today=str(date.today())<line_sep>last_days=str(date.today()-timedelta(days=self.server.missing_days))<line_sep>future=str(date.today()+timedelta(days=self.server.future_days))<line_sep>now=datetime.now(timezone.utc).astimezone().isoformat()<if_stmt>query<eq>"Missing"<block_start>params={'start':last_days 'end':today}<block_end><else_stmt><block_start>params={'start':today 'end':future}<block_end>influx_payload=[]<line_sep>influx_albums=[]<line_sep>req=self.session.prepare_request(Request('GET' self.server.url+endpoint params=params))<line_sep>get=connection_handler(self.session req self.server.verify_ssl)<if_stmt><not>get<block_start><return><block_end># Iteratively create a list of LidarrAlbum Objects from response json albums=[]<for_stmt>album get<block_start><try_stmt><block_start>albums.append(LidarrAlbum(**album))<block_end><except_stmt>TypeError<as>e<block_start>self.logger.error('TypeError has occurred : %s while creating LidarrAlbum structure for album. Data '<concat>'attempted is: %s' e album)<block_end><block_end># Add Album to missing list if album is not complete <for_stmt>album albums<block_start>percent_of_tracks=album.statistics.get('percentOfTracks' 0)<if_stmt>percent_of_tracks<ne>100<block_start>influx_albums.append((album.title album.releaseDate album.artist['artistName'] album.id percent_of_tracks f"{album.statistics.get('trackFileCount' 0)}/{album.statistics.get('trackCount' 0)}"))<block_end><block_end><for_stmt>title,release_date,artist_name,album_id,percent_complete,complete_count influx_albums<block_start>hash_id=hashit(f'{self.server.id}{title}{album_id}')<line_sep>influx_payload.append({"measurement":"Lidarr" "tags":{"type":query "sonarrId":album_id "server":self.server.id "albumName":title "artistName":artist_name "percentComplete":percent_complete "completeCount":complete_count "releaseDate":release_date} "time":now "fields":{"hash":hash_id}})<block_end>self.dbmanager.write_points(influx_payload)<block_end><def_stmt>get_queue self<block_start>endpoint='/api/v1/queue'<line_sep>now=datetime.now(timezone.utc).astimezone().isoformat()<line_sep>influx_payload=[]<line_sep>params={'pageSize':1000}<line_sep>req=self.session.prepare_request(Request('GET' self.server.url+endpoint params=params))<line_sep>get=connection_handler(self.session req self.server.verify_ssl)<if_stmt><not>get<block_start><return><block_end>queue=[]<for_stmt>song get['records']<block_start><try_stmt><block_start>queue.append(LidarrQueue(**song))<block_end><except_stmt>TypeError<as>e<block_start>self.logger.error('TypeError has occurred : %s while creating LidarrQueue structure for show. 
Data '<concat>'attempted is: %s' e song)<block_end><block_end><if_stmt><not>queue<block_start><return><block_end><for_stmt>song queue<block_start><if_stmt>song.protocol.upper()<eq>'USENET'<block_start>protocol_id=1<block_end><else_stmt><block_start>protocol_id=0<block_end>hash_id=hashit(f'{self.server.id}{song.title}{song.artistId}')<line_sep>influx_payload.append({"measurement":"Lidarr" "tags":{"type":"Queue" "id":song.id "server":self.server.id "title":song.title "quality":song.quality['quality']['name'] "protocol":song.protocol "protocol_id":protocol_id "indexer":song.indexer} "time":now "fields":{"hash":hash_id}})<block_end>self.dbmanager.write_points(influx_payload)<block_end><block_end>
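# --- Illustrative usage sketch (not part of the original module) ---
# LidarrAPI needs two collaborators: a server record exposing the attributes
# read above (id, url, api_key, verify_ssl, missing_days, future_days) and a
# dbmanager with a write_points() method. The stand-ins below are hypothetical
# and only show the call pattern; in Varken these objects come from its own
# configuration and InfluxDB layers.

from collections import namedtuple

FakeServer = namedtuple('FakeServer', 'id url api_key verify_ssl missing_days future_days')

class PrintingDBManager:
    def write_points(self, payload):
        print('would write %d points' % len(payload))

server = FakeServer(id=1, url='http://localhost:8686', api_key='xxxx',
                    verify_ssl=False, missing_days=30, future_days=30)
lidarr = LidarrAPI(server, PrintingDBManager())
lidarr.get_calendar(query="Missing")   # incomplete albums from the last 30 days
lidarr.get_calendar(query="Future")    # upcoming albums in the next 30 days
lidarr.get_queue()                     # current download queue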
<import_from_future_stmt> print_function absolute_import<import_stmt>os sys subprocess shlex tempfile time sklearn.base math<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>pandas_extensions *<import_from_stmt>ExeEstimator *<class_stmt>LibFFMClassifier(ExeEstimator sklearn.base.ClassifierMixin)<block_start>''' options: -l <lambda>: set regularization parameter (default 0) -k <factor>: set number of latent factors (default 4) -t <iteration>: set number of iterations (default 15) -r <eta>: set learning rate (default 0.1) -s <nr_threads>: set number of threads (default 1) -p <path>: set path to the validation set --quiet: quiet mode (no output) --norm: do instance-wise normalization --no-rand: disable random update `--norm' helps you to do instance-wise normalization. When it is enabled, you can simply assign `1' to `value' in the data. '''<def_stmt>__init__ self columns lambda_v=0 factor=4 iteration=15 eta=0.1 nr_threads=1 quiet=<false> normalize=<none> no_rand=<none><block_start>ExeEstimator.__init__(self)<line_sep>self.columns=columns.tolist()<if>hasattr(columns 'tolist')<else>columns<line_sep>self.lambda_v=lambda_v<line_sep>self.factor=factor<line_sep>self.iteration=iteration<line_sep>self.eta=eta<line_sep>self.nr_threads=nr_threads<line_sep>self.quiet=quiet<line_sep>self.normalize=normalize<line_sep>self.no_rand=no_rand<block_end><def_stmt>fit self X y=<none><block_start><if_stmt>type(X)<is>str<block_start>train_file=X<block_end><else_stmt><block_start><if_stmt><not>hasattr(X 'values')<block_start>X=pd.DataFrame(X columns=self.columns)<block_end>train_file=self.save_reusable('_libffm_train' 'to_libffm' X y)<block_end># self._model_file = self.save_tmp_file(X, '_libffm_model', True) self._model_file=self.tmpfile('_libffm_model')<line_sep>command='utils/lib/ffm-train.exe'+' -l '+repr(self.lambda_v)+' -k '+repr(self.factor)+' -t '+repr(self.iteration)+' -r '+repr(self.eta)+' -s '+repr(self.nr_threads)<if_stmt>self.quiet<block_start>command<augadd>' --quiet'<block_end><if_stmt>self.normalize<block_start>command<augadd>' --norm'<block_end><if_stmt>self.no_rand<block_start>command<augadd>' --no-rand'<block_end>command<augadd>' '+train_file<line_sep>command<augadd>' '+self._model_file<line_sep>running_process=self.make_subprocess(command)<line_sep>self.close_process(running_process)<line_sep><return>self<block_end><def_stmt>predict self X<block_start><if_stmt>type(X)<is>str<block_start>test_file=X<block_end><else_stmt><block_start><if_stmt><not>hasattr(X 'values')<block_start>X=pd.DataFrame(X columns=self.columns)<block_end>test_file=self.save_reusable('_libffm_test' 'to_libffm' X)<block_end>output_file=self.tmpfile('_libffm_predictions')<line_sep>command='utils/lib/ffm-predict.exe '+test_file+' '+self._model_file+' '+output_file<line_sep>running_process=self.make_subprocess(command)<line_sep>self.close_process(running_process)<line_sep>preds=list(self.read_predictions(output_file))<line_sep><return>preds<block_end><def_stmt>predict_proba self X<block_start>predictions=np.asarray([1/(1+math.exp(-p))<for>p self.predict(X)])<line_sep><return>np.vstack([1-predictions predictions]).T<block_end><block_end>
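# --- Illustrative usage sketch (not part of the original module) ---
# LibFFMClassifier shells out to the libffm command-line tools, so a real run
# requires the ffm-train/ffm-predict binaries at the hard-coded 'utils/lib/'
# path plus the ExeEstimator and pandas_extensions ('to_libffm') helpers it
# imports. The toy frame below only shows the intended scikit-learn-style
# call pattern; the column and label values are made up.

import pandas as pd

X = pd.DataFrame({'user': ['u1', 'u2', 'u1', 'u3'],
                  'item': ['i1', 'i1', 'i2', 'i3']})
y = pd.Series([1, 0, 1, 0])

clf = LibFFMClassifier(columns=X.columns, factor=8, iteration=30, eta=0.05,
                       normalize=True, quiet=True)
clf.fit(X, y)
probs = clf.predict_proba(X)   # column 1 holds P(class == 1)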
# # Copyright 2021 Red Hat Inc. # SPDX-License-Identifier: Apache-2.0 # """View for OpenShift Usage Reports."""<import_from_stmt>api.common.permissions.openshift_access OpenShiftAccessPermission<import_from_stmt>api.models Provider<import_from_stmt>api.report.ocp.query_handler OCPReportQueryHandler<import_from_stmt>api.report.ocp.serializers OCPCostQueryParamSerializer<import_from_stmt>api.report.ocp.serializers OCPInventoryQueryParamSerializer<import_from_stmt>api.report.view ReportView<import_from_stmt>reporting.provider.ocp.models OCPStorageVolumeLabelSummary<import_from_stmt>reporting.provider.ocp.models OCPUsagePodLabelSummary<class_stmt>OCPView(ReportView)<block_start>"""OCP Base View."""<line_sep>permission_classes=[OpenShiftAccessPermission]<line_sep>provider=Provider.PROVIDER_OCP<line_sep>serializer=OCPInventoryQueryParamSerializer<line_sep>query_handler=OCPReportQueryHandler<line_sep>tag_handler=[OCPUsagePodLabelSummary OCPStorageVolumeLabelSummary]<block_end><class_stmt>OCPMemoryView(OCPView)<block_start>"""Get OpenShift memory usage data."""<line_sep>report="memory"<block_end><class_stmt>OCPCpuView(OCPView)<block_start>"""Get OpenShift compute usage data."""<line_sep>report="cpu"<block_end><class_stmt>OCPCostView(OCPView)<block_start>"""Get OpenShift cost data."""<line_sep>report="costs"<line_sep>serializer=OCPCostQueryParamSerializer<block_end><class_stmt>OCPVolumeView(OCPView)<block_start>"""Get OpenShift volume usage data."""<line_sep>report="volume"<block_end>
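# --- Illustrative usage sketch (not part of the original module) ---
# Each class above is a thin declarative subclass: it picks a report type,
# serializer, query handler, and tag handlers, and ReportView does the rest.
# They are intended to be exposed through URL routing; assuming ReportView is
# a DRF view exposing as_view(), the wiring would look roughly like the sketch
# below. The paths and route names are examples, not the project's actual
# urls.py.

from django.urls import path

urlpatterns = [
    path("reports/openshift/costs/", OCPCostView.as_view(), name="reports-openshift-costs"),
    path("reports/openshift/memory/", OCPMemoryView.as_view(), name="reports-openshift-memory"),
    path("reports/openshift/compute/", OCPCpuView.as_view(), name="reports-openshift-cpu"),
    path("reports/openshift/volumes/", OCPVolumeView.as_view(), name="reports-openshift-volume"),
]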