# -*- coding=utf-8 -*-
# library: jionlp
# author: dongrixinyu
# license: Apache License 2.0
# Email: <EMAIL>
# github: https://github.com/dongrixinyu/JioNLP
# description: Preprocessing tool for Chinese NLP


def bracket(regular_expression):
    return ''.join([r'(', regular_expression, r')'])


def bracket_absence(regular_expression):
    return ''.join([r'(', regular_expression, r')?'])


def absence(regular_expression):
    return ''.join([regular_expression, r'?'])


def start_end(regular_expression):
    return ''.join([r'^', regular_expression, r'$'])
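# --- Illustrative usage (not part of the original file) ---
# A minimal sketch showing how the combinators above compose into one anchored
# regex; the signed-integer pattern is a made-up example, not one from JioNLP.
import re

signed_int = start_end(bracket_absence(r'[+-]') + bracket(r'\d+'))  # '^([+-])?(\d+)$'
assert re.match(signed_int, '-42')
assert re.match(signed_int, '7')
assert re.match(signed_int, 'abc') is None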
import logging

logger = logging.getLogger('awx.main.migrations')


def remove_scan_type_nodes(apps, schema_editor):
    WorkflowJobTemplateNode = apps.get_model('main', 'WorkflowJobTemplateNode')
    WorkflowJobNode = apps.get_model('main', 'WorkflowJobNode')
    for cls in (WorkflowJobNode, WorkflowJobTemplateNode):
        for node in cls.objects.iterator():
            prompts = node.char_prompts
            if prompts.get('job_type', None) == 'scan':
                log_text = '{} set job_type to scan, which was deprecated in 3.2, removing.'.format(cls)
                if cls == WorkflowJobNode:
                    logger.info(log_text)
                else:
                    logger.debug(log_text)
                prompts.pop('job_type')
                node.char_prompts = prompts
                node.save()


def remove_legacy_fact_cleanup(apps, schema_editor):
    SystemJobTemplate = apps.get_model('main', 'SystemJobTemplate')
    for job in SystemJobTemplate.objects.filter(job_type='cleanup_facts').all():
        for sched in job.schedules.all():
            sched.delete()
        job.delete()
"""Class to store customized UI parameters."""<class_stmt>UIConfig()<block_start>"""Stores customized UI parameters."""<def_stmt>__init__ self description='Description' button_text='Submit' placeholder='Default placeholder' show_example_form=<false><block_start>self.description=description<line_sep>self.button_text=button_text<line_sep>self.placeholder=placeholder<line_sep>self.show_example_form=show_example_form<block_end><def_stmt>get_description self<block_start>"""Returns the input of the example."""<line_sep><return>self.description<block_end><def_stmt>get_button_text self<block_start>"""Returns the intended output of the example."""<line_sep><return>self.button_text<block_end><def_stmt>get_placeholder self<block_start>"""Returns the intended output of the example."""<line_sep><return>self.placeholder<block_end><def_stmt>get_show_example_form self<block_start>"""Returns whether editable example form is shown."""<line_sep><return>self.show_example_form<block_end><def_stmt>json self<block_start>"""Used to send the parameter values to the API."""<line_sep><return>{"description":self.description "button_text":self.button_text "placeholder":self.placeholder "show_example_form":self.show_example_form}<block_end><block_end>
<import_stmt>pytest<import_from_stmt>plenum.common.constants LAST_SENT_PRE_PREPARE<import_from_stmt>plenum.test waits<import_from_stmt>plenum.test.helper sdk_send_batches_of_random assertExp<import_from_stmt>plenum.test.test_node ensureElectionsDone getPrimaryReplica<import_from_stmt>plenum.test.view_change.helper ensure_view_change<import_from_stmt>stp_core.loop.eventually eventually<line_sep>nodeCount=4<line_sep>backup_inst_id=1<line_sep>num_batches_before=3<line_sep>num_batches_after=1<def_stmt>test_node_erases_last_sent_pp_key_on_view_change looper txnPoolNodeSet sdk_pool_handle sdk_wallet_client tconf# Get a node with a backup primary replica <block_start>replica=getPrimaryReplica(txnPoolNodeSet instId=backup_inst_id)<line_sep>node=replica.node<line_sep># Send some 3PC-batches and wait until the replica orders the 3PC-batches sdk_send_batches_of_random(looper txnPoolNodeSet sdk_pool_handle sdk_wallet_client num_reqs=3 num_batches=num_batches_before timeout=tconf.Max3PCBatchWait)<line_sep>looper.run(eventually(<lambda>:assertExp(replica.last_ordered_3pc<eq>(0 3)) retryWait=1 timeout=waits.expectedTransactionExecutionTime(nodeCount)))<line_sep># Ensure that there is a stored last sent PrePrepare key on the node <assert_stmt>LAST_SENT_PRE_PREPARE<in>node.nodeStatusDB<line_sep># Make the pool perform view change ensure_view_change(looper txnPoolNodeSet)<line_sep>ensureElectionsDone(looper txnPoolNodeSet)<line_sep># Verify that the node has erased the stored last sent PrePrepare key <for_stmt>value node.last_sent_pp_store_helper._load_last_sent_pp_key().values()# + 1 it's after view_change <block_start><assert_stmt>value<eq>[node.viewNo 1]<block_end># Send a 3PC-batch and ensure that the replica orders it sdk_send_batches_of_random(looper txnPoolNodeSet sdk_pool_handle sdk_wallet_client num_reqs=1 num_batches=num_batches_after timeout=tconf.Max3PCBatchWait)<line_sep>looper.run(eventually(<lambda>:assertExp(replica.last_ordered_3pc<eq>(1 num_batches_after+1)) retryWait=1 timeout=waits.expectedTransactionExecutionTime(nodeCount)))<block_end>
#
# Copyright (C) 2019 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

verbose = False


def newLine():
    info("")


def critical(msg):
    """ Print critical message to stderr """
    sys.stderr.write(msg)
    sys.stderr.write('\n')


def info(msg):
    infoWithoutNewLine(msg + '\n')


def infoWithoutNewLine(msg):
    if verbose:
        sys.stdout.write(msg)
<import_stmt>logging<import_from_stmt>typing List Text Any Optional Dict<import_from_stmt>rasa_nlu_gao.classifiers INTENT_RANKING_LENGTH<import_from_stmt>rasa.nlu.components Component<import_from_stmt>rasa.nlu.model Metadata<import_from_stmt>rasa.nlu.training_data Message<import_stmt>os<import_stmt>shutil<import_stmt>kashgari<import_from_stmt>kashgari.embeddings BERTEmbedding<import_stmt>kashgari.tasks.classification<as>clf<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>tensorflow.keras.callbacks ModelCheckpoint EarlyStopping ReduceLROnPlateau<line_sep>logger=logging.getLogger(__name__)<class_stmt>KashgariIntentClassifier(Component)<block_start>provides=["intent" "intent_ranking"]<line_sep>defaults={"bert_model_path":<none> "sequence_length":"auto" "layer_nums":4 "trainable":<false> "classifier_model":"BiLSTM_Model" "epochs":10 "batch_size":32 "validation_split":0.2 "patience":5 "factor":0.5 # factor of reduce learning late everytime "verbose":1 "use_cudnn_cell":<false>}<def_stmt>__init__ self component_config=<none> model=<none><block_start>super(KashgariIntentClassifier self).__init__(component_config)<line_sep>bert_model_path=self.component_config.get('bert_model_path')<line_sep>sequence_length=self.component_config.get('sequence_length')<line_sep>layer_nums=self.component_config.get('layer_nums')<line_sep>trainable=self.component_config.get('trainable')<line_sep>use_cudnn_cell=self.component_config.get('use_cudnn_cell')<line_sep>kashgari.config.use_cudnn_cell=use_cudnn_cell<line_sep>self.classifier_model=self.component_config.get('classifier_model')<line_sep>self.bert_embedding=BERTEmbedding(bert_model_path task=kashgari.CLASSIFICATION layer_nums=layer_nums trainable=trainable sequence_length=sequence_length)<line_sep>self.tokenizer=self.bert_embedding.tokenizer<line_sep>self.model=model<block_end><def_stmt>train self training_data cfg **kwargs<block_start>classifier_model=eval("clf."+self.classifier_model)<line_sep>epochs=self.component_config.get('epochs')<line_sep>batch_size=self.component_config.get('batch_size')<line_sep>validation_split=self.component_config.get('validation_split')<line_sep>patience=self.component_config.get('patience')<line_sep>factor=self.component_config.get('factor')<line_sep>verbose=self.component_config.get('verbose')<line_sep>X,Y=[] []<for_stmt>msg training_data.intent_examples<block_start>X.append(self.tokenizer.tokenize(msg.text))<line_sep>Y.append(msg.get('intent'))<block_end>train_x,validate_x,train_y,validate_y=train_test_split(X Y test_size=validation_split random_state=100)<line_sep>self.bert_embedding.processor.add_bos_eos=<false><line_sep>self.model=classifier_model(self.bert_embedding)<line_sep>checkpoint=ModelCheckpoint('intent_weights.h5' monitor='val_loss' save_best_only=<true> save_weights_only=<false> verbose=verbose)<line_sep>early_stopping=EarlyStopping(monitor='val_loss' patience=patience)<line_sep>reduce_lr=ReduceLROnPlateau(monitor='val_loss' factor=factor patience=patience verbose=verbose)<line_sep>self.model.fit(train_x train_y validate_x validate_y epochs=epochs batch_size=batch_size callbacks=[checkpoint early_stopping reduce_lr])<block_end><def_stmt>process self message **kwargs<block_start>intent_ranks=self.get_intent_score(message)<line_sep>intent=intent_ranks[0]<line_sep>message.set("intent" intent add_to_output=<true>)<line_sep>message.set("intent_ranking" intent_ranks add_to_output=<true>)<block_end><def_stmt>get_intent_score self 
message<block_start>intent_top_k=self.model.predict_top_k_class([self.tokenizer.tokenize(message.text)] top_k=INTENT_RANKING_LENGTH)[0]<line_sep>intent_ranks=[{'name':intent_top_k['label'] 'confidence':float(intent_top_k['confidence'])}]<for_stmt>item intent_top_k['candidates']<block_start>intent_ranks.append({'name':item['label'] 'confidence':float(item['confidence'])})<block_end><return>intent_ranks<block_end><def_stmt>persist self file_name:Text model_dir:Text<arrow>Optional[Dict[Text Any]]<block_start>model_path=os.path.join(model_dir file_name)<line_sep>self.model.save(model_path)<line_sep>remove_file=os.path.join(model_path 'model_weights.h5')<line_sep>os.remove(remove_file)<line_sep>shutil.move('intent_weights.h5' model_path)<line_sep>os.rename(os.path.join(model_path 'intent_weights.h5') os.path.join(model_path 'model_weights.h5'))<line_sep><return>{"file":file_name}<block_end>@classmethod<def_stmt>load cls meta:Dict[Text Any] model_dir:Optional[Text]=<none> model_metadata:Optional['Metadata']=<none> cached_component:Optional[Component]=<none> **kwargs:Any<arrow>'KashgariIntentClassifier'<block_start><if_stmt>model_dir<and>meta.get("file")<block_start>file_name=meta.get("file")<line_sep>classifier_model=os.path.join(model_dir file_name)<line_sep>loaded_model=kashgari.utils.load_model(classifier_model)<line_sep><return>cls(component_config=meta model=loaded_model)<block_end><else_stmt><block_start>logger.warning("Failed to load classifier model. Maybe path {} "<concat>"doesn't exist"<concat>"".format(os.path.abspath(model_dir)))<line_sep><return>cls(component_config=meta)<block_end><block_end><block_end>
import pytest

testinfra_hosts = ['clients']


def test_pool_txns_genesis_file_exists(host, pool_txns_path):
    txns_file = host.file(pool_txns_path)
    assert txns_file.exists


def test_perf_processes_can_connect(host, venv_path, pool_txns_path):
    assert host.run(
        "{}/bin/perf_processes.py --test_conn -g {}".format(
            venv_path, pool_txns_path)).rc == 0
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for model_conversion_beam_main."""<import_stmt>os<import_from_stmt>absl flags<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing flagsaver<import_stmt>mock<import_from_stmt>non_semantic_speech_benchmark.export_model model_conversion_beam_main<line_sep>TESTDIR='non_semantic_speech_benchmark/export_model/testdata'<class_stmt>ModelConversionBeamMainTest(absltest.TestCase)<block_start>@mock.patch.object(model_conversion_beam_main.utils 'convert_and_write_model')@flagsaver.flagsaver<def_stmt>test_full_flow self _<block_start>flags.FLAGS.xids=['12321']<line_sep>flags.FLAGS.base_experiment_dir=os.path.join(absltest.get_default_test_srcdir() TESTDIR)<line_sep>flags.FLAGS.output_dir=os.path.join(absltest.get_default_test_tmpdir() 'dummy_out')<line_sep># Frontend args. flags.FLAGS.frame_hop=5<line_sep>flags.FLAGS.frame_width=5<line_sep>flags.FLAGS.num_mel_bins=80<line_sep>flags.FLAGS.n_required=8000<line_sep>model_conversion_beam_main.main(<none>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
<import_stmt>os<import_from_stmt>osp.common config<import_from_stmt>osp.common.utils partitions<import_from_stmt>flask Flask request<import_from_stmt>rq_dashboard RQDashboard<import_from_stmt>pydoc locate<line_sep># RQ dashboard: app=Flask(__name__)<line_sep>RQDashboard(app)<line_sep>@app.route('/ping')<def_stmt>ping <block_start><return>('pong' 200)<block_end>@app.route('/queue' methods=['POST'])<def_stmt>queue <block_start>""" Queue a work order. """<line_sep>config.rq.enqueue(queue_page request.form['model_import'] request.form['job_import'] int(request.form['worker_count']) int(request.form['offset']) timeout=3600 )<line_sep><return>('' 200)<block_end><def_stmt>queue_page model_import job_import worker_count offset<block_start>""" Spool a page of model instances for a job. Args: model_import (str) job_import (str) worker_count (int) offset (int) """<line_sep># Import callables. model=locate(model_import)<line_sep>job=locate(job_import)<for_stmt>row model.page_cursor(worker_count offset)<block_start>config.rq.enqueue(job row.id)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(port=os.getenv('PORT' 5000))<block_end>
from pathlib import Path
from setuptools import setup, Extension
import Cython.Build

# read the contents of your README file
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

version = '0.0.4'

ext = Extension(name="UltraDict", sources=["UltraDict.py"])

setup(
    name='UltraDict',
    version=version,
    description='Synchronized, streaming dictionary that uses shared memory as a backend',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/ronny-rentner/UltraDict',
    cmdclass={'build_ext': Cython.Build.build_ext},
    package_dir={'UltraDict': '.'},
    packages=['UltraDict'],
    zip_safe=False,
    ext_modules=Cython.Build.cythonize(ext, compiler_directives={'language_level': "3"}),
    setup_requires=['cython>=0.24.1'],
    python_requires=">=3.9",
)
<import_stmt>unittest<import_from_stmt>unordered_list UnorderedList<class_stmt>CorrectnessTest(unittest.TestCase)<block_start><def_stmt>test_adding self<block_start>l=UnorderedList()<line_sep>self.assertEqual(l.size() 0)<line_sep>self.assertTrue(l.is_empty())<line_sep>l.add(1)<line_sep>self.assertEqual(l.size() 1)<line_sep>self.assertEqual(l.head.value 1)<line_sep>self.assertFalse(l.is_empty())<line_sep>l.add(2)<line_sep>self.assertEqual(l.size() 2)<line_sep>self.assertEqual(l.head.value 2)<block_end><def_stmt>test_searching self<block_start>l=UnorderedList()<for_stmt>i range(4)<block_start>l.add(i)<block_end><for_stmt>i range(4)<block_start>self.assertTrue(l.search(i))<block_end><for_stmt>item (5 <none> <true> "blah")<block_start>self.assertFalse(l.search(item))<block_end><block_end><def_stmt>test_remove self<block_start>l=UnorderedList()<for_stmt>i range(3)<block_start>l.add(i)<block_end># remove from middle l.remove(1)<line_sep>self.assertFalse(l.search(1))<line_sep>self.assertEqual(l.size() 2)<line_sep># remove from end l.remove(2)<line_sep>self.assertFalse(l.search(2))<line_sep>self.assertEqual(l.size() 1)<line_sep># remove from start l.remove(0)<line_sep>self.assertFalse(l.search(0))<line_sep>self.assertEqual(l.size() 0)<block_end><block_end>
<def_stmt>test_derivatives <block_start><import_from_stmt>..interp SmolyakInterp<import_from_stmt>..grid SmolyakGrid<line_sep>d=5<line_sep>N=100<line_sep>mu=2<line_sep>f=<lambda>x:(x).sum(axis=1)<import_stmt>numpy.random<line_sep>ub=numpy.random.random(d)+6<line_sep>lb=numpy.random.random(d)-5<line_sep>sg=SmolyakGrid(d mu lb=lb ub=ub)<line_sep>values=f(sg.grid)<line_sep>si=SmolyakInterp(sg values)<line_sep>gg=numpy.random.random((N d))<line_sep>res,res_s,res_c,res_x=si.interpolate(gg deriv=<true> deriv_th=<true> deriv_X=<true>)<line_sep>T=sg.grid.shape[0]<assert_stmt>res.shape<eq>(N )<assert_stmt>res_s.shape<eq>(N d)<assert_stmt>res_c.shape<eq>(N T)<assert_stmt>res_x.shape<eq>(N T)<line_sep># res_s should be identically 1 <assert_stmt>abs(res_s-1.0).max()<l>1e-8<line_sep>epsilon=1e-6<line_sep># Test derivatives w.r.t. values si2=SmolyakInterp(sg values)<def_stmt>ff y<block_start>x=y.reshape(values.shape)<line_sep>si2.update_theta(x)<line_sep><return>si2.interpolate(gg).ravel()<block_end>y0=values.ravel()<line_sep>r0=ff(y0)<line_sep>jac=numpy.zeros((len(r0) len(y0)))<for_stmt>n range(len(y0))<block_start>yi=y0.copy()<line_sep>yi[n]<augadd>epsilon<line_sep>jac[: n]=(ff(yi)-r0)/epsilon<block_end>jac=jac.reshape((N T))<assert_stmt>abs(jac-res_x).max()<l>1e-7<line_sep># note that accuracy of either numerical or direct computation is not very accurate # Test derivatives w.r.t. coefficients theta_0=si.theta.copy()<def_stmt>ff_c y_c<block_start>si2.theta=y_c.reshape(theta_0.shape)<line_sep><return>si2.interpolate(gg).ravel()<block_end>r0=ff_c(theta_0)<line_sep>jac=numpy.zeros((len(r0) len(theta_0)))<for_stmt>n range(len(y0))<block_start>ti=theta_0.copy()<line_sep>ti[n]<augadd>epsilon<line_sep>jac[: n]=(ff_c(ti)-r0)/epsilon<block_end>jac=jac.reshape((N T))<assert_stmt>abs(jac-res_c).max()<l>1e-7<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. <import_stmt>os<import_stmt>pytest<import_stmt>torch<line_sep>@[email protected]<def_stmt>test_ddp_extractive_summarization_cnndm_transformers scripts tmp<block_start>ddp_env=os.environ.copy()<line_sep>ddp_env["OMP_NUM_THREADS"]=str(torch.cuda.device_count())<line_sep>ddp_env["KMP_AFFINITY"]="verbose"<line_sep>script=scripts["ddp_bertsumext"]<line_sep>summary_filename="bertsumext_prediction.txt"<import_stmt>subprocess<line_sep>process=subprocess.Popen(["python" script "--data_dir" tmp "--cache_dir" tmp "--output_dir" tmp "--quick_run" "true" "--summary_filename" summary_filename ] env=ddp_env stdout=subprocess.PIPE stderr=subprocess.PIPE )<line_sep>stdout,stderr=process.communicate()<line_sep>print(stdout)<if_stmt>process.returncode<block_start>print(stdout)<line_sep>print(stderr)<assert_stmt><false><block_end><assert_stmt>os.path.exists(os.path.join(tmp summary_filename))<block_end>@pytest.mark.skip(reason="""it takes too long; if the previous test works, and the notebook runs, this should also work.""")@[email protected]<def_stmt>test_ddp_abstractive_summarization_cnndm_transformers scripts tmp<block_start>script=scripts["ddp_bertsumabs"]<line_sep>summary_filename="bertsumabs_prediction.txt"<import_stmt>subprocess<line_sep>process=subprocess.Popen(["python" script "--data_dir" tmp "--cache_dir" tmp "--output_dir" tmp "--quick_run" "true" "--batch_size" "1" "--summary_filename" summary_filename ] stdout=subprocess.PIPE stderr=subprocess.PIPE )<line_sep>stdout,stderr=process.communicate()<line_sep>print(stdout)<if_stmt>process.returncode<block_start>print(stdout)<line_sep>print(stderr)<assert_stmt><false><block_end><assert_stmt>os.path.exists(os.path.join(tmp summary_filename))<block_end>
# script to load SciTE .session files by double-clicking them in Windows Explorer
# the .session file type must be present and its 'open' command associated with:
# "/path/to/python.exe" "/path/to/scite.py" "%1"
# NOTE: /path/to/scite.py MUST be the same as /path/to/scite.exe!
# Example:
# "c:\python\python.exe" "c:\program files\wscite\wscite.py" "%1"

import sys, os, subprocess

# argv[0] is the full path to where this python script was launched from
# it must be in the same directory as the SciTE executable!
# argv[1] is the full path to the scite session file we want to load
script, sessionpath = sys.argv

# this gives us the path to the scite executable
scite = script[:-2] + 'exe'

# this gives us the basename of the session file and the directory it is in
sessiondir, sessionname = os.path.split(sessionpath)

# here we switch to the session file dir and launch scite with just the file name as the loadsession parm
subprocess.Popen([scite, "-loadsession:%s" % sessionname], cwd=sessiondir)

# script ends without waiting for command completion
<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>tensorflow<as>tf<import_from_stmt>fastmri_recon.data.datasets.fastmri_pyfunc_non_cartesian train_nc_kspace_dataset_from_indexable<line_sep>image_shape=[2 640 322 1]<line_sep>af=4<line_sep>us=af/(2/np.pi)<line_sep>image_size=[640 474]<line_sep>kspace_shape=[image_shape[0] 1 640<times>(474<floordiv>af) 1]<line_sep>file_contrast='CORPD_FBK'<line_sep>@pytest.mark.parametrize('ds_kwargs, expected_kspace_shape, orig_shape, use_af' [({} kspace_shape image_shape[-2] <true>) ({'inner_slices':1} [1]+kspace_shape[1:] image_shape[-2] <true>) ({'inner_slices':1 'rand':<true>} [1]+kspace_shape[1:] image_shape[-2] <true>) ({'contrast':file_contrast} kspace_shape image_shape[-2] <true>) ({'n_samples':1} kspace_shape image_shape[-2] <true>) ({} kspace_shape image_shape[-2] <false>) ])<def_stmt>test_train_nc_kspace_dataset_from_indexable create_full_fastmri_test_tmp_dataset ds_kwargs expected_kspace_shape orig_shape use_af <block_start>path=create_full_fastmri_test_tmp_dataset['fastmri_tmp_singlecoil_train']<line_sep>ds=train_nc_kspace_dataset_from_indexable(path image_size af=af<if>use_af<else><none> us=<none><if>use_af<else>us **ds_kwargs )<line_sep>(kspace traj (shape )),image=next(iter(ds))<line_sep># shape verifications <assert_stmt>kspace.shape.as_list()<eq>expected_kspace_shape<assert_stmt>shape.numpy()[0]<eq>orig_shape<assert_stmt>traj.shape.as_list()<eq>[expected_kspace_shape[0] 2 640<times>(474<floordiv>af)]<assert_stmt>image.shape.as_list()<eq>expected_kspace_shape[0:1]+[320 320 1]<block_end><def_stmt>test_spiral_dataset create_full_fastmri_test_tmp_dataset<block_start>path=create_full_fastmri_test_tmp_dataset['fastmri_tmp_singlecoil_train']<line_sep>ds=train_nc_kspace_dataset_from_indexable(path image_size af=af)<line_sep>(kspace traj (shape )),image=next(iter(ds))<line_sep># shape verifications <assert_stmt>kspace.shape.as_list()<eq>kspace_shape<assert_stmt>shape.numpy()[0]<eq>image_shape[-2]<assert_stmt>traj.shape.as_list()<eq>[kspace_shape[0] 2 640<times>(474<floordiv>af)]<assert_stmt>image.shape.as_list()<eq>kspace_shape[0:1]+[320 320 1]<block_end>
<import_stmt>io<import_stmt>pytest<import_from_stmt>pre_commit_dbt.check_script_semicolon check_semicolon<import_from_stmt>pre_commit_dbt.remove_script_semicolon main<line_sep># Input, expected return value, expected output TESTS=((b"foo\n" 0 b"foo\n") (b"" 0 b"") (b"\n\n" 0 b"\n\n") (b"\n\n\n\n" 0 b"\n\n\n\n") (b"foo" 0 b"foo") (b"foo\n;" 1 b"foo\n") (b";" 1 b"") (b";\n\n" 1 b"") (b";\n\n\n\n" 1 b"") (b"foo;" 1 b"foo") (b"\n\n\n\n;" 1 b"\n\n\n\n") (b"\r\r\r\r;" 1 b"\r\r\r\r") (b";foo\n" 0 b";foo\n") )<line_sep>@pytest.mark.parametrize(("input_s" "expected_status_code" "output") TESTS)<def_stmt>test_fix_semicolon input_s expected_status_code output<block_start>file_obj=io.BytesIO(input_s)<line_sep>status_code=check_semicolon(file_obj replace=<true>)<assert_stmt>file_obj.getvalue()<eq>output<assert_stmt>status_code<eq>expected_status_code<block_end><def_stmt>test_fix_semicolon_default <block_start>file_obj=io.BytesIO(b";\n\n")<line_sep>status_code=check_semicolon(file_obj)<assert_stmt>file_obj.getvalue()<eq>b";\n\n"<assert_stmt>status_code<eq>1<block_end>@pytest.mark.parametrize(("input_s" "expected_status_code" "output") TESTS)<def_stmt>test_fix_semicolon_integration input_s expected_status_code output tmpdir<block_start>path=tmpdir.join("file.txt")<line_sep>path.write_binary(input_s)<line_sep>status_code=main([str(path)])<line_sep>file_output=path.read_binary()<assert_stmt>file_output<eq>output<assert_stmt>status_code<eq>expected_status_code<block_end>
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common constants for LKL."""

import re

LINUX_KERNEL_MODULE_STACK_TRACE = 'Linux Kernel Library Stack Trace:'

# hid-fuzzer: lib/posix-host.c:401: void panic(void): Assertion `0' failed.
LINUX_KERNEL_LIBRARY_ASSERT_REGEX = re.compile(
    r'([^:]+): lib/posix-host\.c:\d+: void panic\(void\): Assertion .*')

# Linux version 5.4.58+-ab6926695 where 6926695 is the build id.
# Unlike in a normal linux version string, we do not know the build hash.
LINUX_VERSION_REGEX_LKL = re.compile(r'Linux version .+-(ab([0-9a-f]+)\s)')

# This is the prefix in the repo.prop for the kernel for all
# lkl fuzzers.
LKL_REPO_KERNEL_PREFIX = 'kernel/private/lkl'

LKL_BUILD_TARGET = 'kernel_kasan.lkl_fuzzers'
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME> # # This file is part of breast_cancer_classifier. # # breast_cancer_classifier is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # breast_cancer_classifier is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with breast_cancer_classifier. If not, see <http://www.gnu.org/licenses/>. # ============================================================================== """ Defines the heatmap generation model used in run_producer.py """<import_stmt>copy<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.nn.functional pad<import_stmt>torchvision.models.densenet<as>densenet<class_stmt>ModifiedDenseNet121(nn.Module)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__()<line_sep>self.densenet=densenet.densenet121(*args **kwargs)<line_sep>self._is_modified=<false><block_end><def_stmt>_modify_densenet self<block_start>""" Replace Conv2d and MaxPool2d to resolve the differences in padding between TensorFlow and PyTorch """<assert_stmt><not>self._is_modified<for_stmt>full_name,nn_module self.densenet.named_modules()<block_start><if_stmt>isinstance(nn_module (nn.Conv2d nn.MaxPool2d))<block_start>module_name_parts=full_name.split(".")<line_sep>parent=self._get_module(self.densenet module_name_parts[:-1])<line_sep>actual_module_name=module_name_parts[-1]<assert_stmt>"conv"<in>module_name_parts[-1]<or>"pool"<in>module_name_parts[-1]<line_sep>setattr(parent actual_module_name TFSamePadWrapper(nn_module))<block_end><block_end>self._is_modified=<true><block_end><def_stmt>load_from_path self model_path<block_start>self.densenet.load_state_dict(torch.load(model_path))<line_sep>self._modify_densenet()<block_end><def_stmt>forward self x<block_start><if_stmt><not>self._is_modified<block_start>self._modify_densenet()<block_end>features=self.densenet.features(x)<line_sep>out=F.relu(features inplace=<true>)<line_sep>out=F.adaptive_avg_pool2d(out (1 1)).view(features.size(0) -1)<line_sep>out=self.densenet.classifier(out)<line_sep><return>out<block_end>@classmethod<def_stmt>_get_module cls model module_name_parts<block_start>obj=model<for_stmt>module_name_part module_name_parts<block_start>obj=getattr(obj module_name_part)<block_end><return>obj<block_end><block_end><class_stmt>TFSamePadWrapper(nn.Module)<block_start>""" Outputs a new convolutional or pooling layer which uses TensorFlow-style "SAME" padding """<def_stmt>__init__ self sub_module<block_start>super(TFSamePadWrapper self).__init__()<line_sep>self.sub_module=copy.deepcopy(sub_module)<line_sep>self.sub_module.padding=0<if_stmt>isinstance(self.sub_module.kernel_size int)<block_start>self.kernel_size=(self.sub_module.kernel_size self.sub_module.kernel_size)<line_sep>self.stride=(self.sub_module.stride 
self.sub_module.stride)<block_end><else_stmt><block_start>self.kernel_size=self.sub_module.kernel_size<line_sep>self.stride=self.sub_module.stride<block_end><block_end><def_stmt>forward self x<block_start><return>self.sub_module(self.apply_pad(x))<block_end><def_stmt>apply_pad self x<block_start>pad_height=self.calculate_padding(x.shape[2] self.kernel_size[0] self.stride[0])<line_sep>pad_width=self.calculate_padding(x.shape[3] self.kernel_size[1] self.stride[1])<line_sep>pad_top,pad_left=pad_height<floordiv>2 pad_width<floordiv>2<line_sep>pad_bottom,pad_right=pad_height-pad_top pad_width-pad_left<line_sep><return>pad(x [pad_top pad_bottom pad_left pad_right])<block_end>@classmethod<def_stmt>calculate_padding cls in_dim kernel_dim stride_dim<block_start><if_stmt>in_dim%stride_dim<eq>0<block_start><return>max(0 kernel_dim-stride_dim)<block_end><return>max(0 kernel_dim-(in_dim%stride_dim))<block_end><block_end>
import os
import shutil
from os import path


def main():
    for filename in os.listdir("."):
        print(filename)
        originalFilename = filename
        filename = filename.lower()
        filename = filename.replace("am_", "")
        os.rename(originalFilename, filename)


if __name__ == "__main__":
    main()
""" Code illustration: 1.12 A demonstration of tkinter styling @Tkinter GUI Application Development Blueprints """<import_stmt>tkinter<as>tk<line_sep>root=tk.Tk()<line_sep>root.configure(background='#4D4D4D')#top level styling # connecting to the external styling optionDB.txt root.option_readfile('optionDB.txt')<line_sep>#widget specific styling text=tk.Text(root background='#101010' foreground="#D6D6D6" borderwidth=18 relief='sunken' width=17 height=5)<line_sep>text.insert(tk.END "Style is knowing who you are,what you want to say, and not giving a damn.")<line_sep>text.grid(row=0 column=0 columnspan=6 padx=5 pady=5)<line_sep># all the below widgets derive their styling from optionDB.txt file tk.Button(root text='*').grid(row=1 column=1)<line_sep>tk.Button(root text='^').grid(row=1 column=2)<line_sep>tk.Button(root text='#').grid(row=1 column=3)<line_sep>tk.Button(root text='<').grid(row=2 column=1)<line_sep>tk.Button(root text='OK' cursor='target').grid(row=2 column=2)<line_sep>#changing cursor style tk.Button(root text='>').grid(row=2 column=3)<line_sep>tk.Button(root text='+').grid(row=3 column=1)<line_sep>tk.Button(root text='v').grid(row=3 column=2)<line_sep>tk.Button(root text='-').grid(row=3 column=3)<for_stmt>i range(10)<block_start>tk.Button(root text=str(i)).grid(column=3<if>i%3<eq>0<else>(1<if>i%3<eq>1<else>2) row=4<if>i<le>3<else>(5<if>i<le>6<else>6))<block_end>root.mainloop()<line_sep>
<import_from_stmt>apps.common.func.CommonFunc *<import_from_stmt>django.db.models Max<import_from_stmt>apps.common.func.LanguageFunc *<import_from_stmt>django.shortcuts render HttpResponse<import_from_stmt>all_models.models *<import_from_stmt>urllib parse<import_from_stmt>apps.user_center.services.user_uriService user_uriService<import_from_stmt>apps.common.config commonWebConfig<import_from_stmt>apps.config.services.serviceConfService ServiceConfService<import_from_stmt>apps.config.services.http_confService HttpConfService<import_from_stmt>apps.config.services.uriService UriService<import_from_stmt>apps.common.decorator.permission_normal_funcitons *<import_stmt>json<def_stmt>userUriCheck request<block_start>langDict=getLangTextDict(request)<line_sep>context={}<line_sep>context["httpUserCenterURIConfPage"]="current-page"<line_sep>context["userName"]=request.session.get("userName")<if_stmt><not>isRelease<block_start>context["env"]="test"<block_end># 文本 text={}<line_sep>text["pageTitle"]=langDict["web"]["httpUserCenterUserUriPageTitle"]<line_sep>text["subPageTitle"]=langDict["web"]["httpUserCenterUserUriSubPageTitle"]<line_sep>context["text"]=text<line_sep>context["page"]=1<line_sep>context["uri"]=UriService.getUri(request "ALL")<line_sep>context["title"]="服务配置"<line_sep><return>render(request "InterfaceTest/user_center/user_uri_conf.html" context)<block_end># def getUriData(request): # id = request.GET.get("id") # httpConfData = dbModelToDict(HttpConfService.getHttpConfForId(id)) # httpConfList = httpConfData["httpConf"].split("\n") # result = [] # loop = 0 # for httpConfIndex in range (1,len(httpConfList)): # if httpConfList[httpConfIndex] == "" or "=" not in httpConfList[httpConfIndex]: # continue # # indexData = httpConfList[httpConfIndex].split("=") # result.append({}) # result[loop]["httpConfKey"] = indexData[0].strip() # result[loop]["httpConfValue"] = indexData[1].strip() # # loop += 1 # return HttpResponse(ApiReturn(body=result).toJson()) <def_stmt>queryUserUriConf request<block_start>page=request.POST.get("page" 1)<if_stmt>isInt(page)<block_start>page=int(page)<block_end><else_stmt><block_start><return>HttpResponse("<script>alert('请验证页数参数');</script>")<block_end>checkArr=json.loads(parse.unquote(request.POST.get("queryArr")))<line_sep>execSql="SELECT s.*,tb_user.userName,muser.userName modByName FROM tb_config_uri s LEFT JOIN tb_user ON s.addBy=tb_user.loginName LEFT JOIN tb_user muser ON s.modBy=muser.loginName "<concat>"LEFT JOIN (SELECT * FROM ( SELECT id ucid,uriKey uuUriKey,conflevel FROM tb_user_uri "<concat>"WHERE addBy= '%s' ) b LEFT JOIN (SELECT uriKey cuUriKey FROM tb_config_uri) a ON b.uuUrikey = a.cuUriKey) c ON s.uriKey = c.cuUriKey "<concat>"WHERE s.state = 1"%request.session.get("loginName")<line_sep>checkList=[]<for_stmt>key checkArr<block_start><if_stmt>checkArr[key]<eq>""<block_start><continue><block_end><if_stmt>key<eq>"addBy"<block_start>checkList.append("%%%s%%"%checkArr[key])<line_sep>checkList.append("%%%s%%"%checkArr[key])<line_sep>execSql<augadd>""" and (s.addBy LIKE %s or tb_user.userName LIKE %s) """<line_sep><continue><block_end>checkList.append("%%%s%%"%checkArr[key])<line_sep>execSql<augadd>""" and s.%s """%key<line_sep>execSql<augadd>""" LIKE %s"""<block_end>execSql<augadd>""" order by c.conflevel is null,c.conflevel ASC,s.modTime desc"""<line_sep>print(execSql)<line_sep>context=pagination(execSql checkList page commonWebConfig.userHttpConfPageNum 
request=request)<line_sep>context["uriServices"]=UriService.getUri(request)<line_sep>context["dubboServices"]=dbModelListToListDict(TbConfigUri.objects.filter(state=1 protocol="DUBBO").order_by("level"))<line_sep>response=render(request "InterfaceTest/user_center/SubPages/user_uri_conf_sub_page.html" context)<line_sep><return>response<block_end><def_stmt>addUserUriSort request<block_start>uriKey=request.POST.get("uriKey")<line_sep>loginName=request.session.get("loginName")<line_sep>userUriCount=user_uriService.queryUserUriCount(loginName)<if_stmt>userUriCount<eq>0<block_start>user_uriService.addUserUrl(loginName uriKey 0)<line_sep><return>HttpResponse(ApiReturn().toJson())<block_end><else_stmt><block_start>userCount=dbModelListToListDict(user_uriService.queryUserUriRepeat(loginName uriKey))<line_sep>editLevel=dbModelListToListDict(user_uriService.queryUserUri(loginName))<if_stmt>len(userCount)<eq>0<block_start><for_stmt>i range(0 len(editLevel))<block_start>editLevel[i]["conflevel"]<augadd>1<line_sep>editLevel[i]["modTime"]=datetime.datetime.now()<line_sep>user_uriService.updateLevel(editLevel[i])<block_end>user_uriService.addUserUrl(loginName uriKey 0)<line_sep><return>HttpResponse(ApiReturn().toJson())<block_end><elif_stmt>userCount[0]["conflevel"]<ne>0<block_start><for_stmt>i range(0 len(editLevel))<block_start>editLevel[i]["conflevel"]=i+1<line_sep>editLevel[i]["modTime"]=datetime.datetime.now()<line_sep>user_uriService.updateLevel(editLevel[i])<block_end>userCount[0]["conflevel"]=0<line_sep>userCount[0]["modTime"]=datetime.datetime.now()<line_sep>user_uriService.updateLevel(userCount[0])<line_sep><return>HttpResponse(ApiReturn().toJson())<block_end><else_stmt><block_start><return>HttpResponse(ApiReturn(ApiReturn.CODE_EXCEPTION "此配置已排在第一位").toJson())<block_end><block_end><block_end>@single_data_permission(TbConfigUri TbConfigUri)<def_stmt>addUserUriApply request<block_start>alias=request.POST.get("alias")<line_sep>protocols=request.POST.get("protocols")<line_sep>loginName=request.session.get("loginName")<line_sep>failedProtocol=""<for_stmt>tmpProtocol protocols.split(",")<block_start><if_stmt>tmpProtocol.strip()<ne>""<block_start><try_stmt><block_start><try_stmt><block_start>id=TbConfigUri.objects.all().aggregate(Max('id'))["id__max"]+1<block_end><except_stmt><block_start>id=1<block_end>tmpUriModel=TbConfigUri()<line_sep>uriKey="%s-%s"%(tmpProtocol.strip().lower() alias)<line_sep>protocol=tmpProtocol.strip()<if_stmt>protocol<eq>"HTTP"<block_start>uriAlias=alias<block_end><else_stmt><block_start>uriAlias="%s(%s)"%(alias tmpProtocol.strip().lower())<block_end>oldData=TbConfigUri.objects.filter(uriKey=uriKey)<if_stmt>len(oldData)<g>0<block_start>data=oldData[0]<line_sep>data.state=1<line_sep>data.save()<block_end><else_stmt><block_start>tmpUriModel.id=id<line_sep>tmpUriModel.alias=uriAlias<line_sep>tmpUriModel.uriDesc="%s的%s服务"%(uriKey tmpProtocol)<line_sep>tmpUriModel.uriKey=uriKey<line_sep>tmpUriModel.protocol=protocol<line_sep>tmpUriModel.addBy=loginName<line_sep>tmpUriModel.save(force_insert=<true>)<block_end><block_end><except_stmt>Exception<as>e<block_start>print(traceback.format_exc())<line_sep>failedProtocol<augadd>tmpProtocol.strip()+" "<block_end><block_end><block_end><if_stmt>failedProtocol<eq>""<block_start><return>HttpResponse(ApiReturn(code=ApiReturn.CODE_OK message="添加成功!").toJson())<block_end><else_stmt><block_start><return>HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR message="协议%s添加失败!"%failedProtocol).toJson())<block_end><block_end>@single_data_permission(TbConfigUri 
TbConfigUri)<def_stmt>saveUriEdit request<block_start>uriKey=request.POST.get("uriKey")<line_sep>httpConfDesc=request.POST.get("httpConfDesc")<line_sep>loginName=request.session.get("loginName")<line_sep>TbConfigUri.objects.filter(uriKey=uriKey).update(uriDesc=httpConfDesc modBy=loginName modTime=get_current_time())<line_sep><return>HttpResponse(ApiReturn(code=ApiReturn.CODE_OK message="修改成功!").toJson())<block_end>@single_data_permission(TbConfigUri TbConfigUri)<def_stmt>delUri request<block_start>uriKey=request.GET.get("uriKey")<line_sep>loginName=request.session.get("loginName")<line_sep>TbConfigUri.objects.filter(uriKey=uriKey).update(state=0 modBy=loginName modTime=get_current_time())<line_sep><return>HttpResponse(ApiReturn(code=ApiReturn.CODE_OK message="删除成功!").toJson())<block_end><def_stmt>userEnvUriCheck request<block_start>langDict=getLangTextDict(request)<line_sep>context={}<line_sep>context["httpUserCenterEnvURIConfPage"]="current-page"<line_sep>context["userName"]=request.session.get("userName")<if_stmt><not>isRelease<block_start>context["env"]="test"<block_end># 文本 text={}<line_sep>text["pageTitle"]="请求地址配置"<line_sep>text["subPageTitle"]="请求地址查看"<line_sep>context["text"]=text<line_sep>context["page"]=1<line_sep>context["envConfList"]=HttpConfService.getAllHttpConf(request)<line_sep>context["uri"]=UriService.getUri(request "ALL")<line_sep>context["title"]="请求地址配置"<line_sep><return>render(request "InterfaceTest/user_center/user_env_uri_conf.html" context)<block_end><def_stmt>queryUserEnvUriConf request<block_start>page=request.POST.get("page")<if_stmt>isInt(page)<block_start>page=int(page)<block_end><else_stmt><block_start><return>HttpResponse("<script>alert('请验证页数参数');</script>")<block_end>checkArr=json.loads(parse.unquote(request.POST.get("queryArr")))<line_sep>execSql="SELECT s.*,tb_user.userName,curi.protocol protocol,muser.userName modByName FROM tb_env_uri_conf s LEFT JOIN tb_user ON s.addBy=tb_user.loginName LEFT JOIN tb_user muser ON s.modBy=muser.loginName "<concat>"LEFT JOIN tb_config_http chttp ON s.httpConfKey=chttp.httpConfKey "<concat>"LEFT JOIN tb_config_uri curi ON s.uriKey=curi.uriKey "<concat>"WHERE s.state = 1"<line_sep>checkList=[]<for_stmt>key checkArr<block_start><if_stmt>checkArr[key]<eq>""<block_start><continue><block_end><if_stmt>key<eq>"addBy"<block_start>checkList.append("%%%s%%"%checkArr[key])<line_sep>checkList.append("%%%s%%"%checkArr[key])<line_sep>execSql<augadd>""" and (s.addBy LIKE %s or tb_user.userName LIKE %s) """<line_sep><continue><block_end><if_stmt>key<eq>"protocol"<block_start>checkList.append("%s"%checkArr[key])<line_sep>execSql<augadd>""" and curi.%s """%key<line_sep>execSql<augadd>""" = %s"""<line_sep><continue><block_end><if_stmt>key<in>["httpConfKey" "uriKey"]<block_start>checkList.append("%s"%checkArr[key])<line_sep>execSql<augadd>""" and s.%s """%key<line_sep>execSql<augadd>""" = %s"""<line_sep><continue><block_end>checkList.append("%%%s%%"%checkArr[key])<line_sep>execSql<augadd>""" and s.%s """%key<line_sep>execSql<augadd>""" LIKE %s"""<block_end>execSql<augadd>""" order by s.modTime DESC"""<line_sep>context=pagination(execSql checkList page commonWebConfig.userHttpConfPageNum request=request)<line_sep># context["uriServices"] = UriService.getUri(request) # context["dubboServices"] = dbModelListToListDict(TbConfigUri.objects.filter(state=1, protocol="DUBBO").order_by("level")) response=render(request "InterfaceTest/user_center/SubPages/env_uri_conf_sub_page.html" 
context)<line_sep><return>response<block_end>@single_data_permission(TbEnvUriConf TbEnvUriConf)<def_stmt>delEnvUri request<block_start>id=request.GET.get("id")<line_sep>TbEnvUriConf.objects.filter(id=id).update(state=0)<line_sep><return>HttpResponse(ApiReturn().toJson())<block_end>@single_data_permission(TbEnvUriConf TbEnvUriConf)<def_stmt>saveEditEnvUri request<block_start>id=request.POST.get("id")<line_sep>requestAddr=request.POST.get("requestAddr")<line_sep>TbEnvUriConf.objects.filter(id=id).update(requestAddr=requestAddr modBy=request.session.get("loginName") state=1)<line_sep><return>HttpResponse(ApiReturn().toJson())<block_end>@single_data_permission(TbEnvUriConf TbEnvUriConf)<def_stmt>saveEnvUri request<block_start>requestAddr=request.POST.get("requestAddr")<line_sep>httpConfKey=request.POST.get("httpConfKey")<line_sep>uriKey=request.POST.get("uriKey")<line_sep>envUri=TbEnvUriConf.objects.filter(httpConfKey=httpConfKey uriKey=uriKey)<if_stmt>(envUri)<block_start><if_stmt>envUri[0].state<eq>1#提示错误 <block_start><return>HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR message="已经存在的请求配置,不能重复添加,请编辑!").toJson())<block_end><elif_stmt>envUri[0].state<eq>0#进行更新 <block_start>envUri[0].state=1<line_sep>envUri[0].requestAddr=requestAddr<line_sep>envUri[0].addBy=request.session.get("loginName")<line_sep>envUri[0].addTime=get_current_time()<line_sep>envUri[0].save(force_update=<true>)<line_sep><return>HttpResponse(ApiReturn(message="添加成功!").toJson())<block_end><block_end><else_stmt>#进行add <block_start>teuri=TbEnvUriConf()<line_sep>teuri.requestAddr=requestAddr<line_sep>teuri.httpConfKey=httpConfKey<line_sep>teuri.uriKey=uriKey<line_sep>teuri.addBy=request.session.get("loginName")<line_sep>teuri.save(force_insert=<true>)<line_sep><return>HttpResponse(ApiReturn(message="添加成功!!").toJson())<block_end><block_end><def_stmt>delAllUserUri request<block_start>TbUserUri.objects.filter(addBy=request.session.get("loginName")).delete()<line_sep><return>HttpResponse(ApiReturn().toJson())<block_end>
import unittest

import fiona
import pandas as pd


class TestFiona(unittest.TestCase):

    def test_read(self):
        with fiona.open("/input/tests/data/coutwildrnp.shp") as source:
            self.assertEqual(67, len(source))
title = xpath("div[@class=BlogTitle]")
urls = "http://my\\.oschina\\.net/flashsword/blog/\\d+"
result = {"title": title, "urls": urls}
'''
Chapter 7
A demonstration of a responsive window
using Grid.rowconfigure and Grid.columnconfigure
'''
from tkinter import Tk, Button, Grid

root = Tk()

for x in range(10):
    btn = Button(root, text=x)
    btn.grid(column=x, row=1, sticky='nsew')
    Grid.rowconfigure(root, 2, weight=x)
    Grid.columnconfigure(root, 2, weight=x)

root.mainloop()
"""Unit tests for utils.py"""<import_stmt>pytest<import_from_stmt>utils bearing calc_travel coordinate_distance deg2rad elevation<def_stmt>test_deg2rad <block_start>"""Unit tests for deg2rad()."""<line_sep># Note: python's math package includes a radians function that # converts degrees to radians. This function could be eliminated # to reduce custom code. <assert_stmt>deg2rad(57.2958)<eq>1.0000003575641672<assert_stmt>deg2rad(1)<eq>0.017453292519943295<assert_stmt>deg2rad(-1)<eq>-0.017453292519943295<block_end>@pytest.mark.skip(reason="Insufficient documentation to test. No docstrings.")<def_stmt>test_elevation <block_start>"""Unit test for elevation()."""<line_sep><pass><block_end><def_stmt>test_bearing <block_start>"""Unit test for bearing()."""<line_sep># Example from: https://www.igismap.com/formula-to-find-bearing-or-heading-angle-between-two-points-latitude-longitude/ lat1,long1=39.099912 -94.581213<line_sep>lat2,long2=38.627089 -90.200203<line_sep>expected_bearing=96.51262423499941<assert_stmt>bearing(lat1 long1 lat2 long2)<eq>expected_bearing<block_end><def_stmt>test_coordinate_distance <block_start>"""Unit test for coordinate_distance()."""<line_sep># Used this app to calculate distance: https://www.movable-type.co.uk/scripts/latlong.html lat1,long1=39.099912 -94.581213<line_sep>lat2,long2=38.627089 -90.200203<line_sep>expected_distance=382900.05037560174<assert_stmt>coordinate_distance(lat1 long1 lat2 long2)<eq>expected_distance<block_end>@pytest.mark.skip(reason="Insufficient documentation to test. What is lead_s?")<def_stmt>test_calc_travel <block_start>"""Unit test for calc_travel()."""<line_sep># note: the code in calc_travel is hard to understand because of the tangle # of calculations. consider reformatting and explaining or explore the possibility # using geopy <pass><block_end>
expected_output = {
    'Et0/2:12': {
        'type': 'BD_PORT',
        'is_path_list': False,
        'port': 'Et0/2:12'},
    '[IR]20012:2.2.2.2': {
        'type': 'VXLAN_REP',
        'is_path_list': True,
        'path_list': {
            'id': 1191,
            'path_count': 1,
            'type': 'VXLAN_REP',
            'description': '[IR]20012:2.2.2.2'}},
    '[IR]20012:3.3.3.2': {
        'type': 'VXLAN_REP',
        'is_path_list': True,
        'path_list': {
            'id': 1184,
            'path_count': 1,
            'type': 'VXLAN_REP',
            'description': '[IR]20012:3.3.3.2'}}}
<import_from_stmt>typing Iterator Mapping Sequence Type<import_from_stmt>apischema.conversions.conversions DefaultConversion<import_from_stmt>apischema.conversions.visitor DeserializationVisitor<import_from_stmt>apischema.objects ObjectField<import_from_stmt>apischema.objects.visitor DeserializationObjectVisitor<import_from_stmt>apischema.types AnyType<import_from_stmt>apischema.utils get_origin_or_type<import_from_stmt>apischema.visitor Unsupported<class_stmt>InitFlattenedAliasVisitor(DeserializationObjectVisitor[Iterator[str]] DeserializationVisitor[Iterator[str]])<block_start><def_stmt>mapping self cls:Type[Mapping] key_type:AnyType value_type:AnyType<arrow>Iterator[str]<block_start><yield><from>()<block_end><def_stmt>object self tp:AnyType fields:Sequence[ObjectField]<arrow>Iterator[str]<block_start><for_stmt>field fields<block_start><if_stmt>field.flattened<block_start><yield><from>get_deserialization_flattened_aliases(get_origin_or_type(tp) field self.default_conversion)<block_end><elif_stmt><not>field.is_aggregate<block_start><yield>field.alias<block_end><block_end><block_end><def_stmt>_visited_union self results:Sequence[Iterator[str]]<arrow>Iterator[str]<block_start><if_stmt>len(results)<ne>1<block_start><raise>NotImplementedError<block_end><return>results[0]<block_end><block_end><def_stmt>get_deserialization_flattened_aliases cls:Type field:ObjectField default_conversion:DefaultConversion<arrow>Iterator[str]<block_start><assert_stmt>field.flattened<try_stmt><block_start><yield><from>InitFlattenedAliasVisitor(default_conversion).visit_with_conv(field.type field.deserialization)<block_end><except_stmt>(NotImplementedError Unsupported)<block_start><raise>TypeError(f"Flattened field {cls.__name__}.{field.name} must have an object type")<from><none><block_end><block_end>
""" Tests for work-arounds to known arXiv API bugs. """<import_stmt>arxiv<import_stmt>unittest<class_stmt>TestClient(unittest.TestCase)<block_start><def_stmt>test_missing_title self<block_start>""" Papers with the title "0" do not have a title element in the Atom feed. It's unclear whether other falsey titles (e.g. "False", "null", or empty titles) are allowed by arXiv and are impacted by this bug. This may also surface for other expected fields (e.g. author names). + GitHub issue: https://github.com/lukasschwab/arxiv.py/issues/71 + Bug report: https://groups.google.com/u/1/g/arxiv-api/c/ORENISrc5gc """<line_sep>paper_without_title="2104.12255v1"<try_stmt><block_start>results=list(arxiv.Search(id_list=[paper_without_title]).results())<line_sep>self.assertEqual(len(results) 1)<line_sep>self.assertEqual(results[0].get_short_id() paper_without_title)<block_end><except_stmt>AttributeError<block_start>self.fail("got AttributeError fetching paper without title")<block_end><block_end><block_end>
__author__="<NAME>, <NAME> and <NAME>"<line_sep>__version__="0.0.1"<line_sep>__license__="BSD"<import_stmt>os sys<line_sep>sys.path.append(os.path.abspath(os.path.join(__file__ ".." ".." "..")))<import_stmt>logging<import_from_stmt>autoPyTorch AutoNetClassification<import_from_stmt>autoPyTorch.data_management.data_manager DataManager<line_sep>dm=DataManager(verbose=1)<line_sep>dataset_dir=os.path.abspath(os.path.join(os.path.dirname(__file__) '..' '..' 'datasets'))<line_sep># choose between the 10 classification testcases on real data. TEST_CASE=4<if_stmt>TEST_CASE<eq>1<block_start>dm.read_data("openml:22" is_classification=<true>)<line_sep># 2000 samples, 10 classes, 48 features <block_end><if_stmt>TEST_CASE<eq>2<block_start>dm.read_data("openml:1476" is_classification=<true>)<line_sep># 13910 samples, 6 classes, 128 features <block_end><if_stmt>TEST_CASE<eq>3<block_start>dm.read_data("openml:1464" is_classification=<true>)<line_sep># 748 samples, 2 classes, 4 features <block_end><if_stmt>TEST_CASE<eq>4<block_start>dm.read_data("openml:31" is_classification=<true>)<block_end><if_stmt>TEST_CASE<eq>5<block_start>dm.read_data("openml:28" is_classification=<true>)<line_sep># 5620 samples, 10 classes, 65 features <block_end><if_stmt>TEST_CASE<eq>6<block_start>dm.read_data("openml:42" is_classification=<true>)<line_sep># 683 samples, 19 classes, 36 categorical features <block_end><if_stmt>TEST_CASE<eq>7<block_start>dm.read_data("openml:44" is_classification=<true>)<line_sep># 4601 samples, 2 classes, 58 features <block_end><if_stmt>TEST_CASE<eq>8<block_start>dm.read_data("openml:32" is_classification=<true>)<block_end><if_stmt>TEST_CASE<eq>9<block_start>dm.read_data("openml:334" is_classification=<true>)<block_end><if_stmt>TEST_CASE<eq>10<block_start>dm.read_data("openml:40996" is_classification=<true>)<block_end>autonet=AutoNetClassification(budget_type='epochs' min_budget=1 max_budget=9 num_iterations=1 log_level='info')<line_sep>res=autonet.fit(X_train=dm.X_train Y_train=dm.Y_train early_stopping_patience=3 # validation_split=0.3, categorical_features=dm.categorical_features)<line_sep>print(res)<line_sep>
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for additional type-checking."""<import_stmt>collections<import_stmt>numpy<as>np<class_stmt>MatrixShapeOrTypeException(Exception)<block_start>"""Exception indicating a non-matrix argument."""<line_sep><pass><block_end><def_stmt>CheckIsMatrix arg shape=<none><block_start><return>isinstance(arg np.matrix)<and>(shape<is><none><or>arg.shape<eq>shape)<block_end><def_stmt>RequireMatrixArguments *shapes<block_start>"""A decorator that ensures arguments are np.matrix objects of given shapes. Args: *shapes: A list whose elements are either None or two element tuples (n, m). There should be one element per argument to the decorated function. The non-None arguments will be required to be n-by-m numpy.matrix objects. Returns: Decorator. """<def_stmt>_CheckArguments f<block_start><assert_stmt>len(shapes)<eq>f.func_code.co_argcount<def_stmt>_Wrapped *args **kwargs<block_start><for_stmt>(arg shape) zip(args shapes)<block_start><if_stmt>shape<is><not><none><and><not>CheckIsMatrix(arg shape=shape)<block_start><raise>MatrixShapeOrTypeException(shape)<block_end><block_end><return>f(*args **kwargs)<block_end><return>_Wrapped<block_end><return>_CheckArguments<block_end><def_stmt>MakeNamedVectorClass name field_indices<block_start>"""Generate a class for handling vectors with named sub-components. Returns a class which extends collections.namedtuple so that each element is a "slice" of a dim-by-1 np.matrix. The field_indices argument is a list of pairs. The first entry of each pair is the field name, the second entry is a list of vector indices, e.g.: FooClass = MakeNamedVectorClass('Foo', [('r', [0]), ('i', [1, 2, 3])]) foo_instance = Foo(r=np.matrix[[1.0]], i=np.matrix[[2.0], [3.0], [4.0]]) Here, the total dimension is 4, and foo_instance.ToVector() will be np.matrix([[0.0], [1.0], [2.0], [3.0]]). Args: name: Name to give the class. field_indices: List of tuples defining the class as above. Returns: Named vector class defined as above. 
"""<line_sep>keys=[key<for>(key _) field_indices]<line_sep>indices=[index<for>(_ index) field_indices]<line_sep>all_indices=[]<for_stmt>index indices<block_start>all_indices<augadd>index<block_end>dim=len(all_indices)<assert_stmt>set(all_indices)<eq>set(range(dim))<line_sep>tuple_type=collections.namedtuple(name+'Repr' keys)<class_stmt>NamedVector(tuple_type)<block_start>"""Class representing a dim-by-1 np.matrix with named slices."""<def_stmt>__init__ self *args **kwargs<block_start>indices_dict={key:index<for>(key index) field_indices}<for_stmt>(key value) kwargs.iteritems()<block_start><if_stmt><not>CheckIsMatrix(value shape=(len(indices_dict[key]) 1))<block_start><raise>MatrixShapeOrTypeException((key value))<block_end><block_end>super(NamedVector self).__init__(*args **kwargs)<block_end><def_stmt>ToVector self<block_start>"""Return the dim-by-1 np.matrix combining the named component vectors."""<line_sep>vector=np.matrix(np.zeros((dim 1)))<for_stmt>i,index enumerate(indices)<block_start>vector[index]=self[i]<block_end><return>vector<block_end>@classmethod@RequireMatrixArguments(<none> (dim 1))<def_stmt>FromVector cls vector<block_start>"""Inverse of ToVector()."""<line_sep>values=[<none><for>_ keys]<for_stmt>i,index enumerate(indices)<block_start>values[i]=vector[index]<block_end><return>cls(*values)<block_end>@classmethod<def_stmt>GetIndices cls<block_start>"""Get a namedtuple whose elements are the component indices."""<line_sep><return>tuple_type(*indices)<block_end>@classmethod<def_stmt>GetDim cls<block_start><return>dim<block_end>@classmethod<def_stmt>StepVector cls step_sizes<block_start>"""Maps a {field_name: step_size} dict to a vector of step sizes."""<line_sep>step_vector=np.matrix(np.zeros((cls.GetDim() 1)))<line_sep>indices=cls.GetIndices()<for_stmt>field_name,size step_sizes.iteritems()<block_start>step_vector[getattr(indices field_name) 0]=size<block_end><assert_stmt>(step_vector<g>0.0).all<line_sep><return>step_vector<block_end><block_end><return>NamedVector<block_end><def_stmt>MakeStateClass name field_indices<block_start>"""Creates a class for representing system state. Generates a class for representing the state of a system where some components of the state lie on manifolds such as SO(3). This involves constructing two classes. The first is a class that behaves like a namedtuple with each entry being a component of the state. The second class behaves like a NamedVector and represents a tangent vector for this space. The user must make a subclass of this StateClass returned by this method to handle moving states along tangent directions and recovering tangent directions from pairs of states. An example is given below: class AttitudeState(MakeStateClass( 'AttitudeState, [('omega', range(0, 3)), ('dcm_g2b', range(3, 6))])): def Increment(self, tangent, step=1.0): ... def Decrement(self, other_state): ... state = AttitudeState(omega=np.matrix(np.zeros((3, 1))), dcm_g2b=np.matrix(np.eye(3))) tangent = AttitudeState.Tangent(domega=np.matrix([[1.0], [2.0], [3.0]]), ddcm_g2b=np.matrix([[4.0], [5.0], [6.0]])) # This is equivalent to np.matrix([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]). tangent.ToVector() The structure of the state is given by field_indices which is a list of pairs (field_name, tangent_indices). The string field_name gives a name to this component of the state. The Tangent class is a NamedVector with fields named 'd' + field_name which are stored in the tangent_indices components of the vector. Args: name: Name of the class to create. 
field_indices: List of pairs (field_name, tangent_indices) describing the structure of the class to create. Returns: A new class as described above. """<line_sep>keys=[key<for>(key _) field_indices]<class_stmt>StateClass(collections.namedtuple(name keys))<block_start>"""Class representing the state of a system."""<line_sep>Tangent=MakeNamedVectorClass(# pylint: disable=invalid-name name+'Tangent' [('d'+key value)<for>(key value) field_indices])<def_stmt>Increment self tangent step=1.0<block_start><raise>NotImplementedError<block_end><def_stmt>Difference self other_state<block_start><raise>NotImplementedError<block_end><block_end><return>StateClass<block_end><def_stmt>MakeFlatStateClass name field_indices<block_start>"""Creates a class for representing system state in R^n. Generates a class for representing the state of a system where the Tangent vectors can be defined by element-wise addition and subtraction of the states. Args: name: See MakeStateClass. field_indices: See MakeStateClass. Returns: A new class as described above. """<class_stmt>FlatStateClass(MakeStateClass(name field_indices))<block_start>"""StateClass representing a state in R^n."""<def_stmt>Increment self tangent step=1.0<block_start><assert_stmt>isinstance(tangent FlatStateClass.Tangent)<line_sep><return>FlatStateClass(*[value+step<times>tangent_value<for>(value tangent_value) zip(self tangent)])<block_end><def_stmt>Difference self other_state<block_start><return>FlatStateClass.Tangent(*[other_value-value<for>(other_value value) zip(other_state self)])<block_end><block_end><return>FlatStateClass<block_end>
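# Hedged usage sketch, not part of the original Makani module: it assumes the helpers
# above (RequireMatrixArguments, MakeNamedVectorClass, MakeFlatStateClass) are in scope
# in a Python 2 environment, since the code above relies on func_code and iteritems.
# The class and field names below are illustrative only.
import numpy as np

# A named vector with a 1-element field 'r' and a 3-element field 'i'.
Foo = MakeNamedVectorClass('Foo', [('r', [0]), ('i', [1, 2, 3])])
foo = Foo(r=np.matrix([[1.0]]), i=np.matrix([[2.0], [3.0], [4.0]]))
assert (foo.ToVector() == np.matrix([[1.0], [2.0], [3.0], [4.0]])).all()
assert Foo.GetDim() == 4

# A flat (R^n) state: Increment/Difference are plain element-wise operations.
PositionState = MakeFlatStateClass('PositionState', [('pos', range(0, 3))])
state = PositionState(pos=np.matrix(np.zeros((3, 1))))
tangent = PositionState.Tangent(dpos=np.matrix([[1.0], [0.0], [0.0]]))
moved = state.Increment(tangent, step=2.0)
# Difference(other) returns other - self, so this recovers 2 * tangent.
assert (state.Difference(moved).ToVector() == 2.0 * tangent.ToVector()).all()

# RequireMatrixArguments enforces argument shapes at call time.
@RequireMatrixArguments((2, 2), None)
def Trace(m, label):
    return m[0, 0] + m[1, 1], label

print(Trace(np.matrix(np.eye(2)), 'demo'))  # (2.0, 'demo')
# Trace(np.matrix(np.zeros((3, 3))), 'demo') would raise MatrixShapeOrTypeException.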
<import_from_stmt>.assemble assemble_body assemble_request assemble_request_head assemble_response assemble_response_head<import_from_stmt>.read connection_close expected_http_body_size read_body read_request read_request_head read_response read_response_head <line_sep>__all__=["read_request" "read_request_head" "read_response" "read_response_head" "read_body" "connection_close" "expected_http_body_size" "assemble_request" "assemble_request_head" "assemble_response" "assemble_response_head" "assemble_body" ]<line_sep>
<import_stmt>os<line_sep>CURRENT_DIR=os.path.dirname(__file__)<line_sep>INPUT_DIR=os.path.join(CURRENT_DIR "input")<line_sep>OUTPUT_DIR=os.path.join(CURRENT_DIR "..")<line_sep>
<import_from_future_stmt> division generators print_function<import_stmt>macarico<class_stmt>BehavioralCloning(macarico.Learner)<block_start><def_stmt>__init__ self policy reference<block_start>macarico.Learner.__init__(self)<assert_stmt>isinstance(policy macarico.CostSensitivePolicy)<line_sep>self.policy=policy<line_sep>self.reference=reference<line_sep>self.objective=0.0<block_end><def_stmt>forward self state<block_start>ref=self.reference(state)<line_sep>self.objective<augadd>self.policy.update(state ref)<line_sep><return>ref<block_end><def_stmt>get_objective self _<block_start>ret=self.objective<line_sep>self.objective=0.0<line_sep><return>ret<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>testTask=cms.EDAnalyzer("TestTask" # standard name=cms.untracked.string("TestTask") debug=cms.untracked.int32(0) runkeyVal=cms.untracked.int32(0) runkeyName=cms.untracked.string("pp_run") tagHF=cms.untracked.InputTag("qie10Digis"))<line_sep>
<import_stmt>glob<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>cv2<line_sep>visual=<true><line_sep>col_names=['youtube_id' 'timestamp_ms' 'class_id' 'class_name' 'object_id' 'object_presence' 'xmin' 'xmax' 'ymin' 'ymax']<line_sep>df=pd.DataFrame.from_csv('yt_bb_detection_validation.csv' header=<none> index_col=<false>)<line_sep>df.columns=col_names<line_sep>frame_num=len(df['youtube_id'])<line_sep>img_path=glob.glob('/mnt/qwang/youtubebb/frames/val*/*/*.jpg')<line_sep>d={key.split('/')[-1]:value<for>(value key) enumerate(img_path)}<for_stmt>n range(frame_num)<block_start><if_stmt>df['object_presence'][n]<block_start>frame_name=df['youtube_id'][n]+'_'+str(df['timestamp_ms'][n])+'_'+str(df['class_id'][n])+'_'+str(df['object_id'][n])+'.jpg'<line_sep>bbox=np.array([df['xmin'][n] df['ymin'][n] df['xmax'][n] df['ymax'][n]])<if_stmt>frame_name<in>d.keys()<block_start>frame_path=img_path[d[frame_name]]<if_stmt>visual<block_start>im=cv2.imread(frame_path)<line_sep>h,w,_=im.shape<line_sep>pt1=(int(bbox[0]<times>w) int(bbox[1]<times>h))<line_sep>pt2=(int(bbox[2]<times>w) int(bbox[3]<times>h))<line_sep>cv2.rectangle(im pt1 pt2 (0 255 0) 2)<line_sep>cv2.imshow('img' im)<line_sep>cv2.waitKey(100)<block_end><block_end><else_stmt><block_start>print('no image: {}'.format(frame_name))<line_sep><pass><block_end><block_end><else_stmt><block_start><pass><block_end><block_end>print('done')<line_sep>
# This sample tests the case where a field descriptor has an implicit # "init" parameter type based on an overload. <import_from_stmt>typing Any Callable Literal Optional Tuple Type TypeVar Union overload <line_sep>T=TypeVar("T")<class_stmt>ModelField<block_start><def_stmt>__init__ self * default:Optional[Any]=<ellipsis> init:Optional[bool]=<true> **kwargs:Any <arrow><none><block_start><ellipsis><block_end><block_end>@overload<def_stmt>field * default:Optional[str]=<none> resolver:Callable[[] Any] init:Literal[<false>]=<false> <arrow>Any<block_start><ellipsis><block_end>@overload<def_stmt>field * default:Optional[str]=<none> resolver:<none>=<none> init:Literal[<true>]=<true> <arrow>Any<block_start><ellipsis><block_end><def_stmt>field * default:Optional[str]=<none> resolver:Optional[Callable[[] Any]]=<none> init:bool=<true> <arrow>Any<block_start><ellipsis><block_end><def_stmt>__dataclass_transform__ * eq_default:bool=<true> order_default:bool=<false> kw_only_default:bool=<false> field_specifiers:Tuple[Union[type Callable[<ellipsis> Any]] <ellipsis>]=(()) <arrow>Callable[[T] T]# If used within a stub file, the following implementation can be # replaced with "...". <block_start><return><lambda>a:a<block_end>@__dataclass_transform__(kw_only_default=<true> field_specifiers=(field ))<def_stmt>create_model * init:bool=<true><arrow>Callable[[Type[T]] Type[T]]<block_start><ellipsis><block_end>@create_model()<class_stmt>CustomerModel<block_start>id:int=field(resolver=<lambda>:0)<line_sep>name:str=field(default="Voldemort")<block_end>CustomerModel()<line_sep>CustomerModel(name="hi")<line_sep># This should generate an error because "id" is not # supposed to be part of the init function. CustomerModel(id=1 name="hi")<line_sep>
<import_stmt>warnings<import_stmt>pandavro<as>pdx<import_from_stmt>optimus.engines.base.io.save BaseSave<import_from_stmt>optimus.helpers.logger logger<import_from_stmt>optimus.helpers.types *<class_stmt>Save(BaseSave)<block_start><def_stmt>__init__ self root:'DataFrameType'<block_start>self.root=root<block_end><def_stmt>json self path mode="w" *args **kwargs<block_start>df=self.root.data<try_stmt><block_start>df.to_json(path mode=mode *args **kwargs)<block_end><except_stmt>IOError<as>e<block_start>logger.print(e)<line_sep><raise><block_end><block_end><def_stmt>csv self path mode="rb" *args **kwargs<block_start><try_stmt><block_start>dfd=self.root.cols.cast("*" "str").data<line_sep>dfd.to_csv(path index=<false> mode=mode *args **kwargs)<block_end><except_stmt>IOError<as>error<block_start>logger.print(error)<line_sep><raise><block_end><block_end><def_stmt>parquet self path mode="overwrite" num_partitions=1 *args **kwargs# These characters are invalid in parquet column names <block_start>invalid_character=[" " "," ";" "{" "}" "(" ")" "\n" "\t" "="]<def_stmt>func col_name<block_start><for_stmt>i invalid_character<block_start>col_name=col_name.replace(i "_")<block_end><return>col_name<block_end>df=self.root.cols.rename(func)<try_stmt><block_start>df.data.to_parquet(path mode=mode numpartitions=num_partitions)<block_end><except_stmt>IOError<as>e<block_start>logger.print(e)<line_sep><raise><block_end><block_end><def_stmt>excel self path **kwargs<block_start><try_stmt># df = self.root.data # columns = parse_columns(self, "*", # filter_by_column_types=["date", "array", "vector", "binary", "null"]) <block_start>df=self.root.cols.cast("*" "str")<line_sep># Dask reference # https://docs.dask.org/en/latest/dataframe-api.html#dask.dataframe.to_csv df.to_pandas().to_excel(path index=<false>)<block_end><except_stmt>IOError<as>error<block_start>logger.print(error)<line_sep><raise><block_end><block_end><def_stmt>orc self path **kwargs<block_start><try_stmt><block_start>df=self.root.data<line_sep>df.to_orc(path index=<false> **kwargs)<block_end><except_stmt>IOError<as>error<block_start>logger.print(error)<line_sep><raise><block_end><block_end><def_stmt>avro self path **kwargs<block_start>warnings.warn("Using CPU via pandavro to write the avro dataset")<line_sep>pdx.to_avro(path self.root.to_pandas())<block_end><block_end>
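# Quick standalone sketch (with made-up column names) of the parquet column-name
# cleanup performed by the nested func() above: every character parquet rejects is
# replaced with an underscore before the frame is written.
invalid_character = [" ", ",", ";", "{", "}", "(", ")", "\n", "\t", "="]

def sanitize(col_name):
    for ch in invalid_character:
        col_name = col_name.replace(ch, "_")
    return col_name

print(sanitize("price (usd)"))  # -> price__usd_
print(sanitize("a,b;c=d"))      # -> a_b_c_d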
s='one two one two one'<line_sep>print(s.replace('one' 'two').replace('two' 'one'))<line_sep># one one one one one print(s.replace('one' 'X').replace('two' 'one').replace('X' 'two'))<line_sep># two one two one two <def_stmt>swap_str s_org s1 s2 temp='*q@w-e~r^'<block_start><return>s_org.replace(s1 temp).replace(s2 s1).replace(temp s2)<block_end>print(swap_str(s 'one' 'two'))<line_sep># two one two one two print(s.replace('o' 't').replace('t' 'o'))<line_sep># one owo one owo one print(s.translate(str.maketrans({'o':'t' 't':'o'})))<line_sep># tne owt tne owt tne print(s.translate(str.maketrans('ot' 'to')))<line_sep># tne owt tne owt tne
<import_from_stmt>.ScriptTools BitcoinScriptTools<import_from_stmt>.VM BitcoinVM<import_from_stmt>...encoding.bytes32 from_bytes_32<import_from_stmt>pycoin.satoshi errno<import_from_stmt>pycoin.satoshi.flags SIGHASH_NONE SIGHASH_SINGLE SIGHASH_ANYONECANPAY VERIFY_P2SH VERIFY_SIGPUSHONLY VERIFY_CLEANSTACK VERIFY_WITNESS VERIFY_MINIMALIF VERIFY_WITNESS_PUBKEYTYPE <import_from_stmt>.SegwitChecker SegwitChecker<import_from_stmt>.P2SChecker P2SChecker<class_stmt>TxContext(object)<block_start><pass><block_end><class_stmt>BitcoinSolutionChecker(SegwitChecker P2SChecker)<block_start>VM=BitcoinVM<line_sep>ScriptTools=BitcoinScriptTools<line_sep>DEFAULT_FLAGS=VERIFY_P2SH|VERIFY_WITNESS<def_stmt>__init__ self tx<block_start>self.tx=tx<line_sep># self.sighash_cache = {} <block_end><def_stmt>_delete_signature self script sig_blob<block_start>""" Returns a script with the given subscript removed. The subscript must appear in the main script aligned to opcode boundaries for it to be removed. """<line_sep>subscript=self.ScriptTools.compile_push_data_list([sig_blob])<line_sep>new_script=bytearray()<line_sep>pc=0<for_stmt>opcode,data,pc,new_pc self.ScriptTools.get_opcodes(script)<block_start>section=script[pc:new_pc]<if_stmt>section<ne>subscript<block_start>new_script.extend(section)<block_end><block_end><return>bytes(new_script)<block_end><def_stmt>_make_sighash_f self tx_in_idx<block_start><def_stmt>sig_for_hash_type_f hash_type sig_blobs vm<block_start>script=vm.script[vm.begin_code_hash:]<for_stmt>sig_blob sig_blobs<block_start>script=self._delete_signature(script sig_blob)<block_end><return>self._signature_hash(script tx_in_idx hash_type)<block_end><return>sig_for_hash_type_f<block_end><def_stmt>_solution_script_to_stack self tx_context flags traceback_f<block_start><if_stmt>flags&VERIFY_SIGPUSHONLY<block_start>self._check_script_push_only(tx_context.solution_script)<block_end># never use VERIFY_MINIMALIF or VERIFY_WITNESS_PUBKEYTYPE except in segwit f1=flags&~(VERIFY_MINIMALIF|VERIFY_WITNESS_PUBKEYTYPE)<line_sep>vm=self.VM(tx_context.solution_script tx_context self._make_sighash_f(tx_context.tx_in_idx) f1)<line_sep>vm.is_solution_script=<true><line_sep>vm.traceback_f=traceback_f<line_sep>solution_stack=vm.eval_script()<line_sep><return>solution_stack<block_end><def_stmt>_check_script_push_only self script<block_start>scriptStreamer=self.VM.ScriptStreamer<line_sep>pc=0<while_stmt>pc<l>len(script)<block_start>opcode,data,pc,is_ok=scriptStreamer.get_opcode(script pc)<if_stmt>opcode<not><in>scriptStreamer.data_opcodes<block_start><raise>self.ScriptError("signature has non-push opcodes" errno.SIG_PUSHONLY)<block_end><block_end><block_end><def_stmt>_tx_in_for_idx self idx tx_in tx_out_script unsigned_txs_out_idx<block_start><if_stmt>idx<eq>unsigned_txs_out_idx<block_start><return>self.tx.TxIn(tx_in.previous_hash tx_in.previous_index tx_out_script tx_in.sequence)<block_end><return>self.tx.TxIn(tx_in.previous_hash tx_in.previous_index b'' tx_in.sequence)<block_end>@classmethod<def_stmt>delete_subscript class_ script subscript<block_start>""" Returns a script with the given subscript removed. The subscript must appear in the main script aligned to opcode boundaries for it to be removed. 
"""<line_sep>new_script=bytearray()<line_sep>pc=0<for_stmt>opcode,data,pc,new_pc class_.ScriptTools.get_opcodes(script)<block_start>section=script[pc:new_pc]<if_stmt>section<ne>subscript<block_start>new_script.extend(section)<block_end><block_end><return>bytes(new_script)<block_end><def_stmt>_signature_hash self tx_out_script unsigned_txs_out_idx hash_type<block_start>""" Return the canonical hash for a transaction. We need to remove references to the signature, since it's a signature of the hash before the signature is applied. :param tx_out_script: the script the coins for unsigned_txs_out_idx are coming from :param unsigned_txs_out_idx: where to put the tx_out_script :param hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL, optionally bitwise or'ed with SIGHASH_ANYONECANPAY """<line_sep># In case concatenating two scripts ends up with two codeseparators, # or an extra one at the end, this prevents all those possible incompatibilities. tx_out_script=self.delete_subscript(tx_out_script self.ScriptTools.compile("OP_CODESEPARATOR"))<line_sep># blank out other inputs' signatures txs_in=[self._tx_in_for_idx(i tx_in tx_out_script unsigned_txs_out_idx)<for>i,tx_in enumerate(self.tx.txs_in)]<line_sep>txs_out=self.tx.txs_out<line_sep># Blank out some of the outputs <if_stmt>(hash_type&0x1f)<eq>SIGHASH_NONE# Wildcard payee <block_start>txs_out=[]<line_sep># Let the others update at will <for_stmt>i range(len(txs_in))<block_start><if_stmt>i<ne>unsigned_txs_out_idx<block_start>txs_in[i].sequence=0<block_end><block_end><block_end><elif_stmt>(hash_type&0x1f)<eq>SIGHASH_SINGLE# This preserves the ability to validate existing legacy # transactions which followed a buggy path in Satoshi's # original code. <block_start><if_stmt>unsigned_txs_out_idx<ge>len(txs_out)# This should probably be moved to a constant, but the # likelihood of ever getting here is already really small # and getting smaller <block_start><return>(1<lshift>248)<block_end># Only lock in the txout payee at same index as txin; delete # any outputs after this one and set all outputs before this # one to "null" (where "null" means an empty script and a # value of -1) txs_out=[self.tx.TxOut(0xffffffffffffffff b'')]<times>unsigned_txs_out_idx<line_sep>txs_out.append(self.tx.txs_out[unsigned_txs_out_idx])<line_sep># Let the others update at will <for_stmt>i range(len(txs_in))<block_start><if_stmt>i<ne>unsigned_txs_out_idx<block_start>txs_in[i].sequence=0<block_end><block_end><block_end># Blank out other inputs completely, not recommended for open transactions <if_stmt>hash_type&SIGHASH_ANYONECANPAY<block_start>txs_in=[txs_in[unsigned_txs_out_idx]]<block_end>tmp_tx=self.tx.__class__(self.tx.version txs_in txs_out self.tx.lock_time)<line_sep><return>from_bytes_32(tmp_tx.hash(hash_type=hash_type))<block_end><def_stmt>tx_context_for_idx self tx_in_idx<block_start>""" solution_script: alleged solution to the puzzle_script puzzle_script: the script protecting the coins """<line_sep>tx_in=self.tx.txs_in[tx_in_idx]<line_sep>tx_context=TxContext()<line_sep>tx_context.lock_time=self.tx.lock_time<line_sep>tx_context.version=self.tx.version<line_sep>tx_context.puzzle_script=b''<if>self.tx.missing_unspent(tx_in_idx)<else>self.tx.unspents[tx_in_idx].script<line_sep>tx_context.solution_script=tx_in.script<line_sep>tx_context.witness_solution_stack=tx_in.witness<line_sep>tx_context.sequence=tx_in.sequence<line_sep>tx_context.tx_in_idx=tx_in_idx<line_sep><return>tx_context<block_end><def_stmt>check_solution self tx_context flags=<none> 
traceback_f=<none><block_start>""" tx_context: information about the transaction that the VM may need flags: gives the VM hints about which additional constraints to check """<for_stmt>t self.puzzle_and_solution_iterator(tx_context flags=flags traceback_f=traceback_f)<block_start>puzzle_script,solution_stack,flags,sighash_f=t<line_sep>vm=self.VM(puzzle_script tx_context sighash_f flags=flags initial_stack=solution_stack[:])<line_sep>vm.is_solution_script=<false><line_sep>vm.traceback_f=traceback_f<line_sep>stack=vm.eval_script()<if_stmt>len(stack)<eq>0<or><not>vm.bool_from_script_bytes(stack[-1])<block_start><raise>self.ScriptError("eval false" errno.EVAL_FALSE)<block_end><block_end><if_stmt>flags&VERIFY_CLEANSTACK<and>len(stack)<ne>1<block_start><raise>self.ScriptError("stack not clean after evaluation" errno.CLEANSTACK)<block_end><block_end><def_stmt>puzzle_and_solution_iterator self tx_context flags=<none> traceback_f=<none><block_start><if_stmt>flags<is><none><block_start>flags=self.DEFAULT_FLAGS<block_end>solution_stack=self._solution_script_to_stack(tx_context flags=flags traceback_f=traceback_f)<line_sep>puzzle_script=tx_context.puzzle_script<line_sep>flags_1=flags&~(VERIFY_MINIMALIF|VERIFY_WITNESS_PUBKEYTYPE)<line_sep>sighash_f=self._make_sighash_f(tx_context.tx_in_idx)<line_sep><yield>puzzle_script solution_stack flags_1 sighash_f<line_sep>p2sh_tuple=self.p2s_program_tuple(tx_context puzzle_script solution_stack flags_1 sighash_f)<if_stmt>p2sh_tuple<block_start><yield>p2sh_tuple<line_sep>puzzle_script,solution_stack=p2sh_tuple[:2]<block_end>is_p2sh=p2sh_tuple<is><not><none><line_sep>witness_tuple=self.witness_program_tuple(tx_context puzzle_script solution_stack flags is_p2sh)<if_stmt>witness_tuple<block_start><yield>witness_tuple<block_end><block_end><block_end>
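# Toy, self-contained illustration (no pycoin required) of the opcode-aligned deletion
# idea behind delete_subscript()/_delete_signature() above. The real methods walk
# get_opcodes() to find section boundaries; here the script is already modelled as a
# list of opcode-aligned chunks, so the alignment requirement is implicit.
def delete_aligned_subscript(chunks, subscript):
    """Drop every chunk that exactly equals the subscript; partial overlaps survive."""
    return b"".join(chunk for chunk in chunks if chunk != subscript)

OP_DUP, OP_CODESEPARATOR, OP_CHECKSIG = b"\x76", b"\xab", b"\xac"
script_chunks = [OP_DUP, OP_CODESEPARATOR, b"\x14" + b"\x00" * 20, OP_CHECKSIG]
# OP_CODESEPARATOR (0xab) is removed, mirroring what _signature_hash() does first.
print(delete_aligned_subscript(script_chunks, OP_CODESEPARATOR).hex())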
<import_from_stmt>datetime datetime<import_from_stmt>app db<class_stmt>Base(db.Model)<block_start>__abstract__=<true><line_sep>id=db.Column(db.Integer primary_key=<true>)<line_sep>created_date=db.Column(db.DateTime index=<true> default=datetime.utcnow)<line_sep>updated_date=db.Column(db.DateTime index=<true> default=datetime.utcnow)<block_end>
<import_stmt>numpy<as>np<import_from_stmt>hls4ml.model.types FixedPrecisionType<import_from_stmt>hls4ml.backends.fpga.fpga_types APTypeConverter<import_from_stmt>hls4ml.model.layers GarNet GarNetStack<import_from_stmt>hls4ml.backends.template LayerConfigTemplate FunctionCallTemplate<line_sep># GarNet templates garnet_common_config_template=""" static const unsigned n_vertices = {n_vertices}; static const unsigned n_vertices_width = {n_vertices_width}; static const unsigned n_in_features = {n_in_features}; static const unsigned distance_width = {distance_width}; static const unsigned output_collapse = {collapse_type}; static const bool mean_by_nvert = {mean_by_nvert}; typedef {norm_t} norm_t; typedef ap_fixed<{distance_width}, {distance_nint}, AP_TRN, AP_SAT> distance_t; typedef {edge_weight_t} edge_weight_t; typedef {edge_weight_aggr_t} edge_weight_aggr_t; typedef {aggr_t} aggr_t; typedef {output_t} output_t; static const unsigned reuse_factor = {reuse}; static const unsigned log2_reuse_factor = {log2_reuse}; """<line_sep>garnet_config_template="""struct config{index} : nnet::garnet_config {{"""<line_sep>garnet_config_template<augadd>garnet_common_config_template<line_sep>garnet_config_template<augadd>""" static const unsigned n_propagate = {n_propagate}; static const unsigned n_aggregators = {n_aggregators}; static const unsigned n_out_features = {n_out_features}; typedef {input_transform_weights_t} input_transform_weights_t; typedef {input_transform_biases_t} input_transform_biases_t; typedef {aggregator_distance_weights_t} aggregator_distance_weights_t; typedef {aggregator_distance_biases_t} aggregator_distance_biases_t; typedef {output_transform_weights_t} output_transform_weights_t; typedef {output_transform_biases_t} output_transform_biases_t; static const input_transform_weights_t (&input_transform_weights)[{input_transform_weights_size}]; static const input_transform_biases_t (&input_transform_biases)[{input_transform_biases_size}]; static const aggregator_distance_weights_t (&aggregator_distance_weights)[{aggregator_distance_weights_size}]; static const aggregator_distance_biases_t (&aggregator_distance_biases)[{aggregator_distance_biases_size}]; static const output_transform_weights_t (&output_transform_weights)[{output_transform_weights_size}]; static const output_transform_biases_t (&output_transform_biases)[{output_transform_biases_size}]; typedef config{index} base_t; }}; const config{index}::input_transform_weights_t (&config{index}::input_transform_weights)[{input_transform_weights_size}] = {input_transform_weights}; const config{index}::input_transform_biases_t (&config{index}::input_transform_biases)[{input_transform_biases_size}] = {input_transform_biases}; const config{index}::aggregator_distance_weights_t (&config{index}::aggregator_distance_weights)[{aggregator_distance_weights_size}] = {aggregator_distance_weights}; const config{index}::aggregator_distance_biases_t (&config{index}::aggregator_distance_biases)[{aggregator_distance_biases_size}] = {aggregator_distance_biases}; const config{index}::output_transform_weights_t (&config{index}::output_transform_weights)[{output_transform_weights_size}] = {output_transform_weights}; const config{index}::output_transform_biases_t (&config{index}::output_transform_biases)[{output_transform_biases_size}] = {output_transform_biases}; """<line_sep>garnet_function_template='nnet::garnet{impl}<{input_t}, {integer_input_t}, {output_t}, {config}>({input}, {nvtx}, 
{output});'<line_sep>garnet_include_list=['nnet_utils/nnet_garnet.h']<class_stmt>GarNetConfigTemplate(LayerConfigTemplate)<block_start><def_stmt>__init__ self<block_start>super().__init__(GarNet)<line_sep>self.template=(garnet_config_template )<block_end><def_stmt>get_transforms_config self node params<block_start>params['n_in_features']=node.attributes['n_in_features']<line_sep>params['n_propagate']=node.attributes['n_propagate']<line_sep>params['n_aggregators']=node.get_weights('aggregator_distance_biases').shape[0]<line_sep>params['n_out_features']=node.get_weights('output_transform_biases').shape[0]<for_stmt>wname,weights node.weights.items()<block_start>params[wname]=weights.name<line_sep>params['{}_t'.format(wname)]=weights.type.name<line_sep>params['{}_size'.format(wname)]=weights.data_length<block_end><block_end><def_stmt>format self node<block_start>params=self._default_config_params(node)<line_sep>params['n_vertices']=node.attributes['n_vertices']<line_sep>params['n_vertices_width']=int(np.log2(params['n_vertices']))<line_sep>params['distance_width']=12<line_sep>params['distance_nint']=min(4 params['distance_width']-6)# this is tuned params['log2_reuse']=int(np.log2(params['reuse']))<line_sep>## Define default precisions for various internal arrays (can be overridden from the config file) # We always give 10 digits for the subintegral part fwidth=10<line_sep># Integral precision for aggr_t depends on how large the temporary sum for weighed feature mean will be aggr_intw=max(params['log2_reuse'] params['n_vertices_width']-params['log2_reuse'])+3# safety factor 2**3 aggr_w=aggr_intw+fwidth<line_sep># edge_weight_aggr_t does not need the safety factor ew_aggr_intw=aggr_intw-3<line_sep>ew_aggr_w=ew_aggr_intw+fwidth<line_sep># Integral precision for norm is fixed to 4 norm_intw=4<line_sep>norm_w=norm_intw+fwidth<line_sep>vspecs=[('edge_weight' FixedPrecisionType(10 0 signed=<false>)) ('edge_weight_aggr' FixedPrecisionType(ew_aggr_w ew_aggr_intw signed=<false>)) ('aggr' FixedPrecisionType(aggr_w aggr_intw)) ('norm' FixedPrecisionType(norm_w norm_intw signed=<false>))]<line_sep>precision_converter=APTypeConverter()<for_stmt>vname,default_precision vspecs<block_start>params['{}_t'.format(vname)],type_name=node.model.config.get_precision(node var=vname)<if_stmt>type_name.endswith('default_t')<block_start>params['{}_t'.format(vname)]=precision_converter.convert(default_precision).definition_cpp()<block_end><block_end>params['output_t']=node.get_output_variable().type.name<if_stmt>node.attributes['collapse']<in>['mean' 'max']<block_start>params['collapse_type']='collapse_{}'.format(node.attributes['collapse'])<block_end><else_stmt><block_start>params['collapse_type']='no_collapse'<block_end>params['mean_by_nvert']=str(node.attributes['mean_by_nvert']).lower()<line_sep>self.get_transforms_config(node params)<line_sep><return>self.template[0].format(**params)<block_end><block_end><class_stmt>GarNetFunctionTemplate(FunctionCallTemplate)<block_start><def_stmt>__init__ self<block_start>super().__init__(GarNet include_header=garnet_include_list)<line_sep>self.template=garnet_function_template<block_end><def_stmt>format self 
node<block_start>params=self._default_function_params(node)<line_sep>data=node.get_input_variable(node.inputs[0])<line_sep>integer_input=node.get_input_variable(node.inputs[1])<line_sep>params['input_t']=data.type.name<line_sep>params['input']=data.name<line_sep>params['integer_input_t']=integer_input.type.name<line_sep>params['nvtx']=integer_input.name<if_stmt>node.ref_impl<block_start>params['impl']='_ref'<block_end><else_stmt><block_start>params['impl']=''<block_end><return>self.template.format(**params)<block_end><block_end># GarNetStack Templates garnet_stack_base_config_template="""struct config{index}_base : nnet::garnet_config {{"""<line_sep>garnet_stack_base_config_template<augadd>garnet_common_config_template<line_sep>garnet_stack_base_config_template<augadd>""" static const bool is_stack = true; typedef config{index}_base base_t; }}; struct config{index} : config{index}_base {{ static const unsigned n_sublayers = {n_sublayers}; template<int L> struct sublayer_t : config{index}_base {{}}; }}; {sublayer_configs} """<line_sep>garnet_stack_sublayer_config_template="""template<> struct config{index}::sublayer_t<{il}> : config{index}_base {{ static const unsigned n_in_features = {n_in_features}; static const unsigned n_propagate = {n_propagate}; static const unsigned n_aggregators = {n_aggregators}; static const unsigned n_out_features = {n_out_features}; typedef {input_transform_weights_t} input_transform_weights_t; typedef {input_transform_biases_t} input_transform_biases_t; typedef {aggregator_distance_weights_t} aggregator_distance_weights_t; typedef {aggregator_distance_biases_t} aggregator_distance_biases_t; typedef {output_transform_biases_t} output_transform_biases_t; static const input_transform_weights_t (&input_transform_weights)[{input_transform_weights_size}]; static const input_transform_biases_t (&input_transform_biases)[{input_transform_biases_size}]; static const aggregator_distance_weights_t (&aggregator_distance_weights)[{aggregator_distance_weights_size}]; static const aggregator_distance_biases_t (&aggregator_distance_biases)[{aggregator_distance_biases_size}]; static const output_transform_biases_t (&output_transform_biases)[{output_transform_biases_size}]; typedef config{index}::sublayer_t<{next}> next_layer_t; }}; const config{index}::sublayer_t<{il}>::input_transform_weights_t (&config{index}::sublayer_t<{il}>::input_transform_weights)[{input_transform_weights_size}] = {input_transform_weights}; const config{index}::sublayer_t<{il}>::input_transform_biases_t (&config{index}::sublayer_t<{il}>::input_transform_biases)[{input_transform_biases_size}] = {input_transform_biases}; const config{index}::sublayer_t<{il}>::aggregator_distance_weights_t (&config{index}::sublayer_t<{il}>::aggregator_distance_weights)[{aggregator_distance_weights_size}] = {aggregator_distance_weights}; const config{index}::sublayer_t<{il}>::aggregator_distance_biases_t (&config{index}::sublayer_t<{il}>::aggregator_distance_biases)[{aggregator_distance_biases_size}] = {aggregator_distance_biases}; const config{index}::sublayer_t<{il}>::output_transform_biases_t (&config{index}::sublayer_t<{il}>::output_transform_biases)[{output_transform_biases_size}] = {output_transform_biases}; """<line_sep>garnet_stack_config_template=(garnet_stack_base_config_template garnet_stack_sublayer_config_template)<line_sep>garnet_stack_function_template='nnet::garnet_stack<{input_t}, {integer_input_t}, {output_t}, {config}>({input}, {nvtx}, 
{output});'<class_stmt>GarNetStackConfigTemplate(GarNetConfigTemplate)<block_start><def_stmt>__init__ self<block_start>super(GarNetConfigTemplate self).__init__(GarNetStack)<line_sep>self.template=garnet_stack_config_template<block_end><def_stmt>get_transforms_config self node params<block_start>_,sublayer_template=self.template<line_sep>params['n_sublayers']=node.attributes['n_sublayers']<line_sep>params['n_in_features']=node.attributes['n_in_features'][0]<line_sep>params['n_out_features']=node.attributes['n_out_features'][-1]<line_sep>sublayer_configs=[]<for_stmt>il range(node.attributes['n_sublayers']-1 -1 -1)<block_start>sub_params={'index':node.index 'il':il}<for_stmt>p ['n_in_features' 'n_propagate' 'n_aggregators' 'n_out_features']<block_start>sub_params[p]=node.attributes[p][il]<block_end><for_stmt>wname,weights node._sublayer_weights[il].items()<block_start>sub_params[wname]=weights.name<line_sep>sub_params['{}_t'.format(wname)]=weights.type.name<line_sep>sub_params['{}_size'.format(wname)]=weights.data_length<block_end><if_stmt>il<ne>node.attributes['n_sublayers']-1<block_start>sub_params['next']=il+1<block_end><else_stmt><block_start>sub_params['next']=0<block_end>sublayer_configs.append(sublayer_template.format(**sub_params))<block_end>params['sublayer_configs']='\n'.join(sublayer_configs)<block_end><block_end><class_stmt>GarNetStackFunctionTemplate(GarNetFunctionTemplate)<block_start><def_stmt>__init__ self<block_start>super(GarNetFunctionTemplate self).__init__(GarNetStack include_header=garnet_include_list)<line_sep>self.template=garnet_stack_function_template<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>os<import_stmt>tensorflow<as>tf<import_stmt>tf_xception_<line_sep>input_placeholder,output=tf_xception_.KitModel('./xception.npy')<for_stmt>var tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)<block_start>print(var.op.name)<block_end><with_stmt>tf.Session()<as>sess<block_start>init=tf.global_variables_initializer()<line_sep>sess.run(init)<line_sep>predict=sess.run(output feed_dict={input_placeholder:np.ones((1 299 299 3))<times>0.5})<line_sep>print(predict)<line_sep>print(np.argmax(predict))<block_end>
<import_stmt>re<import_from_stmt>datadog_checks.base AgentCheck<import_from_stmt>datadog_checks.base.errors CheckException<import_from_stmt>datadog_checks.utils.subprocess_output get_subprocess_output<line_sep>__version__="1.0.0"<line_sep>__author__="<NAME> <<EMAIL>>"<class_stmt>NagiosPluginWrapperCheck(AgentCheck)<block_start>PERFDATA_RE=(r"([^\s]+|'[^']+')=([-.\d]+)(c|s|ms|us|B|KB|MB|GB|TB|%)?"+r"(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?")<def_stmt>check self instance<block_start>check_command=instance.get('check_command')<line_sep>metric_namespace=instance.get('metric_namespace')<line_sep>tags=instance.get('tags' [])<line_sep>create_service_check=instance.get('create_service_check' <false>)<if_stmt><not>check_command<block_start><raise>CheckException("Configuration error. Missing check_command definition, please fix nagios_plugin_wrapper.yaml")<block_end><if_stmt><not>metric_namespace<block_start><raise>CheckException("Configuration error. Missing metric_namespace definition, please fix nagios_plugin_wrapper.yaml")<block_end>raw_output=<none><line_sep>err=<none><line_sep>ret=<none><try_stmt><block_start>raw_output,err,ret=get_subprocess_output(check_command self.log)<block_end><except_stmt>Exception<as>e<block_start>error="Failed to execute check_command {check_command} - {error}".format(check_command=check_command error=e)<line_sep>self.log.warning(error)<line_sep><raise>CheckException("check_command '{check_command}' failed to execute, see agent.log for more information.".format(check_command=check_command))<block_end>output,metrics=self._parse_output(raw_output)<if_stmt>metrics<block_start>metrics=self._parse_perfdata(metrics)<for_stmt>label,value metrics<block_start>label=self._sanitize(label)<line_sep>self.log.debug("metric_namespace: {namespace} | tags: {tags} | value: {value} | ret_code: {ret}".format(namespace=metric_namespace tags=tags value=value ret=ret))<line_sep>self.gauge('{metric_namespace}.{label}'.format(metric_namespace=metric_namespace label=label) value tags=tags)<block_end><block_end><if_stmt>output<and>create_service_check<block_start><if_stmt>ret<eq>0<block_start>status=AgentCheck.OK<block_end><elif_stmt>ret<eq>1<block_start>status=AgentCheck.WARNING<block_end><elif_stmt>ret<eq>2<block_start>status=AgentCheck.CRITICAL<block_end><else_stmt><block_start>status=AgentCheck.UNKNOWN<block_end>self.service_check(metric_namespace status tags=tags message=output.rstrip())<block_end><block_end><def_stmt>_parse_output self s<block_start>"""Parse the output text and performance data string"""<try_stmt><block_start>output,metrics=s.rsplit('|' 1)<block_end><except_stmt>ValueError<block_start>self.log.debug("No performance data found in string: {string}, skipping...".format(string=s))<line_sep><return>s <none><block_end><return>output metrics<block_end><def_stmt>_parse_perfdata self s<block_start>"""Parse performance data from a perfdata string"""<line_sep>metrics=[]<line_sep>counters=re.findall(self.PERFDATA_RE s)<if_stmt>counters<is><none><block_start>self.log.warning("Failed to parse performance data: {s}".format(s=s))<line_sep><return>metrics<block_end><for_stmt>(key value uom warn crit min max) counters<block_start><try_stmt><block_start>norm_value=self._normalize_to_unit(float(value) uom)<line_sep>metrics.append((key norm_value))<block_end><except_stmt>ValueError<block_start>self.log.warning("Couldn't convert value '{value}' to float".format(value=value))<block_end><block_end><return>metrics<block_end><def_stmt>_normalize_to_unit self value 
unit<block_start>"""Normalize the value to the unit returned. We use base-1000 for second-based units, and base-1024 for byte-based units. Sadly, the Nagios-Plugins specification doesn't disambiguate base-1000 (KB) and base-1024 (KiB). """<if_stmt>unit<eq>'ms'<block_start><return>value/1000.0<block_end><if_stmt>unit<eq>'us'<block_start><return>value/1000000.0<block_end><if_stmt>unit<eq>'KB'<block_start><return>value<times>1024<block_end><if_stmt>unit<eq>'MB'<block_start><return>value<times>1024<times>1024<block_end><if_stmt>unit<eq>'GB'<block_start><return>value<times>1024<times>1024<times>1024<block_end><if_stmt>unit<eq>'TB'<block_start><return>value<times>1024<times>1024<times>1024<times>1024<block_end><return>value<block_end><def_stmt>_sanitize self s<block_start>"""Sanitize the name of a metric to remove unwanted chars """<line_sep><return>re.sub("[^\w-]" "" s)<block_end><block_end>
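# Standalone sketch (made-up perfdata string) of how the PERFDATA_RE above splits a
# Nagios plugin's performance-data section into label/value/unit/threshold fields.
import re

PERFDATA_RE = (r"([^\s]+|'[^']+')=([-.\d]+)(c|s|ms|us|B|KB|MB|GB|TB|%)?"
               + r"(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?(?:;([-.\d]+))?")

sample = "time=0.012s;0.100;0.200;0;10 'disk usage'=85%;90;95"
for label, value, uom, warn, crit, lo, hi in re.findall(PERFDATA_RE, sample):
    # Optional groups that did not participate come back as empty strings.
    print(label, float(value), uom or "(none)", warn, crit, lo, hi)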
<import_from_stmt>typing Optional<import_from_stmt>src.utils.pubsub publish_to_topic<def_stmt>publish_user user_id:str access_token:Optional[str]=<none><block_start>publish_to_topic("user" {"user_id":user_id "access_token":access_token})<block_end>
<import_stmt>maya.cmds<as>cmds<import_stmt>random<import_from_stmt>mGui gui forms lists<import_from_stmt>mGui.bindings bind<import_from_stmt>mGui.observable ViewCollection<def_stmt>basic_list_binding <block_start>''' Illustrates the basics of binding to a list. The collection 'bound' contains some strings, and we bind it to the VerticalList 'list_view'. Adding items to the collection automatically redraws the list with the new items. In this case they are drawn with buttons, but lists allow you to customize the appearance of items extensively. This example also illustrates how to use closures to capture inter-object references, and how to keep callback functions alive without creating a full class. '''<with_stmt>gui.BindingWindow(title='example window' menuBar=<true>)<as>test_window<block_start>bound=ViewCollection('pPlane1' 'pCube2')<with_stmt>forms.VerticalThreePane()<as>main<block_start>header=gui.Text(label="List classes make it easy to manage collections")<line_sep>list_view=lists.VerticalList(synchronous=<true>)<line_sep>bound<g>bind()<g>list_view.collection<with_stmt>forms.HorizontalStretchForm()<as>buttons<block_start>more=gui.Button(label='Add another')<line_sep>close=gui.Button(label='close')<block_end><block_end><block_end># use closures to capture the UI names without a full class <def_stmt>close_window *_ **__<block_start>cmds.deleteUI(test_window)<block_end><def_stmt>show_more *_ **__<block_start>r=random.choice(("pPlane" "pCube" "pSphere"))+str(random.randint(2 20))<line_sep>bound.append(r)<block_end># bind the functions to the handlers close.command<augadd>close_window test_window<line_sep>more.command<augadd>show_more test_window<line_sep><return>test_window<block_end><if_stmt>__name__<eq>'__main__'<block_start>the_window=basic_list_binding()<line_sep>the_window.show()<block_end>
<import_stmt>sys<line_sep>sys.path.append('../')<import_from_stmt>hydroDL master<import_from_stmt>hydroDL.master default<import_from_stmt>hydroDL.data camels<import_from_stmt>hydroDL.model rnn crit train<import_stmt>json<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>random<line_sep># Options for different interface interfaceOpt=1<line_sep># ==1 default, the improved and more interpretable version. It's easier to see the data flow, model setup and training # process. Recommended for most users. # ==0 the original "pro" version we used to run heavy jobs for the paper. It was later improved for clarity to obtain option 1. # Results are very similar for two options and have little difference in computational performance. Action=[1 2]<line_sep># Using Action options to control training different models # 1: Train Base LSTM PUR Models without integrating any soft info # 2: Train CNN-LSTM to integrate FDCs # Hyperparameters EPOCH=300<line_sep>BATCH_SIZE=100<line_sep>RHO=365<line_sep>HIDDENSIZE=256<line_sep>saveEPOCH=10# save model for every "saveEPOCH" epochs Ttrain=[19851001 19951001]# training period LCrange=[19851001 19951001]<line_sep># Define root directory of database and output # Modify this based on your own location of CAMELS dataset # Following the data download instruction in README file, you should organize the folders like # 'your/path/to/Camels/basin_timeseries_v1p2_metForcing_obsFlow' and 'your/path/to/Camels/camels_attributes_v2.0' # Then 'rootDatabase' here should be 'your/path/to/Camels' # You can also define the database directory in hydroDL/__init__.py by modifying pathCamels['DB'] variable rootDatabase=os.path.join(os.path.sep 'scratch' 'Camels')# CAMELS dataset root directory camels.initcamels(rootDatabase)# initialize three camels module-scope variables in camels.py: dirDB, gageDict, statDict rootOut=os.path.join(os.path.sep 'data' 'rnnStreamflow')# Model output root directory # define random seed # seedid = [159654, 109958, 257886, 142365, 229837, 588859] # six seeds randomly generated using np.random.uniform seedid=159654<line_sep>random.seed(seedid)<line_sep>torch.manual_seed(seedid)<line_sep>np.random.seed(seedid)<line_sep>torch.cuda.manual_seed(seedid)<line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.backends.cudnn.benchmark=<false><line_sep># Fix seed for training, change it to have different runnings with different seeds # We use the mean discharge of 6 runnings with different seeds to account for randomness # directory to save training results exp_name='PUR'<line_sep>exp_disp='Testrun'<line_sep>save_path=os.path.join(exp_name exp_disp str(seedid))<line_sep># Divide CAMELS dataset into 7 PUR regions gageinfo=camels.gageDict<line_sep>hucinfo=gageinfo['huc']<line_sep>gageid=gageinfo['id']<line_sep># get the id list of each region regionID=list()<line_sep>regionNum=list()<line_sep>regionDivide=[[1 2] [3 6] [4 5 7] [9 10] [8 11 12 13] [14 15 16 18] [17]]# seven regions <for_stmt>ii range(len(regionDivide))<block_start>tempcomb=regionDivide[ii]<line_sep>tempregid=list()<for_stmt>ih tempcomb<block_start>tempid=gageid[hucinfo<eq>ih].tolist()<line_sep>tempregid=tempregid+tempid<block_end>regionID.append(tempregid)<line_sep>regionNum.append(len(tempregid))<block_end># Only for interfaceOpt=0 using multiple GPUs, not used here # cid = 0 # starting GPU id # gnum = 6 # how many GPUs you have # Region withheld as testing target. Take region 1 as an example. # Change this to 1,2,..,7 to run models for all 7 PUR regions in CONUS. 
testRegion=1<line_sep>iexp=testRegion-1# index TestLS=regionID[iexp]# basin ID list for testing, should be withheld for training TrainLS=list(set(gageid.tolist())-set(TestLS))# basin ID for training gageDic={'TrainID':TrainLS 'TestID':TestLS}<line_sep># prepare the training dataset optData=default.optDataCamels<line_sep>optData=default.update(optData tRange=Ttrain subset=TrainLS lckernel=<none> fdcopt=<false>)<line_sep>climateList=camels.attrLstSel+['p_mean' 'pet_mean' 'p_seasonality' 'frac_snow' 'aridity' 'high_prec_freq' 'high_prec_dur' 'low_prec_freq' 'low_prec_dur']<line_sep># climateList = ['slope_mean', 'area_gages2', 'frac_forest', 'soil_porosity', 'max_water_content'] # climateList = [] optData=default.update(optData varT=camels.forcingLst varC=climateList)<line_sep># varT: forcing used for training varC: attributes used for training # The above controls what attributes used for training, change varC for input-selection-ensemble # for 5 attributes model: climateList = ['slope_mean', 'area_gages2', 'frac_forest', 'soil_porosity', 'max_water_content'] # for no-attribute model: varC = [] # the input-selection ensemble represents using the mean prediction of full, 5-attr and no-attr models, # in total the mean of 3(different attributes)*6(different random seeds) = 18 models <if_stmt>interfaceOpt<eq>1# read data from CAMELS dataset <block_start>df=camels.DataframeCamels(subset=optData['subset'] tRange=optData['tRange'])<line_sep>x=df.getDataTs(varLst=optData['varT'] doNorm=<false> rmNan=<false>)<line_sep>y=df.getDataObs(doNorm=<false> rmNan=<false> basinnorm=<true>)<line_sep># "basinnorm = True" will call camels.basinNorm() on the original discharge data. This will transform discharge # from ft3/s to mm/day and then divided by mean precip to be dimensionless. 
output = discharge/(area*mean_precip) c=df.getDataConst(varLst=optData['varC'] doNorm=<false> rmNan=<false>)<line_sep># process, do normalization and remove nan series_data=np.concatenate([x y] axis=2)<line_sep>seriesvarLst=camels.forcingLst+['runoff']<line_sep># calculate statistics for normalization and save to a dictionary statDict=camels.getStatDic(attrLst=climateList attrdata=c seriesLst=seriesvarLst seriesdata=series_data)<line_sep># normalize attr_norm=camels.transNormbyDic(c climateList statDict toNorm=<true>)<line_sep>attr_norm[np.isnan(attr_norm)]=0.0<line_sep>series_norm=camels.transNormbyDic(series_data seriesvarLst statDict toNorm=<true>)<line_sep># prepare the inputs xTrain=series_norm[: : :-1]# forcing, not include obs xTrain[np.isnan(xTrain)]=0.0<line_sep>yTrain=np.expand_dims(series_norm[: : -1] 2)<if_stmt>attr_norm.size<eq>0# [], no-attribute case <block_start>attrs=<none><line_sep>Nx=xTrain.shape[-1]<block_end><else_stmt># with attributes <block_start>attrs=attr_norm<line_sep>Nx=xTrain.shape[-1]+attrs.shape[-1]<block_end>Ny=yTrain.shape[-1]<block_end># define loss function optLoss=default.optLossRMSE<line_sep>lossFun=crit.RmseLoss()<line_sep># configuration for training optTrain=default.update(default.optTrainCamels miniBatch=[BATCH_SIZE RHO] nEpoch=EPOCH saveEpoch=saveEPOCH seed=seedid)<line_sep>hucdic='Reg-'+str(iexp+1)+'-Num'+str(regionNum[iexp])<if_stmt>1<in>Action# Train base LSTM PUR model <block_start>out=os.path.join(rootOut save_path hucdic 'Reg-85-95-Sub-Full')<line_sep># out = os.path.join(rootOut, save_path, hucdic,'Reg-85-95-Sub-5attr') # out = os.path.join(rootOut, save_path, hucdic,'Reg-85-95-Sub-Noattr') <if_stmt><not>os.path.isdir(out)<block_start>os.makedirs(out)<block_end># log training gage information gageFile=os.path.join(out 'gage.json')<with_stmt>open(gageFile 'w')<as>fp<block_start>json.dump(gageDic fp indent=4)<block_end># define model config optModel=default.update(default.optLstm name='hydroDL.model.rnn.CudnnLstmModel' hiddenSize=HIDDENSIZE)<if_stmt>interfaceOpt<eq>1# define, load and train model <block_start>optModel=default.update(optModel nx=Nx ny=Ny)<line_sep>model=rnn.CudnnLstmModel(nx=optModel['nx'] ny=optModel['ny'] hiddenSize=optModel['hiddenSize'])<line_sep># Wrap up all the training configurations to one dictionary in order to save into "out" folder masterDict=master.wrapMaster(out optData optModel optLoss optTrain)<line_sep>master.writeMasterFile(masterDict)<line_sep># log statistics statFile=os.path.join(out 'statDict.json')<with_stmt>open(statFile 'w')<as>fp<block_start>json.dump(statDict fp indent=4)<block_end># Train the model trainedModel=train.trainModel(model xTrain yTrain attrs lossFun nEpoch=EPOCH miniBatch=[BATCH_SIZE RHO] saveEpoch=saveEPOCH saveFolder=out)<block_end><if_stmt>interfaceOpt<eq>0# Only need to pass the wrapped configuration dict 'masterDict' for training # nx, ny will be automatically updated later <block_start>masterDict=master.wrapMaster(out optData optModel optLoss optTrain)<line_sep>master.train(masterDict)<line_sep>## Not used here. ## A potential way to run batch jobs simultaneously in background through multiple GPUs and Linux screens. ## To use this, must manually set the "pathCamels['DB']" in hydroDL/__init__.py as your own root path of CAMELS data. ## Use the following master.runTrain() instead of the above master.train(). 
# master.runTrain(masterDict, cudaID=cid % gnum, screen='test-'+str(cid)) # cid = cid + 1 <block_end><block_end><if_stmt>2<in>Action# Train CNN-LSTM PUR model to integrate FDCs # LCrange defines from which period to get synthetic FDC <block_start>LCTstr=str(LCrange[0])+'-'+str(LCrange[1])<line_sep>out=os.path.join(rootOut save_path hucdic 'Reg-85-95-Sub-Full-FDC'+LCTstr)<line_sep># out = os.path.join(rootOut, save_path, hucdic, 'Reg-85-95-Sub-5attr-FDC' + LCTstr) # out = os.path.join(rootOut, save_path, hucdic, 'Reg-85-95-Sub-Noattr-FDC' + LCTstr) <if_stmt><not>os.path.isdir(out)<block_start>os.makedirs(out)<block_end>gageFile=os.path.join(out 'gage.json')<with_stmt>open(gageFile 'w')<as>fp<block_start>json.dump(gageDic fp indent=4)<block_end>optData=default.update(default.optDataCamels tRange=Ttrain subset=TrainLS lckernel=LCrange fdcopt=<true>)<line_sep># define model convNKS=[(10 5 1) (5 3 3) (1 1 1)]<line_sep># CNN parameters for 3 layers: [(Number of kernels 10,5,1), (kernel size 5,3,3), (stride 1,1,1)] optModel=default.update(default.optCnn1dLstm name='hydroDL.model.rnn.CNN1dLCmodel' hiddenSize=HIDDENSIZE convNKS=convNKS poolOpt=[2 2 1])<line_sep># use CNN-LSTM model <if_stmt>interfaceOpt<eq>1# load data and create synthetic FDCs as inputs <block_start>dffdc=camels.DataframeCamels(subset=optData['subset'] tRange=optData['lckernel'])<line_sep>datatemp=dffdc.getDataObs(doNorm=<false> rmNan=<false> basinnorm=<true>)<line_sep># normalize data dadata=camels.transNormbyDic(datatemp 'runoff' statDict toNorm=<true>)<line_sep>dadata=np.squeeze(dadata)# dim Nbasin*Nday fdcdata=master.master.calFDC(dadata)<line_sep>print('FDC was calculated and used!')<line_sep>xIn=(xTrain fdcdata)<line_sep># load model Nobs=xIn[1].shape[-1]<line_sep>optModel=default.update(optModel nx=Nx ny=Ny nobs=Nobs)# update input dims convpara=optModel['convNKS']<line_sep>model=rnn.CNN1dLCmodel(nx=optModel['nx'] ny=optModel['ny'] nobs=optModel['nobs'] hiddenSize=optModel['hiddenSize'] nkernel=convpara[0] kernelSize=convpara[1] stride=convpara[2] poolOpt=optModel['poolOpt'])<line_sep>print('CNN1d Local calibartion Kernel is used!')<line_sep># Wrap up all the training configurations to one dictionary in order to save into "out" folder masterDict=master.wrapMaster(out optData optModel optLoss optTrain)<line_sep>master.writeMasterFile(masterDict)<line_sep># log statistics statFile=os.path.join(out 'statDict.json')<with_stmt>open(statFile 'w')<as>fp<block_start>json.dump(statDict fp indent=4)<block_end># Train the model trainedModel=train.trainModel(model xIn # need to well defined yTrain attrs lossFun nEpoch=EPOCH miniBatch=[BATCH_SIZE RHO] saveEpoch=saveEPOCH saveFolder=out)<block_end><if_stmt>interfaceOpt<eq>0# Only need to pass the wrapped configuration 'masterDict' for training # nx, ny, nobs will be automatically updated later <block_start>masterDict=master.wrapMaster(out optData optModel optLoss optTrain)<line_sep>master.train(masterDict)<block_end><block_end># train model # master.runTrain(masterDict, cudaID=cid % gnum, screen='test-'+str(cid)) # cid = cid + 1
<import_from_stmt>worldengine.simulations.basic find_threshold_f<import_stmt>numpy<class_stmt>HumiditySimulation(object)<block_start>@staticmethod<def_stmt>is_applicable world<block_start><return>world.has_precipitations()<and>world.has_irrigation()<and>(<not>world.has_humidity())<block_end><def_stmt>execute self world seed<block_start><assert_stmt>seed<is><not><none><line_sep>data,quantiles=self._calculate(world)<line_sep>world.humidity=(data quantiles)<block_end>@staticmethod<def_stmt>_calculate world<block_start>humids=world.humids<line_sep>precipitationWeight=1.0<line_sep>irrigationWeight=3<line_sep>data=numpy.zeros((world.height world.width) dtype=float)<line_sep>data=(world.layers['precipitation'].data<times>precipitationWeight-world.layers['irrigation'].data<times>irrigationWeight)/(precipitationWeight+irrigationWeight)<line_sep># These were originally evenly spaced at 12.5% each but changing them # to a bell curve produced better results ocean=world.layers['ocean'].data<line_sep>quantiles={}<line_sep>quantiles['12']=find_threshold_f(data humids[6] ocean)<line_sep>quantiles['25']=find_threshold_f(data humids[5] ocean)<line_sep>quantiles['37']=find_threshold_f(data humids[4] ocean)<line_sep>quantiles['50']=find_threshold_f(data humids[3] ocean)<line_sep>quantiles['62']=find_threshold_f(data humids[2] ocean)<line_sep>quantiles['75']=find_threshold_f(data humids[1] ocean)<line_sep>quantiles['87']=find_threshold_f(data humids[0] ocean)<line_sep><return>data quantiles<block_end><block_end>
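# Tiny numeric sketch (toy arrays, not WorldEngine data) of the weighted blend computed
# in _calculate() above: precipitation adds humidity, irrigation is weighted three times
# as heavily and is subtracted before dividing by the total weight.
import numpy

precipitation = numpy.array([[0.2, 0.8], [0.5, 0.1]])
irrigation = numpy.array([[0.1, 0.0], [0.3, 0.2]])
precipitation_weight, irrigation_weight = 1.0, 3.0

humidity = (precipitation * precipitation_weight
            - irrigation * irrigation_weight) / (precipitation_weight + irrigation_weight)
print(humidity)  # e.g. humidity[0][0] == (0.2 * 1.0 - 0.1 * 3.0) / 4.0 == -0.025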
<import_stmt>difflib<import_stmt>glob<import_stmt>gzip<import_stmt>os<import_stmt>tempfile<import_stmt>Cython.Build.Dependencies<import_stmt>Cython.Utils<import_from_stmt>Cython.TestUtils CythonTest<class_stmt>TestCyCache(CythonTest)<block_start><def_stmt>setUp self<block_start>CythonTest.setUp(self)<line_sep>self.temp_dir=tempfile.mkdtemp(prefix='cycache-test' dir='TEST_TMP'<if>os.path.isdir('TEST_TMP')<else><none>)<line_sep>self.src_dir=tempfile.mkdtemp(prefix='src' dir=self.temp_dir)<line_sep>self.cache_dir=tempfile.mkdtemp(prefix='cache' dir=self.temp_dir)<block_end><def_stmt>cache_files self file_glob<block_start><return>glob.glob(os.path.join(self.cache_dir file_glob))<block_end><def_stmt>fresh_cythonize self *args **kwargs<block_start>Cython.Utils.clear_function_caches()<line_sep>Cython.Build.Dependencies._dep_tree=<none># discard method caches Cython.Build.Dependencies.cythonize(*args **kwargs)<block_end><def_stmt>test_cycache_switch self<block_start>content1='value = 1\n'<line_sep>content2='value = 2\n'<line_sep>a_pyx=os.path.join(self.src_dir 'a.pyx')<line_sep>a_c=a_pyx[:-4]+'.c'<with_stmt>open(a_pyx 'w')<as>f<block_start>f.write(content1)<block_end>self.fresh_cythonize(a_pyx cache=self.cache_dir)<line_sep>self.fresh_cythonize(a_pyx cache=self.cache_dir)<line_sep>self.assertEqual(1 len(self.cache_files('a.c*')))<with_stmt>open(a_c)<as>f<block_start>a_contents1=f.read()<block_end>os.unlink(a_c)<with_stmt>open(a_pyx 'w')<as>f<block_start>f.write(content2)<block_end>self.fresh_cythonize(a_pyx cache=self.cache_dir)<with_stmt>open(a_c)<as>f<block_start>a_contents2=f.read()<block_end>os.unlink(a_c)<line_sep>self.assertNotEqual(a_contents1 a_contents2 'C file not changed!')<line_sep>self.assertEqual(2 len(self.cache_files('a.c*')))<with_stmt>open(a_pyx 'w')<as>f<block_start>f.write(content1)<block_end>self.fresh_cythonize(a_pyx cache=self.cache_dir)<line_sep>self.assertEqual(2 len(self.cache_files('a.c*')))<with_stmt>open(a_c)<as>f<block_start>a_contents=f.read()<block_end>self.assertEqual(a_contents a_contents1 msg='\n'.join(list(difflib.unified_diff(a_contents.split('\n') a_contents1.split('\n')))[:10]))<block_end><def_stmt>test_cycache_uses_cache self<block_start>a_pyx=os.path.join(self.src_dir 'a.pyx')<line_sep>a_c=a_pyx[:-4]+'.c'<with_stmt>open(a_pyx 'w')<as>f<block_start>f.write('pass')<block_end>self.fresh_cythonize(a_pyx cache=self.cache_dir)<line_sep>a_cache=os.path.join(self.cache_dir os.listdir(self.cache_dir)[0])<line_sep>gzip.GzipFile(a_cache 'wb').write('fake stuff'.encode('ascii'))<line_sep>os.unlink(a_c)<line_sep>self.fresh_cythonize(a_pyx cache=self.cache_dir)<with_stmt>open(a_c)<as>f<block_start>a_contents=f.read()<block_end>self.assertEqual(a_contents 'fake stuff' 'Unexpected contents: %s...'%a_contents[:100])<block_end><def_stmt>test_multi_file_output self<block_start>a_pyx=os.path.join(self.src_dir 'a.pyx')<line_sep>a_c=a_pyx[:-4]+'.c'<line_sep>a_h=a_pyx[:-4]+'.h'<line_sep>a_api_h=a_pyx[:-4]+'_api.h'<with_stmt>open(a_pyx 'w')<as>f<block_start>f.write('cdef public api int foo(int x): return x\n')<block_end>self.fresh_cythonize(a_pyx cache=self.cache_dir)<line_sep>expected=[a_c a_h a_api_h]<for_stmt>output expected<block_start>self.assertTrue(os.path.exists(output) output)<line_sep>os.unlink(output)<block_end>self.fresh_cythonize(a_pyx cache=self.cache_dir)<for_stmt>output expected<block_start>self.assertTrue(os.path.exists(output) output)<block_end><block_end><def_stmt>test_options_invalidation self<block_start>hash_pyx=os.path.join(self.src_dir 
'options.pyx')<line_sep>hash_c=hash_pyx[:-len('.pyx')]+'.c'<with_stmt>open(hash_pyx 'w')<as>f<block_start>f.write('pass')<block_end>self.fresh_cythonize(hash_pyx cache=self.cache_dir cplus=<false>)<line_sep>self.assertEqual(1 len(self.cache_files('options.c*')))<line_sep>os.unlink(hash_c)<line_sep>self.fresh_cythonize(hash_pyx cache=self.cache_dir cplus=<true>)<line_sep>self.assertEqual(2 len(self.cache_files('options.c*')))<line_sep>os.unlink(hash_c)<line_sep>self.fresh_cythonize(hash_pyx cache=self.cache_dir cplus=<false> show_version=<false>)<line_sep>self.assertEqual(2 len(self.cache_files('options.c*')))<line_sep>os.unlink(hash_c)<line_sep>self.fresh_cythonize(hash_pyx cache=self.cache_dir cplus=<false> show_version=<true>)<line_sep>self.assertEqual(2 len(self.cache_files('options.c*')))<block_end><block_end>
# -*- coding:UTF-8 -*- """ implementation of Inception blocks with pytorch @<NAME> 2020_09_011 """<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>models.blocks.conv_bn BN_Conv2d<class_stmt>Stem_v4_Res2(nn.Module)<block_start>""" stem block for Inception-v4 and Inception-RestNet-v2 """<def_stmt>__init__ self<block_start>super(Stem_v4_Res2 self).__init__()<line_sep>self.step1=nn.Sequential(BN_Conv2d(3 32 3 2 0 bias=<false>) BN_Conv2d(32 32 3 1 0 bias=<false>) BN_Conv2d(32 64 3 1 1 bias=<false>))<line_sep>self.step2_pool=nn.MaxPool2d(3 2 0)<line_sep>self.step2_conv=BN_Conv2d(64 96 3 2 0 bias=<false>)<line_sep>self.step3_1=nn.Sequential(BN_Conv2d(160 64 1 1 0 bias=<false>) BN_Conv2d(64 96 3 1 0 bias=<false>))<line_sep>self.step3_2=nn.Sequential(BN_Conv2d(160 64 1 1 0 bias=<false>) BN_Conv2d(64 64 (7 1) (1 1) (3 0) bias=<false>) BN_Conv2d(64 64 (1 7) (1 1) (0 3) bias=<false>) BN_Conv2d(64 96 3 1 0 bias=<false>))<line_sep>self.step4_pool=nn.MaxPool2d(3 2 0)<line_sep>self.step4_conv=BN_Conv2d(192 192 3 2 0 bias=<false>)<block_end><def_stmt>forward self x<block_start>out=self.step1(x)<line_sep>tmp1=self.step2_pool(out)<line_sep>tmp2=self.step2_conv(out)<line_sep>out=torch.cat((tmp1 tmp2) 1)<line_sep>tmp1=self.step3_1(out)<line_sep>tmp2=self.step3_2(out)<line_sep>out=torch.cat((tmp1 tmp2) 1)<line_sep>tmp1=self.step4_pool(out)<line_sep>tmp2=self.step4_conv(out)<line_sep>print(tmp1.shape)<line_sep>print(tmp2.shape)<line_sep>out=torch.cat((tmp1 tmp2) 1)<line_sep><return>out<block_end><block_end><class_stmt>Stem_Res1(nn.Module)<block_start>""" stem block for Inception-ResNet-v1 """<def_stmt>__init__ self<block_start>super(Stem_Res1 self).__init__()<line_sep>self.stem=nn.Sequential(BN_Conv2d(3 32 3 2 0 bias=<false>) BN_Conv2d(32 32 3 1 0 bias=<false>) BN_Conv2d(32 64 3 1 1 bias=<false>) nn.MaxPool2d(3 2 0) BN_Conv2d(64 80 1 1 0 bias=<false>) BN_Conv2d(80 192 3 1 0 bias=<false>) BN_Conv2d(192 256 3 2 0 bias=<false>))<block_end><def_stmt>forward self x<block_start><return>self.stem(x)<block_end><block_end><class_stmt>Inception_A(nn.Module)<block_start>""" Inception-A block for Inception-v4 net """<def_stmt>__init__ self in_channels b1 b2 b3_n1 b3_n3 b4_n1 b4_n3<block_start>super(Inception_A self).__init__()<line_sep>self.branch1=nn.Sequential(nn.AvgPool2d(3 1 1) BN_Conv2d(in_channels b1 1 1 0 bias=<false>))<line_sep>self.branch2=BN_Conv2d(in_channels b2 1 1 0 bias=<false>)<line_sep>self.branch3=nn.Sequential(BN_Conv2d(in_channels b3_n1 1 1 0 bias=<false>) BN_Conv2d(b3_n1 b3_n3 3 1 1 bias=<false>))<line_sep>self.branch4=nn.Sequential(BN_Conv2d(in_channels b4_n1 1 1 0 bias=<false>) BN_Conv2d(b4_n1 b4_n3 3 1 1 bias=<false>) BN_Conv2d(b4_n3 b4_n3 3 1 1 bias=<false>))<block_end><def_stmt>forward self x<block_start>out1=self.branch1(x)<line_sep>out2=self.branch2(x)<line_sep>out3=self.branch3(x)<line_sep>out4=self.branch4(x)<line_sep><return>torch.cat((out1 out2 out3 out4) 1)<block_end><block_end><class_stmt>Inception_B(nn.Module)<block_start>""" Inception-B block for Inception-v4 net """<def_stmt>__init__ self in_channels b1 b2 b3_n1 b3_n1x7 b3_n7x1 b4_n1 b4_n1x7_1 b4_n7x1_1 b4_n1x7_2 b4_n7x1_2<block_start>super(Inception_B self).__init__()<line_sep>self.branch1=nn.Sequential(nn.AvgPool2d(3 1 1) BN_Conv2d(in_channels b1 1 1 0 bias=<false>))<line_sep>self.branch2=BN_Conv2d(in_channels b2 1 1 0 bias=<false>)<line_sep>self.branch3=nn.Sequential(BN_Conv2d(in_channels b3_n1 1 1 0 bias=<false>) BN_Conv2d(b3_n1 b3_n1x7 (1 7) (1 1) (0 3) bias=<false>) 
BN_Conv2d(b3_n1x7 b3_n7x1 (7 1) (1 1) (3 0) bias=<false>))<line_sep>self.branch4=nn.Sequential(BN_Conv2d(in_channels b4_n1 1 1 0 bias=<false>) BN_Conv2d(b4_n1 b4_n1x7_1 (1 7) (1 1) (0 3) bias=<false>) BN_Conv2d(b4_n1x7_1 b4_n7x1_1 (7 1) (1 1) (3 0) bias=<false>) BN_Conv2d(b4_n7x1_1 b4_n1x7_2 (1 7) (1 1) (0 3) bias=<false>) BN_Conv2d(b4_n1x7_2 b4_n7x1_2 (7 1) (1 1) (3 0) bias=<false>))<block_end><def_stmt>forward self x<block_start>out1=self.branch1(x)<line_sep>out2=self.branch2(x)<line_sep>out3=self.branch3(x)<line_sep>out4=self.branch4(x)<line_sep><return>torch.cat((out1 out2 out3 out4) 1)<block_end><block_end><class_stmt>Inception_C(nn.Module)<block_start>""" Inception-C block for Inception-v4 net """<def_stmt>__init__ self in_channels b1 b2 b3_n1 b3_n1x3_3x1 b4_n1 b4_n1x3 b4_n3x1 b4_n1x3_3x1<block_start>super(Inception_C self).__init__()<line_sep>self.branch1=nn.Sequential(nn.AvgPool2d(3 1 1) BN_Conv2d(in_channels b1 1 1 0 bias=<false>))<line_sep>self.branch2=BN_Conv2d(in_channels b2 1 1 0 bias=<false>)<line_sep>self.branch3_1=BN_Conv2d(in_channels b3_n1 1 1 0 bias=<false>)<line_sep>self.branch3_1x3=BN_Conv2d(b3_n1 b3_n1x3_3x1 (1 3) (1 1) (0 1) bias=<false>)<line_sep>self.branch3_3x1=BN_Conv2d(b3_n1 b3_n1x3_3x1 (3 1) (1 1) (1 0) bias=<false>)<line_sep>self.branch4_1=nn.Sequential(BN_Conv2d(in_channels b4_n1 1 1 0 bias=<false>) BN_Conv2d(b4_n1 b4_n1x3 (1 3) (1 1) (0 1) bias=<false>) BN_Conv2d(b4_n1x3 b4_n3x1 (3 1) (1 1) (1 0) bias=<false>))<line_sep>self.branch4_1x3=BN_Conv2d(b4_n3x1 b4_n1x3_3x1 (1 3) (1 1) (0 1) bias=<false>)<line_sep>self.branch4_3x1=BN_Conv2d(b4_n3x1 b4_n1x3_3x1 (3 1) (1 1) (1 0) bias=<false>)<block_end><def_stmt>forward self x<block_start>out1=self.branch1(x)<line_sep>out2=self.branch2(x)<line_sep>tmp=self.branch3_1(x)<line_sep>out3_1=self.branch3_1x3(tmp)<line_sep>out3_2=self.branch3_3x1(tmp)<line_sep>tmp=self.branch4_1(x)<line_sep>out4_1=self.branch4_1x3(tmp)<line_sep>out4_2=self.branch4_3x1(tmp)<line_sep><return>torch.cat((out1 out2 out3_1 out3_2 out4_1 out4_2) 1)<block_end><block_end><class_stmt>Reduction_A(nn.Module)<block_start>""" Reduction-A block for Inception-v4, Inception-ResNet-v1, Inception-ResNet-v2 nets """<def_stmt>__init__ self in_channels k l m n<block_start>super(Reduction_A self).__init__()<line_sep>self.branch2=BN_Conv2d(in_channels n 3 2 0 bias=<false>)<line_sep>self.branch3=nn.Sequential(BN_Conv2d(in_channels k 1 1 0 bias=<false>) BN_Conv2d(k l 3 1 1 bias=<false>) BN_Conv2d(l m 3 2 0 bias=<false>))<block_end><def_stmt>forward self x<block_start>out1=F.max_pool2d(x 3 2 0)<line_sep>out2=self.branch2(x)<line_sep>out3=self.branch3(x)<line_sep><return>torch.cat((out1 out2 out3) 1)<block_end><block_end><class_stmt>Reduction_B_v4(nn.Module)<block_start>""" Reduction-B block for Inception-v4 net """<def_stmt>__init__ self in_channels b2_n1 b2_n3 b3_n1 b3_n1x7 b3_n7x1 b3_n3<block_start>super(Reduction_B_v4 self).__init__()<line_sep>self.branch2=nn.Sequential(BN_Conv2d(in_channels b2_n1 1 1 0 bias=<false>) BN_Conv2d(b2_n1 b2_n3 3 2 0 bias=<false>))<line_sep>self.branch3=nn.Sequential(BN_Conv2d(in_channels b3_n1 1 1 0 bias=<false>) BN_Conv2d(b3_n1 b3_n1x7 (1 7) (1 1) (0 3) bias=<false>) BN_Conv2d(b3_n1x7 b3_n7x1 (7 1) (1 1) (3 0) bias=<false>) BN_Conv2d(b3_n7x1 b3_n3 3 2 0 bias=<false>))<block_end><def_stmt>forward self x<block_start>out1=F.max_pool2d(x 3 2 0)<line_sep>out2=self.branch2(x)<line_sep>out3=self.branch3(x)<line_sep><return>torch.cat((out1 out2 out3) 1)<block_end><block_end><class_stmt>Reduction_B_Res(nn.Module)<block_start>""" Reduction-B 
block for Inception-ResNet-v1 and Inception-ResNet-v2 net """<def_stmt>__init__ self in_channels b2_n1 b2_n3 b3_n1 b3_n3 b4_n1 b4_n3_1 b4_n3_2<block_start>super(Reduction_B_Res self).__init__()<line_sep>self.branch2=nn.Sequential(BN_Conv2d(in_channels b2_n1 1 1 0 bias=<false>) BN_Conv2d(b2_n1 b2_n3 3 2 0 bias=<false>) )<line_sep>self.branch3=nn.Sequential(BN_Conv2d(in_channels b3_n1 1 1 0 bias=<false>) BN_Conv2d(b3_n1 b3_n3 3 2 0 bias=<false>))<line_sep>self.branch4=nn.Sequential(BN_Conv2d(in_channels b4_n1 1 1 0 bias=<false>) BN_Conv2d(b4_n1 b4_n3_1 3 1 1 bias=<false>) BN_Conv2d(b4_n3_1 b4_n3_2 3 2 0 bias=<false>))<block_end><def_stmt>forward self x<block_start>out1=F.max_pool2d(x 3 2 0)<line_sep>out2=self.branch2(x)<line_sep>out3=self.branch3(x)<line_sep>out4=self.branch4(x)<line_sep><return>torch.cat((out1 out2 out3 out4) 1)<block_end><block_end><class_stmt>Inception_A_res(nn.Module)<block_start>""" Inception-A block for Inception-ResNet-v1 and Inception-ResNet-v2 net """<def_stmt>__init__ self in_channels b1 b2_n1 b2_n3 b3_n1 b3_n3_1 b3_n3_2 n1_linear<block_start>super(Inception_A_res self).__init__()<line_sep>self.branch1=BN_Conv2d(in_channels b1 1 1 0 bias=<false>)<line_sep>self.branch2=nn.Sequential(BN_Conv2d(in_channels b2_n1 1 1 0 bias=<false>) BN_Conv2d(b2_n1 b2_n3 3 1 1 bias=<false>) )<line_sep>self.branch3=nn.Sequential(BN_Conv2d(in_channels b3_n1 1 1 0 bias=<false>) BN_Conv2d(b3_n1 b3_n3_1 3 1 1 bias=<false>) BN_Conv2d(b3_n3_1 b3_n3_2 3 1 1 bias=<false>))<line_sep>self.conv_linear=nn.Conv2d(b1+b2_n3+b3_n3_2 n1_linear 1 1 0 bias=<true>)<line_sep>self.short_cut=nn.Sequential()<if_stmt>in_channels<ne>n1_linear<block_start>self.short_cut=nn.Sequential(nn.Conv2d(in_channels n1_linear 1 1 0 bias=<false>) nn.BatchNorm2d(n1_linear))<block_end><block_end><def_stmt>forward self x<block_start>out1=self.branch1(x)<line_sep>out2=self.branch2(x)<line_sep>out3=self.branch3(x)<line_sep>out=torch.cat((out1 out2 out3) 1)<line_sep>out=self.conv_linear(out)<line_sep>out<augadd>self.short_cut(x)<line_sep><return>F.relu(out)<block_end><block_end><class_stmt>Inception_B_res(nn.Module)<block_start>""" Inception-B block for Inception-ResNet-v1 and Inception-ResNet-v2 net """<def_stmt>__init__ self in_channels b1 b2_n1 b2_n1x7 b2_n7x1 n1_linear<block_start>super(Inception_B_res self).__init__()<line_sep>self.branch1=BN_Conv2d(in_channels b1 1 1 0 bias=<false>)<line_sep>self.branch2=nn.Sequential(BN_Conv2d(in_channels b2_n1 1 1 0 bias=<false>) BN_Conv2d(b2_n1 b2_n1x7 (1 7) (1 1) (0 3) bias=<false>) BN_Conv2d(b2_n1x7 b2_n7x1 (7 1) (1 1) (3 0) bias=<false>))<line_sep>self.conv_linear=nn.Conv2d(b1+b2_n7x1 n1_linear 1 1 0 bias=<false>)<line_sep>self.short_cut=nn.Sequential()<if_stmt>in_channels<ne>n1_linear<block_start>self.short_cut=nn.Sequential(nn.Conv2d(in_channels n1_linear 1 1 0 bias=<false>) nn.BatchNorm2d(n1_linear))<block_end><block_end><def_stmt>forward self x<block_start>out1=self.branch1(x)<line_sep>out2=self.branch2(x)<line_sep>out=torch.cat((out1 out2) 1)<line_sep>out=self.conv_linear(out)<line_sep>out<augadd>self.short_cut(x)<line_sep><return>F.relu(out)<block_end><block_end><class_stmt>Inception_C_res(nn.Module)<block_start>""" Inception-C block for Inception-ResNet-v1 and Inception-ResNet-v2 net """<def_stmt>__init__ self in_channels b1 b2_n1 b2_n1x3 b2_n3x1 n1_linear<block_start>super(Inception_C_res self).__init__()<line_sep>self.branch1=BN_Conv2d(in_channels b1 1 1 0 bias=<false>)<line_sep>self.branch2=nn.Sequential(BN_Conv2d(in_channels b2_n1 1 1 0 bias=<false>) BN_Conv2d(b2_n1 
b2_n1x3 (1 3) (1 1) (0 1) bias=<false>) BN_Conv2d(b2_n1x3 b2_n3x1 (3 1) (1 1) (1 0) bias=<false>))<line_sep>self.conv_linear=nn.Conv2d(b1+b2_n3x1 n1_linear 1 1 0 bias=<false>)<line_sep>self.short_cut=nn.Sequential()<if_stmt>in_channels<ne>n1_linear<block_start>self.short_cut=nn.Sequential(nn.Conv2d(in_channels n1_linear 1 1 0 bias=<false>) nn.BatchNorm2d(n1_linear))<block_end><block_end><def_stmt>forward self x<block_start>out1=self.branch1(x)<line_sep>out2=self.branch2(x)<line_sep>out=torch.cat((out1 out2) 1)<line_sep>out=self.conv_linear(out)<line_sep>out<augadd>self.short_cut(x)<line_sep><return>F.relu(out)<block_end><block_end>
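# A minimal usage sketch for the blocks above: the 299x299 input, the batch size and the channel arguments are assumptions that follow the Inception-v4 design; BN_Conv2d is the helper imported at the top of this file. x=torch.randn(2 3 299 299)<line_sep>stem=Stem_v4_Res2()<line_sep>feat=stem(x)<line_sep>block=Inception_A(384 96 96 64 96 64 96)<line_sep>out=block(feat)<line_sep>print(feat.shape out.shape)<line_sep># both expected to be [2, 384, 35, 35] for this stem/block pairing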
<import_from_stmt>django.conf settings<import_from_stmt>django.shortcuts render<import_from_stmt>.confirm verify_token verify_view<import_from_stmt>.errors NotAllFieldCompiled<line_sep>@verify_view<def_stmt>verify request token<block_start><try_stmt><block_start>template=settings.EMAIL_PAGE_TEMPLATE<if_stmt><not>isinstance(template str)<block_start><raise>AttributeError<block_end>success,user=verify_token(token)<line_sep><return>render(request template {'success':success 'user':user 'request':request})<block_end><except_stmt>AttributeError<block_start><raise>NotAllFieldCompiled('EMAIL_PAGE_TEMPLATE field not found')<block_end><block_end>
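# A hedged sketch of how the verify view above might be wired into a project's urls.py; the route, pattern name and template path are illustrative assumptions, as is treating this module as the app's views module. EMAIL_PAGE_TEMPLATE must name a template that can render the 'success' and 'user' context. <import_from_stmt>django.urls path<import_from_stmt>.views verify<line_sep>urlpatterns=[path('email/verify/<str:token>/' verify name='email-verify')]<line_sep># settings.py (assumed): EMAIL_PAGE_TEMPLATE = 'registration/email_confirm.html'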
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkecs.endpoint endpoint_data<class_stmt>CreateAutoProvisioningGroupRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'Ecs' '2014-05-26' 'CreateAutoProvisioningGroup' 'ecs')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_LaunchConfigurationDataDisks self# RepeatList <block_start><return>self.get_query_params().get('LaunchConfiguration.DataDisk')<block_end><def_stmt>set_LaunchConfigurationDataDisks self LaunchConfigurationDataDisk# RepeatList <block_start><for_stmt>depth1 range(len(LaunchConfigurationDataDisk))<block_start><if_stmt>LaunchConfigurationDataDisk[depth1].get('PerformanceLevel')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.PerformanceLevel' LaunchConfigurationDataDisk[depth1].get('PerformanceLevel'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('KmsKeyId')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.KmsKeyId' LaunchConfigurationDataDisk[depth1].get('KmsKeyId'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('Description')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.Description' LaunchConfigurationDataDisk[depth1].get('Description'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('SnapshotId')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.SnapshotId' LaunchConfigurationDataDisk[depth1].get('SnapshotId'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('Size')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.Size' LaunchConfigurationDataDisk[depth1].get('Size'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('Device')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.Device' LaunchConfigurationDataDisk[depth1].get('Device'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('DiskName')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.DiskName' LaunchConfigurationDataDisk[depth1].get('DiskName'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('Category')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.Category' 
LaunchConfigurationDataDisk[depth1].get('Category'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('DeleteWithInstance')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.DeleteWithInstance' LaunchConfigurationDataDisk[depth1].get('DeleteWithInstance'))<block_end><if_stmt>LaunchConfigurationDataDisk[depth1].get('Encrypted')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.DataDisk.'+str(depth1+1)+'.Encrypted' LaunchConfigurationDataDisk[depth1].get('Encrypted'))<block_end><block_end><block_end><def_stmt>get_ResourceOwnerId self# Long <block_start><return>self.get_query_params().get('ResourceOwnerId')<block_end><def_stmt>set_ResourceOwnerId self ResourceOwnerId# Long <block_start>self.add_query_param('ResourceOwnerId' ResourceOwnerId)<block_end><def_stmt>get_LaunchConfigurationSystemDiskCategory self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.SystemDiskCategory')<block_end><def_stmt>set_LaunchConfigurationSystemDiskCategory self LaunchConfigurationSystemDiskCategory# String <block_start>self.add_query_param('LaunchConfiguration.SystemDiskCategory' LaunchConfigurationSystemDiskCategory)<block_end><def_stmt>get_AutoProvisioningGroupType self# String <block_start><return>self.get_query_params().get('AutoProvisioningGroupType')<block_end><def_stmt>set_AutoProvisioningGroupType self AutoProvisioningGroupType# String <block_start>self.add_query_param('AutoProvisioningGroupType' AutoProvisioningGroupType)<block_end><def_stmt>get_LaunchConfigurationSystemDiskPerformanceLevel self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.SystemDiskPerformanceLevel')<block_end><def_stmt>set_LaunchConfigurationSystemDiskPerformanceLevel self LaunchConfigurationSystemDiskPerformanceLevel# String <block_start>self.add_query_param('LaunchConfiguration.SystemDiskPerformanceLevel' LaunchConfigurationSystemDiskPerformanceLevel)<block_end><def_stmt>get_LaunchConfigurationHostNamess self# RepeatList <block_start><return>self.get_query_params().get('LaunchConfiguration.HostNames')<block_end><def_stmt>set_LaunchConfigurationHostNamess self LaunchConfigurationHostNames# RepeatList <block_start><for_stmt>depth1 range(len(LaunchConfigurationHostNames))<block_start>self.add_query_param('LaunchConfiguration.HostNames.'+str(depth1+1) LaunchConfigurationHostNames[depth1])<block_end><block_end><def_stmt>get_ResourceGroupId self# String <block_start><return>self.get_query_params().get('ResourceGroupId')<block_end><def_stmt>set_ResourceGroupId self ResourceGroupId# String <block_start>self.add_query_param('ResourceGroupId' ResourceGroupId)<block_end><def_stmt>get_LaunchConfigurationImageId self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.ImageId')<block_end><def_stmt>set_LaunchConfigurationImageId self LaunchConfigurationImageId# String <block_start>self.add_query_param('LaunchConfiguration.ImageId' LaunchConfigurationImageId)<block_end><def_stmt>get_LaunchConfigurationResourceGroupId self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.ResourceGroupId')<block_end><def_stmt>set_LaunchConfigurationResourceGroupId self LaunchConfigurationResourceGroupId# String <block_start>self.add_query_param('LaunchConfiguration.ResourceGroupId' LaunchConfigurationResourceGroupId)<block_end><def_stmt>get_PayAsYouGoAllocationStrategy self# String 
<block_start><return>self.get_query_params().get('PayAsYouGoAllocationStrategy')<block_end><def_stmt>set_PayAsYouGoAllocationStrategy self PayAsYouGoAllocationStrategy# String <block_start>self.add_query_param('PayAsYouGoAllocationStrategy' PayAsYouGoAllocationStrategy)<block_end><def_stmt>get_DefaultTargetCapacityType self# String <block_start><return>self.get_query_params().get('DefaultTargetCapacityType')<block_end><def_stmt>set_DefaultTargetCapacityType self DefaultTargetCapacityType# String <block_start>self.add_query_param('DefaultTargetCapacityType' DefaultTargetCapacityType)<block_end><def_stmt>get_LaunchConfigurationKeyPairName self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.KeyPairName')<block_end><def_stmt>set_LaunchConfigurationKeyPairName self LaunchConfigurationKeyPairName# String <block_start>self.add_query_param('LaunchConfiguration.KeyPairName' LaunchConfigurationKeyPairName)<block_end><def_stmt>get_SystemDiskConfigs self# RepeatList <block_start><return>self.get_query_params().get('SystemDiskConfig')<block_end><def_stmt>set_SystemDiskConfigs self SystemDiskConfig# RepeatList <block_start><for_stmt>depth1 range(len(SystemDiskConfig))<block_start><if_stmt>SystemDiskConfig[depth1].get('DiskCategory')<is><not><none><block_start>self.add_query_param('SystemDiskConfig.'+str(depth1+1)+'.DiskCategory' SystemDiskConfig[depth1].get('DiskCategory'))<block_end><block_end><block_end><def_stmt>get_DataDiskConfigs self# RepeatList <block_start><return>self.get_query_params().get('DataDiskConfig')<block_end><def_stmt>set_DataDiskConfigs self DataDiskConfig# RepeatList <block_start><for_stmt>depth1 range(len(DataDiskConfig))<block_start><if_stmt>DataDiskConfig[depth1].get('DiskCategory')<is><not><none><block_start>self.add_query_param('DataDiskConfig.'+str(depth1+1)+'.DiskCategory' DataDiskConfig[depth1].get('DiskCategory'))<block_end><block_end><block_end><def_stmt>get_ValidUntil self# String <block_start><return>self.get_query_params().get('ValidUntil')<block_end><def_stmt>set_ValidUntil self ValidUntil# String <block_start>self.add_query_param('ValidUntil' ValidUntil)<block_end><def_stmt>get_LaunchTemplateId self# String <block_start><return>self.get_query_params().get('LaunchTemplateId')<block_end><def_stmt>set_LaunchTemplateId self LaunchTemplateId# String <block_start>self.add_query_param('LaunchTemplateId' LaunchTemplateId)<block_end><def_stmt>get_OwnerId self# Long <block_start><return>self.get_query_params().get('OwnerId')<block_end><def_stmt>set_OwnerId self OwnerId# Long <block_start>self.add_query_param('OwnerId' OwnerId)<block_end><def_stmt>get_LaunchConfigurationSystemDiskSize self# Integer <block_start><return>self.get_query_params().get('LaunchConfiguration.SystemDiskSize')<block_end><def_stmt>set_LaunchConfigurationSystemDiskSize self LaunchConfigurationSystemDiskSize# Integer <block_start>self.add_query_param('LaunchConfiguration.SystemDiskSize' LaunchConfigurationSystemDiskSize)<block_end><def_stmt>get_LaunchConfigurationInternetMaxBandwidthOut self# Integer <block_start><return>self.get_query_params().get('LaunchConfiguration.InternetMaxBandwidthOut')<block_end><def_stmt>set_LaunchConfigurationInternetMaxBandwidthOut self LaunchConfigurationInternetMaxBandwidthOut# Integer <block_start>self.add_query_param('LaunchConfiguration.InternetMaxBandwidthOut' LaunchConfigurationInternetMaxBandwidthOut)<block_end><def_stmt>get_LaunchConfigurationHostName self# String 
<block_start><return>self.get_query_params().get('LaunchConfiguration.HostName')<block_end><def_stmt>set_LaunchConfigurationHostName self LaunchConfigurationHostName# String <block_start>self.add_query_param('LaunchConfiguration.HostName' LaunchConfigurationHostName)<block_end><def_stmt>get_MinTargetCapacity self# String <block_start><return>self.get_query_params().get('MinTargetCapacity')<block_end><def_stmt>set_MinTargetCapacity self MinTargetCapacity# String <block_start>self.add_query_param('MinTargetCapacity' MinTargetCapacity)<block_end><def_stmt>get_MaxSpotPrice self# Float <block_start><return>self.get_query_params().get('MaxSpotPrice')<block_end><def_stmt>set_MaxSpotPrice self MaxSpotPrice# Float <block_start>self.add_query_param('MaxSpotPrice' MaxSpotPrice)<block_end><def_stmt>get_LaunchConfigurationPasswordInherit self# Boolean <block_start><return>self.get_query_params().get('LaunchConfiguration.PasswordInherit')<block_end><def_stmt>set_LaunchConfigurationPasswordInherit self LaunchConfigurationPasswordInherit# Boolean <block_start>self.add_query_param('LaunchConfiguration.PasswordInherit' LaunchConfigurationPasswordInherit)<block_end><def_stmt>get_ClientToken self# String <block_start><return>self.get_query_params().get('ClientToken')<block_end><def_stmt>set_ClientToken self ClientToken# String <block_start>self.add_query_param('ClientToken' ClientToken)<block_end><def_stmt>get_LaunchConfigurationSecurityGroupId self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.SecurityGroupId')<block_end><def_stmt>set_LaunchConfigurationSecurityGroupId self LaunchConfigurationSecurityGroupId# String <block_start>self.add_query_param('LaunchConfiguration.SecurityGroupId' LaunchConfigurationSecurityGroupId)<block_end><def_stmt>get_Description self# String <block_start><return>self.get_query_params().get('Description')<block_end><def_stmt>set_Description self Description# String <block_start>self.add_query_param('Description' Description)<block_end><def_stmt>get_TerminateInstancesWithExpiration self# Boolean <block_start><return>self.get_query_params().get('TerminateInstancesWithExpiration')<block_end><def_stmt>set_TerminateInstancesWithExpiration self TerminateInstancesWithExpiration# Boolean <block_start>self.add_query_param('TerminateInstancesWithExpiration' TerminateInstancesWithExpiration)<block_end><def_stmt>get_LaunchConfigurationUserData self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.UserData')<block_end><def_stmt>set_LaunchConfigurationUserData self LaunchConfigurationUserData# String <block_start>self.add_query_param('LaunchConfiguration.UserData' LaunchConfigurationUserData)<block_end><def_stmt>get_LaunchConfigurationCreditSpecification self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.CreditSpecification')<block_end><def_stmt>set_LaunchConfigurationCreditSpecification self LaunchConfigurationCreditSpecification# String <block_start>self.add_query_param('LaunchConfiguration.CreditSpecification' LaunchConfigurationCreditSpecification)<block_end><def_stmt>get_LaunchConfigurationInstanceName self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.InstanceName')<block_end><def_stmt>set_LaunchConfigurationInstanceName self LaunchConfigurationInstanceName# String <block_start>self.add_query_param('LaunchConfiguration.InstanceName' LaunchConfigurationInstanceName)<block_end><def_stmt>get_LaunchConfigurationInstanceDescription self# String 
<block_start><return>self.get_query_params().get('LaunchConfiguration.InstanceDescription')<block_end><def_stmt>set_LaunchConfigurationInstanceDescription self LaunchConfigurationInstanceDescription# String <block_start>self.add_query_param('LaunchConfiguration.InstanceDescription' LaunchConfigurationInstanceDescription)<block_end><def_stmt>get_SpotAllocationStrategy self# String <block_start><return>self.get_query_params().get('SpotAllocationStrategy')<block_end><def_stmt>set_SpotAllocationStrategy self SpotAllocationStrategy# String <block_start>self.add_query_param('SpotAllocationStrategy' SpotAllocationStrategy)<block_end><def_stmt>get_TerminateInstances self# Boolean <block_start><return>self.get_query_params().get('TerminateInstances')<block_end><def_stmt>set_TerminateInstances self TerminateInstances# Boolean <block_start>self.add_query_param('TerminateInstances' TerminateInstances)<block_end><def_stmt>get_LaunchConfigurationSystemDiskName self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.SystemDiskName')<block_end><def_stmt>set_LaunchConfigurationSystemDiskName self LaunchConfigurationSystemDiskName# String <block_start>self.add_query_param('LaunchConfiguration.SystemDiskName' LaunchConfigurationSystemDiskName)<block_end><def_stmt>get_LaunchConfigurationSystemDiskDescription self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.SystemDiskDescription')<block_end><def_stmt>set_LaunchConfigurationSystemDiskDescription self LaunchConfigurationSystemDiskDescription# String <block_start>self.add_query_param('LaunchConfiguration.SystemDiskDescription' LaunchConfigurationSystemDiskDescription)<block_end><def_stmt>get_ExcessCapacityTerminationPolicy self# String <block_start><return>self.get_query_params().get('ExcessCapacityTerminationPolicy')<block_end><def_stmt>set_ExcessCapacityTerminationPolicy self ExcessCapacityTerminationPolicy# String <block_start>self.add_query_param('ExcessCapacityTerminationPolicy' ExcessCapacityTerminationPolicy)<block_end><def_stmt>get_LaunchTemplateConfigs self# RepeatList <block_start><return>self.get_query_params().get('LaunchTemplateConfig')<block_end><def_stmt>set_LaunchTemplateConfigs self LaunchTemplateConfig# RepeatList <block_start><for_stmt>depth1 range(len(LaunchTemplateConfig))<block_start><if_stmt>LaunchTemplateConfig[depth1].get('VSwitchId')<is><not><none><block_start>self.add_query_param('LaunchTemplateConfig.'+str(depth1+1)+'.VSwitchId' LaunchTemplateConfig[depth1].get('VSwitchId'))<block_end><if_stmt>LaunchTemplateConfig[depth1].get('MaxPrice')<is><not><none><block_start>self.add_query_param('LaunchTemplateConfig.'+str(depth1+1)+'.MaxPrice' LaunchTemplateConfig[depth1].get('MaxPrice'))<block_end><if_stmt>LaunchTemplateConfig[depth1].get('Priority')<is><not><none><block_start>self.add_query_param('LaunchTemplateConfig.'+str(depth1+1)+'.Priority' LaunchTemplateConfig[depth1].get('Priority'))<block_end><if_stmt>LaunchTemplateConfig[depth1].get('InstanceType')<is><not><none><block_start>self.add_query_param('LaunchTemplateConfig.'+str(depth1+1)+'.InstanceType' LaunchTemplateConfig[depth1].get('InstanceType'))<block_end><if_stmt>LaunchTemplateConfig[depth1].get('WeightedCapacity')<is><not><none><block_start>self.add_query_param('LaunchTemplateConfig.'+str(depth1+1)+'.WeightedCapacity' LaunchTemplateConfig[depth1].get('WeightedCapacity'))<block_end><block_end><block_end><def_stmt>get_LaunchConfigurationRamRoleName self# String 
<block_start><return>self.get_query_params().get('LaunchConfiguration.RamRoleName')<block_end><def_stmt>set_LaunchConfigurationRamRoleName self LaunchConfigurationRamRoleName# String <block_start>self.add_query_param('LaunchConfiguration.RamRoleName' LaunchConfigurationRamRoleName)<block_end><def_stmt>get_LaunchConfigurationInternetMaxBandwidthIn self# Integer <block_start><return>self.get_query_params().get('LaunchConfiguration.InternetMaxBandwidthIn')<block_end><def_stmt>set_LaunchConfigurationInternetMaxBandwidthIn self LaunchConfigurationInternetMaxBandwidthIn# Integer <block_start>self.add_query_param('LaunchConfiguration.InternetMaxBandwidthIn' LaunchConfigurationInternetMaxBandwidthIn)<block_end><def_stmt>get_SpotInstanceInterruptionBehavior self# String <block_start><return>self.get_query_params().get('SpotInstanceInterruptionBehavior')<block_end><def_stmt>set_SpotInstanceInterruptionBehavior self SpotInstanceInterruptionBehavior# String <block_start>self.add_query_param('SpotInstanceInterruptionBehavior' SpotInstanceInterruptionBehavior)<block_end><def_stmt>get_LaunchConfigurationSecurityEnhancementStrategy self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.SecurityEnhancementStrategy')<block_end><def_stmt>set_LaunchConfigurationSecurityEnhancementStrategy self LaunchConfigurationSecurityEnhancementStrategy# String <block_start>self.add_query_param('LaunchConfiguration.SecurityEnhancementStrategy' LaunchConfigurationSecurityEnhancementStrategy)<block_end><def_stmt>get_LaunchConfigurationTags self# RepeatList <block_start><return>self.get_query_params().get('LaunchConfiguration.Tag')<block_end><def_stmt>set_LaunchConfigurationTags self LaunchConfigurationTag# RepeatList <block_start><for_stmt>depth1 range(len(LaunchConfigurationTag))<block_start><if_stmt>LaunchConfigurationTag[depth1].get('Key')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.Tag.'+str(depth1+1)+'.Key' LaunchConfigurationTag[depth1].get('Key'))<block_end><if_stmt>LaunchConfigurationTag[depth1].get('Value')<is><not><none><block_start>self.add_query_param('LaunchConfiguration.Tag.'+str(depth1+1)+'.Value' LaunchConfigurationTag[depth1].get('Value'))<block_end><block_end><block_end><def_stmt>get_LaunchConfigurationDeploymentSetId self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.DeploymentSetId')<block_end><def_stmt>set_LaunchConfigurationDeploymentSetId self LaunchConfigurationDeploymentSetId# String <block_start>self.add_query_param('LaunchConfiguration.DeploymentSetId' LaunchConfigurationDeploymentSetId)<block_end><def_stmt>get_ResourceOwnerAccount self# String <block_start><return>self.get_query_params().get('ResourceOwnerAccount')<block_end><def_stmt>set_ResourceOwnerAccount self ResourceOwnerAccount# String <block_start>self.add_query_param('ResourceOwnerAccount' ResourceOwnerAccount)<block_end><def_stmt>get_OwnerAccount self# String <block_start><return>self.get_query_params().get('OwnerAccount')<block_end><def_stmt>set_OwnerAccount self OwnerAccount# String <block_start>self.add_query_param('OwnerAccount' OwnerAccount)<block_end><def_stmt>get_SpotInstancePoolsToUseCount self# Integer <block_start><return>self.get_query_params().get('SpotInstancePoolsToUseCount')<block_end><def_stmt>set_SpotInstancePoolsToUseCount self SpotInstancePoolsToUseCount# Integer <block_start>self.add_query_param('SpotInstancePoolsToUseCount' SpotInstancePoolsToUseCount)<block_end><def_stmt>get_LaunchConfigurationInternetChargeType self# String 
<block_start><return>self.get_query_params().get('LaunchConfiguration.InternetChargeType')<block_end><def_stmt>set_LaunchConfigurationInternetChargeType self LaunchConfigurationInternetChargeType# String <block_start>self.add_query_param('LaunchConfiguration.InternetChargeType' LaunchConfigurationInternetChargeType)<block_end><def_stmt>get_LaunchTemplateVersion self# String <block_start><return>self.get_query_params().get('LaunchTemplateVersion')<block_end><def_stmt>set_LaunchTemplateVersion self LaunchTemplateVersion# String <block_start>self.add_query_param('LaunchTemplateVersion' LaunchTemplateVersion)<block_end><def_stmt>get_LaunchConfigurationIoOptimized self# String <block_start><return>self.get_query_params().get('LaunchConfiguration.IoOptimized')<block_end><def_stmt>set_LaunchConfigurationIoOptimized self LaunchConfigurationIoOptimized# String <block_start>self.add_query_param('LaunchConfiguration.IoOptimized' LaunchConfigurationIoOptimized)<block_end><def_stmt>get_PayAsYouGoTargetCapacity self# String <block_start><return>self.get_query_params().get('PayAsYouGoTargetCapacity')<block_end><def_stmt>set_PayAsYouGoTargetCapacity self PayAsYouGoTargetCapacity# String <block_start>self.add_query_param('PayAsYouGoTargetCapacity' PayAsYouGoTargetCapacity)<block_end><def_stmt>get_TotalTargetCapacity self# String <block_start><return>self.get_query_params().get('TotalTargetCapacity')<block_end><def_stmt>set_TotalTargetCapacity self TotalTargetCapacity# String <block_start>self.add_query_param('TotalTargetCapacity' TotalTargetCapacity)<block_end><def_stmt>get_SpotTargetCapacity self# String <block_start><return>self.get_query_params().get('SpotTargetCapacity')<block_end><def_stmt>set_SpotTargetCapacity self SpotTargetCapacity# String <block_start>self.add_query_param('SpotTargetCapacity' SpotTargetCapacity)<block_end><def_stmt>get_ValidFrom self# String <block_start><return>self.get_query_params().get('ValidFrom')<block_end><def_stmt>set_ValidFrom self ValidFrom# String <block_start>self.add_query_param('ValidFrom' ValidFrom)<block_end><def_stmt>get_AutoProvisioningGroupName self# String <block_start><return>self.get_query_params().get('AutoProvisioningGroupName')<block_end><def_stmt>set_AutoProvisioningGroupName self AutoProvisioningGroupName# String <block_start>self.add_query_param('AutoProvisioningGroupName' AutoProvisioningGroupName)<block_end><block_end>
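# An illustrative sketch of how this request class is typically sent with AcsClient; the credentials, region, launch-template id and capacity values are placeholders, and the exact set of required parameters is an assumption (consult the ECS API reference). <import_from_stmt>aliyunsdkcore.client AcsClient<line_sep>client=AcsClient('your-access-key-id' 'your-access-key-secret' 'cn-hangzhou')<line_sep>request=CreateAutoProvisioningGroupRequest()<line_sep>request.set_AutoProvisioningGroupName('demo-apg')<line_sep>request.set_TotalTargetCapacity('10')<line_sep>request.set_PayAsYouGoTargetCapacity('2')<line_sep>request.set_SpotTargetCapacity('8')<line_sep>request.set_LaunchTemplateId('lt-xxxxxxxxxxxx')<line_sep>request.set_LaunchTemplateVersion('1')<line_sep>response=client.do_action_with_exception(request)<line_sep>print(response)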
# Copyright 2021 The KubeEdge Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>interface fedavg s3_transmitter simple_chooser<import_from_stmt>interface Estimator<import_from_stmt>sedna.service.server AggregationServerV2<import_from_stmt>sedna.common.config BaseConfig<def_stmt>run_server <block_start>estimator=Estimator()<line_sep>estimator.saved=BaseConfig.model_url<line_sep>server=AggregationServerV2(data=<none> # mistnet, train, test estimator=estimator aggregation=fedavg transmitter=s3_transmitter chooser=simple_chooser)<line_sep>server.start()<block_end><if_stmt>__name__<eq>'__main__'<block_start>run_server()<block_end>
# Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE file in the project root for full license information. """ Version 2.0 """<import_stmt>numpy<as>np<class_stmt>CClassifier(object)<block_start><def_stmt>forward self z<block_start><pass><block_end><block_end># equal to sigmoid but it is used as classification function <class_stmt>Logistic(CClassifier)<block_start><def_stmt>forward self z<block_start>a=1.0/(1.0+np.exp(-z))<line_sep><return>a<block_end><block_end><class_stmt>Softmax(CClassifier)<block_start><def_stmt>forward self z<block_start>shift_z=z-np.max(z axis=1 keepdims=<true>)<line_sep>exp_z=np.exp(shift_z)<line_sep>a=exp_z/np.sum(exp_z axis=1 keepdims=<true>)<line_sep><return>a<block_end><block_end>
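# A small usage sketch of the two classification functions on a dummy batch of logits; the numbers are arbitrary. logits=np.array([[1.0 2.0 0.5] [0.1 -1.2 3.3]])<line_sep>probs=Softmax().forward(logits)<line_sep>print(probs probs.sum(axis=1))<line_sep>sig=Logistic().forward(np.array([[0.0] [2.0]]))<line_sep>print(sig)<line_sep># each softmax row sums to 1.0; Logistic maps 0.0 to 0.5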
# Copyright (C) 2017 <NAME>, Carnegie Mellon University <class_stmt>KgCVAEConfig(object)<block_start>description=<none><line_sep>use_hcf=<true># use dialog act in training (if turn off kgCVAE -> CVAE) update_limit=3000# the number of mini-batch before evaluating the model # how to encode utterance. # bow: add word embedding together # rnn: RNN utterance encoder # bi_rnn: bi_directional RNN utterance encoder sent_type="bi_rnn"<line_sep># latent variable (gaussian variable) latent_size=200# the dimension of latent variable full_kl_step=10000# how many batch before KL cost weight reaches 1.0 dec_keep_prob=1.0# do we use word drop decoder [Bowman el al 2015] # Network general cell_type="gru"# gru or lstm embed_size=200# word embedding size topic_embed_size=30# topic embedding size da_embed_size=30# dialog act embedding size cxt_cell_size=600# context encoder hidden size sent_cell_size=300# utterance encoder hidden size dec_cell_size=400# response decoder hidden size backward_size=10# how many utterance kept in the context window step_size=1# internal usage max_utt_len=40# max number of words in an utterance num_layer=1# number of context RNN layers # Optimization parameters op="adam"<line_sep>grad_clip=5.0# gradient abs max cut init_w=0.08# uniform random from [-init_w, init_w] batch_size=30# mini-batch size init_lr=0.001# initial learning rate lr_hold=1# only used by SGD lr_decay=0.6# only used by SGD keep_prob=1.0# drop out rate improve_threshold=0.996# for early stopping patient_increase=2.0# for early stopping early_stop=<true><line_sep>max_epoch=60# max number of epoch of training grad_noise=0.0<block_end># inject gradient noise?
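# An illustrative snippet of how these hyper-parameters are typically consumed; the linear KL-annealing formula below is an assumption about how full_kl_step is used, not the project's actual training code. config=KgCVAEConfig()<line_sep>global_step=2500<line_sep>kl_weight=min(1.0 global_step/config.full_kl_step)<line_sep>print(config.cell_type config.latent_size kl_weight)<line_sep># gru 200 0.25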
<import_from_stmt>c7n_openstack.query QueryResourceManager TypeInfo<import_from_stmt>c7n_openstack.provider resources<import_from_stmt>c7n.utils local_session<import_from_stmt>c7n.utils type_schema<import_from_stmt>c7n.filters Filter<import_from_stmt>c7n.filters AgeFilter<line_sep>@resources.register('server')<class_stmt>Server(QueryResourceManager)<block_start><class_stmt>resource_type(TypeInfo)<block_start>enum_spec=('list_servers' <none>)<line_sep>id='id'<line_sep>name='name'<line_sep>set_server_metadata="set_server_metadata"<line_sep>delete_server_metadata="delete_server_metadata"<line_sep>add_server_tag="add_server_tag"<line_sep>set_server_tag="set_server_tag"<line_sep>delete_server_tag="delete_server_tag"<line_sep>default_report_fields=['id' 'name' 'status' 'tenant_id']<block_end><block_end>@Server.filter_registry.register('image')<class_stmt>ImageFilter(Filter)<block_start>"""Filters Servers based on their image attributes :example: .. code-block:: yaml policies: - name: dns-hostname-enabled resource: openstack.server filters: - type: image image_name: test-image """<line_sep>schema=type_schema('image' image_name={'type':'string'} visibility={'type':'string'} status={'type':'string'})<def_stmt>process self resources event=<none><block_start>results=[]<line_sep>client=local_session(self.manager.session_factory).client()<line_sep>image_name=self.data.get('image_name' <none>)<line_sep>visibility=self.data.get('visibility' <none>)<line_sep>status=self.data.get('status' <none>)<line_sep>images=client.list_images()<for_stmt>r resources<block_start>image=find_object_by_property(images 'id' r.image.id)<line_sep>matched=<true><if_stmt><not>image<block_start><if_stmt>status<eq>"absent"<block_start>results.append(r)<block_end><continue><block_end><if_stmt>image_name<is><not><none><and>image_name<ne>image.name<block_start>matched=<false><block_end><if_stmt>visibility<is><not><none><and>visibility<ne>image.visibility<block_start>matched=<false><block_end><if_stmt>status<is><not><none><and>status<ne>image.status<block_start>matched=<false><block_end><if_stmt>matched<block_start>results.append(r)<block_end><block_end><return>results<block_end><block_end>@Server.filter_registry.register('flavor')<class_stmt>FlavorFilter(Filter)<block_start>"""Filters Servers based on their flavor attributes :example: .. 
code-block:: yaml policies: - name: dns-hostname-enabled resource: openstack.server filters: - type: flavor flavor_name: m1.tiny """<line_sep>schema=type_schema('flavor' flavor_name={'type':'string'} flavor_id={'type':'string'} vcpus={'type':'integer'} ram={'type':'integer'} swap={'type':'integer'} disk={'type':'integer'} ephemeral={'type':'integer'} is_public={'type':'boolean'} )<def_stmt>server_match_flavor self server flavor_name flavor_id vcpus ram disk ephemeral is_public<block_start>openstack=local_session(self.manager.session_factory).client()<line_sep>server_flavor_name=server.flavor.original_name<line_sep>flavor=openstack.get_flavor(server_flavor_name)<if_stmt><not>flavor<block_start><return><false><block_end><if_stmt>flavor_name<and>flavor.name<ne>flavor_name<block_start><return><false><block_end><if_stmt>flavor_id<and>flavor.id<ne>flavor_id<block_start><return><false><block_end><if_stmt>vcpus<and>flavor.vcpus<ne>int(vcpus)<block_start><return><false><block_end><if_stmt>ram<and>flavor.ram<ne>int(ram)<block_start><return><false><block_end><if_stmt>disk<and>flavor.disk<ne>int(disk)<block_start><return><false><block_end><if_stmt>ephemeral<and>flavor.ephemeral<ne>int(ephemeral)<block_start><return><false><block_end><if_stmt>is_public<is><not><none><and>flavor.is_public<ne>is_public<block_start><return><false><block_end><return><true><block_end><def_stmt>process self resources event=<none><block_start>results=[]<line_sep>flavor_name=self.data.get('flavor_name' <none>)<line_sep>flavor_id=self.data.get('flavor_id' <none>)<line_sep>vcpus=self.data.get('vcpus' <none>)<line_sep>ram=self.data.get('ram' <none>)<line_sep>disk=self.data.get('disk' <none>)<line_sep>ephemeral=self.data.get('ephemeral' <none>)<line_sep>is_public=self.data.get('is_public' <none>)<for_stmt>server resources<block_start><if_stmt>self.server_match_flavor(server flavor_name flavor_id vcpus ram disk ephemeral is_public)<block_start>results.append(server)<block_end><block_end><return>results<block_end><block_end>@Server.filter_registry.register('age')<class_stmt>AgeFilter(AgeFilter)<block_start>date_attribute="launched_at"<line_sep>schema=type_schema('age' op={'$ref':'#/definitions/filters_common/comparison_operators'} days={'type':'number'} hours={'type':'number'} minutes={'type':'number'})<def_stmt>get_resource_data self i<block_start><if_stmt>i.get("launched_at")<block_start><return>i.get("launched_at")<block_end><return>i.get("created_at")<block_end><block_end>@Server.filter_registry.register('tags')<class_stmt>TagsFilter(Filter)<block_start>"""Filters Servers based on their tags :example: .. 
code-block:: yaml policies: - name: demo resource: openstack.server filters: - type: tags tags: - key: a value: b """<line_sep>tags_definition={'type':'array' 'items':{'type':'object' 'properties':{'key':{'type':'string'} 'value':{'type':'string'}} 'required':['key' 'value'] }}<line_sep>schema=type_schema('tags' tags=tags_definition op={'type':'string' 'enum':['any' 'all']} )<def_stmt>match_any_tags self server tags<block_start><for_stmt>t tags<block_start>str_tag="%s=%s"%(t.get('key') t.get('value'))<if_stmt>str_tag<in>server.tags<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>match_all_tags self server tags<block_start><for_stmt>t tags<block_start>str_tag="%s=%s"%(t.get('key') t.get('value'))<if_stmt>str_tag<not><in>server.tags<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>process self resources event=<none><block_start>results=[]<line_sep>tags=self.data.get('tags' [])<line_sep>op=self.data.get('op' 'all')<line_sep>match_fn={'any':self.match_any_tags 'all':self.match_all_tags}<for_stmt>server resources<block_start><if_stmt>match_fn[op](server tags)<block_start>results.append(server)<block_end><block_end><return>results<block_end><block_end><def_stmt>find_object_by_property collection k v<block_start>result=[]<for_stmt>d collection<block_start><if_stmt>hasattr(d k)<block_start>value=getattr(d k)<block_end><else_stmt><block_start>value=d.get(k)<block_end><if_stmt>(v<is><none><and>value<is><none>)<or>value<eq>v<block_start>result.append(d)<block_end><block_end><if_stmt><not>result<block_start><return><none><block_end><assert_stmt>(len(result)<eq>1)<line_sep><return>result[0]<block_end>
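# A tiny illustration of the find_object_by_property helper defined above, run on plain dicts; the sample data is made up. images=[{'id':'img-1' 'name':'cirros'} {'id':'img-2' 'name':'ubuntu'}]<line_sep>match=find_object_by_property(images 'id' 'img-2')<line_sep>print(match['name'])<line_sep># prints ubuntu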
<import_stmt>sys<import_from_stmt>os path<def_stmt>main argv<block_start><if_stmt>len(argv)<g>0<block_start><if_stmt>path.isdir(argv[0]+"/love")<and>path.isfile(argv[0]+"/love/Android.mk")<block_start>print("yes")<block_end><else_stmt><block_start>print("no")<block_end><block_end><else_stmt><block_start>print("no")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main(sys.argv[1:])<block_end>
""" GridSpec ========= An example demoing gridspec """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib.gridspec<as>gridspec<line_sep>plt.figure(figsize=(6 4))<line_sep>G=gridspec.GridSpec(3 3)<line_sep>axes_1=plt.subplot(G[0 :])<line_sep>plt.xticks(())<line_sep>plt.yticks(())<line_sep>plt.text(0.5 0.5 'Axes 1' ha='center' va='center' size=24 alpha=.5)<line_sep>axes_2=plt.subplot(G[1 :-1])<line_sep>plt.xticks(())<line_sep>plt.yticks(())<line_sep>plt.text(0.5 0.5 'Axes 2' ha='center' va='center' size=24 alpha=.5)<line_sep>axes_3=plt.subplot(G[1: -1])<line_sep>plt.xticks(())<line_sep>plt.yticks(())<line_sep>plt.text(0.5 0.5 'Axes 3' ha='center' va='center' size=24 alpha=.5)<line_sep>axes_4=plt.subplot(G[-1 0])<line_sep>plt.xticks(())<line_sep>plt.yticks(())<line_sep>plt.text(0.5 0.5 'Axes 4' ha='center' va='center' size=24 alpha=.5)<line_sep>axes_5=plt.subplot(G[-1 -2])<line_sep>plt.xticks(())<line_sep>plt.yticks(())<line_sep>plt.text(0.5 0.5 'Axes 5' ha='center' va='center' size=24 alpha=.5)<line_sep>plt.tight_layout()<line_sep>plt.show()<line_sep>
<import_from_future_stmt> print_function<line_sep># some python 2 and 3 comnpatibility tweaks <import_stmt>sys<line_sep>py3=sys.version_info<ge>(3 0)<def_stmt>inext v# next value from iterator <block_start><return>next(v)<if>py3<else>v.next()<block_end><import_stmt>os<import_stmt>time<import_stmt>apsw<line_sep>### ### Check we have the expected version of apsw and sqlite ### #@@CAPTURE print(" Using APSW file" apsw.__file__)# from the extension module print(" APSW version" apsw.apswversion())# from the extension module print(" SQLite lib version" apsw.sqlitelibversion())# from the sqlite library code print("SQLite header version" apsw.SQLITE_VERSION_NUMBER)# from the sqlite header file at compile time #@@ENDCAPTURE ### ### Opening/creating database ### connection=apsw.Connection("dbfile")<line_sep>cursor=connection.cursor()<line_sep>### ### simple statement @@ example-cursor ### cursor.execute("create table foo(x,y,z)")<line_sep>### ### using different types ### cursor.execute("insert into foo values(?,?,?)" (1 1.1 <none>))# integer, float/real, Null cursor.execute("insert into foo(x) values(?)" ("abc" ))# string (note trailing comma to ensure tuple!) cursor.execute("insert into foo(x) values(?)" # a blob (binary data) (b"abc\xff\xfe"<if>py3<else>buffer("abc\xff\xfe") ))<line_sep>### ### multiple statements ### cursor.execute("delete from foo; insert into foo values(1,2,3); create table bar(a,b,c) ; insert into foo values(4, 'five', 6.0)")<line_sep>### ### iterator ### <for_stmt>x,y,z cursor.execute("select x,y,z from foo")<block_start>print(cursor.getdescription())# shows column names and declared types print(x y z)<block_end>### ### iterator - multiple statements ### <for_stmt>m,n,o cursor.execute("select x,y,z from foo ; select a,b,c from bar")<block_start>print(m n o)<block_end>### ### bindings - sequence ### cursor.execute("insert into foo values(?,?,?)" (7 'eight' <false>))<line_sep>cursor.execute("insert into foo values(?,?,?1)" ('one' 'two'))# nb sqlite does the numbers from 1 ### ### bindings - dictionary ### cursor.execute("insert into foo values(:alpha, :beta, :gamma)" {'alpha':1 'beta':2 'gamma':'three'})<line_sep>### ### tracing execution @@ example-exectrace ### <def_stmt>mytrace cursor statement bindings<block_start>"Called just before executing each statement"<line_sep>print("SQL:" statement)<if_stmt>bindings<block_start>print("Bindings:" bindings)<block_end><return><true><block_end># if you return False then execution is aborted #@@CAPTURE cursor.setexectrace(mytrace)<line_sep>cursor.execute("drop table bar ; create table bar(x,y,z); select * from foo where x=?" (3 ))<line_sep>#@@ENDCAPTURE ### ### tracing results @@ example-rowtrace ### <def_stmt>rowtrace cursor row<block_start>"""Called with each row of results before they are handed off. You can return None to cause the row to be skipped or a different set of values to return"""<line_sep>print("Row:" row)<line_sep><return>row<block_end>#@@CAPTURE cursor.setrowtrace(rowtrace)<for_stmt>row cursor.execute("select x,y from foo where x>3")<block_start><pass><block_end>#@@ENDCAPTURE # Clear tracers cursor.setrowtrace(<none>)<line_sep>cursor.setexectrace(<none>)<line_sep>### ### executemany ### # (This will work correctly with multiple statements, as well as statements that # return data. The second argument can be anything that is iterable.) 
cursor.executemany("insert into foo (x) values(?)" ([1] [2] [3]))<line_sep># You can also use it for statements that return data <for_stmt>row cursor.executemany("select * from foo where x=?" ([1] [2] [3]))<block_start>print(row)<block_end>### ### defining your own functions @@ scalar-example ### <def_stmt>ilove7 *args<block_start>"a scalar function"<line_sep>print("ilove7 got" args "but I love 7")<line_sep><return>7<block_end>connection.createscalarfunction("seven" ilove7)<line_sep>#@@CAPTURE <for_stmt>row cursor.execute("select seven(x,y) from foo")<block_start>print(row)<block_end>#@@ENDCAPTURE ### ### aggregate functions are more complex @@ aggregate-example ### # Here we return the longest item when represented as a string. <class_stmt>longest<block_start><def_stmt>__init__ self<block_start>self.longest=""<block_end><def_stmt>step self *args<block_start><for_stmt>arg args<block_start><if_stmt>len(str(arg))<g>len(self.longest)<block_start>self.longest=str(arg)<block_end><block_end><block_end><def_stmt>final self<block_start><return>self.longest<block_end># Under Python 2.3 remove the following line and add # factory=classmethod(factory) at the end @classmethod<def_stmt>factory cls<block_start><return>cls() cls.step cls.final<block_end><block_end>#@@CAPTURE connection.createaggregatefunction("longest" longest.factory)<for_stmt>row cursor.execute("select longest(x,y) from foo")<block_start>print(row)<block_end>#@@ENDCAPTURE ### ### Defining collations. @@ collation-example ### # The default sorting mechanisms don't understand numbers at the end of strings # so here we define a collation that does cursor.execute("create table s(str)")<line_sep>cursor.executemany("insert into s values(?)" (["file1"] ["file7"] ["file17"] ["file20"] ["file3"]))<line_sep>#@@CAPTURE <for_stmt>row cursor.execute("select * from s order by str")<block_start>print(row)<block_end>#@@ENDCAPTURE <def_stmt>strnumcollate s1 s2# return -1 if s1<s2, +1 if s1>s2 else 0 # split values into two parts - the head and the numeric tail <block_start>values=[s1 s2]<for_stmt>vn,v enumerate(values)<block_start><for_stmt>i range(len(v) 0 -1)<block_start><if_stmt>v[i-1]<not><in>"01234567890"<block_start><break><block_end><block_end><try_stmt><block_start>v=(v[:i] int(v[i:]))<block_end><except_stmt>ValueError<block_start>v=(v[:i] <none>)<block_end>values[vn]=v<block_end># compare <if_stmt>values[0]<l>values[1]<block_start><return>-1<block_end><if_stmt>values[0]<g>values[1]<block_start><return>1<block_end><return>0<block_end>connection.createcollation("strnum" strnumcollate)<line_sep>#@@CAPTURE <for_stmt>row cursor.execute("select * from s order by str collate strnum")<block_start>print(row)<block_end>#@@ENDCAPTURE ### ### Authorizer (eg if you want to control what user supplied SQL can do) @@ authorizer-example ### <def_stmt>authorizer operation paramone paramtwo databasename triggerorview<block_start>"""Called when each operation is prepared. 
We can return SQLITE_OK, SQLITE_DENY or SQLITE_IGNORE"""<line_sep># find the operation name print(apsw.mapping_authorizer_function[operation] paramone paramtwo databasename triggerorview)<if_stmt>operation<eq>apsw.SQLITE_CREATE_TABLE<and>paramone.startswith("private")<block_start><return>apsw.SQLITE_DENY<block_end># not allowed to create tables whose names start with private <return>apsw.SQLITE_OK<block_end># always allow connection.setauthorizer(authorizer)<line_sep>#@@CAPTURE cursor.execute("insert into s values('foo')")<line_sep>cursor.execute("select str from s limit 1")<line_sep>#@@ENDCAPTURE # Cancel authorizer connection.setauthorizer(<none>)<line_sep>### ### progress handler (SQLite 3 experimental feature) @@ example-progress-handler ### # something to give us large numbers of random numbers <import_stmt>random<def_stmt>randomintegers howmany<block_start><for_stmt>i range(howmany)<block_start><yield>(random.randint(0 9999999999) )<block_end><block_end># create a table with 100 random numbers cursor.execute("begin ; create table bigone(x)")<line_sep>cursor.executemany("insert into bigone values(?)" randomintegers(100))<line_sep>cursor.execute("commit")<line_sep># display an ascii spinner _phcount=0<line_sep>_phspinner="|/-\\"<def_stmt>progresshandler <block_start><global>_phcount<line_sep>sys.stdout.write(_phspinner[_phcount%len(_phspinner)]+chr(8))# chr(8) is backspace sys.stdout.flush()<line_sep>_phcount<augadd>1<line_sep>time.sleep(0.1)# deliberate delay so we can see the spinner (SQLite is too fast otherwise!) <return>0<block_end># returning non-zero aborts # register progresshandler every 20 instructions connection.setprogresshandler(progresshandler 20)<line_sep># see it in action - sorting 100 numbers to find the biggest takes a while print("spinny thing -> " end="")<for_stmt>i cursor.execute("select max(x) from bigone")<block_start>print("\n" i sep="" end="")<line_sep>sys.stdout.flush()<block_end>connection.setprogresshandler(<none>)<line_sep>### ### commit hook (SQLite3 experimental feature) @@ example-commithook ### <def_stmt>mycommithook <block_start>print("in commit hook")<line_sep>hour=time.localtime()[3]<if_stmt>hour<l>8<or>hour<g>17<block_start>print("no commits out of hours")<line_sep><return>1# abort commits outside of 8am through 6pm <block_end>print("commits okay at this time")<line_sep><return>0<block_end># let commit go ahead #@@CAPTURE connection.setcommithook(mycommithook)<try_stmt><block_start>cursor.execute("begin; create table example(x,y,z); insert into example values (3,4,5) ; commit")<block_end><except_stmt>apsw.ConstraintError<block_start>print("commit was not allowed")<block_end>connection.setcommithook(<none>)<line_sep>#@@ENDCAPTURE ### ### update hook @@ example-updatehook ### <def_stmt>myupdatehook type databasename tablename rowid<block_start>print("Updated: %s database %s, table %s, row %d"%(apsw.mapping_authorizer_function[type] databasename tablename rowid))<block_end>#@@CAPTURE connection.setupdatehook(myupdatehook)<line_sep>cursor.execute("insert into s values(?)" ("file93" ))<line_sep>cursor.execute("update s set str=? where str=?" ("file94" "file93"))<line_sep>cursor.execute("delete from s where str=?" 
("file94" ))<line_sep>connection.setupdatehook(<none>)<line_sep>#@@ENDCAPTURE ### ### Blob I/O @@ example-blobio ### cursor.execute("create table blobby(x,y)")<line_sep># Add a blob we will fill in later cursor.execute("insert into blobby values(1,zeroblob(10000))")<line_sep># Or as a binding cursor.execute("insert into blobby values(2,?)" (apsw.zeroblob(20000) ))<line_sep># Open a blob for writing. We need to know the rowid rowid=inext(cursor.execute("select ROWID from blobby where x=1"))[0]<line_sep>blob=connection.blobopen("main" "blobby" "y" rowid 1)# 1 is for read/write blob.write(b"hello world")<line_sep>blob.seek(2000)<line_sep>blob.write(b"hello world, again")<line_sep>blob.close()<line_sep>### ### Virtual tables @@ example-vtable ### # This virtual table stores information about files in a set of # directories so you can execute SQL queries <def_stmt>getfiledata directories<block_start>columns=<none><line_sep>data=[]<line_sep>counter=1<for_stmt>directory directories<block_start><for_stmt>f os.listdir(directory)<block_start><if_stmt><not>os.path.isfile(os.path.join(directory f))<block_start><continue><block_end>counter<augadd>1<line_sep>st=os.stat(os.path.join(directory f))<if_stmt>columns<is><none><block_start>columns=["rowid" "name" "directory"]+[x<for>x dir(st)<if>x.startswith("st_")]<block_end>data.append([counter f directory]+[getattr(st x)<for>x columns[3:]])<block_end><block_end><return>columns data<block_end># This gets registered with the Connection <class_stmt>Source<block_start><def_stmt>Create self db modulename dbname tablename *args<block_start>columns,data=getfiledata([eval(a.replace("\\" "\\\\"))<for>a args])# eval strips off layer of quotes schema="create table foo("+','.join(["'%s'"%(x )<for>x columns[1:]])+")"<line_sep><return>schema Table(columns data)<block_end>Connect=Create<block_end># Represents a table <class_stmt>Table<block_start><def_stmt>__init__ self columns data<block_start>self.columns=columns<line_sep>self.data=data<block_end><def_stmt>BestIndex self *args<block_start><return><none><block_end><def_stmt>Open self<block_start><return>Cursor(self)<block_end><def_stmt>Disconnect self<block_start><pass><block_end>Destroy=Disconnect<block_end># Represents a cursor <class_stmt>Cursor<block_start><def_stmt>__init__ self table<block_start>self.table=table<block_end><def_stmt>Filter self *args<block_start>self.pos=0<block_end><def_stmt>Eof self<block_start><return>self.pos<ge>len(self.table.data)<block_end><def_stmt>Rowid self<block_start><return>self.table.data[self.pos][0]<block_end><def_stmt>Column self col<block_start><return>self.table.data[self.pos][1+col]<block_end><def_stmt>Next self<block_start>self.pos<augadd>1<block_end><def_stmt>Close self<block_start><pass><block_end><block_end># Register the module as filesource connection.createmodule("filesource" Source())<line_sep># Arguments to module - all directories in sys.path sysdirs=",".join(["'%s'"%(x )<for>x sys.path[1:]<if>len(x)<and>os.path.isdir(x)])<line_sep>cursor.execute("create virtual table sysfiles using filesource("+sysdirs+")")<line_sep>#@@CAPTURE # Which 3 files are the biggest? <for_stmt>size,directory,file cursor.execute("select st_size,directory,name from sysfiles order by st_size desc limit 3")<block_start>print(size file directory)<block_end>#@@ENDCAPTURE # Which 3 files are the oldest? 
#@@CAPTURE <for_stmt>ctime,directory,file cursor.execute("select st_ctime,directory,name from sysfiles order by st_ctime limit 3")<block_start>print(ctime file directory)<block_end>#@@ENDCAPTURE ### @@ example-vfs ### A VFS that "obfuscates" the database file contents. The scheme ### used is to xor all bytes with 0xa5. This scheme honours that used ### for MAPI and SQL Server. ### <def_stmt>encryptme data<block_start><if_stmt><not>data<block_start><return>data<block_end><if_stmt>py3<block_start><return>bytes([x^0xa5<for>x data])<block_end><return>"".join([chr(ord(x)^0xa5)<for>x data])<block_end># Inheriting from a base of "" means the default vfs <class_stmt>ObfuscatedVFS(apsw.VFS)<block_start><def_stmt>__init__ self vfsname="obfu" basevfs=""<block_start>self.vfsname=vfsname<line_sep>self.basevfs=basevfs<line_sep>apsw.VFS.__init__(self self.vfsname self.basevfs)<block_end># We want to return our own file implmentation, but also # want it to inherit <def_stmt>xOpen self name flags# We can look at uri parameters <block_start><if_stmt>isinstance(name apsw.URIFilename)#@@CAPTURE <block_start>print("fast is" name.uri_parameter("fast"))<line_sep>print("level is" name.uri_int("level" 3))<line_sep>print("warp is" name.uri_boolean("warp" <false>))<line_sep>print("notpresent is" name.uri_parameter("notpresent"))<line_sep>#@@ENDCAPTURE <block_end><return>ObfuscatedVFSFile(self.basevfs name flags)<block_end><block_end># The file implementation where we override xRead and xWrite to call our # encryption routine <class_stmt>ObfuscatedVFSFile(apsw.VFSFile)<block_start><def_stmt>__init__ self inheritfromvfsname filename flags<block_start>apsw.VFSFile.__init__(self inheritfromvfsname filename flags)<block_end><def_stmt>xRead self amount offset<block_start><return>encryptme(super(ObfuscatedVFSFile self).xRead(amount offset))<block_end><def_stmt>xWrite self data offset<block_start>super(ObfuscatedVFSFile self).xWrite(encryptme(data) offset)<block_end><block_end># To register the VFS we just instantiate it obfuvfs=ObfuscatedVFS()<line_sep># Lets see what vfs are now available? 
#@@CAPTURE print(apsw.vfsnames())<line_sep>#@@ENDCAPTURE # Make an obfuscated db, passing in some URI parameters obfudb=apsw.Connection("file:myobfudb?fast=speed&level=7&warp=on" flags=apsw.SQLITE_OPEN_READWRITE|apsw.SQLITE_OPEN_CREATE|apsw.SQLITE_OPEN_URI vfs=obfuvfs.vfsname)<line_sep># Check it works obfudb.cursor().execute("create table foo(x,y); insert into foo values(1,2)")<line_sep># Check it really is obfuscated on disk #@@CAPTURE print(open("myobfudb" "rb").read()[:20])<line_sep>#@@ENDCAPTURE # And unobfuscating it #@@CAPTURE print(encryptme(open("myobfudb" "rb").read()[:20]))<line_sep>#@@ENDCAPTURE # Tidy up obfudb.close()<line_sep>os.remove("myobfudb")<line_sep>### ### Limits @@example-limit ### #@@CAPTURE # Print some limits <for_stmt>limit ("LENGTH" "COLUMN" "ATTACHED")<block_start>name="SQLITE_LIMIT_"+limit<line_sep>maxname="SQLITE_MAX_"+limit# compile time orig=connection.limit(getattr(apsw name))<line_sep>print(name orig)<line_sep># To get the maximum, set to 0x7fffffff and then read value back connection.limit(getattr(apsw name) 0x7fffffff)<line_sep>max=connection.limit(getattr(apsw name))<line_sep>print(maxname max)<block_end># Set limit for size of a string cursor.execute("create table testlimit(s)")<line_sep>cursor.execute("insert into testlimit values(?)" ("x"<times>1024 ))# 1024 char string connection.limit(apsw.SQLITE_LIMIT_LENGTH 1023)# limit is now 1023 <try_stmt><block_start>cursor.execute("insert into testlimit values(?)" ("y"<times>1024 ))<line_sep>print("string exceeding limit was inserted")<block_end><except_stmt>apsw.TooBigError<block_start>print("Caught toobig exception")<block_end>connection.limit(apsw.SQLITE_LIMIT_LENGTH 0x7fffffff)<line_sep>#@@ENDCAPTURE ### ### Backup to memory @@example-backup ### # We will copy the disk database into a memory database memcon=apsw.Connection(":memory:")<line_sep># Copy into memory <with_stmt>memcon.backup("main" connection "main")<as>backup<block_start>backup.step()<block_end># copy whole database in one go # There will be no disk accesses for this query <for_stmt>row memcon.cursor().execute("select * from s")<block_start><pass><block_end>### ### Shell @@ example-shell ### # Here we use the shell to do a csv export providing the existing db # connection # Export to a StringIO <if_stmt>py3<block_start><import_stmt>io<block_end><else_stmt><block_start><import_stmt>StringIO<as>io<block_end>output=io.StringIO()<line_sep>shell=apsw.Shell(stdout=output db=connection)<line_sep># How to execute a dot command shell.process_command(".mode csv")<line_sep>shell.process_command(".headers on")<line_sep># How to execute SQL shell.process_sql("create table csvtest(col1,col2); insert into csvtest values(3,4); insert into csvtest values('a b', NULL)")<line_sep># Let the shell figure out SQL vs dot command shell.process_complete_line("select * from csvtest")<line_sep># Verify output #@@CAPTURE print(output.getvalue())<line_sep>#@@ENDCAPTURE ### ### Statistics @@example-status ### #@@CAPTURE print("SQLite memory usage current %d max %d"%apsw.status(apsw.SQLITE_STATUS_MEMORY_USED))<line_sep>#@@ENDCAPTURE ### ### Cleanup ### # We can close connections manually (useful if you want to catch exceptions) # but you don't have to connection.close(<true>)# force it since we want to exit # Delete database - we don't need it any more os.remove("dbfile")<line_sep>
<try_stmt><block_start><import_from_stmt>unittest mock<block_end><except_stmt>ImportError<block_start><import_stmt>mock<block_end><import_from_stmt>docker_custodian.docker_autostop build_container_matcher get_opts has_been_running_since main stop_container stop_containers <def_stmt>test_stop_containers mock_client container now<block_start>matcher=mock.Mock()<line_sep>mock_client.containers.return_value=[container]<line_sep>mock_client.inspect_container.return_value=container<line_sep>stop_containers(mock_client now matcher <false>)<line_sep>matcher.assert_called_once_with('container_name')<line_sep>mock_client.stop.assert_called_once_with(container['Id'])<block_end><def_stmt>test_stop_container mock_client<block_start>id='asdb'<line_sep>stop_container(mock_client id)<line_sep>mock_client.stop.assert_called_once_with(id)<block_end><def_stmt>test_build_container_matcher <block_start>prefixes=['one_' 'two_']<line_sep>matcher=build_container_matcher(prefixes)<assert_stmt>matcher('one_container')<assert_stmt>matcher('two_container')<assert_stmt><not>matcher('three_container')<assert_stmt><not>matcher('one')<block_end><def_stmt>test_has_been_running_since_true container later_time<block_start><assert_stmt>has_been_running_since(container later_time)<block_end><def_stmt>test_has_been_running_since_false container earlier_time<block_start><assert_stmt><not>has_been_running_since(container earlier_time)<block_end>@mock.patch('docker_custodian.docker_autostop.build_container_matcher' autospec=<true>)@mock.patch('docker_custodian.docker_autostop.stop_containers' autospec=<true>)@mock.patch('docker_custodian.docker_autostop.get_opts' autospec=<true>)@mock.patch('docker_custodian.docker_autostop.docker' autospec=<true>)<def_stmt>test_main mock_docker mock_get_opts mock_stop_containers mock_build_matcher<block_start>mock_get_opts.return_value.timeout=30<line_sep>main()<line_sep>mock_get_opts.assert_called_once_with()<line_sep>mock_build_matcher.assert_called_once_with(mock_get_opts.return_value.prefix)<line_sep>mock_stop_containers.assert_called_once_with(mock.ANY mock_get_opts.return_value.max_run_time mock_build_matcher.return_value mock_get_opts.return_value.dry_run)<block_end><def_stmt>test_get_opts_with_defaults <block_start>opts=get_opts(args=['--prefix' 'one' '--prefix' 'two'])<assert_stmt>opts.timeout<eq>60<assert_stmt>opts.dry_run<is><false><assert_stmt>opts.prefix<eq>['one' 'two']<assert_stmt>opts.max_run_time<is><none><block_end><def_stmt>test_get_opts_with_args now<block_start><with_stmt>mock.patch('docker_custodian.docker_autostop.timedelta_type' autospec=<true>)<as>mock_timedelta_type<block_start>opts=get_opts(args=['--prefix' 'one' '--max-run-time' '24h'])<block_end><assert_stmt>opts.max_run_time<eq>mock_timedelta_type.return_value<line_sep>mock_timedelta_type.assert_called_once_with('24h')<block_end>
""" Patch ranger.ext.shutil_generatorized """<import_stmt>os<import_from_stmt>shutil _basename<import_from_stmt>ranger.ext shutil_generatorized<import_from_stmt>ranger.ext.safe_path get_safe_path<def_stmt>wrap_move client<block_start>""" CopyLoader with do_cut parameter will invoke this method. Wrap low-level move method to save information in loaded buffers. :param client object: Object of attached neovim session """<def_stmt>move src dst overwrite make_safe_path=get_safe_path<block_start>real_dst=os.path.join(dst _basename(src))<if_stmt><not>overwrite<block_start>real_dst=make_safe_path(real_dst)<block_end><yield><from>raw_move(src dst overwrite make_safe_path)<line_sep>client.move_buf(src real_dst)<block_end>raw_move=shutil_generatorized.move<line_sep>shutil_generatorized.move=move<block_end>
<import_from_stmt>django.http HttpResponse<import_from_stmt>django.contrib.auth authenticate<import_stmt>re<import_stmt>base64<def_stmt>http_unauth <block_start>res=HttpResponse("Unauthorized")<line_sep>res.status_code=401<line_sep>res['WWW-Authenticate']='Basic realm="Secure Area"'<line_sep><return>res<block_end><def_stmt>match_first regx strg<block_start>m=re.match(regx strg)<if_stmt>(m<eq><none>)<block_start><return><none><block_end><else_stmt><block_start><return>m.group(1)<block_end><block_end><def_stmt>decode_auth strg<block_start><if_stmt>(strg<eq><none>)<block_start><return><none><block_end><else_stmt><block_start>m=re.match(r'([^:]*)\:(.*)' base64.decodestring(strg))<if_stmt>(m<ne><none>)<block_start><return>(m.group(1) m.group(2))<block_end><else_stmt><block_start><return><none><block_end><block_end><block_end><def_stmt>parse_auth_string authstr<block_start>auth=decode_auth(match_first('Basic (.*)' authstr))<if_stmt>(auth<eq><none>)<block_start><return><none><block_end><else_stmt><block_start><return>authenticate(username=auth[0] password=auth[1])<block_end><block_end><def_stmt>login_required view_f<block_start><def_stmt>wrapperf request *args **kwargs<block_start><if_stmt>(request.META.has_key('HTTP_AUTHORIZATION'))<block_start>auth=request.META['HTTP_AUTHORIZATION']<line_sep>user=parse_auth_string(auth)<if_stmt>(user<ne><none>)<block_start>request.user=user<line_sep><return>view_f(request *args **kwargs)<block_end><block_end><return>http_unauth()<block_end><return>wrapperf<block_end>
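A short usage sketch for the decorator above (the view function and its body are hypothetical; it assumes the module above is importable in the same Django 1.x / Python 2 project):

@login_required
def protected_view(request):
    # request.user was set by the decorator from the Basic-Auth header
    return HttpResponse("hello %s" % request.user.username)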
<class_stmt>groupcount(object)<block_start>"""Accept a (possibly infinite) iterable and yield a succession of sub-iterators from it, each of which will yield N values. >>> gc = groupcount('abcdefghij', 3) >>> for subgroup in gc: ... for item in subgroup: ... print item, ... print ... a b c d e f g h i j """<def_stmt>__init__ self iterable n=10<block_start>self.it=iter(iterable)<line_sep>self.n=n<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>next self<block_start><return>self._group(self.it.next())<block_end><def_stmt>_group self ondeck<block_start><yield>ondeck<for_stmt>i xrange(1 self.n)<block_start><yield>self.it.next()<block_end><block_end><block_end>
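groupcount as written targets Python 2 (xrange, it.next(), print statement in the doctest). A minimal Python 3 equivalent, offered only as a sketch, has to guard against PEP 479, which no longer lets a StopIteration escape from a generator:

class groupcount3(object):
    """Python 3 sketch of groupcount: yields sub-iterators of up to n items each."""
    def __init__(self, iterable, n=10):
        self.it = iter(iterable)
        self.n = n

    def __iter__(self):
        return self

    def __next__(self):
        return self._group(next(self.it))

    def _group(self, ondeck):
        yield ondeck
        for _ in range(1, self.n):
            try:
                yield next(self.it)
            except StopIteration:
                return  # PEP 479: end the sub-group instead of leaking StopIteration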
<import_stmt>locale<import_stmt>unittest<import_from_stmt>mock patch<import_from_stmt>pathlib Path<import_from_stmt>PyQt5.QtCore QLocale<import_from_stmt>PyQt5.QtWidgets QApplication<import_from_stmt>inselect.gui.app main<import_from_stmt>inselect.gui.main_window MainWindow<line_sep>TESTDATA=Path(__file__).parent.parent/'test_data'<class_stmt>TestApp(unittest.TestCase)<block_start>"""Start and exit the application """<line_sep>@patch.object(QApplication 'exec_' return_value=0)<def_stmt>test_app self mock_exec_<block_start>"User starts the application"<line_sep>self.assertRaises(SystemExit main [])<line_sep>self.assertTrue(mock_exec_.called)<block_end>@patch.object(QApplication 'exec_' return_value=0)@patch.object(MainWindow 'open_file')<def_stmt>test_app_load_file self mock_open_file mock_exec_<block_start>"User starts the application with a file"<line_sep>path=str(TESTDATA/'shapes.inselect')<line_sep>self.assertRaises(SystemExit main [path])<line_sep>self.assertTrue(mock_exec_.called)<line_sep>mock_open_file.assert_called_once_with(Path(path))<block_end>@patch.object(QApplication 'exec_' return_value=0)@patch.object(QLocale 'setDefault')@patch.object(locale 'setlocale')<def_stmt>test_app_set_locale self mock_setlocale mock_set_default mock_exec_<block_start>"User starts the application with a non-default locale"<line_sep># Python's locale.setlocale raises an exception if the locale is # unrecognised, so it is mocked. loc='ja_JP'<line_sep>self.assertRaises(SystemExit main ['-l' loc])<line_sep>self.assertTrue(mock_exec_.called)<line_sep>mock_set_default.assert_called_once_with(QLocale(loc))<line_sep># Other actions inside main might cause setlocale to be called so # should not assert number of calls. mock_setlocale.assert_any_call(locale.LC_ALL loc)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>json<line_sep>DEFAULTS={"arch":"fpn_resnext" "segnetwork":{"backbone_arch":"resnext101" "seg_classes":2 "ignore_index":255 } "network":{} "optimizer":{"batch_size":256 "freeze_first_epoch":<false> "type":"SGD" # supported: SGD, Adam "momentum":0.9 "weight_decay":0 "clip":1. "learning_rate":0.1 "classifier_lr":-1. # If -1 use same learning rate as the rest of the network "nesterov":<true> "schedule":{"type":"constant" # supported: constant, step, multistep, exponential, linear, poly "mode":"epoch" # supported: epoch, step "epochs":10 "params":{}}} "input":{"scale_train":-1 # If -1 do not scale "random_vh_shift":0 "crop_train":224 "color_jitter_train":<false> "lighting_train":<false> "random_crop":[202 202] "crop_size_range":[1. 1.] "rescale_prob":0.0 "mask_downscale_factor":1 "padding_block":0 "padding_mode":'reflect' "mean":[0.485 0.456 0.406] "std":[0.229 0.224 0.225]}}<def_stmt>_merge src dst<block_start><for_stmt>k,v src.items()<block_start><if_stmt>k<in>dst<block_start><if_stmt>isinstance(v dict)<block_start>_merge(src[k] dst[k])<block_end><block_end><else_stmt><block_start>dst[k]=v<block_end><block_end><block_end><def_stmt>load_config config_file defaults=DEFAULTS<block_start><with_stmt>open(config_file "r")<as>fd<block_start>config=json.load(fd)<block_end>_merge(defaults config)<line_sep><return>config<block_end>
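A small illustration of the default-filling behaviour of _merge (a sketch; it assumes the definitions above are in scope, uses an in-memory dict in place of a parsed JSON file, and deep-copies DEFAULTS only to keep the module-level dict untouched):

import copy

defaults = copy.deepcopy(DEFAULTS)
user_cfg = {"arch": "fpn_resnext", "optimizer": {"learning_rate": 0.01}}
_merge(defaults, user_cfg)

assert user_cfg["optimizer"]["learning_rate"] == 0.01   # user value is kept
assert user_cfg["optimizer"]["momentum"] == 0.9         # missing key filled from defaults
assert user_cfg["input"]["crop_train"] == 224           # whole missing section copied over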
<import_stmt>sys<class_stmt>Logger<block_start>@staticmethod<def_stmt>_out x<block_start>sys.stderr.write(str(x)+u'\n')<block_end>@staticmethod<def_stmt>dbg x<block_start>sys.stderr.write(u'[dbg] '+str(x)+u'\n')<block_end>@staticmethod<def_stmt>out x<block_start>Logger._out(u'[.] '+str(x))<block_end>@staticmethod<def_stmt>info x<block_start>Logger._out(u'[?] '+str(x))<block_end>@staticmethod<def_stmt>err x<block_start>sys.stderr.write(u'[!] '+str(x)+u'\n')<block_end>@staticmethod<def_stmt>warn x<block_start>Logger._out(u'[-] '+str(x))<block_end>@staticmethod<def_stmt>ok x<block_start>Logger._out(u'[+] '+str(x))<block_end><block_end>
<import_stmt>re<import_stmt>sys<import_from_stmt>os environ<line_sep>BASE_LOGO=""" Sanic Build Fast. Run Fast. """<line_sep>COFFEE_LOGO="""\033[48;2;255;13;104m \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▄████████▄ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ██ ██▀▀▄ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ███████████ █ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ███████████▄▄▀ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▀███████▀ \033[0m \033[48;2;255;13;104m \033[0m Dark roast. No sugar."""<line_sep>COLOR_LOGO="""\033[48;2;255;13;104m \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▄███ █████ ██ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ██ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ▀███████ ███▄ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ██ \033[0m \033[38;2;255;255;255;48;2;255;13;104m ████ ████████▀ \033[0m \033[48;2;255;13;104m \033[0m Build Fast. Run Fast."""<line_sep>FULL_COLOR_LOGO=""" \033[38;2;255;13;104m ▄███ █████ ██ \033[0m ▄█▄ ██ █ █ ▄██████████ \033[38;2;255;13;104m ██ \033[0m █ █ █ ██ █ █ ██ \033[38;2;255;13;104m ▀███████ ███▄ \033[0m ▀ █ █ ██ ▄ █ ██ \033[38;2;255;13;104m ██\033[0m █████████ █ ██ █ █ ▄▄ \033[38;2;255;13;104m ████ ████████▀ \033[0m █ █ █ ██ █ ▀██ ███████ """<line_sep># noqa ansi_pattern=re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")<def_stmt>get_logo full=<false> coffee=<false><block_start>logo=((FULL_COLOR_LOGO<if>full<else>(COFFEE_LOGO<if>coffee<else>COLOR_LOGO))<if>sys.stdout.isatty()<else>BASE_LOGO)<if_stmt>(sys.platform<eq>"darwin"<and>environ.get("TERM_PROGRAM")<eq>"Apple_Terminal")<block_start>logo=ansi_pattern.sub("" logo)<block_end><return>logo<block_end>
<import_stmt>evernote.edam.type.ttypes<as>NoteType<import_from_stmt>evernote.api.client EvernoteClient<import_from_stmt>Stephanie.Modules.base_module BaseModule<line_sep># Written by <NAME> - <EMAIL> <class_stmt>EvernoteModule(BaseModule)<block_start><def_stmt>__init__ self *args<block_start>super(EvernoteModule self).__init__(*args)<line_sep>self.auth_token=self.get_configuration('evernote_auth_token')<if_stmt>self.auth_token<block_start>self.client=EvernoteClient(token=self.auth_token sandbox=<false>)<line_sep>self.user_store=self.client.get_user_store()<line_sep>self.note_store=self.client.get_note_store()<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>write_note self<block_start>note=NoteType.Note()# Creates a new note note.title="Stephanie Note"<line_sep>self.assistant.say("What would you like me to write down?")<line_sep>the_note=self.assistant.listen().decipher()# Listens to the input and stores it note.content='<?xml version="1.0" encoding="UTF-8"?>'<line_sep>note.content<augadd>'<!DOCTYPE en-note SYSTEM '<concat>'"http://xml.evernote.com/pub/enml2.dtd">'<line_sep>note.content<augadd>'<en-note>Note:<br/>'<line_sep>note.content<augadd>('%s'%the_note)<line_sep>note.content<augadd>'</en-note>'<try_stmt><block_start>created_note=self.note_store.createNote(note)# Stores the new note in Evernote <block_end><except_stmt><block_start>response=("Note wasn't created successfully, you probably didn't spelled anything or spelled really "<concat>"bad, Not my fault okay? It's never a program's fault.")<line_sep>print(response)<line_sep><return>response<block_end><if_stmt>created_note<block_start><return>"I successfully wrote down your note."<block_end><else_stmt><block_start>response=("Note wasn't created successfully, you probably didn't spelled anything or spelled really "<concat>"bad, Not my fault okay? It's never a program's fault. /s Refer back to docs.")<line_sep>print(response)<line_sep><return>response<block_end><block_end><block_end>
# Django settings for pastevid project. <import_stmt>datetime<import_stmt>os.path<import_stmt>profanity_list<line_sep>PROJECT_ROOT=os.path.abspath(os.path.dirname(__file__))<line_sep>DEBUG=<true><line_sep>TEMPLATE_DEBUG=DEBUG<line_sep># Use to determine what robots.txt to serve. To allow all crawlers set this to True. PRODUCTION=<false><line_sep>ADMINS=(('adam' '<EMAIL>') ('caleb' '<EMAIL>') )<line_sep>MANAGERS=ADMINS<line_sep>DATABASES={'default':{'ENGINE':'django.db.backends.sqlite3' # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. 'NAME':os.path.join(PROJECT_ROOT 'pastevid.db') # Or path to database file if using sqlite3. 'USER':'' # Not used with sqlite3. 'PASSWORD':'' # Not used with sqlite3. 'HOST':'' # Set to empty string for localhost. Not used with sqlite3. 'PORT':'' # Set to empty string for default. Not used with sqlite3. }}<line_sep># Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE='America/Chicago'<line_sep>USE_TZ=<true><line_sep># Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE='en-us'<line_sep>SITE_ID=1<line_sep># If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N=<true><line_sep># If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N=<true><line_sep># Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT=os.path.join(PROJECT_ROOT 'media/')<line_sep># URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL='/media/'<line_sep># Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT=os.path.join(PROJECT_ROOT 'static/')<line_sep># URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL='/static/'<line_sep># URL prefix for admin static files -- CSS, JavaScript and images. # Make sure to use a trailing slash. # Examples: "http://foo.com/static/admin/", "/static/admin/". ADMIN_MEDIA_PREFIX='/static/admin/'<line_sep># Additional locations of static files STATICFILES_DIRS=(# Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. )<line_sep># List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS=('django.contrib.staticfiles.finders.FileSystemFinder' 'django.contrib.staticfiles.finders.AppDirectoriesFinder' # 'django.contrib.staticfiles.finders.DefaultStorageFinder', )<line_sep># Make this unique, and don't share it with anybody. SECRET_KEY='<django-secret-key>'<line_sep># List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader' 'django.template.loaders.app_directories.Loader' # 'django.template.loaders.eggs.Loader', )<line_sep>TEMPLATE_CONTEXT_PROCESSORS=('context_processors.current_site' 'django.contrib.auth.context_processors.auth' "django.core.context_processors.media" 'django.core.context_processors.request' )<line_sep>MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware' 'django.contrib.sessions.middleware.SessionMiddleware' 'django.middleware.csrf.CsrfViewMiddleware' 'django.contrib.auth.middleware.AuthenticationMiddleware' 'django.contrib.messages.middleware.MessageMiddleware' 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware')<line_sep>ROOT_URLCONF='urls'<line_sep>TEMPLATE_DIRS=(# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_ROOT 'templates/') )<line_sep>FIXTURE_DIRS=(os.path.join(PROJECT_ROOT 'accounts/fixtures/') os.path.join(PROJECT_ROOT 'videos/fixtures/') )<line_sep>AUTHENTICATION_BACKENDS=('auth_login.backends.EmailBackend' 'social_auth.backends.facebook.FacebookBackend' 'social_auth.backends.google.GoogleOAuthBackend' 'social_auth.backends.google.GoogleOAuth2Backend' 'social_auth.backends.google.GoogleBackend' 'social_auth.backends.OpenIDBackend' 'django.contrib.auth.backends.ModelBackend' )<line_sep>HITCOUNT_KEEP_HIT_ACTIVE={'days':45}<line_sep>HITCOUNT_HITS_PER_IP_LIMIT=0<line_sep>HITCOUNT_EXCLUDE_USER_GROUP=('Editor' )<line_sep>AUTH_PROFILE_MODULE='accounts.UserProfile'<line_sep>INSTALLED_APPS=('django.contrib.auth' 'django.contrib.contenttypes' 'django.contrib.sessions' 'django.contrib.sites' 'django.contrib.messages' 'django.contrib.staticfiles' 'django.contrib.flatpages' # Uncomment the next line to enable the admin: 'django.contrib.admin' # Uncomment the next line to enable admin documentation: 'django.contrib.admindocs' #External Apps 'social_auth' 'south' 'hitcount' 'paypal.standard.ipn' 'chronograph' 'django_extensions' 'oembed' 'django_jenkins' 'widget_tweaks' #Local apps 'emailnotify' 'about' 'videos' 'accounts' 'metatags' 'amazon' 'thumbsup' 'flatpages' 'authorize_net' )<line_sep># For Jenkins testing PROJECT_APPS=('accounts' 'videos' 'about' 'amazon' 'emailnotify' 'metatags' 'thumbsup' 'api' )<line_sep>JENKINS_TASKS=('django_jenkins.tasks.with_coverage' 'django_jenkins.tasks.django_tests' # select one django or #'django_jenkins.tasks.dir_tests' # directory tests discovery )<line_sep># A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING={'version':1 'disable_existing_loggers':<false> 'formatters':{'verbose':{'format':'%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'} 'simple':{'format':'%(levelname)s %(message)s'}} 'handlers':{'mail_admins':{'level':'ERROR' 'class':'django.utils.log.AdminEmailHandler'} 'file':{'level':'DEBUG' 'class':'logging.FileHandler' 'filename':os.path.join(PROJECT_ROOT 'amazon.log') 'formatter':'verbose' } 'videos':{'level':'DEBUG' 'class':'logging.FileHandler' 'filename':os.path.join(PROJECT_ROOT 'videos.log') 'formatter':'verbose'} 'encode':{'level':'DEBUG' 'class':'logging.FileHandler' 'filename':os.path.join(PROJECT_ROOT 'encode.log') 'formatter':'verbose'}} 'loggers':{'django.request':{'handlers':['mail_admins'] 'level':'ERROR' 'propagate':<true> } 'amazon.utils':{'handlers':['file' ] 'level':'DEBUG' 'propagate':<true> } 'videos.forms':{'handlers':['file' ] 'level':'WARN' 'propagate':<true> } 'videos.views':{'handlers':['videos' ] 'level':'DEBUG' 'propagate':<true> } 'encode':{'handlers':['encode' ] 'level':'DEBUG' 'propagate':<true>}}}<line_sep># Paypal Account PAYPAL_RECEIVER_EMAIL="<your-paypal-reciever-email>"<line_sep>PROFANITY_LIST=profanity_list.CENSORED_LIST<line_sep>############# EMAIL SETTINGS ############### # override these in your local_settings.py EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend'<line_sep>EMAIL_USE_TLS=<true><line_sep>EMAIL_HOST=''<line_sep>EMAIL_HOST_USER=''<line_sep>EMAIL_HOST_PASSWORD=''<line_sep>EMAIL_PORT=465<line_sep>HOST=""<line_sep>DEFAULT_FROM_EMAIL=''<line_sep>EMAIL_SUBJECT_PREFIX='[Screenbird]'<line_sep>EMAIL_FAIL_SILENTLY=<false><line_sep># AMAZON PUSH_TO_S3=<true><line_sep>AWS_ACCESS_KEY_ID=""<line_sep>AWS_SECRET_ACCESS_KEY=""<line_sep>AWS_VIDEO_BUCKET_NAME="%s-%s"%(AWS_ACCESS_KEY_ID.lower() "videos")<line_sep>UPLOAD_DELAY=datetime.timedelta(hours=12)<line_sep>UPLOAD_CHECKING=datetime.timedelta(minutes=30)<line_sep>EC2_KEY_NAME=''<line_sep>QUEUE_NAME="video_queue"<line_sep>COCREATE_QUEUE_NAME='cocreate_queue'<line_sep>PEM_PATH=os.path.join(PROJECT_ROOT "amazon" "ec2_files")<line_sep># Use to determine what robots.txt to serve. To allow all crawlers set this to True. PRODUCTION=<false><line_sep>#Facebook OAuth Keys #Production's APP ID; Override on local_settings for test site FACEBOOK_APP_ID=''<line_sep>FACEBOOK_API_SECRET=''<line_sep>FACEBOOK_EXTENDED_PERMISSIONS=['offline_access' 'publish_stream' 'email']<line_sep>#Twitter OAuth Keys #Production's APP ID; Override on local_settings for test site TWITTER_CONSUMER_KEY=''<line_sep>TWITTER_CONSUMER_SECRET=''<line_sep>#Social_Auth Parameters SOCIAL_AUTH_CREATE_USERS=<true><line_sep>SOCIAL_AUTH_FORCE_RANDOM_USERNAME=<false><line_sep>SOCIAL_AUTH_DEFAULT_USERNAME='socialauth_user'<line_sep>SOCIAL_AUTH_COMPLETE_URL_NAME='complete'<line_sep>SOCIAL_AUTH_ASSOCIATE_BY_MAIL=<true><line_sep>#Youtube YOUTUBE_DEV_KEY=''<line_sep>#Login Parameters LOGIN_ERROR_URL='/login/error/'<line_sep>LOGIN_URL='/login/'<line_sep>LOGIN_REDIRECT_URL='/login_auth/'<line_sep>#Sites SITE_ID=2#Screen Bird Site ID # If you are using secure AuthSub, be sure to set your RSA private key so a SecureAuthSubToken is created # http://code.google.com/apis/gdata/docs/auth/authsub.html#No-Library # SECURE_KEY the location of the RSA private key(For production). None if AuthSub is not secured. 
SECURE_KEY=<none>#os.path.join(PROJECT_ROOT, '') ENABLE_VIDEO_APPROVAL=<true><line_sep># Default Authorize.net credentials LOGIN_ID=u''<line_sep>TRANS_KEY=u''<line_sep>IS_TEST=<true><line_sep>DELIMITER=u','<line_sep>ENCAPSULATOR=u''<line_sep>#settings for upload test FILE_LOCATION=os.path.join(MEDIA_ROOT 'tmp/sample_video.mp4')<line_sep>FILE_KEY='SAMPLE'<try_stmt><block_start><import_from_stmt>local_settings *<block_end><except_stmt>ImportError<block_start><pass><block_end>
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-02-24 18:46 <import_from_future_stmt> unicode_literals<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('posts' '0002_auto_20171217_0008') ]<line_sep>operations=[migrations.CreateModel(name='ViewedPostTracking' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('time' models.DateTimeField()) ('actor' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name='who_visit_post' to=settings.AUTH_USER_MODEL)) ('visited_post' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='posts.Post')) ] ) ]<block_end>
# # This file is part of pysnmp software. # # Copyright (c) 2005-2019, <NAME> <<EMAIL>> # License: http://snmplabs.com/pysnmp/license.html # <import_stmt>pysnmp.smi.error<import_from_stmt>pysnmp debug<import_from_stmt>pysnmp.proto errind<import_from_stmt>pysnmp.proto error<import_from_stmt>pysnmp.proto rfc1902<import_from_stmt>pysnmp.proto rfc1905<import_from_stmt>pysnmp.proto rfc3411<import_from_stmt>pysnmp.proto.api v2c# backend is always SMIv2 compliant <import_from_stmt>pysnmp.proto.proxy rfc2576<import_from_stmt>pysnmp.smi exval<line_sep># 3.2 <class_stmt>CommandResponderBase(object)<block_start>ACM_ID=3# default MIB access control method to use SUPPORTED_PDU_TYPES=()<line_sep>SMI_ERROR_MAP={pysnmp.smi.error.TooBigError:'tooBig' # this should never bubble up, SNMP exception objects should be passed as values pysnmp.smi.error.NoSuchNameError:'noSuchName' pysnmp.smi.error.BadValueError:'badValue' pysnmp.smi.error.ReadOnlyError:'readOnly' pysnmp.smi.error.GenError:'genErr' pysnmp.smi.error.NoAccessError:'noAccess' pysnmp.smi.error.WrongTypeError:'wrongType' pysnmp.smi.error.WrongLengthError:'wrongLength' pysnmp.smi.error.WrongEncodingError:'wrongEncoding' pysnmp.smi.error.WrongValueError:'wrongValue' pysnmp.smi.error.NoCreationError:'noCreation' pysnmp.smi.error.InconsistentValueError:'inconsistentValue' pysnmp.smi.error.ResourceUnavailableError:'resourceUnavailable' pysnmp.smi.error.CommitFailedError:'commitFailed' pysnmp.smi.error.UndoFailedError:'undoFailed' pysnmp.smi.error.AuthorizationError:'authorizationError' pysnmp.smi.error.NotWritableError:'notWritable' pysnmp.smi.error.InconsistentNameError:'inconsistentName'}<def_stmt>__init__ self snmpEngine snmpContext cbCtx=<none><block_start>snmpEngine.msgAndPduDsp.registerContextEngineId(snmpContext.contextEngineId self.SUPPORTED_PDU_TYPES self.processPdu)<line_sep>self.snmpContext=snmpContext<line_sep>self.cbCtx=cbCtx<line_sep>self.__pendingReqs={}<block_end><def_stmt>close self snmpEngine<block_start>snmpEngine.msgAndPduDsp.unregisterContextEngineId(self.snmpContext.contextEngineId self.SUPPORTED_PDU_TYPES)<line_sep>self.snmpContext=self.__pendingReqs=<none><block_end><def_stmt>releaseStateInformation self stateReference<block_start><if_stmt>stateReference<in>self.__pendingReqs<block_start><del_stmt>self.__pendingReqs[stateReference]<block_end><block_end><def_stmt>sendVarBinds self snmpEngine stateReference errorStatus errorIndex varBinds<block_start>(messageProcessingModel securityModel securityName securityLevel contextEngineId contextName pduVersion PDU origPdu maxSizeResponseScopedPDU statusInformation)=self.__pendingReqs[stateReference]<line_sep>v2c.apiPDU.setErrorStatus(PDU errorStatus)<line_sep>v2c.apiPDU.setErrorIndex(PDU errorIndex)<line_sep>v2c.apiPDU.setVarBinds(PDU varBinds)<line_sep>debug.logger&debug.FLAG_APP<and>debug.logger('sendVarBinds: stateReference %s, errorStatus %s, errorIndex %s, '<concat>'varBinds %s'%(stateReference errorStatus errorIndex varBinds))<line_sep>self.sendPdu(snmpEngine stateReference PDU)<block_end><def_stmt>sendPdu self snmpEngine stateReference PDU<block_start>(messageProcessingModel securityModel securityName securityLevel contextEngineId contextName pduVersion _ origPdu maxSizeResponseScopedPDU statusInformation)=self.__pendingReqs[stateReference]<line_sep># Agent-side API complies with SMIv2 <if_stmt>messageProcessingModel<eq>0<block_start>PDU=rfc2576.v2ToV1(PDU origPdu)<block_end># 3.2.6 <try_stmt><block_start>snmpEngine.msgAndPduDsp.returnResponsePdu(snmpEngine messageProcessingModel 
securityModel securityName securityLevel contextEngineId contextName pduVersion PDU maxSizeResponseScopedPDU stateReference statusInformation)<block_end><except_stmt>error.StatusInformation<as>exc<block_start>debug.logger&debug.FLAG_APP<and>debug.logger('sendPdu: stateReference %s, statusInformation '<concat>'%s'%(stateReference exc))<line_sep>snmpSilentDrops,=snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB' 'snmpSilentDrops')<line_sep>snmpSilentDrops.syntax<augadd>1<block_end><block_end>_getRequestType=rfc1905.GetRequestPDU.tagSet<line_sep>_getNextRequestType=rfc1905.GetNextRequestPDU.tagSet<line_sep>_setRequestType=rfc1905.SetRequestPDU.tagSet<line_sep>_counter64Type=rfc1902.Counter64.tagSet<def_stmt>processPdu self snmpEngine messageProcessingModel securityModel securityName securityLevel contextEngineId contextName pduVersion PDU maxSizeResponseScopedPDU stateReference# Agent-side API complies with SMIv2 <block_start><if_stmt>messageProcessingModel<eq>0<block_start>origPdu=PDU<line_sep>PDU=rfc2576.v1ToV2(PDU)<block_end><else_stmt><block_start>origPdu=<none><block_end># 3.2.1 <if_stmt>(PDU.tagSet<not><in>rfc3411.READ_CLASS_PDUS<and>PDU.tagSet<not><in>rfc3411.WRITE_CLASS_PDUS)<block_start><raise>error.ProtocolError('Unexpected PDU class %s'%PDU.tagSet)<block_end># 3.2.2 --> no-op # 3.2.4 rspPDU=v2c.apiPDU.getResponse(PDU)<line_sep>statusInformation={}<line_sep>self.__pendingReqs[stateReference]=(messageProcessingModel securityModel securityName securityLevel contextEngineId contextName pduVersion rspPDU origPdu maxSizeResponseScopedPDU statusInformation)<line_sep># 3.2.5 varBinds=v2c.apiPDU.getVarBinds(PDU)<line_sep>debug.logger&debug.FLAG_APP<and>debug.logger('processPdu: stateReference %s, varBinds %s'%(stateReference varBinds))<line_sep>self.initiateMgmtOperation(snmpEngine stateReference contextName PDU)<block_end>@staticmethod<def_stmt>_storeAccessContext snmpEngine<block_start>"""Copy received message metadata while it lasts"""<line_sep>execCtx=snmpEngine.observer.getExecutionContext('rfc3412.receiveMessage:request')<line_sep><return>{'securityModel':execCtx['securityModel'] 'securityName':execCtx['securityName'] 'securityLevel':execCtx['securityLevel'] 'contextName':execCtx['contextName'] 'pduType':execCtx['pdu'].getTagSet()}<block_end>@classmethod<def_stmt>verifyAccess cls viewType varBind **context<block_start>name,val=varBind<line_sep>snmpEngine=context['snmpEngine']<line_sep>(securityModel securityName securityLevel contextName pduType)=(context['securityModel'] context['securityName'] context['securityLevel'] context['contextName'] context['pduType'])<try_stmt><block_start>snmpEngine.accessControlModel[cls.ACM_ID].isAccessAllowed(snmpEngine securityModel securityName securityLevel viewType contextName name)<block_end># Map ACM errors onto SMI ones <except_stmt>error.StatusInformation<as>exc<block_start>statusInformation=exc<line_sep>debug.logger&debug.FLAG_APP<and>debug.logger('__verifyAccess: name %s, statusInformation '<concat>'%s'%(name statusInformation))<line_sep>errorIndication=statusInformation['errorIndication']<line_sep># 3.2.5... 
<if_stmt>(errorIndication<eq>errind.noSuchView<or>errorIndication<eq>errind.noAccessEntry<or>errorIndication<eq>errind.noGroupName)<block_start><raise>pysnmp.smi.error.AuthorizationError(name=name idx=context.get('idx'))<block_end><elif_stmt>errorIndication<eq>errind.otherError<block_start><raise>pysnmp.smi.error.GenError(name=name idx=context.get('idx'))<block_end><elif_stmt>errorIndication<eq>errind.noSuchContext<block_start>snmpUnknownContexts,=snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-TARGET-MIB' 'snmpUnknownContexts')<line_sep>snmpUnknownContexts.syntax<augadd>1<line_sep># Request REPORT generation <raise>pysnmp.smi.error.GenError(name=name idx=context.get('idx') oid=snmpUnknownContexts.name val=snmpUnknownContexts.syntax)<block_end><elif_stmt>errorIndication<eq>errind.notInView<block_start><return><true><block_end><else_stmt><block_start><raise>error.ProtocolError('Unknown ACM error %s'%errorIndication)<block_end><block_end><else_stmt># rfc2576: 4.1.2.1 <block_start><if_stmt>(securityModel<eq>1<and>val<is><not><none><and>cls._counter64Type<eq>val.getTagSet()<and>cls._getNextRequestType<eq>pduType)# This will cause MibTree to skip this OID-value <block_start><raise>pysnmp.smi.error.NoAccessError(name=name idx=context.get('idx'))<block_end><block_end><block_end><def_stmt>_getMgmtFun self contextName<block_start><return><lambda>*args **kwargs:<none><block_end><def_stmt>_mapSmiErrors self varBinds **context<block_start>errorIndication=<none><line_sep>errorStatus=errorIndex=0<line_sep>errors=context.get('errors')<if_stmt><not>errors<block_start><return>errorIndication errorStatus errorIndex varBinds<block_end># Take the latest exception err=errors[-1]<if_stmt>isinstance(err pysnmp.smi.error.GenError)<block_start>errorIndication=str(err)<block_end><elif_stmt>isinstance(err pysnmp.smi.error.SmiError)<block_start><if_stmt>isinstance(err pysnmp.smi.error.TooBigError)# rfc1905: 4.2.1.3 <block_start>varBinds=[]<block_end>errorStatus=self.SMI_ERROR_MAP.get(err.__class__ 'genErr')<try_stmt><block_start>errorIndex=err['idx']+1<block_end><except_stmt>IndexError<block_start>errorIndex=len(varBinds)<and>1<or>0<block_end><block_end><return>errorIndication errorStatus errorIndex varBinds<block_end><def_stmt>completeMgmtOperation self varBinds **context<block_start>(errorIndication errorStatus errorIndex varBinds)=self._mapSmiErrors(varBinds **context)<line_sep>stateReference=context['stateReference']<if_stmt>errorIndication<block_start>statusInformation=self.__pendingReqs[stateReference]['statusInformation']<try_stmt># Request REPORT generation <block_start>statusInformation['oid']=errorIndication['oid']<line_sep>statusInformation['val']=errorIndication['val']<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end>self.sendVarBinds(context['snmpEngine'] stateReference errorStatus errorIndex varBinds)<line_sep>self.releaseStateInformation(stateReference)<block_end><def_stmt>initiateMgmtOperation self snmpEngine stateReference contextName PDU<block_start>varBinds=v2c.apiPDU.getVarBinds(PDU)<line_sep>mgmtFun=self._getMgmtFun(contextName)<line_sep>context=dict(snmpEngine=snmpEngine stateReference=stateReference acFun=self.verifyAccess cbFun=self.completeMgmtOperation cbCtx=self.cbCtx)<line_sep>context.update(self._storeAccessContext(snmpEngine))<line_sep>mgmtFun(*varBinds **context)<block_end><block_end><class_stmt>GetCommandResponder(CommandResponderBase)<block_start>SUPPORTED_PDU_TYPES=(rfc1905.GetRequestPDU.tagSet )<line_sep># rfc1905: 4.2.1 
<def_stmt>_getMgmtFun self contextName<block_start><return>self.snmpContext.getMibInstrum(contextName).readMibObjects<block_end><block_end><class_stmt>NextCommandResponder(CommandResponderBase)<block_start>SUPPORTED_PDU_TYPES=(rfc1905.GetNextRequestPDU.tagSet )<line_sep># rfc1905: 4.2.2 <def_stmt>_getMgmtFun self contextName<block_start><return>self.snmpContext.getMibInstrum(contextName).readNextMibObjects<block_end><def_stmt>_getManagedObjectsInstances self varBinds **context<block_start>"""Iterate over Managed Objects fulfilling SNMP query. Returns ------- :py:class:`list` - List of Managed Objects Instances to respond with or `None` to indicate that not all objects have been gathered so far. """<line_sep>rspVarBinds=context['rspVarBinds']<line_sep>varBindsMap=context['varBindsMap']<line_sep>rtrVarBinds=[]<for_stmt>idx,varBind enumerate(varBinds)<block_start>name,val=varBind<if_stmt>(exval.noSuchObject.isSameTypeWith(val)<or>exval.noSuchInstance.isSameTypeWith(val))<block_start>varBindsMap[len(rtrVarBinds)]=varBindsMap.pop(idx idx)<line_sep>rtrVarBinds.append(varBind)<block_end><else_stmt><block_start>rspVarBinds[varBindsMap.pop(idx idx)]=varBind<block_end><block_end><if_stmt>rtrVarBinds<block_start>snmpEngine=context['snmpEngine']<line_sep># Need to unwind stack, can't recurse any more <def_stmt>callLater *args<block_start>snmpEngine.transportDispatcher.unregisterTimerCbFun(callLater)<line_sep>mgmtFun=context['mgmtFun']<line_sep>mgmtFun(*varBinds **context)<block_end>snmpEngine.transportDispatcher.registerTimerCbFun(callLater 0.01)<block_end><else_stmt><block_start><return>rspVarBinds<block_end><block_end><def_stmt>completeMgmtOperation self varBinds **context<block_start>rspVarBinds=self._getManagedObjectsInstances(varBinds **context)<if_stmt>rspVarBinds<block_start>CommandResponderBase.completeMgmtOperation(self rspVarBinds **context)<block_end><block_end><def_stmt>initiateMgmtOperation self snmpEngine stateReference contextName PDU<block_start>varBinds=v2c.apiPDU.getVarBinds(PDU)<line_sep>mgmtFun=self._getMgmtFun(contextName)<line_sep>context=dict(snmpEngine=snmpEngine stateReference=stateReference acFun=self.verifyAccess cbFun=self.completeMgmtOperation cbCtx=self.cbCtx rspVarBinds=varBinds[:] varBindsMap={} mgmtFun=mgmtFun)<line_sep>context.update(self._storeAccessContext(snmpEngine))<line_sep>mgmtFun(*varBinds **context)<block_end><block_end><class_stmt>BulkCommandResponder(NextCommandResponder)<block_start>SUPPORTED_PDU_TYPES=(rfc1905.GetBulkRequestPDU.tagSet )<line_sep>MAX_VAR_BINDS=64<def_stmt>_completeNonRepeaters self varBinds **context<block_start>mgmtFun=context['mgmtFun']<if_stmt><not>varBinds# No non-repeaters requested, proceed with repeaters <block_start>mgmtFun(*context['reqVarBinds'] **dict(context cbFun=self.completeMgmtOperation varBinds=context['reqVarBinds'][:]))<line_sep><return><block_end>rspVarBinds=self._getManagedObjectsInstances(varBinds **context)<if_stmt>rspVarBinds<block_start>context['allVarBinds'].extend(rspVarBinds)<if_stmt>context['counters']['M']<and>context['counters']['R']<block_start>rspVarBinds=self._getManagedObjectsInstances(varBinds **context)<if_stmt>rspVarBinds# Done with non-repeaters, proceed with repeaters <block_start>mgmtFun(*context['reqVarBinds'] **dict(context cbFun=self.completeMgmtOperation varBindsMap={} rspVarBinds=context['reqVarBinds'][:]))<line_sep><return><block_end><block_end><else_stmt><block_start>CommandResponderBase.completeMgmtOperation(self context['allVarBinds'] 
**context)<block_end><block_end><block_end><def_stmt>completeMgmtOperation self varBinds **context<block_start>rspVarBinds=self._getManagedObjectsInstances(varBinds **context)<if_stmt>rspVarBinds<block_start>context['counters']['M']<augsub>1<line_sep>context['allVarBinds'].extend(rspVarBinds)<line_sep>eom=all(exval.endOfMibView.isSameTypeWith(value)<for>name,value rspVarBinds)<if_stmt><not>eom<and>context['counters']['M']<and>context['counters']['R']<block_start>snmpEngine=context['snmpEngine']<line_sep># Need to unwind stack, can't recurse any more <def_stmt>callLater *args<block_start>snmpEngine.transportDispatcher.unregisterTimerCbFun(callLater)<line_sep>mgmtFun=context['mgmtFun']<line_sep>reqVarBinds=varBinds[-context['counters']['R']:]<line_sep>mgmtFun(*reqVarBinds **dict(context cbFun=self.completeMgmtOperation varBindsMap={} rspVarBinds=reqVarBinds[:]))<block_end>snmpEngine.transportDispatcher.registerTimerCbFun(callLater 0.01)<block_end><else_stmt><block_start>CommandResponderBase.completeMgmtOperation(self context['allVarBinds'] **context)<block_end><block_end><block_end># rfc1905: 4.2.3 <def_stmt>initiateMgmtOperation self snmpEngine stateReference contextName PDU<block_start>nonRepeaters=v2c.apiBulkPDU.getNonRepeaters(PDU)<if_stmt>nonRepeaters<l>0<block_start>nonRepeaters=0<block_end>maxRepetitions=v2c.apiBulkPDU.getMaxRepetitions(PDU)<if_stmt>maxRepetitions<l>0<block_start>maxRepetitions=0<block_end>varBinds=v2c.apiPDU.getVarBinds(PDU)<line_sep>N=min(int(nonRepeaters) len(varBinds))<line_sep>M=int(maxRepetitions)<line_sep>R=max(len(varBinds)-N 0)<if_stmt>R<block_start>M=min(M self.MAX_VAR_BINDS<floordiv>R)<block_end>debug.logger&debug.FLAG_APP<and>debug.logger('initiateMgmtOperation: N %d, M %d, R %d'%(N M R))<line_sep>mgmtFun=self._getMgmtFun(contextName)<line_sep>context=dict(snmpEngine=snmpEngine stateReference=stateReference contextName=contextName acFun=self.verifyAccess cbFun=self._completeNonRepeaters cbCtx=self.cbCtx reqVarBinds=varBinds[N:] counters={'M':M 'R':R} rspVarBinds=varBinds[N:] allVarBinds=[] varBindsMap={} mgmtFun=mgmtFun)<line_sep>context.update(self._storeAccessContext(snmpEngine))<line_sep>mgmtFun(*varBinds[:N] **context)<block_end><block_end><class_stmt>SetCommandResponder(CommandResponderBase)<block_start>SUPPORTED_PDU_TYPES=(rfc1905.SetRequestPDU.tagSet )<line_sep>SMI_ERROR_MAP=CommandResponderBase.SMI_ERROR_MAP.copy()<line_sep># turn missing OIDs into access denial SMI_ERROR_MAP[pysnmp.smi.error.NoSuchObjectError]='notWritable'<line_sep>SMI_ERROR_MAP[pysnmp.smi.error.NoSuchInstanceError]='notWritable'<line_sep># rfc1905: 4.2.5.1-13 <def_stmt>_getMgmtFun self contextName<block_start><return>self.snmpContext.getMibInstrum(contextName).writeMibObjects<block_end><block_end>
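For orientation, this is how these responder classes are normally attached to an SNMP engine in a pysnmp agent. This is a fragment only, paraphrased from the pysnmp agent examples: the import paths are assumptions about where this module lives, and transport, community and VACM setup are omitted, so by itself it does not serve requests.

from pysnmp.entity import engine
from pysnmp.entity.rfc3413 import context

snmpEngine = engine.SnmpEngine()
snmpContext = context.SnmpContext(snmpEngine)

# Each responder registers itself for its PDU types against the context engine id
GetCommandResponder(snmpEngine, snmpContext)
NextCommandResponder(snmpEngine, snmpContext)
BulkCommandResponder(snmpEngine, snmpContext)
SetCommandResponder(snmpEngine, snmpContext)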
"""Loss ops."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_from_stmt>common.ops activation_ops<def_stmt>softmax_entropy logits dims=-1<block_start>"""Softmax entropy from logits."""<line_sep>plogp=activation_ops.softmax(logits dims)<times>activation_ops.log_softmax(logits dims)<line_sep><return>-tf.reduce_sum(plogp dims)<block_end><def_stmt>softmax_cross_entropy labels logits dims=-1<block_start>"""Softmax entropy from logits."""<line_sep>plogp=labels<times>activation_ops.log_softmax(logits dims)<line_sep><return>-tf.reduce_sum(plogp dims)<block_end><def_stmt>softmax_kl_divergence labels logits dims=-1 epsilon=1e-6<block_start>"""Softmax entropy from logits."""<line_sep>plogp=labels<times>(tf.log(labels)-activation_ops.log_softmax(logits dims))<line_sep><return>tf.reduce_sum(plogp dims)<block_end><def_stmt>gaussian_kl q p=(0. 0.)<block_start>"""Computes the KL divergence between two isotropic Gaussian distributions. Args: q: A tuple (mu, log_sigma_sq) representing a multi-variatie Gaussian. p: A tuple (mu, log_sigma_sq) representing a multi-variatie Gaussian. Returns: A tensor representing KL(q, p). """<line_sep>mu1,log_sigma1_sq=q<line_sep>mu2,log_sigma2_sq=p<line_sep><return>tf.reduce_sum(0.5<times>(log_sigma2_sq-log_sigma1_sq+tf.exp(log_sigma1_sq-log_sigma2_sq)+tf.square(mu1-mu2)/tf.exp(log_sigma2_sq)-1) axis=-1)<block_end><def_stmt>gan_loss x gz discriminator<block_start>"""Original GAN loss. Args: x: Batch of real samples. gz: Batch of generated samples. discriminator: Discriminator function. Returns: d_loss: Discriminator loss. g_loss: Generator loss. """<line_sep>dx=discriminator(x)<with_stmt>tf.variable_scope(tf.get_variable_scope() reuse=<true>)<block_start>dgz=discriminator(gz)<block_end>d_loss=-tf.reduce_mean(tf.log_sigmoid(dx)+tf.log_sigmoid(1-dgz))<line_sep>g_loss=-tf.reduce_mean(tf.log_sigmoid(dgz))<line_sep><return>d_loss g_loss<block_end><def_stmt>lsgan_loss x gz discriminator<block_start>"""LS-GAN loss. Args: x: Batch of real samples. gz: Batch of generated samples. discriminator: Discriminator function. Returns: d_loss: Discriminator loss. g_loss: Generator loss. """<line_sep>dx=discriminator(x)<with_stmt>tf.variable_scope(tf.get_variable_scope() reuse=<true>)<block_start>dgz=discriminator(gz)<block_end>d_loss=tf.reduce_mean(tf.square(dx-1.0)+tf.square(dgz))<line_sep>g_loss=tf.reduce_mean(tf.square(dgz-1.0))<line_sep><return>d_loss g_loss<block_end><def_stmt>wgan_loss x gz discriminator beta=10.0<block_start>"""Improved Wasserstein GAN loss. Args: x: Batch of real samples. gz: Batch of generated samples. discriminator: Discriminator function. beta: Regualarizer factor. Returns: d_loss: Discriminator loss. g_loss: Generator loss. """<line_sep>dx=discriminator(x)<with_stmt>tf.variable_scope(tf.get_variable_scope() reuse=<true>)<block_start>dgz=discriminator(gz)<block_end>batch_size=tf.shape(x)[0]<line_sep>alpha=tf.random_uniform([batch_size])<line_sep>xhat=x<times>alpha+gz<times>(1-alpha)<with_stmt>tf.variable_scope(tf.get_variable_scope() reuse=<true>)<block_start>dxhat=discriminator(xhat)<block_end>gnorm=tf.norm(tf.gradients(dxhat xhat)[0])<line_sep>d_loss=-tf.reduce_mean(dx-dgz-beta<times>tf.square(gnorm-1))<line_sep>g_loss=-tf.reduce_mean(dgz)<line_sep><return>d_loss g_loss<block_end>
<import_stmt>numpy<as>np<import_stmt>nms<import_stmt>time<import_from_stmt>yolo_utils process_all_yolo_layers apply_nms<import_from_stmt>xfdnn.rt xdnn_io<def_stmt>correct_region_boxes boxes_array x_idx y_idx w_idx h_idx w h net_w net_h<block_start>new_w=0<line_sep>new_h=0<line_sep>#print "x_idx, y_idx, w_idx, h_idx, w, h, net_w, net_h", x_idx, y_idx, w_idx, h_idx, w, h, net_w, net_h <if_stmt>((float(net_w)/float(w))<l>(float(net_h)/float(h)))<block_start>new_w=net_w<line_sep>new_h=(h<times>net_w)/w<block_end><else_stmt><block_start>new_w=(w<times>net_h)/h<line_sep>new_h=net_h<block_end>boxes_array[: x_idx]=(boxes_array[: x_idx]-(net_w-new_w)/2.0/net_w)/(float(new_w)/net_w)<line_sep>boxes_array[: y_idx]=(boxes_array[: y_idx]-(net_h-new_h)/2.0/net_h)/(float(new_h)/net_h)<line_sep>boxes_array[: w_idx]<augmul>float(net_w)/float(new_w)<line_sep>boxes_array[: h_idx]<augmul>float(net_h)/float(new_h)<line_sep><return>boxes_array<block_end># simple HWC->CHW and mean subtraction/scaling # returns tensor ready for fpga execute <def_stmt>det_preprocess image dest net_h net_w#print "in image for preprosessing:", image.shape, image <block_start>dummy_dest,s=xdnn_io.loadYoloImageBlobFromFile(image net_h net_w)<line_sep>dest[<ellipsis>]=dummy_dest<line_sep>#print " prep image:", dest.shape, dest <block_end># takes dict of two outputs from XDNN, pixel-conv and bb-output # returns bounding boxes <def_stmt>det_postprocess fpgaOutput config image_shape#print fpgaOutput[0].shape , fpgaOutput[1].shape, config['classes'], config['anchorCnt'], config['net_w'], config['net_h'] <block_start>out_yolo_layers=process_all_yolo_layers(fpgaOutput config['classes'] config['anchorCnt'] config['net_w'] config['net_h'])<line_sep>anchorCnt=config['anchorCnt']<line_sep>classes=config['classes']<line_sep>num_proposals_layer=[0]<line_sep>total_proposals=0<for_stmt>layr_idx range(len(out_yolo_layers))<block_start>yolo_layer_shape=out_yolo_layers[layr_idx].shape<line_sep>#print "layr_idx , yolo_layer_shape", layr_idx , yolo_layer_shape out_yolo_layers[layr_idx]=out_yolo_layers[layr_idx].reshape(yolo_layer_shape[0] anchorCnt (5+classes) yolo_layer_shape[2]<times>yolo_layer_shape[3])<line_sep>out_yolo_layers[layr_idx]=out_yolo_layers[layr_idx].transpose(0 3 1 2)<line_sep>out_yolo_layers[layr_idx]=out_yolo_layers[layr_idx].reshape(yolo_layer_shape[0] yolo_layer_shape[2]<times>yolo_layer_shape[3]<times>anchorCnt (5+classes))<line_sep>#print "layr_idx, final in layer sape, outlayer shape", layr_idx, yolo_layer_shape, out_yolo_layers[layr_idx].shape total_proposals<augadd>yolo_layer_shape[2]<times>yolo_layer_shape[3]<times>anchorCnt<line_sep>num_proposals_layer.append(total_proposals)<block_end>boxes_array=np.empty([config['batch_sz'] total_proposals (5+classes)])<for_stmt>layr_idx range(len(out_yolo_layers))<block_start>proposal_st=num_proposals_layer[layr_idx]<line_sep>proposal_ed=num_proposals_layer[layr_idx+1]<line_sep>#print "proposal_st proposal_ed", proposal_st, proposal_ed boxes_array[: proposal_st:proposal_ed :]=out_yolo_layers[layr_idx][<ellipsis>]<block_end>bboxlist_for_images=[]<for_stmt>i range(config['batch_sz'])<block_start>boxes_array[i : :]=correct_region_boxes(boxes_array[i : :] 0 1 2 3 float(image_shape[i][1]) float(image_shape[i][0]) float(config['net_w']) float(config['net_h']))<line_sep>detected_boxes=apply_nms(boxes_array[i : :] classes config['scorethresh'] config['iouthresh'])<line_sep>bboxlist=[]<for_stmt>det_idx range(len(detected_boxes))#print detected_boxes[det_idx][0], detected_boxes[det_idx][1], 
detected_boxes[det_idx][2], detected_boxes[det_idx][3], config['names'][detected_boxes[det_idx][4]], detected_boxes[det_idx][5] <block_start>bboxlist.append({'classid':detected_boxes[det_idx][4] 'prob':detected_boxes[det_idx][5] 'll':{'x':int((detected_boxes[det_idx][0]-0.5<times>detected_boxes[det_idx][2])<times>image_shape[i][1]) 'y':int((detected_boxes[det_idx][1]+0.5<times>detected_boxes[det_idx][3])<times>image_shape[i][0])} 'ur':{'x':int((detected_boxes[det_idx][0]+0.5<times>detected_boxes[det_idx][2])<times>image_shape[i][1]) 'y':int((detected_boxes[det_idx][1]-0.5<times>detected_boxes[det_idx][3])<times>image_shape[i][0])}})<block_end>bboxlist_for_images.append(bboxlist)<block_end><return>bboxlist_for_images<block_end>
<import_from_stmt>DIE.Lib.DataPluginBase DataPluginBase<import_stmt>idc<import_stmt>idaapi<line_sep># TODO: Add more string types. ASCII_STR=0# ASCII String UNICODE_STR=1# Unicode String <class_stmt>StringParser(DataPluginBase)<block_start>""" A generic string value parser """<def_stmt>__init__ self<block_start>super(StringParser self).__init__()<block_end><def_stmt>registerSupportedTypes self<block_start>""" Register string types @return: """<line_sep>self.addSuportedType("LPCSTR" ASCII_STR)<line_sep>self.addSuportedType("CHAR *" ASCII_STR)<line_sep>self.addSuportedType("CONST CHAR *" ASCII_STR)<line_sep>self.addSuportedType("LPSTR" ASCII_STR)<line_sep>self.addSuportedType("LPCWSTR" UNICODE_STR)<line_sep>self.addSuportedType("LPWSTR" UNICODE_STR)<line_sep>self.setPluginType("String")<block_end><def_stmt>guessValues self rawValue<block_start>""" Guess string values """<line_sep>minLength=5# The minimal string length value=idc.GetString(rawValue strtype=idc.ASCSTR_C)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "ASCII C-String" raw_value)<block_end>value=idc.GetString(rawValue strtype=idc.ASCSTR_UNICODE)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "Ascii Unicode String" raw_value)<block_end>value=idc.GetString(rawValue strtype=idaapi.ASCSTR_PASCAL)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "Ascii Pascal string" raw_value)<block_end>value=idc.GetString(rawValue strtype=idaapi.ASCSTR_LEN2)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "Ascii String (Len2)" raw_value)<block_end>value=idc.GetString(rawValue strtype=idaapi.ASCSTR_LEN4)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "Ascii String (Len4)" raw_value)<block_end>value=idc.GetString(rawValue strtype=idaapi.ASCSTR_ULEN2)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "Ascii String (ULen2)" raw_value)<block_end>value=idc.GetString(rawValue strtype=idaapi.ASCSTR_ULEN4)<if_stmt>value<and>len(value)<ge>minLength<block_start>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 1 "Ascii String (ULen4)" raw_value)<block_end><block_end><def_stmt>matchType self type<block_start>""" Check if given type is of a string type @param type: IDA type_info_t object @return: True if given type is a string type otherwise False """<line_sep><return>self.checkSupportedType(type)<block_end><def_stmt>parseValue self rawValue<block_start>""" Parse the string value @return: """<if_stmt>self.type_params<eq>ASCII_STR<block_start>value=idc.GetString(rawValue strtype=idc.ASCSTR_C)<line_sep>description="ASCII C-String"<block_end><elif_stmt>self.type_params<eq>UNICODE_STR<block_start>value=idc.GetString(rawValue strtype=idc.ASCSTR_UNICODE)<line_sep>description="Unicode String"<block_end><else_stmt><block_start><return><block_end>value,raw_value=self.normalize_raw_value(value)<line_sep>self.addParsedvalue(value 0 description raw_value)<block_end><def_stmt>normalize_raw_value self value<block_start>""" Normalize value. 
@param value: value to normalize @return: a tuple (Normalized_Value, Raw_value) """<if_stmt>value<is><not><none><block_start>raw_value="0x%s"%value.encode("hex")<line_sep>value=repr(value)<line_sep><return>(value raw_value)<block_end><return>(<none> <none>)<block_end><block_end>
<import_from_stmt>.extractor Extractor<line_sep>
<import_from_stmt>django.conf settings<import_from_stmt>django.test TestCase<import_from_stmt>devices.enums PasswordAlgorithm<import_from_stmt>devices.models Platform<class_stmt>PlatformTest(TestCase)<block_start>@classmethod<def_stmt>setUpTestData cls<block_start>cls.platforms=[Platform(name="Mercuros" slug="mercuros" password_algorithm=PasswordAlgorithm.JUNIPER_TYPE9 ) Platform(name="Test OS" slug="test-os" password_algorithm=PasswordAlgorithm.CISCO_TYPE7 ) Platform(name="Wrong OS" slug="wrong-os") ]<line_sep>Platform.objects.bulk_create(cls.platforms)<block_end><def_stmt>test_password_encryption_decryption self<block_start>clear_text_password="<PASSWORD>"<line_sep>junos=Platform.objects.filter(password_algorithm=PasswordAlgorithm.JUNIPER_TYPE9).first()<line_sep>encrypted_password=junos.encrypt_password(clear_text_password)<line_sep>self.assertNotEqual(clear_text_password encrypted_password)<line_sep>self.assertEqual(clear_text_password junos.decrypt_password(encrypted_password))<line_sep>cisco=Platform.objects.filter(password_algorithm=PasswordAlgorithm.CISCO_TYPE7).first()<line_sep>encrypted_password=cisco.encrypt_password(clear_text_password)<line_sep>self.assertNotEqual(clear_text_password encrypted_password)<line_sep>self.assertEqual(clear_text_password cisco.decrypt_password(encrypted_password))<line_sep>wrong=Platform.objects.filter(password_algorithm="").first()<line_sep>encrypted_password=wrong.encrypt_password(clear_text_password)<line_sep>self.assertEqual(clear_text_password encrypted_password)<line_sep>self.assertEqual(clear_text_password wrong.decrypt_password(encrypted_password))<block_end><block_end>
'''Autogenerated by xml_generate script, do not edit!'''<import_from_stmt>OpenGL platform<as>_p arrays<line_sep># Code generation uses this <import_from_stmt>OpenGL.raw.GLES2 _types<as>_cs<line_sep># End users want this... <import_from_stmt>OpenGL.raw.GLES2._types *<import_from_stmt>OpenGL.raw.GLES2 _errors<import_from_stmt>OpenGL.constant Constant<as>_C<import_stmt>ctypes<line_sep>_EXTENSION_NAME='GLES2_OES_texture_storage_multisample_2d_array'<def_stmt>_f function<block_start><return>_p.createFunction(function _p.PLATFORM.GLES2 'GLES2_OES_texture_storage_multisample_2d_array' error_checker=_errors._error_checker)<block_end>GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES=_C('GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES' 0x910C)<line_sep>GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES=_C('GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES' 0x910B)<line_sep>GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES=_C('GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES' 0x9102)<line_sep>GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES=_C('GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES' 0x9105)<line_sep>GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES=_C('GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES' 0x910D)<line_sep>@_f@_p.types(<none> _cs.GLenum _cs.GLsizei _cs.GLenum _cs.GLsizei _cs.GLsizei _cs.GLsizei _cs.GLboolean)<def_stmt>glTexStorage3DMultisampleOES target samples internalformat width height depth fixedsamplelocations<block_start><pass><block_end>
<import_from_future_stmt> print_function<import_stmt>sys<line_sep>sys.path.insert(1 "../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<import_stmt>pandas<as>pd<def_stmt>whichmaxmin #Make H2O frame <block_start>f1=h2o.create_frame(rows=10000 cols=100 categorical_fraction=0 missing_fraction=0 seed=1234)<line_sep>#Make comparable pandas frame f2=f1.as_data_frame(use_pandas=<true>)<line_sep>############################################################# #Col wise max which_max_col=f1.idxmax()<line_sep>which_max_col=which_max_col.transpose()<line_sep>which_max_col_pd=f2.idxmax(axis=0)<line_sep>which_max_col_pd=h2o.H2OFrame(pd.DataFrame(which_max_col_pd columns=["C1"]))<line_sep>diff_max_col_idx=which_max_col-which_max_col_pd<assert_stmt>diff_max_col_idx.sum()<eq>0<line_sep>#Col wise min which_min_col=f1.idxmin()<line_sep>which_min_col=which_min_col.transpose()<line_sep>which_min_col_pd=f2.idxmin(axis=0)<line_sep>which_min_col_pd=h2o.H2OFrame(pd.DataFrame(which_min_col_pd columns=["C1"]))<line_sep>diff_min_col_idx=which_min_col-which_min_col_pd<assert_stmt>diff_min_col_idx.sum()<eq>0<line_sep>############################################################# #Row wise max which_max_row=f1.idxmax(axis=1)<line_sep>which_max_row_pd=f2.idxmax(axis=1)<line_sep>which_max_row_pd=h2o.H2OFrame(pd.DataFrame(which_max_row_pd columns=["C1"]))<line_sep>which_max_row_pd=which_max_row_pd.ascharacter().lstrip("C").asnumeric()-1#Had to clean up before comparison (indexing was +1) diff_max_row_idx=which_max_row-which_max_row_pd<assert_stmt>diff_max_row_idx.sum()<eq>0<line_sep>#Row wise min which_min_row=f1.idxmin(axis=1)<line_sep>which_min_row_pd=f2.idxmin(axis=1)<line_sep>which_min_row_pd=h2o.H2OFrame(pd.DataFrame(which_min_row_pd columns=["C1"]))<line_sep>which_min_row_pd=which_min_row_pd.ascharacter().lstrip("C").asnumeric()-1#Had to clean up before comparison (indexing was +1) diff_min_row_idx=which_min_row-which_min_row_pd<assert_stmt>diff_min_row_idx.sum()<eq>0<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(whichmaxmin)<block_end><else_stmt><block_start>whichmaxmin()<block_end>
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the Apache 2.0 License. # See the LICENSE file in the project root for more information. <import_stmt>os<import_stmt>unittest<import_from_stmt>iptest IronPythonTestCase run_test<class_stmt>ExecFileTest(IronPythonTestCase)<block_start><def_stmt>test_sanity self<block_start>root=self.test_dir<line_sep>execfile(os.path.join(root "Inc" "toexec.py"))<line_sep>execfile(os.path.join(root "Inc" "toexec.py"))<line_sep>#execfile(root + "/doc.py") execfile(os.path.join(root "Inc" "toexec.py"))<block_end><def_stmt>test_negative self<block_start>self.assertRaises(TypeError execfile <none>)# arg must be string self.assertRaises(TypeError execfile [])<line_sep>self.assertRaises(TypeError execfile 1)<line_sep>self.assertRaises(TypeError execfile "somefile" "")<block_end><def_stmt>test_scope self<block_start>root=self.test_dir<line_sep>z=10<line_sep>execfile(os.path.join(root "Inc" "execfile_scope.py"))<block_end><block_end>run_test(__name__)<line_sep>
# Problem statement # write a program to swap two numbers without using third variable x=input()<line_sep>y=input()<line_sep>print("Before swapping: ")<line_sep>print("Value of x : " x " and y : " y)<line_sep>x,y=y x<line_sep>print("After swapping: ")<line_sep>print("Value of x : " x " and y : " y)<line_sep># sample input # 10 # 20 # sample output # Before swapping: # Value of x : 10 and y : 20 # After swapping: # Value of x : 20 and y : 10 # Time complexity : O(1) # space complexity : O(1)
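# illustrative sketch, not part of the original exercise: the same "no third variable" swap # done with arithmetic instead of tuple unpacking; assumes numeric values, so the # input() results above would first need int() a=10<line_sep>b=20<line_sep>a=a+b<line_sep>b=a-b<line_sep>a=a-b<line_sep>print("After arithmetic swap: ")<line_sep>print("Value of a : " a " and b : " b)<line_sep>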
# coding=utf-8 """ Tests for deepreg/dataset/loader/interface.py """<import_from_stmt>test.unit.util is_equal_np<import_from_stmt>typing Optional Tuple<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>deepreg.dataset.loader.interface AbstractPairedDataLoader AbstractUnpairedDataLoader DataLoader FileLoader GeneratorDataLoader <import_from_stmt>deepreg.dataset.loader.nifti_loader NiftiFileLoader<import_from_stmt>deepreg.dataset.loader.paired_loader PairedDataLoader<import_from_stmt>deepreg.dataset.loader.util normalize_array<class_stmt>TestDataLoader<block_start>@pytest.mark.parametrize("labeled,num_indices,sample_label,seed" [(<true> 1 "all" 0) (<false> 1 "all" 0) (<none> 1 "all" 0) (<true> 1 "sample" 0) (<true> 1 "all" 0) (<true> 1 <none> 0) (<true> 1 "sample" <none>) ] )<def_stmt>test_init self labeled num_indices sample_label seed<block_start>""" Test init function of DataLoader class :param labeled: bool :param num_indices: int :param sample_label: str :param seed: float/int/None :return: """<line_sep>DataLoader(labeled=labeled num_indices=num_indices sample_label=sample_label seed=seed )<line_sep>data_loader=DataLoader(labeled=labeled num_indices=num_indices sample_label=sample_label seed=seed )<with_stmt>pytest.raises(NotImplementedError)<block_start>data_loader.moving_image_shape<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>data_loader.fixed_image_shape<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>data_loader.num_samples<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>data_loader.get_dataset()<block_end>data_loader.close()<block_end>@pytest.mark.parametrize("labeled,moving_shape,fixed_shape,batch_size,data_augmentation" [(<true> (9 9 9) (9 9 9) 1 {}) (<true> (9 9 9) (15 15 15) 1 {"data_augmentation":{"name":"affine"}} ) (<true> (9 9 9) (15 15 15) 1 {"data_augmentation":[{"name":"affine"} {"name":"ddf" "field_strength":1 "low_res_size":(3 3 3) } ] } ) ] )<def_stmt>test_get_dataset_and_preprocess self labeled moving_shape fixed_shape batch_size data_augmentation<block_start>""" Test get_transforms() function. For that, an Abstract Data Loader is created only to set the moving and fixed shapes that are used in get_transforms(). Here we test that the get_transform() returns a function and the shape of the output of this function. See test_preprocess.py for more testing regarding the concrete params. :param labeled: bool :param moving_shape: tuple :param fixed_shape: tuple :param batch_size: total number of samples consumed per step, over all devices. 
:param data_augmentation: dict :return: """<line_sep>data_dir_path=["data/test/nifti/paired/train" "data/test/nifti/paired/test" ]<line_sep>common_args=dict(file_loader=NiftiFileLoader labeled=<true> sample_label="all" seed=<none>)<line_sep>data_loader=PairedDataLoader(data_dir_paths=data_dir_path fixed_image_shape=fixed_shape moving_image_shape=moving_shape **common_args )<line_sep>dataset=data_loader.get_dataset_and_preprocess(training=<true> batch_size=batch_size repeat=<true> shuffle_buffer_num_batch=1 **data_augmentation )<for_stmt>outputs dataset.take(1)<block_start><assert_stmt>(outputs["moving_image"].shape<eq>(batch_size )+data_loader.moving_image_shape)<assert_stmt>(outputs["fixed_image"].shape<eq>(batch_size )+data_loader.fixed_image_shape)<assert_stmt>(outputs["moving_label"].shape<eq>(batch_size )+data_loader.moving_image_shape)<assert_stmt>(outputs["fixed_label"].shape<eq>(batch_size )+data_loader.fixed_image_shape)<block_end><block_end><block_end><def_stmt>test_abstract_paired_data_loader <block_start>""" Test the functions in AbstractPairedDataLoader """<line_sep>moving_image_shape=(8 8 4)<line_sep>fixed_image_shape=(6 6 4)<line_sep># test init invalid shape <with_stmt>pytest.raises(ValueError)<as>err_info<block_start>AbstractPairedDataLoader(moving_image_shape=(2 2) fixed_image_shape=(3 3) labeled=<true> sample_label="sample" )<block_end><assert_stmt>"moving_image_shape and fixed_image_shape have length of three"<in>str(err_info.value)<line_sep># test init valid shapes data_loader=AbstractPairedDataLoader(moving_image_shape=moving_image_shape fixed_image_shape=fixed_image_shape labeled=<true> sample_label="sample" )<line_sep># test properties <assert_stmt>data_loader.num_indices<eq>2<assert_stmt>data_loader.moving_image_shape<eq>moving_image_shape<assert_stmt>data_loader.fixed_image_shape<eq>fixed_image_shape<assert_stmt>data_loader.num_samples<is><none><block_end><def_stmt>test_abstract_unpaired_data_loader <block_start>""" Test the functions in AbstractUnpairedDataLoader """<line_sep>image_shape=(8 8 4)<line_sep># test init invalid shape <with_stmt>pytest.raises(ValueError)<as>err_info<block_start>AbstractUnpairedDataLoader(image_shape=(2 2) labeled=<true> sample_label="sample")<block_end><assert_stmt>"image_shape has to be length of three"<in>str(err_info.value)<line_sep># test init valid shapes data_loader=AbstractUnpairedDataLoader(image_shape=image_shape labeled=<true> sample_label="sample")<line_sep># test properties <assert_stmt>data_loader.num_indices<eq>3<assert_stmt>data_loader.moving_image_shape<eq>image_shape<assert_stmt>data_loader.fixed_image_shape<eq>image_shape<assert_stmt>data_loader.num_samples<is><none><block_end><def_stmt>get_arr shape:Tuple=(2 3 4) seed:Optional[int]=<none><arrow>np.ndarray<block_start>""" Return a random array. :param shape: shape of array. :param seed: random seed. :return: random array. """<line_sep>np.random.seed(seed)<line_sep><return>np.random.random(size=shape).astype(np.float32)<block_end><class_stmt>TestGeneratorDataLoader<block_start>@pytest.mark.parametrize("labeled" [<true> <false>])<def_stmt>test_get_labeled_dataset self labeled:bool<block_start>""" Test get_dataset with data loader. :param labeled: labeled data or not. 
"""<line_sep>sample={"moving_image":get_arr() "fixed_image":get_arr() "indices":[1] }<if_stmt>labeled<block_start>sample={"moving_label":get_arr() "fixed_label":get_arr() **sample }<block_end><def_stmt>mock_gen <block_start>"""Toy data generator."""<for_stmt>_ range(3)<block_start><yield>sample<block_end><block_end>loader=GeneratorDataLoader(labeled=labeled num_indices=1 sample_label="all")<line_sep>loader.__setattr__("data_generator" mock_gen)<line_sep>dataset=loader.get_dataset()<for_stmt>got dataset.as_numpy_iterator()<block_start><assert_stmt>all(is_equal_np(got[key] sample[key])<for>key sample.keys())<block_end><block_end>@pytest.mark.parametrize("labeled" [<true> <false>])<def_stmt>test_data_generator self labeled:bool<block_start>""" Test data_generator() :param labeled: labeled data or not. """<class_stmt>MockDataLoader<block_start>"""Toy data loader."""<def_stmt>__init__ self seed:int<block_start>""" Init. :param seed: random seed for numpy. :param kwargs: additional arguments. """<line_sep>self.seed=seed<block_end><def_stmt>get_data self index:int<arrow>np.ndarray<block_start>""" Return the dummy array despite of the index. :param index: not used :return: dummy array. """<assert_stmt>isinstance(index int)<line_sep><return>get_arr(seed=self.seed)<block_end><block_end><def_stmt>mock_sample_index_generator <block_start>"""Toy sample index generator."""<line_sep><return>[[1 1 [1]]]<block_end>loader=GeneratorDataLoader(labeled=labeled num_indices=1 sample_label="all")<line_sep>loader.__setattr__("sample_index_generator" mock_sample_index_generator)<line_sep>loader.loader_moving_image=MockDataLoader(seed=0)<line_sep>loader.loader_fixed_image=MockDataLoader(seed=1)<if_stmt>labeled<block_start>loader.loader_moving_label=MockDataLoader(seed=2)<line_sep>loader.loader_fixed_label=MockDataLoader(seed=3)<block_end># check data loader output got=next(loader.data_generator())<line_sep>expected={"moving_image":normalize_array(get_arr(seed=0)) "fixed_image":normalize_array(get_arr(seed=1)) # 0 or -1 is the label index "indices":np.array([1 0]<if>labeled<else>[1 -1] dtype=np.float32) }<if_stmt>labeled<block_start>expected={"moving_label":get_arr(seed=2) "fixed_label":get_arr(seed=3) **expected }<block_end><assert_stmt>all(is_equal_np(got[key] expected[key])<for>key expected.keys())<block_end><def_stmt>test_sample_index_generator self<block_start>loader=GeneratorDataLoader(labeled=<true> num_indices=1 sample_label="all")<with_stmt>pytest.raises(NotImplementedError)<block_start>loader.sample_index_generator()<block_end><block_end>@pytest.mark.parametrize(("moving_image_shape" "fixed_image_shape" "moving_label_shape" "fixed_label_shape" "err_msg" ) [(<none> (10 10 10) (10 10 10) (10 10 10) "moving image and fixed image must not be None" ) ((10 10 10) <none> (10 10 10) (10 10 10) "moving image and fixed image must not be None" ) ((10 10 10) (10 10 10) <none> (10 10 10) "moving label and fixed label must be both None or non-None" ) ((10 10 10) (10 10 10) (10 10 10) <none> "moving label and fixed label must be both None or non-None" ) ((10 10) (10 10 10) (10 10 10) (10 10 10) "Sample [1]'s moving_image's shape should be 3D" ) ((10 10 10) (10 10) (10 10 10) (10 10 10) "Sample [1]'s fixed_image's shape should be 3D" ) ((10 10 10) (10 10 10) (10 10) (10 10 10) "Sample [1]'s moving_label's shape should be 3D or 4D." ) ((10 10 10) (10 10 10) (10 10 10) (10 10) "Sample [1]'s fixed_label's shape should be 3D or 4D." 
) ((10 10 10) (10 10 10) (10 10 10 2) (10 10 10 3) "Sample [1]'s moving image and fixed image "<concat>"have different numbers of labels." ) ] )<def_stmt>test_validate_images_and_labels self moving_image_shape:Optional[Tuple] fixed_image_shape:Optional[Tuple] moving_label_shape:Optional[Tuple] fixed_label_shape:Optional[Tuple] err_msg:str <block_start>""" Test error messages. :param moving_image_shape: None or tuple. :param fixed_image_shape: None or tuple. :param moving_label_shape: None or tuple. :param fixed_label_shape: None or tuple. :param err_msg: message. """<line_sep>moving_image=<none><line_sep>fixed_image=<none><line_sep>moving_label=<none><line_sep>fixed_label=<none><if_stmt>moving_image_shape<block_start>moving_image=get_arr(shape=moving_image_shape)<block_end><if_stmt>fixed_image_shape<block_start>fixed_image=get_arr(shape=fixed_image_shape)<block_end><if_stmt>moving_label_shape<block_start>moving_label=get_arr(shape=moving_label_shape)<block_end><if_stmt>fixed_label_shape<block_start>fixed_label=get_arr(shape=fixed_label_shape)<block_end>loader=GeneratorDataLoader(labeled=<true> num_indices=1 sample_label="all")<with_stmt>pytest.raises(ValueError)<as>err_info<block_start>loader.validate_images_and_labels(moving_image=moving_image fixed_image=fixed_image moving_label=moving_label fixed_label=fixed_label image_indices=[1] )<block_end><assert_stmt>err_msg<in>str(err_info.value)<block_end>@pytest.mark.parametrize("option" [0 1 2 3])<def_stmt>test_validate_images_and_labels_range self option:int<block_start>""" Test error messages related to input range. :param option: control which image to modify """<line_sep>option_to_name={0:"moving_image" 1:"fixed_image" 2:"moving_label" 3:"fixed_label" }<line_sep>input={"moving_image":get_arr() "fixed_image":get_arr() "moving_label":get_arr() "fixed_label":get_arr() }<line_sep>name=option_to_name[option]<line_sep>input[name]<augadd>1<line_sep>err_msg=f"Sample [1]'s {name}'s values are not between [0, 1]"<line_sep>loader=GeneratorDataLoader(labeled=<true> num_indices=1 sample_label="all")<with_stmt>pytest.raises(ValueError)<as>err_info<block_start>loader.validate_images_and_labels(image_indices=[1] **input )<block_end><assert_stmt>err_msg<in>str(err_info.value)<block_end><def_stmt>test_sample_image_label_unlabeled self<block_start>"""Test sample_image_label in unlabeled case."""<line_sep>loader=GeneratorDataLoader(labeled=<false> num_indices=1 sample_label="all")<line_sep>got=next(loader.sample_image_label(moving_image=get_arr(seed=0) fixed_image=get_arr(seed=1) moving_label=<none> fixed_label=<none> image_indices=[1] ))<line_sep>expected=dict(moving_image=get_arr(seed=0) fixed_image=get_arr(seed=1) indices=np.asarray([1 -1] dtype=np.float32) )<assert_stmt>all(is_equal_np(got[key] expected[key])<for>key expected.keys())<block_end>@pytest.mark.parametrize("shape" [(2 3 4) (2 3 4 1)])<def_stmt>test_sample_image_label_one_label self shape:Tuple<block_start>""" Test sample_image_label in labeled case with one label. :param shape: shape of the label. 
"""<line_sep>loader=GeneratorDataLoader(labeled=<true> num_indices=1 sample_label="all")<line_sep>got=next(loader.sample_image_label(moving_image=get_arr(shape=shape[:3] seed=0) fixed_image=get_arr(shape=shape[:3] seed=1) moving_label=get_arr(shape=shape seed=2) fixed_label=get_arr(shape=shape seed=3) image_indices=[1] ))<line_sep>expected=dict(moving_image=get_arr(shape=shape[:3] seed=0) fixed_image=get_arr(shape=shape[:3] seed=1) moving_label=get_arr(shape=shape[:3] seed=2) fixed_label=get_arr(shape=shape[:3] seed=3) indices=np.asarray([1 0] dtype=np.float32) )<assert_stmt>all(is_equal_np(got[key] expected[key])<for>key expected.keys())<block_end><def_stmt>test_sample_image_label_multiple_labels self<block_start>"""Test sample_image_label in labeled case with multiple labels."""<line_sep>loader=GeneratorDataLoader(labeled=<true> num_indices=1 sample_label="all")<line_sep>shape=(2 3 4 5)<line_sep>got_iter=loader.sample_image_label(moving_image=get_arr(shape=shape[:3] seed=0) fixed_image=get_arr(shape=shape[:3] seed=1) moving_label=get_arr(shape=shape seed=2) fixed_label=get_arr(shape=shape seed=3) image_indices=[1] )<line_sep>moving_label=get_arr(shape=shape seed=2)<line_sep>fixed_label=get_arr(shape=shape seed=3)<for_stmt>i range(shape[-1])<block_start>got=next(got_iter)<line_sep>expected=dict(moving_image=get_arr(shape=shape[:3] seed=0) fixed_image=get_arr(shape=shape[:3] seed=1) moving_label=moving_label[: : : i] fixed_label=fixed_label[: : : i] indices=np.asarray([1 i] dtype=np.float32) )<assert_stmt>all(is_equal_np(got[key] expected[key])<for>key expected.keys())<block_end><block_end><block_end><def_stmt>test_file_loader <block_start>""" Test the functions in FileLoader """<line_sep># init, no error means passed loader_grouped=FileLoader(dir_paths=["/path/grouped_loader/"] name="grouped_loader" grouped=<true>)<line_sep>loader_ungrouped=FileLoader(dir_paths=["/path/ungrouped_loader/"] name="ungrouped_loader" grouped=<false>)<line_sep># init fails with repeated paths <with_stmt>pytest.raises(ValueError)<as>err_info<block_start>FileLoader(dir_paths=["/path/ungrouped_loader/" "/path/ungrouped_loader/"] name="ungrouped_loader" grouped=<false> )<block_end><assert_stmt>"dir_paths have repeated elements"<in>str(err_info.value)<line_sep># not implemented properties / functions <with_stmt>pytest.raises(NotImplementedError)<block_start>loader_grouped.set_data_structure()<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>loader_grouped.set_group_structure()<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>loader_grouped.get_data(1)<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>loader_grouped.get_data_ids()<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>loader_grouped.get_num_images()<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>loader_grouped.close()<block_end># test grouped file loader functions <assert_stmt>loader_grouped.group_struct<is><none><line_sep># create mock group structure with nested list loader_grouped.group_struct=[[1 2] [3 4] [5 6]]<assert_stmt>loader_grouped.get_num_groups()<eq>3<assert_stmt>loader_grouped.get_num_images_per_group()<eq>[2 2 2]<with_stmt>pytest.raises(ValueError)<as>err_info<block_start>loader_grouped.group_struct=[[] [3 4] [5 6]]<line_sep>loader_grouped.get_num_images_per_group()<block_end><assert_stmt>"Groups of ID [0, 2, 2] are empty."<in>str(err_info.value)<line_sep># test ungrouped file loader 
<assert_stmt>loader_ungrouped.group_struct<is><none><with_stmt>pytest.raises(AssertionError)<block_start>loader_ungrouped.get_num_groups()<block_end><with_stmt>pytest.raises(AssertionError)<block_start>loader_ungrouped.get_num_images_per_group()<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>pyearth Earth<import_from_stmt>timeit Timer<line_sep># The robot arm example, as defined in: # Fast MARS, <NAME>, Technical Report No.110, May 1993, section 6.2. np.random.seed(2)<line_sep>nb_examples=400<line_sep>theta1=np.random.uniform(0 2<times>np.pi size=nb_examples)<line_sep>theta2=np.random.uniform(0 2<times>np.pi size=nb_examples)<line_sep>phi=np.random.uniform(-np.pi/2 np.pi/2 size=nb_examples)<line_sep>l1=np.random.uniform(0 1 size=nb_examples)<line_sep>l2=np.random.uniform(0 1 size=nb_examples)<line_sep>x=l1<times>np.cos(theta1)-l2<times>np.cos(theta1+theta2)<times>np.cos(phi)<line_sep>y=l1<times>np.sin(theta1)-l2<times>np.sin(theta1+theta2)<times>np.cos(phi)<line_sep>z=l2<times>np.sin(theta2)<times>np.sin(phi)<line_sep>d=np.sqrt(x<power>2+y<power>2+z<power>2)<line_sep>inputs=np.concatenate([theta1[: np.newaxis] theta2[: np.newaxis] phi[: np.newaxis] l1[: np.newaxis] l2[: np.newaxis]] axis=1)<line_sep>outputs=d<line_sep>hp=dict(max_degree=5 minspan=1 endspan=1 max_terms=100 allow_linear=<false> )<line_sep>model_normal=Earth(**hp)<line_sep>t=Timer(<lambda>:model_normal.fit(inputs outputs))<line_sep>duration_normal=t.timeit(number=1)<line_sep>print("Normal : MSE={0:.5f}, duration={1:.2f}s".format(model_normal.mse_ duration_normal))<line_sep>model_fast=Earth(use_fast=<true> fast_K=5 fast_h=1 **hp)<line_sep>t=Timer(<lambda>:model_fast.fit(inputs outputs))<line_sep>duration_fast=t.timeit(number=1)<line_sep>print("Fast: MSE={0:.5f}, duration={1:.2f}s".format(model_fast.mse_ duration_fast))<line_sep>speedup=duration_normal/duration_fast<line_sep>print("diagnostic : MSE goes from {0:.5f} to {1:.5f} but it "<concat>"is {2:.2f}x faster".format(model_normal.mse_ model_fast.mse_ speedup))<line_sep>
# author: <NAME> <import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>encoder Encoder<import_from_stmt>decoder Decoder<import_from_stmt>pose_decoder PoseDecoder<import_from_stmt>..utils.dropout PointCloudDropOut<import_from_stmt>..utils.effective_loss_function EffectiveLossFunction<import_from_stmt>..utils.batch_repetition repeat_tensor_for_each_element_in_batch<import_stmt>torch.nn.functional<as>F<import_from_stmt>..quaternions.operations QuaternionOperations<class_stmt>UnsupervisedPart(nn.Module)# Unsupervised model that uses ensemble of pose predictors and effective loss function <block_start><def_stmt>__init__ self image_size=128 voxel_size=64 z_dimension=1024 pose_dimensions=128 number_of_point_cloud_points=8000 number_of_pose_predictor_candidates=4 number_of_views=5<block_start>""" :param image_size: image size :param voxel_size: voxel size (after tri-linear interpolation) :param z_dimension: dimension used for encoder-decoders :param pose_dimensions: dimension used for pose decoder :param number_of_point_cloud_points: number of point cloud points used when decoding it :param number_of_pose_predictor_candidates: number of candidates from which we 'll use the best one :param number_of_views: number of image views """<line_sep>super().__init__()<line_sep>self.encoder=Encoder(image_size=image_size)<line_sep>self.decoder=Decoder(number_of_point_cloud_points=number_of_point_cloud_points hidden_dimensions=z_dimension scale=<true>)<line_sep>self.point_cloud_drop_out=PointCloudDropOut(p=0.07)<line_sep>self.effective_loss_function=EffectiveLossFunction(voxel_size=voxel_size)<line_sep>self.pose_decoder=PoseDecoder(input_dimensions=z_dimension hidden_dimensions=pose_dimensions number_of_pose_candidates=number_of_pose_predictor_candidates)<line_sep>self.number_of_views=number_of_views<line_sep>self.number_of_pose_predictor_candidates=number_of_pose_predictor_candidates<line_sep>self.encoder.apply(self.kaiming_initialization)<line_sep>self.decoder.apply(self.kaiming_initialization)<line_sep>self.pose_decoder.apply(self.kaiming_initialization)<block_end>@staticmethod<def_stmt>kaiming_initialization architecture# Kaiming initialization for encoder, decoder and pose decoder <block_start><if_stmt>isinstance(architecture (nn.Conv2d nn.Linear))<block_start>nn.init.kaiming_normal_(architecture.weight.data a=0)<block_end><block_end><def_stmt>forward self images poses<block_start>""" :param images: all images in a batch :param poses: given poses attached to images :return: new projection views, ensemble and student poses """<line_sep>encoder_image_features=self.encoder.forward(images)<line_sep>encoder_pose_features=self.encoder.forward(poses)<line_sep>point_cloud,scaling=self.decoder.forward(hidden_vector=encoder_image_features)<line_sep>poses=self.pose_decoder.forward(hidden_vector=encoder_pose_features)<line_sep># do not create ensemble of pose predictors if we aren't training <if_stmt><not>self.training<block_start>point_clouds=repeat_tensor_for_each_element_in_batch(torch_tensor=self.point_cloud_drop_out.forward(point_cloud=point_cloud) n=self.number_of_views)<line_sep>scalings=repeat_tensor_for_each_element_in_batch(torch_tensor=scaling n=self.number_of_views)<line_sep>projection=self.effective_loss_function.forward(point_cloud=point_clouds rotation=poses scale=scalings)<line_sep><return>projection poses<block_end>batch_size=images.size(0)<times>self.number_of_views<line_sep>ensemble_poses,student_poses=poses[:-batch_size] 
poses[-batch_size:]<line_sep>point_clouds=repeat_tensor_for_each_element_in_batch(torch_tensor=self.point_cloud_drop_out.forward(point_cloud=point_cloud) n=self.number_of_pose_predictor_candidates<times>self.number_of_views)<line_sep>scalings=repeat_tensor_for_each_element_in_batch(torch_tensor=scaling n=self.number_of_pose_predictor_candidates<times>self.number_of_views)<line_sep>projection=self.effective_loss_function.forward(point_cloud=point_clouds rotation=poses scale=scalings)<line_sep><return>projection ensemble_poses student_poses<block_end><block_end><class_stmt>UnsupervisedLoss(nn.Module)# Combines projection effective losses for ensemble and student loss <block_start><def_stmt>__init__ self number_of_pose_predictor_candidates=4 student_weight=20.00<block_start>super().__init__()<line_sep>self.student_weight=student_weight<line_sep>self.number_of_pose_predictor_candidates=number_of_pose_predictor_candidates<line_sep>self.minimum_indexes=<none><block_end><def_stmt>forward self predictions masks training<block_start>projection,*poses=predictions<line_sep>""" Down/up samples the input to either the given size or the given scale_factor. The algorithm used for interpolation is determined by mode. Currently temporal, spatial and volumetric sampling are supported, i.e. expected inputs are 3-D, 4-D or 5-D in shape. The input dimensions are interpreted in the form: mini-batch x channels x [optional depth] x [optional height] x width. The modes available for resizing are: nearest, linear (3D-only), bi-linear, bicubic (4D-only), tri-linear (5D-only), area. """<line_sep>masks=F.interpolate(input=masks.unsqueeze(0) scale_factor=1/2 mode="bilinear" align_corners=<true>).squeeze()<if_stmt><not>training<block_start><return>dict(projection_loss=F.mse_loss(projection masks reduction="sum")/projection.size(0))<block_end>ensemble_poses,student_poses=poses<line_sep>masks=repeat_tensor_for_each_element_in_batch(torch_tensor=masks n=self.number_of_pose_predictor_candidates)<line_sep>projection_loss=F.mse_loss(projection masks reduction="none")<line_sep>projection_loss=projection_loss.sum((1 2)).view(-1 self.number_of_pose_predictor_candidates)<line_sep>minimum_indexes=projection_loss.argmin(dim=-1).detach()<line_sep>batch_indexes=torch.arange(minimum_indexes.size(0) device=minimum_indexes.device)<line_sep># student loss minimum_projection_loss=projection_loss[batch_indexes minimum_indexes].sum()/minimum_indexes.size(0)<line_sep>ensemble_poses=ensemble_poses.view(-1 self.number_of_pose_predictor_candidates 4)<line_sep>best_poses=ensemble_poses[batch_indexes minimum_indexes :].detach()<line_sep>quaternion_operations=QuaternionOperations()<line_sep>poses_difference=F.normalize(quaternion_operations.quaternion_multiplication(q1=best_poses q2=quaternion_operations.quaternion_conjugate(q=student_poses)) dim=-1)<line_sep>angle_difference=poses_difference[: 0]<line_sep>student_loss=(1-angle_difference<power>2).sum()/minimum_indexes.size(0)<line_sep># save to print histogram self.minimum_indexes=minimum_indexes.detach()<line_sep>total_loss=minimum_projection_loss+self.student_weight<times>student_loss<line_sep><return>dict(projection_loss=minimum_projection_loss student_loss=student_loss total_loss=total_loss)<block_end><block_end>
<import_from_stmt>insights.tests context_wrap<import_from_stmt>insights.parsers.rhn_charsets RHNCharSets<line_sep>emb_charsets_content=""" server_encoding ----------------- UTF~ (1 row) client_encoding ----------------- UTF8 (1 row) """<line_sep>ora_charsets_content=""" PARAMETER VALUE --------------------------------- NLS_CHARACTERSET UTF8 NLS_NCHAR_CHARACTERSET UTF8 """<def_stmt>test_embedded_db <block_start>result=RHNCharSets(context_wrap(emb_charsets_content))<assert_stmt>result.get('server_encoding')<eq>'UTF~'<assert_stmt>result.get('client_encoding')<eq>'UTF8'<block_end><def_stmt>test_oracle_db <block_start>result=RHNCharSets(context_wrap(ora_charsets_content))<assert_stmt>result.get('NLS_CHARACTERSET')<eq>'UTF8'<assert_stmt>result.get('NLS_NCHAR_CHARACTERSET')<eq>'UTF8'<block_end>
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>cctbx omz<import_stmt>cctbx.omz.dev<import_from_stmt>cctbx.array_family flex<import_from_stmt>libtbx.test_utils approx_equal<import_from_stmt>libtbx easy_run<import_from_stmt>libtbx easy_pickle<import_from_stmt>libtbx.utils date_and_time user_plus_sys_time<import_stmt>libtbx.load_env<import_from_stmt>libtbx Auto<import_from_stmt>six.moves cStringIO<as>StringIO<import_stmt>traceback<import_stmt>sys os<import_from_stmt>six.moves range<import_from_stmt>six.moves zip<line_sep>op=os.path<def_stmt>get_master_phil max_atoms=99 f_calc_options_algorithm="*direct fft" bulk_solvent_correction=<false><block_start><return>omz.dev.get_master_phil(iteration_limit=100 show_distances_threshold=0.5 bulk_solvent_correction=bulk_solvent_correction grads_mean_sq_threshold=1e-6 f_calc_options_algorithm=f_calc_options_algorithm additional_phil_string="""\ max_atoms = %(max_atoms)s .type = int f_obs_f_calc_fan_outliers = *remove keep .type = choice .optional = False use_f_calc_as_f_obs = False .type = bool reset_u_iso = 0.05 .type = float sites_mod_short = True .type = bool optimizers = *dev ls_simple ls_lm shelxl_fm shelxl_cg shelx76 .type = choice(multi=True) ls_simple_iterations = 12 .type = int shelxl_wght = None .type = str .help = ''' SHELX-97 Manual 7-31: Refinement against F2 requires different weights to refinement against F; in particular, making all the weights equal ('unit weights'), although useful in the initial stages of refinement against F, is NEVER a sensible option for F2.''' shelxl_reset_sigmas = None .type = float shelxl_fm_iterations = 12 .type = int shelxl_cg_iterations = 12 .type = int shelx76_iterations = 12 .type = int apply_iteration_limit_to_all = False .type = bool keep_tmp_files = False .type = bool export_refined = False .type = bool pickle_refined_dir = None .type = str wdir_root = None .type = str sorting_of_pickle_files = *down up .type = choice .optional = True random_subset { size = None .type = int seed = 0 .type = int } tardy_samples { iq = None .type = int qmin = -180 .type = float qmax = 180 .type = float qstep = 3 .type = float } """%vars())<block_end><def_stmt>shelxl_weights_a_b fo_sq sigmas fc_sq osf_sq a b<block_start><assert_stmt>sigmas.size()<eq>fo_sq.size()<assert_stmt>fc_sq.size()<eq>fo_sq.size()<import_from_stmt>cctbx.xray.targets.tst_shelxl_wght_ls calc_w<assert_stmt>sigmas.all_ge(0.01)<line_sep><return>calc_w(wa=a wb=b i_obs=fo_sq i_sig=sigmas i_calc=fc_sq k=osf_sq<power>0.5)<block_end><def_stmt>shelxl_weights fo_sq sigmas fc_sq osf_sq shelxl_wght<block_start><if_stmt>(shelxl_wght<is><none>)<block_start>shelxl_wght=""<block_end>vals=[float(s)<for>s shelxl_wght.split()]<assert_stmt>len(vals)<le>6<line_sep>a,b,c,d,e,f=vals+[0.1 0 0 0 0 0.33333][len(vals):]<assert_stmt>c<eq>0<assert_stmt>d<eq>0<assert_stmt>e<eq>0<assert_stmt>f<eq>0.33333<line_sep><return>shelxl_weights_a_b(fo_sq sigmas fc_sq osf_sq a b)<block_end><def_stmt>show_cc_r1 params label f_obs xray_structure=<none> fc_abs=<none> scale_factor=Auto<block_start><assert_stmt>[xray_structure fc_abs].count(<none>)<eq>1<if_stmt>(fc_abs<is><none>)<block_start>p=params.f_calc_options<line_sep>fc_abs=f_obs.structure_factors_from_scatterers(xray_structure=xray_structure algorithm=p.algorithm cos_sin_table=p.cos_sin_table).f_calc().amplitudes()<block_end>corr=flex.linear_correlation(x=f_obs.data() y=fc_abs.data())<assert_stmt>corr.is_well_defined()<line_sep>cc=corr.coefficient()<line_sep>r1=f_obs.r1_factor(other=fc_abs 
scale_factor=scale_factor assume_index_matching=<true>)<line_sep>print("%-12s cc, r1: %.4f %.4f"%(label cc r1))<line_sep>sys.stdout.flush()<line_sep><return>fc_abs cc r1<block_end><def_stmt>run_smtbx_ls mode cod_id i_obs f_obs xray_structure params<block_start><import_stmt>smtbx.refinement<line_sep>fo_sq=i_obs<assert_stmt>fo_sq.sigmas()<is><not><none><line_sep>sel=(fo_sq.data()<eq>0)&(fo_sq.sigmas()<eq>0)<line_sep>fo_sq=fo_sq.select(~sel)<line_sep>fo_sq.select(fo_sq.sigmas()<le>0).show_array()<assert_stmt>fo_sq.sigmas().all_gt(0)<if_stmt>(1)# work around bug currently in smtbx weighting scheme implementation <block_start>fo_sq=fo_sq.customized_copy(sigmas=flex.double(fo_sq.data().size() 1))<block_end>xobs=fo_sq.as_xray_observations()<line_sep>tm=user_plus_sys_time()<line_sep>rm=smtbx.refinement.model(fo_sq=xobs xray_structure=xray_structure constraints=[] restraints_manager=smtbx.refinement.restraints.manager() weighting_scheme=smtbx.refinement.least_squares.unit_weighting())<line_sep>ls=rm.least_squares()<if_stmt>(mode<eq>"simple")<block_start><for_stmt>i_cycle range(params.ls_simple_iterations)<block_start>ls.build_up()<try_stmt><block_start>ls.solve_and_step_forward()<block_end><except_stmt>RuntimeError<as>e<block_start><if_stmt>(str(e).find("cholesky.failure")<le>0)<block_start><raise><block_end>print('Aborting run_smtbx_ls("simple"): cholesky.failure: %s'%cod_id)<line_sep><break><block_end><for_stmt>sc xray_structure.scatterers()<block_start><if_stmt>(sc.u_iso<le>0<or>sc.u_iso<g>1)<block_start>sc.u_iso=0.05<block_end><block_end>show_cc_r1(params "ls%02d"%(i_cycle+1) f_obs xray_structure)<block_end>tm.show_elapsed(prefix="time smtbx_ls_simple_iterations: ")<block_end><elif_stmt>(mode<eq>"lm")<block_start><import_from_stmt>scitbx.lstbx normal_eqns_solving<line_sep>thresh=1e-6<try_stmt><block_start>cycles=normal_eqns_solving.levenberg_marquardt_iterations(ls gradient_threshold=thresh step_threshold=thresh tau=1e-7)<block_end><except_stmt>RuntimeError<as>e<block_start><if_stmt>(<not>str(e).startswith("cctbx::adptbx::debye_waller_factor_exp: arg_limit exceeded"))<block_start><raise><block_end>print('Aborting run_smtbx_ls("lm"):'<concat>' debye_waller_factor_exp failure: %s'%cod_id)<block_end>show_cc_r1(params "smtbx_lm" f_obs xray_structure)<line_sep>tm.show_elapsed(prefix="time levenberg_marquardt_iterations: ")<block_end><else_stmt><block_start><raise>RuntimeError('Unknown run_smtbx_ls(mode="%s")'%mode)<block_end><block_end><def_stmt>remove_tmp_files file_names<block_start><for_stmt>fn file_names<block_start><if_stmt>(op.isfile(fn))<block_start>os.remove(fn)<block_end><assert_stmt><not>op.exists(fn)<block_end><block_end><def_stmt>run_shelxl mode cod_id i_obs f_obs xray_structure params reference_structure expected_n_refinable_parameters<block_start><if_stmt>(mode<eq>"fm")<block_start><if_stmt>(params.apply_iteration_limit_to_all)<block_start>fm_cycles=params.iteration_limit<block_end><else_stmt><block_start>fm_cycles=params.shelxl_fm_iterations<block_end>cg_cycles=<none><block_end><elif_stmt>(mode<eq>"cg")<block_start>fm_cycles=<none><if_stmt>(params.apply_iteration_limit_to_all)<block_start>cg_cycles=params.iteration_limit<block_end><else_stmt><block_start>cg_cycles=params.shelxl_cg_iterations<block_end><block_end><else_stmt><block_start><raise>RuntimeError("Unknown mode: "+mode)<block_end>cwd_orig=os.getcwd()<line_sep>wdir="wdir_%s_shelxl_%s_%s"%(cod_id mode os.getpid())<if_stmt>(params.wdir_root<is><not><none>)<block_start>wdir=op.join(params.wdir_root 
wdir)<block_end>wdir_is_new=<false><if_stmt>(<not>op.isdir(wdir))<block_start>os.mkdir(wdir)<line_sep>wdir_is_new=<true><block_end>remove_wdir=<false><try_stmt><block_start>os.chdir(wdir)<line_sep>tmp_file_names=["tmp.ins" "tmp.hkl" "tmp.res" "tmp.lst"]<line_sep>remove_tmp_files(tmp_file_names)<line_sep>fo_sq=i_obs<if_stmt>(params.shelxl_reset_sigmas)<block_start>fo_sq=fo_sq.customized_copy(sigmas=flex.double(fo_sq.indices().size() params.shelxl_reset_sigmas))<block_end><import_stmt>iotbx.shelx<line_sep>open("tmp.ins" "w").writelines(iotbx.shelx.writer.generator(xray_structure=xray_structure data_are_intensities=<true> title="cod_id=%s mode=%s"%(cod_id mode) wavelength=fo_sq.minimum_wavelength_based_on_d_min() full_matrix_least_squares_cycles=fm_cycles conjugate_gradient_least_squares_cycles=cg_cycles weighting_scheme_params=params.shelxl_wght sort_scatterers=<false>))<line_sep>fo_sq.export_as_shelx_hklf(file_object=open("tmp.hkl" "w"))<import_stmt>iotbx.shelx.hklf<line_sep>fo_sq=iotbx.shelx.hklf.reader(file_name="tmp.hkl").as_miller_arrays(crystal_symmetry=fo_sq)[0]<line_sep>buffers=easy_run.fully_buffered("shelxl tmp")<line_sep>buffers.raise_if_errors()<line_sep>refinement_unstable=<false><for_stmt>line buffers.stdout_lines<block_start><if_stmt>(line.find("** REFINEMENT UNSTABLE **")<ge>0)<block_start>refinement_unstable=<true><line_sep>print("Aborted: shelxl %s refinement unstable: %s"%(mode cod_id))<line_sep><break><block_end><block_end>res=open("tmp.res").read()<try_stmt><block_start>refined=xray_structure.from_shelx(file=StringIO(res) min_distance_sym_equiv=0 strictly_shelxl=<false>)<block_end><except_stmt>iotbx.shelx.error<as>e<block_start><if_stmt>(str(e).find("scatterer parameter")<l>0)<block_start><raise><block_end>print("Aborted: shelxl %s refinement apparently unstable: %s"%(mode cod_id))<line_sep>refined=<none><block_end><if_stmt>(refined<is><not><none>)<block_start><assert_stmt>refined.crystal_symmetry().is_similar_symmetry(xray_structure)<for_stmt>sc,rsc zip(xray_structure.scatterers() refined.scatterers())<block_start><assert_stmt>rsc.label<eq>sc.label<assert_stmt>approx_equal(rsc.occupancy sc.weight() 1e-4)<line_sep>rsc.occupancy=sc.occupancy# XXX bug in res file reader <block_end><def_stmt>check_special_positions <block_start>result=<true><line_sep>uc=xray_structure.unit_cell()<line_sep>sstab=xray_structure.site_symmetry_table()<for_stmt>i_sc xray_structure.special_position_indices()<block_start>sc=refined.scatterers()[i_sc]<line_sep>site_symmetry=sstab.get(i_sc)<assert_stmt><not>site_symmetry.is_point_group_1()<line_sep>site_special=site_symmetry.special_op()<times>sc.site<line_sep>d=uc.mod_short_distance(sc.site site_special)<if_stmt>(d<g>1e-3)<block_start>print("site moved off special position:")<line_sep>print(" %s"%sc.label)<line_sep>print(" shelxl res: %11.6f %11.6f %11.6f"%sc.site)<line_sep>print(" special_op: %11.6f %11.6f %11.6f"%site_special)<line_sep>print(" distance moved: %.3f"%d)<line_sep>result=<false><block_end><block_end><return>result<block_end><assert_stmt>check_special_positions()<line_sep>xray_structure.replace_scatterers(refined.scatterers())<line_sep>res_osf=<none><line_sep>res_hkl_count=<none><line_sep>res_r1=<none><line_sep>res_n_parameters=<none><line_sep>res_n_restraints=<none><for_stmt>line res.splitlines()<block_start><if_stmt>(line.startswith("FVAR "))<block_start>flds=line.split()<assert_stmt>len(flds)<eq>2<line_sep>res_osf=float(flds[1])<line_sep><continue><block_end><if_stmt>(<not>line.startswith("REM 
"))<block_start><continue><block_end><assert_stmt><not>refinement_unstable<if_stmt>(line.startswith("REM R1 ="))<block_start>flds=line.split()<assert_stmt>len(flds)<eq>15<line_sep>res_hkl_count=int(flds[13])<line_sep>res_r1=float(flds[10])<block_end><elif_stmt>(line.find(" parameters refined ")<ge>0)<block_start><assert_stmt>line.endswith(" restraints")<line_sep>flds=line.split()<assert_stmt>len(flds)<eq>7<line_sep>res_n_parameters=int(flds[1])<line_sep>res_n_restraints=int(flds[-2])<block_end><block_end><if_stmt>(<not>refinement_unstable)<block_start><assert_stmt>res_osf<is><not><none><assert_stmt>res_hkl_count<is><not><none><assert_stmt>res_r1<is><not><none><assert_stmt>res_n_parameters<is><not><none><assert_stmt>res_n_restraints<is><not><none><line_sep># <assert_stmt>res_hkl_count<eq>fo_sq.indices().size()<def_stmt>raise_unexpected_restraints n_expected<block_start><raise>RuntimeError("Unexpected number of SHELXL restraints: %d (vs. %d expected)"%(res_n_restraints n_expected))<block_end><if_stmt>(mode<eq>"fm")<block_start>n_caos=fo_sq.space_group_info().number_of_continuous_allowed_origin_shifts()<if_stmt>(res_n_restraints<ne>n_caos)<block_start>sg_symbol=str(fo_sq.space_group_info())<if_stmt>(sg_symbol<in>["P 63 m c" "P 63 c m"])<block_start><assert_stmt>n_caos<eq>1<assert_stmt>res_n_restraints<eq>0<line_sep>print("INFO: SHELXL restraint count incorrect? code_code:" cod_id)<block_end><else_stmt><block_start>raise_unexpected_restraints(n_caos)<block_end><block_end><block_end><elif_stmt>(mode<eq>"cg")<block_start><if_stmt>(res_n_restraints<ne>0)<block_start>raise_unexpected_restraints(0)<block_end><block_end><else_stmt><block_start><raise>RuntimeError("Unknown mode: "+mode)<block_end><assert_stmt>res_n_parameters<eq>expected_n_refinable_parameters+1<line_sep>fc_abs,_,r1_fvar=show_cc_r1(params "fvar_"+mode f_obs xray_structure scale_factor=res_osf)<line_sep>r1_diff=r1_fvar-res_r1<line_sep>print("R1 recomputed - shelxl_%s.res: %.4f - %.4f = %.4f %s"%(mode r1_fvar res_r1 r1_diff cod_id))<if_stmt>(abs(r1_diff)<g>0.01)<block_start><raise>RuntimeError("R1 MISMATCH %s"%cod_id)<block_end>_,_,r1_auto=show_cc_r1(params "shelxl_"+mode f_obs fc_abs=fc_abs)<line_sep>print("R1 FVAR-Auto %s: %.4f"%(cod_id r1_fvar-r1_auto))<line_sep># lst_r1=<none><line_sep>lst_wr2=<none><for_stmt>line open("tmp.lst").read().splitlines()<block_start>l=line.strip()<if_stmt>(l.startswith("R1 = "))<block_start>lst_r1=float(l.split()[9])<block_end><elif_stmt>(l.startswith("wR2 = ")<and>l.endswith(" for all data"))<block_start>lst_wr2=float(l.replace("," " ").split()[2])<block_end><block_end><assert_stmt>lst_r1<is><not><none><assert_stmt>lst_wr2<is><not><none><assert_stmt>lst_r1<eq>res_r1<line_sep># fc_sq=fc_abs.f_as_f_sq()<line_sep>weights=shelxl_weights(fo_sq=fo_sq.data() sigmas=fo_sq.sigmas() fc_sq=fc_sq.data() osf_sq=res_osf<power>2 shelxl_wght=params.shelxl_wght)<line_sep>num=flex.sum(weights<times>flex.pow2(fo_sq.data()/res_osf<power>2-fc_sq.data()))<line_sep>den=flex.sum(weights<times>flex.pow2(fo_sq.data()/res_osf<power>2))<assert_stmt>den<ne>0<line_sep>wr2=(num/den)<power>0.5<line_sep>wr2_diff=wr2-lst_wr2<if_stmt>(abs(wr2_diff)<g>0.01)<block_start>info=" significantly different"<block_end><else_stmt><block_start>info=""<block_end>print("wR2 recomputed - shelxl_%s.lst: %.4f - %.4f = %.4f %s%s"%(mode wr2 lst_wr2 wr2_diff cod_id info))<if_stmt>(abs(wr2_diff)/max(lst_wr2 wr2)<g>0.2)<block_start><raise>RuntimeError("wR2 MISMATCH 
%s"%cod_id)<block_end><block_end><block_end><if_stmt>(<not>params.keep_tmp_files)<block_start>remove_tmp_files(tmp_file_names)<line_sep>remove_wdir=wdir_is_new<block_end><block_end><finally_stmt><block_start>os.chdir(cwd_orig)<if_stmt>(remove_wdir)<block_start><try_stmt><block_start>os.rmdir(wdir)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><block_end><block_end><def_stmt>run_shelx76 cod_id f_obs xray_structure fvars encoded_sites params reference_structure<block_start><if_stmt>(params.apply_iteration_limit_to_all)<block_start>ls_cycles=params.iteration_limit<block_end><else_stmt><block_start>ls_cycles=params.shelx76_iterations<block_end>cwd_orig=os.getcwd()<line_sep>wdir="wdir_%s_shelx76_%s"%(cod_id os.getpid())<if_stmt>(params.wdir_root<is><not><none>)<block_start>wdir=op.join(params.wdir_root wdir)<block_end>wdir_is_new=<false><if_stmt>(<not>op.isdir(wdir))<block_start>os.mkdir(wdir)<line_sep>wdir_is_new=<true><block_end>remove_wdir=<false><try_stmt><block_start>os.chdir(wdir)<line_sep>tmp_file_names=["tmp.ins" "tmp.lst" "fort.2" "fort.3" "fort.4" "fort.7"]<line_sep>remove_tmp_files(tmp_file_names)<assert_stmt><not>op.exists("tmp.ins")<import_from_stmt>cctbx.development run_shelx76<line_sep>run_shelx76.write_shelx76_ls(f_obs=f_obs xray_structure=xray_structure fvars=fvars encoded_sites=encoded_sites l_s_parameters=str(ls_cycles))<assert_stmt>op.exists("tmp.ins")<line_sep>buffers=easy_run.fully_buffered("shelx76 < tmp.ins > tmp.lst")<line_sep>buffers.raise_if_errors_or_output()<line_sep>lst=open("tmp.lst").read().splitlines()<line_sep>r_from_lst=<none><for_stmt>line lst<block_start>l=line.lstrip()<if_stmt>(l.startswith("R = "))<block_start>print(l)<line_sep>flds=l.split()<assert_stmt>len(flds)<eq>12<if_stmt>(flds[2].lower()<eq>"nan")<block_start>print("Aborted: shelx76 refinement apparently unstable: %s"%(cod_id))<line_sep>r_from_lst="nan"<line_sep><break><block_end>r_from_lst=float(flds[2])<block_end><block_end><assert_stmt>r_from_lst<is><not><none><if_stmt>(r_from_lst<ne>"nan")<block_start>print("%-12s cc, r1: None %.4f"%("shelx76" r_from_lst))<if_stmt>(<not>params.keep_tmp_files)<block_start>remove_tmp_files(tmp_file_names)<line_sep>remove_wdir=wdir_is_new<block_end><block_end><block_end><finally_stmt><block_start>os.chdir(cwd_orig)<if_stmt>(remove_wdir)<block_start><try_stmt><block_start>os.rmdir(wdir)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><block_end><block_end><def_stmt>process params pickle_file_name<block_start>cod_id=op.basename(pickle_file_name).split("." 
1)[0]<line_sep>print("cod_id:" cod_id)<line_sep>c_obs,structure_prep,edge_list=easy_pickle.load(file_name=pickle_file_name)<line_sep>changes=structure_prep.make_scatterer_labels_shelx_compatible_in_place()<if_stmt>(params.sites_mod_short)<block_start>structure_prep=structure_prep.sites_mod_short()<block_end><import_from_stmt>iotbx.shelx fvar_encoding<line_sep>structure_prep=fvar_encoding.move_sites_if_necessary_for_shelx_fvar_encoding(xray_structure=structure_prep)<line_sep>structure_prep.show_summary().show_scatterers()<if_stmt>(len(changes)<ne>0)<block_start><import_from_stmt>libtbx.utils plural_s<line_sep>print("INFO: %d atom name%s changed for compatibility with SHELXL:"%plural_s(len(changes)))<for_stmt>change changes<block_start>print(' changed: "%s" -> "%s"'%change)<block_end><block_end>structure_prep.scattering_type_registry(table="it1992").show()<line_sep>fvar_encoding.dev_build_shelx76_fvars(structure_prep)# only an exercise print("."<times>79)<line_sep># <if_stmt>(len(params.optimizers)<eq>0)<block_start><return><block_end># <assert_stmt>c_obs.is_xray_intensity_array()<or>c_obs.is_xray_amplitude_array()<if_stmt>(c_obs.is_xray_intensity_array())<block_start>i_obs=c_obs<line_sep>f_obs=c_obs.f_sq_as_f(algorithm="xtal_3_7")<block_end><else_stmt><block_start>f_obs=c_obs<line_sep>i_obs=c_obs.f_as_f_sq(algorithm="shelxl")<block_end>process_continue(params=params cod_id=cod_id c_obs=c_obs i_obs=i_obs f_obs=f_obs structure_prep=structure_prep)<block_end><def_stmt>process_continue params cod_id c_obs i_obs f_obs structure_prep<block_start>p=params.f_calc_options<line_sep>f_calc=f_obs.structure_factors_from_scatterers(xray_structure=structure_prep algorithm=p.algorithm cos_sin_table=p.cos_sin_table).f_calc()<line_sep>sel=f_obs.f_obs_f_calc_fan_outlier_selection(f_calc=f_calc)<assert_stmt>sel<is><not><none><line_sep>n_outliers=sel.count(<true>)<if_stmt>(n_outliers<ne>0)<block_start>action=params.f_obs_f_calc_fan_outliers<line_sep>print("INFO: f_obs_f_calc_fan_outliers = %s: %d"%(action n_outliers))<if_stmt>(action<eq>"remove")<block_start>i_obs=i_obs.select(~sel)<line_sep>f_obs=f_obs.select(~sel)<block_end><block_end><if_stmt>(f_obs.anomalous_flag())<block_start>print("INFO: converting anomalous i+f_obs to non-anomalous.")<line_sep>i_obs=i_obs.average_bijvoet_mates()<line_sep>f_obs=f_obs.average_bijvoet_mates()<block_end>sel=((i_obs.data()<eq>0)&(i_obs.sigmas()<eq>0))|((f_obs.data()<eq>0)&(f_obs.sigmas()<eq>0))<line_sep>n_zero_d_and_s=sel.count(<true>)<if_stmt>(n_zero_d_and_s<ne>0)<block_start>print("INFO: removing reflections with i+f_obs=0 and sigma=0:" n_zero_d_and_s)<line_sep>i_obs=i_obs.select(~sel)<line_sep>f_obs=f_obs.select(~sel)<block_end>p=params.f_calc_options<line_sep>f_calc=f_obs.structure_factors_from_scatterers(xray_structure=structure_prep algorithm=p.algorithm cos_sin_table=p.cos_sin_table).f_calc()<if_stmt>(params.use_f_calc_as_f_obs)<block_start>print("INFO: using f_calc as i+f_obs")<line_sep>i_obs=f_calc.intensities().customized_copy(sigmas=flex.double(f_calc.indices().size() 0.01))<line_sep>f_obs=f_calc.amplitudes().customized_copy(sigmas=flex.double(f_calc.indices().size() 0.01))<block_end><else_stmt># scaling applied so that the data written in shelx hklf format # have sufficient significant digits, and FVAR is 1 (shelx76 seems # to be especially sensitive to FVAR >> 1) <block_start>k=f_obs.scale_factor(f_calc=f_calc)<assert_stmt>k<ne>0<line_sep>s=1/k<power>2<line_sep>print("INFO: scaling i_obs to f_calc by multiplying i_obs with: 
%.6g"%s)<line_sep>i_obs=i_obs.apply_scaling(factor=s)<line_sep>s=1/k<line_sep>print("INFO: scaling f_obs to f_calc by multiplying f_obs with: %.6g"%s)<line_sep>f_obs=f_obs.apply_scaling(factor=s)<block_end><def_stmt>show obs<block_start>obs.show_comprehensive_summary()<import_from_stmt>cctbx.omz.cif_refine report_fraction_of_negative_observations_if_any<as>_<line_sep>_(cod_id obs)<block_end><if_stmt>(c_obs.is_xray_intensity_array())<block_start>show(i_obs)<block_end><else_stmt><block_start>show(f_obs)<block_end>print("."<times>79)<line_sep># structure_work=structure_prep.deep_copy_scatterers()<line_sep>sel=structure_work.hd_selection()<line_sep>print("Removing hydrogen atoms:" sel.count(<true>))<line_sep>structure_work=structure_work.select(selection=~sel)<line_sep>sdt=params.show_distances_threshold<if_stmt>(sdt<g>0)<block_start>print("Distances smaller than %.6g A:"%sdt)<line_sep>structure_work.show_distances(distance_cutoff=sdt)<line_sep>print("."<times>79)<block_end># <if_stmt>(params.tardy_samples.iq<is><not><none>)<block_start><import_from_stmt>cctbx.omz tardy_adaptor<line_sep>print()<line_sep>tardy_adaptor.sample_e_pot(id_code=cod_id f_obs=f_obs xray_structure=structure_prep edge_list=edge_list params=params.tardy_samples)<line_sep>print()<line_sep><return><block_end># <import_from_stmt>iotbx.shelx fvar_encoding<line_sep>fvars,encoded_sites=fvar_encoding.dev_build_shelx76_fvars(structure_work)<line_sep>print("Number of FVARs for special position constraints:" len(fvars)-1)<line_sep>print("."<times>79)<line_sep># show_cc_r1(params "prep" f_obs structure_prep)<def_stmt>cc_r1 label<block_start>show_cc_r1(params label f_obs structure_work)<block_end>cc_r1("no_h")<line_sep>structure_work.convert_to_isotropic()<line_sep>cc_r1("iso")<line_sep>structure_iso=structure_work.deep_copy_scatterers()<line_sep># <if_stmt>(params.reset_u_iso<is><not><none>)<block_start>structure_work.set_u_iso(value=params.reset_u_iso)<line_sep>cc_r1("setu")<block_end><if_stmt>(params.shake_sites_rmsd<is><not><none>)<block_start>mt=flex.mersenne_twister(seed=0)<line_sep>structure_work.shift_sites_in_place(shift_length=params.shake_sites_rmsd mersenne_twister=mt)<line_sep>print("rms difference after shift_sites_in_place: %.3f"%structure_iso.rms_difference(structure_work))<line_sep>cc_r1("shift_xyz")<block_end># <if_stmt>(params.max_atoms<is><not><none>)<block_start>n=structure_work.scatterers().size()<if_stmt>(n<g>params.max_atoms)<block_start>print("Skipping refinement of large model: %d atoms COD %s"%(n cod_id))<line_sep><return><block_end><block_end># structure_work.scatterers().flags_set_grads(state=<false>)<for_stmt>sc structure_work.scatterers()<block_start>sc.flags.set_grad_site(<true>)<assert_stmt>sc.flags.use_u_iso_only()<line_sep>sc.flags.set_grad_u_iso(<true>)<block_end>n_refinable_parameters=structure_work.n_parameters(considering_site_symmetry_constraints=<true>)<line_sep>print("Number of refinable parameters:" n_refinable_parameters)<line_sep># <if_stmt>(params.iteration_limit<l>1)<block_start><return><block_end># <if_stmt>("dev"<not><in>params.optimizers)<block_start>structure_dev=<none><block_end><else_stmt><block_start>structure_dev=structure_work.deep_copy_scatterers()<line_sep>omz.dev.refinement(i_obs=i_obs f_obs=f_obs xray_structure=structure_dev params=params reference_structure=structure_iso expected_n_refinable_parameters=n_refinable_parameters plot_samples_id=cod_id)<line_sep>show_cc_r1(params "dev" f_obs 
structure_dev)<if_stmt>(params.export_refined)<block_start>file_name="dev_%s_%s_%s.pdb"%(params.target_type params.target_obs_type.lower() cod_id)<line_sep>open(file_name "w").write(structure_dev.as_pdb_file(remarks=[file_name]))<block_end><if_stmt>(params.pickle_refined_dir<is><not><none>)<block_start>easy_pickle.dump(file_name=op.join(params.pickle_refined_dir cod_id+".pickle") obj=(c_obs structure_dev <none>))<line_sep>print((structure_dev.scatterers().size() c_obs.space_group().order_p() c_obs.indices().size() c_obs.d_min()) file=open("%s/qi_%s"%(params.pickle_refined_dir cod_id) "w"))<block_end><block_end># <def_stmt>use_smtbx_ls mode<block_start><if_stmt>("ls_"+mode<not><in>params.optimizers)<block_start><return><none><block_end><if_stmt>(<not>libtbx.env.has_module(name="smtbx"))<block_start>print("INFO: smtbx not available: refinement skipped.")<line_sep><return><none><block_end>result=structure_work.deep_copy_scatterers()<line_sep>run_smtbx_ls(mode=mode cod_id=cod_id i_obs=i_obs f_obs=f_obs xray_structure=result params=params)<line_sep>show_cc_r1(params "ls_"+mode f_obs result)<line_sep><return>result<block_end>structure_ls_simple=use_smtbx_ls("simple")<line_sep>structure_ls_lm=use_smtbx_ls("lm")<line_sep># <def_stmt>use_shelxl mode<block_start><if_stmt>("shelxl_"+mode<not><in>params.optimizers)<block_start><return><none><block_end>result=structure_work.deep_copy_scatterers()<line_sep>run_shelxl(mode=mode cod_id=cod_id i_obs=i_obs f_obs=f_obs xray_structure=result params=params reference_structure=structure_iso expected_n_refinable_parameters=n_refinable_parameters)<if_stmt>(params.export_refined)<block_start>file_name="shelxl_%s_%s.pdb"%(mode cod_id)<line_sep>open(file_name "w").write(result.as_pdb_file(remarks=[file_name]))<block_end><return>result<block_end>structure_shelxl_fm=use_shelxl("fm")<line_sep>structure_shelxl_cg=use_shelxl("cg")<line_sep># <if_stmt>("shelx76"<not><in>params.optimizers)<block_start>structure_shelx76=<none><block_end><else_stmt><block_start>structure_shelx76=structure_work.deep_copy_scatterers()<line_sep>run_shelx76(cod_id=cod_id f_obs=f_obs xray_structure=structure_shelx76 fvars=fvars encoded_sites=encoded_sites params=params reference_structure=structure_iso)<if_stmt>(params.export_refined)<block_start>file_name="shelx76_%s.pdb"%cod_id<line_sep>open(file_name "w").write(structure_shelx76.as_pdb_file(remarks=[file_name]))<block_end><block_end><block_end><def_stmt>run args<block_start><import_from_stmt>iotbx.option_parser option_parser<as>iotbx_option_parser<import_stmt>libtbx.utils<line_sep>show_times=libtbx.utils.show_times(time_start="now")<line_sep>command_call=["iotbx.python" __file__]<line_sep>command_line=(iotbx_option_parser(usage=" ".join(command_call)+" [options] directory|file...").enable_chunk(easy_all=<true>).enable_multiprocessing()).process(args=args min_nargs=1)<if_stmt>(command_line.run_multiprocessing_chunks_if_applicable(command_call=command_call))<block_start>show_times()<line_sep><return><block_end>co=command_line.options<line_sep># print("TIME BEGIN cod_refine:" date_and_time())<line_sep>print()<line_sep># master_phil=get_master_phil()<line_sep>argument_interpreter=master_phil.command_line_argument_interpreter()<line_sep>phil_objects=[]<line_sep>remaining_args=[]<for_stmt>arg 
command_line.args<block_start><if_stmt>(arg.find("=")<ge>0)<block_start>phil_objects.append(argument_interpreter.process(arg=arg))<block_end><else_stmt><block_start>remaining_args.append(arg)<block_end><block_end>work_phil=master_phil.fetch(sources=phil_objects)<line_sep>work_phil.show()<line_sep>print()<line_sep>params=work_phil.extract()<line_sep># qi_dict={}<line_sep>all_pickles=[]<for_stmt>arg remaining_args<block_start><if_stmt>(op.isdir(arg))<block_start><for_stmt>node sorted(os.listdir(arg))<block_start><if_stmt>(node.endswith(".pickle"))<block_start>all_pickles.append(op.join(arg node))<block_end><elif_stmt>(node.startswith("qi_")<and>len(node)<eq>10)<block_start>qi=open(op.join(arg node)).read().splitlines()<if_stmt>(len(qi)<eq>1)<block_start>cod_id=node[3:]<line_sep>quick_info=eval(qi[0])<assert_stmt>cod_id<not><in>qi_dict<line_sep>qi_dict[cod_id]=quick_info<block_end><block_end><block_end><block_end><elif_stmt>(op.isfile(arg))<block_start>all_pickles.append(arg)<block_end><else_stmt><block_start><raise>RuntimeError("Not a file or directory: %s"%arg)<block_end><block_end>print("Number of pickle files:" len(all_pickles))<line_sep>print("Number of quick_infos:" len(qi_dict))<line_sep>sort_choice=params.sorting_of_pickle_files<if_stmt>(len(qi_dict)<ne>0<and>sort_choice<is><not><none>)<block_start>print("Sorting pickle files by n_atoms * n_refl:" sort_choice)<assert_stmt>sort_choice<in>["down" "up"]<def_stmt>sort_pickle_files <block_start><if_stmt>(sort_choice<eq>"down")<block_start>i_sign=-1<block_end><else_stmt><block_start>i_sign=1<block_end>buffer=[]<for_stmt>i,path enumerate(all_pickles)<block_start>cod_id=op.basename(path).split("." 1)[0]<line_sep>qi=qi_dict.get(cod_id)<if_stmt>(qi<is><none>)<block_start>nn=2<power>31<block_end><else_stmt><block_start>nn=qi[0]<times>qi[1]<times>qi[2]<block_end>buffer.append((nn i_sign<times>i path))<block_end>buffer.sort()<if_stmt>(i_sign<l>0)<block_start>buffer.reverse()<block_end>result=[]<for_stmt>elem buffer<block_start>result.append(elem[-1])<block_end><return>result<block_end>all_pickles=sort_pickle_files()<block_end>print()<line_sep># rss=params.random_subset.size<if_stmt>(rss<is><not><none><and>rss<g>0)<block_start>seed=params.random_subset.seed<line_sep>print("Selecting subset of %d pickle files using random seed %d"%(rss seed))<line_sep>mt=flex.mersenne_twister(seed=seed)<line_sep>perm=mt.random_permutation(size=len(all_pickles))[:rss]<line_sep>flags=flex.bool(len(all_pickles) <false>).set_selected(perm <true>)<line_sep>all_pickles=flex.select(all_pickles permutation=flags.iselection())<line_sep>print()<block_end># <import_from_stmt>libtbx.path makedirs_race<if_stmt>(params.wdir_root<is><not><none>)<block_start>makedirs_race(path=params.wdir_root)<block_end><if_stmt>(params.pickle_refined_dir<is><not><none>)<block_start>makedirs_race(path=params.pickle_refined_dir)<block_end># n_caught=0<for_stmt>i_pickle,pickle_file_name enumerate(all_pickles)<block_start><if_stmt>(i_pickle%command_line.chunk.n<ne>command_line.chunk.i)<block_start><continue><block_end>tm=user_plus_sys_time()<try_stmt><block_start>process(params pickle_file_name)<block_end><except_stmt>KeyboardInterrupt<block_start>print("CAUGHT EXCEPTION: KeyboardInterrupt" file=sys.stderr)<line_sep>traceback.print_exc()<line_sep>print(file=sys.stderr)<line_sep>sys.stderr.flush()<line_sep><return><block_end><except_stmt>Exception<block_start>sys.stdout.flush()<line_sep>print("CAUGHT EXCEPTION: %s"%pickle_file_name 
file=sys.stderr)<line_sep>traceback.print_exc()<line_sep>print(file=sys.stderr)<line_sep>sys.stderr.flush()<line_sep>n_caught<augadd>1<block_end><else_stmt><block_start>print("done_with: %s (%.2f seconds)"%(pickle_file_name tm.elapsed()))<line_sep>print()<line_sep>sys.stdout.flush()<block_end><block_end>print()<line_sep>print("Number of exceptions caught:" n_caught)<line_sep># show_times()<line_sep>print()<line_sep>print("TIME END cod_refine:" date_and_time())<block_end><if_stmt>(__name__<eq>"__main__")<block_start>run(args=sys.argv[1:])<block_end>
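# Side note (illustrative sketch, not part of the cctbx sources): the i_sign trick
# used in sort_pickle_files above hides the sort direction in the tie-breaking index,
# so entries with equal nn keys keep their original file order whether the sort is
# "up" or "down". The helper below isolates that pattern.
def _stable_sort_indices(values, descending=False):
    i_sign = -1 if descending else 1
    buffer = sorted((value, i_sign * i, i) for i, value in enumerate(values))
    if descending:
        buffer.reverse()
    return [original_index for _, _, original_index in buffer]

# _stable_sort_indices([3, 1, 3, 2], descending=True) -> [0, 2, 3, 1]
# (the two equal 3s stay in their original relative order)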
<import_from_stmt>django.db.models Case Count IntegerField Manager Sum When<import_from_stmt>django.db.models.query QuerySet<class_stmt>GroupBaseManager(Manager)<block_start>use_for_related_fields=<true><def_stmt>get_queryset self<block_start>"""Annotate count of group members."""<line_sep>qs=super(GroupBaseManager self).get_queryset()<line_sep>qs=qs.annotate(member_count=Count('members'))<line_sep><return>qs<block_end><block_end><class_stmt>GroupManager(Manager)<block_start>use_for_related_fields=<true><def_stmt>get_queryset self<block_start>"""Annotate count of memberships of type MEMBER."""<import_from_stmt>mozillians.groups.models GroupMembership<line_sep>qs=super(GroupManager self).get_queryset()<line_sep>annotation=Sum(Case(When(groupmembership__status=GroupMembership.MEMBER then=1) default=0 output_field=IntegerField()))<line_sep>qs=qs.annotate(member_count=annotation)<line_sep><return>qs<block_end><block_end><class_stmt>GroupQuerySet(QuerySet)<block_start><def_stmt>visible self<block_start><return>self.filter(visible=<true>)<block_end><block_end>
# flake8: noqa # TODO(vorj): once we move to flake8 3.7.0+, # we should ignore only W291 for the whole file # using --per-file-ignores . <import_stmt>clpy<import_stmt>unittest<class_stmt>TestUltimaCIndexer(unittest.TestCase)<block_start><def_stmt>test_cindexer_argument_mutation self<block_start>x=clpy.backend.ultima.exec_ultima('' '#include <cupy/carray.hpp>')+''' void f(CIndexer_2 ind) { } '''[1:]<line_sep>y=clpy.backend.ultima.exec_ultima(''' void f(CIndexer<2> ind){} ''' '#include <cupy/carray.hpp>')<line_sep>self.maxDiff=<none><line_sep>self.assertEqual(x y)<block_end><def_stmt>test_cindexer_member_function self<block_start>x=clpy.backend.ultima.exec_ultima('' '#include <cupy/carray.hpp>')+''' void f(CIndexer_2 ind) { ind_size; } '''[1:]<line_sep>y=clpy.backend.ultima.exec_ultima(''' void f(CIndexer<2> ind){ ind.size(); } ''' '#include <cupy/carray.hpp>')<line_sep>self.maxDiff=<none><line_sep>self.assertEqual(x y)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_from_stmt>.quantize quantize quantize_static quantize_dynamic quantize_qat<import_from_stmt>.quantize QuantizationMode<import_from_stmt>.calibrate CalibrationDataReader CalibraterBase MinMaxCalibrater create_calibrator CalibrationMethod<import_from_stmt>.quant_utils QuantType QuantFormat write_calibration_table<line_sep>
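# Hedged usage note (not part of this module): these re-exports are what callers
# typically reach for, e.g. dynamic weight-only quantization of an ONNX model.
# The argument names below follow the onnxruntime.quantization documentation and
# the file names are hypothetical — verify against your installed version.
#
#   from onnxruntime.quantization import quantize_dynamic, QuantType
#   quantize_dynamic("model.onnx", "model.quant.onnx", weight_type=QuantType.QUInt8)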
# Distributed under the MIT License. # See LICENSE.txt for details. <import_stmt>numpy<as>np<def_stmt>b_dot_v magnetic_field spatial_velocity spatial_metric<block_start><return>np.einsum("ab, ab" spatial_metric np.outer(magnetic_field spatial_velocity))<block_end><def_stmt>b_squared magnetic_field spatial_metric<block_start><return>np.einsum("ab, ab" spatial_metric np.outer(magnetic_field magnetic_field))<block_end><def_stmt>magnetic_field_one_form magnetic_field spatial_metric<block_start><return>np.einsum("a, ia" magnetic_field spatial_metric)<block_end><def_stmt>p_star pressure b_dot_v b_squared lorentz_factor<block_start><return>pressure+0.5<times>(b_dot_v<power>2+b_squared/lorentz_factor<power>2)<block_end><def_stmt>spatial_velocity_one_form spatial_velocity spatial_metric<block_start><return>np.einsum("a, ia" spatial_velocity spatial_metric)<block_end><def_stmt>tilde_d_flux tilde_d tilde_tau tilde_s tilde_b tilde_phi lapse shift sqrt_det_spatial_metric spatial_metric inv_spatial_metric pressure spatial_velocity lorentz_factor magnetic_field<block_start><return>tilde_d<times>(lapse<times>spatial_velocity-shift)<block_end><def_stmt>tilde_tau_flux tilde_d tilde_tau tilde_s tilde_b tilde_phi lapse shift sqrt_det_spatial_metric spatial_metric inv_spatial_metric pressure spatial_velocity lorentz_factor magnetic_field<block_start>b_dot_v_=b_dot_v(magnetic_field spatial_velocity spatial_metric)<line_sep><return>(sqrt_det_spatial_metric<times>lapse<times>p_star(pressure b_dot_v_ b_squared(magnetic_field spatial_metric) lorentz_factor)<times>spatial_velocity+tilde_tau<times>(lapse<times>spatial_velocity-shift)-lapse<times>b_dot_v_<times>tilde_b)<block_end><def_stmt>tilde_s_flux tilde_d tilde_tau tilde_s tilde_b tilde_phi lapse shift sqrt_det_spatial_metric spatial_metric inv_spatial_metric pressure spatial_velocity lorentz_factor magnetic_field<block_start>b_dot_v_=b_dot_v(magnetic_field spatial_velocity spatial_metric)<line_sep>b_i=(magnetic_field_one_form(magnetic_field spatial_metric)/lorentz_factor+spatial_velocity_one_form(spatial_velocity spatial_metric)<times>lorentz_factor<times>b_dot_v_)<line_sep>result=np.outer(lapse<times>spatial_velocity-shift tilde_s)<line_sep>result<augsub>lapse/lorentz_factor<times>np.outer(tilde_b b_i)<line_sep>result<augadd>(sqrt_det_spatial_metric<times>lapse<times>p_star(pressure b_dot_v_ b_squared(magnetic_field spatial_metric) lorentz_factor)<times>np.identity(shift.size))<line_sep><return>result<block_end><def_stmt>tilde_b_flux tilde_d tilde_tau tilde_s tilde_b tilde_phi lapse shift sqrt_det_spatial_metric spatial_metric inv_spatial_metric pressure spatial_velocity lorentz_factor magnetic_field<block_start>result=np.outer(lapse<times>spatial_velocity-shift tilde_b)<line_sep>result<augadd>lapse<times>inv_spatial_metric<times>tilde_phi<line_sep>result<augsub>lapse<times>np.outer(tilde_b spatial_velocity)<line_sep><return>result<block_end><def_stmt>tilde_phi_flux tilde_d tilde_tau tilde_s tilde_b tilde_phi lapse shift sqrt_det_spatial_metric spatial_metric inv_spatial_metric pressure spatial_velocity lorentz_factor magnetic_field<block_start><return>lapse<times>tilde_b-tilde_phi<times>shift<block_end>
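# Illustrative sanity check (not part of the original test helpers): with a flat
# spatial metric the einsum contractions above reduce to ordinary dot products,
# which makes the intent of b_dot_v and b_squared easy to verify numerically.
def _flat_space_check():
    spatial_metric = np.identity(3)
    magnetic_field = np.array([1.0, 2.0, 3.0])
    spatial_velocity = np.array([0.1, 0.0, -0.2])
    assert np.isclose(b_dot_v(magnetic_field, spatial_velocity, spatial_metric),
                      np.dot(magnetic_field, spatial_velocity))
    assert np.isclose(b_squared(magnetic_field, spatial_metric),
                      np.dot(magnetic_field, magnetic_field))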
# Generated by Django 2.2 on 2021-04-10 19:50 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('fastrunner' '0017_visit_project') ]<line_sep>operations=[migrations.AddField(model_name='api' name='yapi_catid' field=models.IntegerField(default=0 null=<true> verbose_name='yapi category id') ) migrations.AddField(model_name='api' name='yapi_id' field=models.IntegerField(default=0 null=<true> verbose_name='yapi id') ) migrations.AddField(model_name='api' name='ypai_add_time' field=models.CharField(default='' max_length=10 null=<true> verbose_name='yapi creation time') ) migrations.AddField(model_name='api' name='ypai_up_time' field=models.CharField(default='' max_length=10 null=<true> verbose_name='yapi update time') ) migrations.AddField(model_name='api' name='ypai_username' field=models.CharField(default='' max_length=30 null=<true> verbose_name='yapi original author') ) ]<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_from_stmt>scss Scss<import_stmt>pytest<line_sep># py.test bug: unicode literals not allowed here, so cast to native str type pytestmark=pytest.mark.skipif(str("not config.getoption('include_ruby')"))<line_sep># TODO undupe <def_stmt>assert_rendering input expected **kwargs<block_start>compiler=Scss(scss_opts=dict(compress=<false>) **kwargs)<line_sep>css=compiler.compile(input)<line_sep># TODO chumptastic hack; sass and pyscss have slightly different # "non-compressed" output <import_stmt>re<line_sep>css=re.sub(r'(?m)\n *[}]$' ' }\n' css).rstrip("\n")+"\n"<line_sep>#css = re.sub(r'; [}]', ';\n }', css) #css = re.sub(r'\n *[}]$', ' }', css) <assert_stmt>expected<eq>css<block_end><def_stmt>test_basic <block_start>assert_rendering('''\ .foo {a: b} .bar {@extend .foo} ''' '''\ .foo, .bar { a: b; } ''')<line_sep>assert_rendering('''\ .bar {@extend .foo} .foo {a: b} ''' '''\ .foo, .bar { a: b; } ''')<line_sep>assert_rendering('''\ .foo {a: b} .bar {c: d; @extend .foo} ''' '''\ .foo, .bar { a: b; } .bar { c: d; } ''')<line_sep>assert_rendering('''\ .foo {a: b} .bar {@extend .foo; c: d} ''' '''\ .foo, .bar { a: b; } .bar { c: d; } ''')<block_end><def_stmt>test_multiple_targets <block_start>assert_rendering('''\ .foo {a: b} .bar {@extend .foo} .blip .foo {c: d} ''' '''\ .foo, .bar { a: b; } .blip .foo, .blip .bar { c: d; } ''')<block_end><def_stmt>test_multiple_extendees <block_start>assert_rendering('''\ .foo {a: b} .bar {c: d} .baz {@extend .foo; @extend .bar} ''' '''\ .foo, .baz { a: b; } .bar, .baz { c: d; } ''')<block_end><def_stmt>test_multiple_extends_with_single_extender_and_single_target <block_start>assert_extends('.foo .bar' '.baz {@extend .foo; @extend .bar}' '.foo .bar, .baz .bar, .foo .baz, .baz .baz')<line_sep>assert_extends('.foo.bar' '.baz {@extend .foo; @extend .bar}' '.foo.bar, .baz')<block_end><def_stmt>test_multiple_extends_with_multiple_extenders_and_single_target <block_start>assert_rendering('''\ .foo .bar {a: b} .baz {@extend .foo} .bang {@extend .bar} ''' '''\ .foo .bar, .baz .bar, .foo .bang, .baz .bang { a: b; } ''')<line_sep>assert_rendering('''\ .foo.bar {a: b} .baz {@extend .foo} .bang {@extend .bar} ''' '''\ .foo.bar, .bar.baz, .baz.bang, .foo.bang { a: b; } ''')<block_end><def_stmt>test_chained_extends <block_start>assert_rendering('''\ .foo {a: b} .bar {@extend .foo} .baz {@extend .bar} .bip {@extend .bar} ''' '''\ .foo, .bar, .baz, .bip { a: b; } ''')<block_end><def_stmt>test_dynamic_extendee <block_start>assert_extends('.foo' '.bar {@extend #{".foo"}}' '.foo, .bar')<line_sep>assert_extends('[baz^="blip12px"]' '.bar {@extend [baz^="blip#{12px}"]}' '[baz^="blip12px"], .bar')<block_end><def_stmt>test_nested_target <block_start>assert_extends('.foo .bar' '.baz {@extend .bar}' '.foo .bar, .foo .baz')<block_end><def_stmt>test_target_with_child <block_start>assert_extends('.foo .bar' '.baz {@extend .foo}' '.foo .bar, .baz .bar')<block_end><def_stmt>test_class_unification <block_start>assert_unification('.foo.bar' '.baz {@extend .foo}' '.foo.bar, .bar.baz')<line_sep>assert_unification('.foo.baz' '.baz {@extend .foo}' '.baz')<block_end><def_stmt>test_id_unification <block_start>assert_unification('.foo.bar' '#baz {@extend .foo}' '.foo.bar, .bar#baz')<line_sep>assert_unification('.foo#baz' '#baz {@extend .foo}' '#baz')<line_sep># XXX assert_extend_doesnt_match('#bar', '.foo', :failed_to_unify, 2) do assert_unification('.foo#baz' '#bar {@extend .foo}' 
'.foo#baz')<block_end><def_stmt>test_universal_unification_with_simple_target <block_start>assert_unification('.foo' '* {@extend .foo}' '.foo, *')<line_sep>assert_unification('.foo' '*|* {@extend .foo}' '.foo, *|*')<line_sep>assert_unification('.foo.bar' '* {@extend .foo}' '.bar')<line_sep>assert_unification('.foo.bar' '*|* {@extend .foo}' '.bar')<line_sep>assert_unification('.foo.bar' 'ns|* {@extend .foo}' '.foo.bar, ns|*.bar')<block_end><def_stmt>test_universal_unification_with_namespaceless_universal_target <block_start>assert_unification('*.foo' '* {@extend .foo}' '*')<line_sep>assert_unification('*.foo' '*|* {@extend .foo}' '*')<line_sep>assert_unification('*|*.foo' '* {@extend .foo}' '*|*.foo, *')<line_sep>assert_unification('*|*.foo' '*|* {@extend .foo}' '*|*')<line_sep>assert_unification('*.foo' 'ns|* {@extend .foo}' '*.foo, ns|*')<line_sep>assert_unification('*|*.foo' 'ns|* {@extend .foo}' '*|*.foo, ns|*')<block_end><def_stmt>test_universal_unification_with_namespaced_universal_target <block_start>assert_unification('ns|*.foo' '* {@extend .foo}' 'ns|*')<line_sep>assert_unification('ns|*.foo' '*|* {@extend .foo}' 'ns|*')<line_sep># XXX assert_extend_doesnt_match('ns2|*', '.foo', :failed_to_unify, 2) do assert_unification('ns1|*.foo' 'ns2|* {@extend .foo}' 'ns1|*.foo')<line_sep>assert_unification('ns|*.foo' 'ns|* {@extend .foo}' 'ns|*')<block_end><def_stmt>test_universal_unification_with_namespaceless_element_target <block_start>assert_unification('a.foo' '* {@extend .foo}' 'a')<line_sep>assert_unification('a.foo' '*|* {@extend .foo}' 'a')<line_sep>assert_unification('*|a.foo' '* {@extend .foo}' '*|a.foo, a')<line_sep>assert_unification('*|a.foo' '*|* {@extend .foo}' '*|a')<line_sep>assert_unification('a.foo' 'ns|* {@extend .foo}' 'a.foo, ns|a')<line_sep>assert_unification('*|a.foo' 'ns|* {@extend .foo}' '*|a.foo, ns|a')<block_end><def_stmt>test_universal_unification_with_namespaced_element_target <block_start>assert_unification('ns|a.foo' '* {@extend .foo}' 'ns|a')<line_sep>assert_unification('ns|a.foo' '*|* {@extend .foo}' 'ns|a')<line_sep># XXX assert_extend_doesnt_match('ns2|*', '.foo', :failed_to_unify, 2) do assert_unification('ns1|a.foo' 'ns2|* {@extend .foo}' 'ns1|a.foo')<line_sep>assert_unification('ns|a.foo' 'ns|* {@extend .foo}' 'ns|a')<block_end><def_stmt>test_element_unification_with_simple_target <block_start>assert_unification('.foo' 'a {@extend .foo}' '.foo, a')<line_sep>assert_unification('.foo.bar' 'a {@extend .foo}' '.foo.bar, a.bar')<line_sep>assert_unification('.foo.bar' '*|a {@extend .foo}' '.foo.bar, *|a.bar')<line_sep>assert_unification('.foo.bar' 'ns|a {@extend .foo}' '.foo.bar, ns|a.bar')<block_end><def_stmt>test_element_unification_with_namespaceless_universal_target <block_start>assert_unification('*.foo' 'a {@extend .foo}' '*.foo, a')<line_sep>assert_unification('*.foo' '*|a {@extend .foo}' '*.foo, a')<line_sep>assert_unification('*|*.foo' 'a {@extend .foo}' '*|*.foo, a')<line_sep>assert_unification('*|*.foo' '*|a {@extend .foo}' '*|*.foo, *|a')<line_sep>assert_unification('*.foo' 'ns|a {@extend .foo}' '*.foo, ns|a')<line_sep>assert_unification('*|*.foo' 'ns|a {@extend .foo}' '*|*.foo, ns|a')<block_end><def_stmt>test_element_unification_with_namespaced_universal_target <block_start>assert_unification('ns|*.foo' 'a {@extend .foo}' 'ns|*.foo, ns|a')<line_sep>assert_unification('ns|*.foo' '*|a {@extend .foo}' 'ns|*.foo, ns|a')<line_sep># XXX assert_extend_doesnt_match('ns2|a', '.foo', :failed_to_unify, 2) do assert_unification('ns1|*.foo' 'ns2|a {@extend 
.foo}' 'ns1|*.foo')<line_sep>assert_unification('ns|*.foo' 'ns|a {@extend .foo}' 'ns|*.foo, ns|a')<block_end><def_stmt>test_element_unification_with_namespaceless_element_target <block_start>assert_unification('a.foo' 'a {@extend .foo}' 'a')<line_sep>assert_unification('a.foo' '*|a {@extend .foo}' 'a')<line_sep>assert_unification('*|a.foo' 'a {@extend .foo}' '*|a.foo, a')<line_sep>assert_unification('*|a.foo' '*|a {@extend .foo}' '*|a')<line_sep>assert_unification('a.foo' 'ns|a {@extend .foo}' 'a.foo, ns|a')<line_sep>assert_unification('*|a.foo' 'ns|a {@extend .foo}' '*|a.foo, ns|a')<line_sep># XXX assert_extend_doesnt_match('h1', '.foo', :failed_to_unify, 2) do assert_unification('a.foo' 'h1 {@extend .foo}' 'a.foo')<block_end><def_stmt>test_element_unification_with_namespaced_element_target <block_start>assert_unification('ns|a.foo' 'a {@extend .foo}' 'ns|a')<line_sep>assert_unification('ns|a.foo' '*|a {@extend .foo}' 'ns|a')<line_sep># XXX assert_extend_doesnt_match('ns2|a', '.foo', :failed_to_unify, 2) do assert_unification('ns1|a.foo' 'ns2|a {@extend .foo}' 'ns1|a.foo')<line_sep>assert_unification('ns|a.foo' 'ns|a {@extend .foo}' 'ns|a')<block_end><def_stmt>test_attribute_unification <block_start>assert_unification('[foo=bar].baz' '[foo=baz] {@extend .baz}' '[foo=bar].baz, [foo=bar][foo=baz]')<line_sep>assert_unification('[foo=bar].baz' '[foo^=bar] {@extend .baz}' '[foo=bar].baz, [foo=bar][foo^=bar]')<line_sep>assert_unification('[foo=bar].baz' '[foot=bar] {@extend .baz}' '[foo=bar].baz, [foo=bar][foot=bar]')<line_sep>assert_unification('[foo=bar].baz' '[ns|foo=bar] {@extend .baz}' '[foo=bar].baz, [foo=bar][ns|foo=bar]')<line_sep>assert_unification('%-a [foo=bar].bar' '[foo=bar] {@extend .bar}' '-a [foo=bar]')<block_end><def_stmt>test_pseudo_unification <block_start>assert_unification(':foo.baz' ':foo(2n+1) {@extend .baz}' ':foo.baz, :foo:foo(2n+1)')<line_sep>assert_unification(':foo.baz' '::foo {@extend .baz}' ':foo.baz, :foo::foo')<line_sep># XXX assert_extend_doesnt_match('::bar', '.baz', :failed_to_unify, 2) do assert_unification('::foo.baz' '::bar {@extend .baz}' '::foo.baz')<line_sep># XXX assert_extend_doesnt_match('::foo(2n+1)', '.baz', :failed_to_unify, 2) do assert_unification('::foo.baz' '::foo(2n+1) {@extend .baz}' '::foo.baz')<line_sep>assert_unification('::foo.baz' '::foo {@extend .baz}' '::foo')<line_sep>assert_unification('::foo(2n+1).baz' '::foo(2n+1) {@extend .baz}' '::foo(2n+1)')<line_sep>assert_unification(':foo.baz' ':bar {@extend .baz}' ':foo.baz, :foo:bar')<line_sep>assert_unification('.baz:foo' ':after {@extend .baz}' '.baz:foo, :foo:after')<line_sep>assert_unification('.baz:after' ':foo {@extend .baz}' '.baz:after, :foo:after')<line_sep>assert_unification(':foo.baz' ':foo {@extend .baz}' ':foo')<block_end><def_stmt>test_pseudoelement_remains_at_end_of_selector <block_start>assert_extends('.foo::bar' '.baz {@extend .foo}' '.foo::bar, .baz::bar')<line_sep>assert_extends('a.foo::bar' '.baz {@extend .foo}' 'a.foo::bar, a.baz::bar')<block_end><def_stmt>test_pseudoclass_remains_at_end_of_selector <block_start>assert_extends('.foo:bar' '.baz {@extend .foo}' '.foo:bar, .baz:bar')<line_sep>assert_extends('a.foo:bar' '.baz {@extend .foo}' 'a.foo:bar, a.baz:bar')<block_end><def_stmt>test_not_remains_at_end_of_selector <block_start>assert_extends('.foo:not(.bar)' '.baz {@extend .foo}' '.foo:not(.bar), .baz:not(.bar)')<block_end><def_stmt>test_pseudoelement_goes_lefter_than_pseudoclass <block_start>assert_extends('.foo::bar' '.baz:bang {@extend .foo}' '.foo::bar, 
.baz:bang::bar')<line_sep>assert_extends('.foo:bar' '.baz::bang {@extend .foo}' '.foo:bar, .baz:bar::bang')<block_end><def_stmt>test_pseudoelement_goes_lefter_than_not <block_start>assert_extends('.foo::bar' '.baz:not(.bang) {@extend .foo}' '.foo::bar, .baz:not(.bang)::bar')<line_sep>assert_extends('.foo:not(.bang)' '.baz::bar {@extend .foo}' '.foo:not(.bang), .baz:not(.bang)::bar')<block_end><def_stmt>test_negation_unification <block_start>assert_unification(':not(.foo).baz' ':not(.bar) {@extend .baz}' ':not(.foo).baz, :not(.foo):not(.bar)')<line_sep>assert_unification(':not(.foo).baz' ':not(.foo) {@extend .baz}' ':not(.foo)')<line_sep>assert_unification(':not([a=b]).baz' ':not([a = b]) {@extend .baz}' ':not([a=b])')<block_end><def_stmt>test_comma_extendee <block_start>assert_rendering('''\ .foo {a: b} .bar {c: d} .baz {@extend .foo, .bar} ''' '''\ .foo, .baz { a: b; } .bar, .baz { c: d; } ''')<block_end><def_stmt>test_redundant_selector_elimination <block_start>assert_rendering('''\ .foo.bar {a: b} .x {@extend .foo, .bar} .y {@extend .foo, .bar} ''' '''\ .foo.bar, .x, .y { a: b; } ''')<block_end>## Long Extendees <def_stmt>test_long_extendee <block_start>assert_extends('.foo.bar' '.baz {@extend .foo.bar}' '.foo.bar, .baz')<block_end><def_stmt>test_long_extendee_requires_all_selectors # XXX assert_extend_doesnt_match('.baz', '.foo.bar', :not_found, 2) do <block_start>assert_extends('.foo' '.baz {@extend .foo.bar}' '.foo')<block_end><def_stmt>test_long_extendee_matches_supersets <block_start>assert_extends('.foo.bar.bap' '.baz {@extend .foo.bar}' '.foo.bar.bap, .bap.baz')<block_end><def_stmt>test_long_extendee_runs_unification <block_start>assert_extends('ns|*.foo.bar' 'a.baz {@extend .foo.bar}' 'ns|*.foo.bar, ns|a.baz')<block_end>## Long Extenders <def_stmt>test_long_extender <block_start>assert_extends('.foo.bar' '.baz.bang {@extend .foo}' '.foo.bar, .bar.baz.bang')<block_end><def_stmt>test_long_extender_runs_unification <block_start>assert_extends('ns|*.foo.bar' 'a.baz {@extend .foo}' 'ns|*.foo.bar, ns|a.bar.baz')<block_end><def_stmt>test_long_extender_aborts_unification # XXX assert_extend_doesnt_match('h1.baz', '.foo', :failed_to_unify, 2) do <block_start>assert_extends('a.foo#bar' 'h1.baz {@extend .foo}' 'a.foo#bar')<line_sep># XXX assert_extend_doesnt_match('.bang#baz', '.foo', :failed_to_unify, 2) do assert_extends('a.foo#bar' '.bang#baz {@extend .foo}' 'a.foo#bar')<block_end>## Nested Extenders <def_stmt>test_nested_extender <block_start>assert_extends('.foo' 'foo bar {@extend .foo}' '.foo, foo bar')<block_end><def_stmt>test_nested_extender_runs_unification <block_start>assert_extends('.foo.bar' 'foo bar {@extend .foo}' '.foo.bar, foo bar.bar')<block_end><def_stmt>test_nested_extender_aborts_unification # XXX assert_extend_doesnt_match('foo bar', '.foo', :failed_to_unify, 2) do <block_start>assert_extends('baz.foo' 'foo bar {@extend .foo}' 'baz.foo')<block_end><def_stmt>test_nested_extender_alternates_parents <block_start>assert_extends('.baz .bip .foo' 'foo .grank bar {@extend .foo}' '.baz .bip .foo, .baz .bip foo .grank bar, foo .grank .baz .bip bar')<block_end><def_stmt>test_nested_extender_unifies_identical_parents <block_start>assert_extends('.baz .bip .foo' '.baz .bip bar {@extend .foo}' '.baz .bip .foo, .baz .bip bar')<block_end><def_stmt>test_nested_extender_unifies_common_substring <block_start>assert_extends('.baz .bip .bap .bink .foo' '.brat .bip .bap bar {@extend .foo}' '.baz .bip .bap .bink .foo, .baz .brat .bip .bap .bink bar, .brat .baz .bip .bap .bink 
bar')<block_end><def_stmt>test_nested_extender_unifies_common_subseq <block_start>assert_extends('.a .x .b .y .foo' '.a .n .b .m bar {@extend .foo}' '.a .x .b .y .foo, .a .x .n .b .y .m bar, .a .n .x .b .y .m bar, .a .x .n .b .m .y bar, .a .n .x .b .m .y bar')<block_end><def_stmt>test_nested_extender_chooses_first_subseq <block_start>assert_extends('.a .b .c .d .foo' '.c .d .a .b .bar {@extend .foo}' '.a .b .c .d .foo, .a .b .c .d .a .b .bar')<block_end><def_stmt>test_nested_extender_counts_extended_subselectors <block_start>assert_extends('.a .bip.bop .foo' '.b .bip .bar {@extend .foo}' '.a .bip.bop .foo, .a .b .bip.bop .bar, .b .a .bip.bop .bar')<block_end><def_stmt>test_nested_extender_counts_extended_superselectors <block_start>assert_extends('.a .bip .foo' '.b .bip.bop .bar {@extend .foo}' '.a .bip .foo, .a .b .bip.bop .bar, .b .a .bip.bop .bar')<block_end><def_stmt>test_nested_extender_with_child_selector <block_start>assert_extends('.baz .foo' 'foo > bar {@extend .foo}' '.baz .foo, .baz foo > bar')<block_end><def_stmt>test_nested_extender_finds_common_selectors_around_child_selector <block_start>assert_extends('a > b c .c1' 'a c .c2 {@extend .c1}' 'a > b c .c1, a > b c .c2')<line_sep>assert_extends('a > b c .c1' 'b c .c2 {@extend .c1}' 'a > b c .c1, a > b c .c2')<block_end><def_stmt>test_nested_extender_doesnt_find_common_selectors_around_adjacent_sibling_selector <block_start>assert_extends('a + b c .c1' 'a c .c2 {@extend .c1}' 'a + b c .c1, a + b a c .c2, a a + b c .c2')<line_sep>assert_extends('a + b c .c1' 'a b .c2 {@extend .c1}' 'a + b c .c1, a a + b c .c2')<line_sep>assert_extends('a + b c .c1' 'b c .c2 {@extend .c1}' 'a + b c .c1, a + b c .c2')<block_end><def_stmt>test_nested_extender_doesnt_find_common_selectors_around_sibling_selector <block_start>assert_extends('a ~ b c .c1' 'a c .c2 {@extend .c1}' 'a ~ b c .c1, a ~ b a c .c2, a a ~ b c .c2')<line_sep>assert_extends('a ~ b c .c1' 'a b .c2 {@extend .c1}' 'a ~ b c .c1, a a ~ b c .c2')<line_sep>assert_extends('a ~ b c .c1' 'b c .c2 {@extend .c1}' 'a ~ b c .c1, a ~ b c .c2')<block_end><def_stmt>test_nested_extender_doesnt_find_common_selectors_around_reference_selector <block_start>assert_extends('a /for/ b c .c1' 'a c .c2 {@extend .c1}' 'a /for/ b c .c1, a /for/ b a c .c2, a a /for/ b c .c2')<line_sep>assert_extends('a /for/ b c .c1' 'a b .c2 {@extend .c1}' 'a /for/ b c .c1, a a /for/ b c .c2')<line_sep>assert_extends('a /for/ b c .c1' 'b c .c2 {@extend .c1}' 'a /for/ b c .c1, a /for/ b c .c2')<block_end><def_stmt>test_nested_extender_with_early_child_selectors_doesnt_subseq_them <block_start>assert_extends('.bip > .bap .foo' '.grip > .bap .bar {@extend .foo}' '.bip > .bap .foo, .bip > .bap .grip > .bap .bar, .grip > .bap .bip > .bap .bar')<line_sep>assert_extends('.bap > .bip .foo' '.bap > .grip .bar {@extend .foo}' '.bap > .bip .foo, .bap > .bip .bap > .grip .bar, .bap > .grip .bap > .bip .bar')<block_end><def_stmt>test_nested_extender_with_child_selector_unifies <block_start>assert_extends('.baz.foo' 'foo > bar {@extend .foo}' '.baz.foo, foo > bar.baz')<line_sep>assert_rendering('''\ .baz > { .foo {a: b} .bar {@extend .foo} } ''' '''\ .baz > .foo, .baz > .bar { a: b; } ''')<line_sep>assert_rendering('''\ .foo { .bar {a: b} > .baz {@extend .bar} } ''' '''\ .foo .bar, .foo > .baz { a: b; } ''')<block_end><def_stmt>test_nested_extender_with_early_child_selectors_doesnt_subseq_them <block_start>assert_rendering('''\ .foo { .bar {a: b} .bip > .baz {@extend .bar} } ''' '''\ .foo .bar, .foo .bip > .baz { a: b; } 
''')<line_sep>assert_rendering('''\ .foo { .bip .bar {a: b} > .baz {@extend .bar} } ''' '''\ .foo .bip .bar, .foo .bip .foo > .baz { a: b; } ''')<line_sep>assert_extends('.foo > .bar' '.bip + .baz {@extend .bar}' '.foo > .bar, .foo > .bip + .baz')<line_sep>assert_extends('.foo + .bar' '.bip > .baz {@extend .bar}' '.foo + .bar, .bip > .foo + .baz')<line_sep>assert_extends('.foo > .bar' '.bip > .baz {@extend .bar}' '.foo > .bar, .bip.foo > .baz')<block_end><def_stmt>test_nested_extender_with_trailing_child_selector <block_start><with_stmt>pytest.raises(SyntaxError)# "bar > can't extend: invalid selector" <block_start>render("bar > {@extend .baz}")<block_end><block_end><def_stmt>test_nested_extender_with_sibling_selector <block_start>assert_extends('.baz .foo' 'foo + bar {@extend .foo}' '.baz .foo, .baz foo + bar')<block_end><def_stmt>test_nested_extender_with_hacky_selector <block_start>assert_extends('.baz .foo' 'foo + > > + bar {@extend .foo}' '.baz .foo, .baz foo + > > + bar, foo .baz + > > + bar')<line_sep>assert_extends('.baz .foo' '> > bar {@extend .foo}' '.baz .foo, > > .baz bar')<block_end><def_stmt>test_nested_extender_merges_with_same_selector <block_start>assert_rendering('''\ .foo { .bar {a: b} .baz {@extend .bar} } ''' '''\ .foo .bar, .foo .baz { a: b; } ''')<block_end><def_stmt>test_nested_extender_with_child_selector_merges_with_same_selector <block_start>assert_extends('.foo > .bar .baz' '.foo > .bar .bang {@extend .baz}' '.foo > .bar .baz, .foo > .bar .bang')<block_end># Combinator Unification <def_stmt>test_combinator_unification_for_hacky_combinators <block_start>assert_extends('.a > + x' '.b y {@extend x}' '.a > + x, .a .b > + y, .b .a > + y')<line_sep>assert_extends('.a x' '.b > + y {@extend x}' '.a x, .a .b > + y, .b .a > + y')<line_sep>assert_extends('.a > + x' '.b > + y {@extend x}' '.a > + x, .a .b > + y, .b .a > + y')<line_sep>assert_extends('.a ~ > + x' '.b > + y {@extend x}' '.a ~ > + x, .a .b ~ > + y, .b .a ~ > + y')<line_sep>assert_extends('.a + > x' '.b > + y {@extend x}' '.a + > x')<line_sep>assert_extends('.a + > x' '.b > + y {@extend x}' '.a + > x')<line_sep>assert_extends('.a ~ > + .b > x' '.c > + .d > y {@extend x}' '.a ~ > + .b > x, .a .c ~ > + .d.b > y, .c .a ~ > + .d.b > y')<block_end><def_stmt>test_combinator_unification_double_tilde <block_start>assert_extends('.a.b ~ x' '.a ~ y {@extend x}' '.a.b ~ x, .a.b ~ y')<line_sep>assert_extends('.a ~ x' '.a.b ~ y {@extend x}' '.a ~ x, .a.b ~ y')<line_sep>assert_extends('.a ~ x' '.b ~ y {@extend x}' '.a ~ x, .a ~ .b ~ y, .b ~ .a ~ y, .b.a ~ y')<line_sep>assert_extends('a.a ~ x' 'b.b ~ y {@extend x}' 'a.a ~ x, a.a ~ b.b ~ y, b.b ~ a.a ~ y')<block_end><def_stmt>test_combinator_unification_tilde_plus <block_start>assert_extends('.a.b + x' '.a ~ y {@extend x}' '.a.b + x, .a.b + y')<line_sep>assert_extends('.a + x' '.a.b ~ y {@extend x}' '.a + x, .a.b ~ .a + y, .a.b + y')<line_sep>assert_extends('.a + x' '.b ~ y {@extend x}' '.a + x, .b ~ .a + y, .b.a + y')<line_sep>assert_extends('a.a + x' 'b.b ~ y {@extend x}' 'a.a + x, b.b ~ a.a + y')<line_sep>assert_extends('.a.b ~ x' '.a + y {@extend x}' '.a.b ~ x, .a.b ~ .a + y, .a.b + y')<line_sep>assert_extends('.a ~ x' '.a.b + y {@extend x}' '.a ~ x, .a.b + y')<line_sep>assert_extends('.a ~ x' '.b + y {@extend x}' '.a ~ x, .a ~ .b + y, .a.b + y')<line_sep>assert_extends('a.a ~ x' 'b.b + y {@extend x}' 'a.a ~ x, a.a ~ b.b + y')<block_end><def_stmt>test_combinator_unification_angle_sibling <block_start>assert_extends('.a > x' '.b ~ y {@extend x}' '.a > x, .a > .b ~ 
y')<line_sep>assert_extends('.a > x' '.b + y {@extend x}' '.a > x, .a > .b + y')<line_sep>assert_extends('.a ~ x' '.b > y {@extend x}' '.a ~ x, .b > .a ~ y')<line_sep>assert_extends('.a + x' '.b > y {@extend x}' '.a + x, .b > .a + y')<block_end><def_stmt>test_combinator_unification_double_angle <block_start>assert_extends('.a.b > x' '.b > y {@extend x}' '.a.b > x, .b.a > y')<line_sep>assert_extends('.a > x' '.a.b > y {@extend x}' '.a > x, .a.b > y')<line_sep>assert_extends('.a > x' '.b > y {@extend x}' '.a > x, .b.a > y')<line_sep>assert_extends('a.a > x' 'b.b > y {@extend x}' 'a.a > x')<block_end><def_stmt>test_combinator_unification_double_plus <block_start>assert_extends('.a.b + x' '.b + y {@extend x}' '.a.b + x, .b.a + y')<line_sep>assert_extends('.a + x' '.a.b + y {@extend x}' '.a + x, .a.b + y')<line_sep>assert_extends('.a + x' '.b + y {@extend x}' '.a + x, .b.a + y')<line_sep>assert_extends('a.a + x' 'b.b + y {@extend x}' 'a.a + x')<block_end><def_stmt>test_combinator_unification_angle_space <block_start>assert_extends('.a.b > x' '.a y {@extend x}' '.a.b > x, .a.b > y')<line_sep>assert_extends('.a > x' '.a.b y {@extend x}' '.a > x, .a.b .a > y')<line_sep>assert_extends('.a > x' '.b y {@extend x}' '.a > x, .b .a > y')<line_sep>assert_extends('.a.b x' '.a > y {@extend x}' '.a.b x, .a.b .a > y')<line_sep>assert_extends('.a x' '.a.b > y {@extend x}' '.a x, .a.b > y')<line_sep>assert_extends('.a x' '.b > y {@extend x}' '.a x, .a .b > y')<block_end><def_stmt>test_combinator_unification_plus_space <block_start>assert_extends('.a.b + x' '.a y {@extend x}' '.a.b + x, .a .a.b + y')<line_sep>assert_extends('.a + x' '.a.b y {@extend x}' '.a + x, .a.b .a + y')<line_sep>assert_extends('.a + x' '.b y {@extend x}' '.a + x, .b .a + y')<line_sep>assert_extends('.a.b x' '.a + y {@extend x}' '.a.b x, .a.b .a + y')<line_sep>assert_extends('.a x' '.a.b + y {@extend x}' '.a x, .a .a.b + y')<line_sep>assert_extends('.a x' '.b + y {@extend x}' '.a x, .a .b + y')<block_end><def_stmt>test_combinator_unification_nested <block_start>assert_extends('.a > .b + x' '.c > .d + y {@extend x}' '.a > .b + x, .c.a > .d.b + y')<line_sep>assert_extends('.a > .b + x' '.c > y {@extend x}' '.a > .b + x, .c.a > .b + y')<block_end><def_stmt>test_combinator_unification_with_newlines <block_start>assert_rendering('''\ .a > .b + x {a: b} .c > .d + y {@extend x} ''' '''\ .a > .b + x, .c.a > .d.b + y { a: b; } ''')<block_end># Loops <def_stmt>test_extend_self_loop <block_start>assert_rendering('''\ .foo {a: b; @extend .foo} ''' '''\ .foo { a: b; } ''')<block_end><def_stmt>test_basic_extend_loop <block_start>assert_rendering('''\ .foo {a: b; @extend .bar} .bar {c: d; @extend .foo} ''' '''\ .bar, .foo { a: b; } .foo, .bar { c: d; } ''')<block_end><def_stmt>test_three_level_extend_loop <block_start>assert_rendering('''\ .foo {a: b; @extend .bar} .bar {c: d; @extend .baz} .baz {e: f; @extend .foo} ''' '''\ .baz, .bar, .foo { a: b; } .foo, .baz, .bar { c: d; } .bar, .foo, .baz { e: f; } ''')<block_end><def_stmt>test_nested_extend_loop <block_start>assert_rendering('''\ .bar { a: b; .foo {c: d; @extend .bar} } ''' '''\ .bar, .bar .foo { a: b; } .bar .foo { c: d; } ''')<block_end><def_stmt>test_multiple_extender_merges_with_superset_selector <block_start>assert_rendering('''\ .foo {@extend .bar; @extend .baz} a.bar.baz {a: b} ''' '''\ a.bar.baz, a.foo { a: b; } ''')<block_end><def_stmt>test_control_flow_if <block_start>assert_rendering('''\ .true { color: green; } .false { color: red; } .also-true { @if true { @extend .true; } @else { 
@extend .false; } } .also-false { @if false { @extend .true; } @else { @extend .false; } } ''' '''\ .true, .also-true { color: green; } .false, .also-false { color: red; } ''')<block_end><def_stmt>test_control_flow_for <block_start>assert_rendering('''\ .base-0 { color: green; } .base-1 { display: block; } .base-2 { border: 1px solid blue; } .added { @for $i from 0 to 3 { @extend .base-\#{$i}; } } ''' '''\ .base-0, .added { color: green; } .base-1, .added { display: block; } .base-2, .added { border: 1px solid blue; } ''')<block_end><def_stmt>test_control_flow_while <block_start>assert_rendering('''\ .base-0 { color: green; } .base-1 { display: block; } .base-2 { border: 1px solid blue; } .added { $i : 0; @while $i < 3 { @extend .base-\#{$i}; $i : $i + 1; } } ''' '''\ .base-0, .added { color: green; } .base-1, .added { display: block; } .base-2, .added { border: 1px solid blue; } ''')<block_end><def_stmt>test_basic_placeholder_selector <block_start>assert_extends('%foo' '.bar {@extend %foo}' '.bar')<block_end><def_stmt>test_unused_placeholder_selector <block_start>assert_rendering('''\ %foo {color: blue} %bar {color: red} .baz {@extend %foo} ''' '''\ .baz { color: blue; } ''')<block_end><def_stmt>test_placeholder_descendant_selector <block_start>assert_extends('#context %foo a' '.bar {@extend %foo}' '#context .bar a')<block_end><def_stmt>test_semi_placeholder_selector <block_start>assert_rendering('''\ #context %foo, .bar .baz {color: blue} ''' '''\ .bar .baz { color: blue; } ''')<block_end><def_stmt>test_placeholder_selector_with_multiple_extenders <block_start>assert_rendering('''\ %foo {color: blue} .bar {@extend %foo} .baz {@extend %foo} ''' '''\ .bar, .baz { color: blue; } ''')<block_end><def_stmt>test_placeholder_selector_as_modifier # XXX assert_extend_doesnt_match('div', '%foo', :failed_to_unify, 3) do <block_start>assert_rendering('''\ a%foo.baz {color: blue} .bar {@extend %foo} div {@extend %foo} ''' '''\ a.baz.bar { color: blue; } ''')<block_end><def_stmt>test_placeholder_interpolation <block_start>assert_rendering('''\ $foo: foo; %\#{$foo} {color: blue} .bar {@extend %foo} ''' '''\ .bar { color: blue; } ''')<block_end><def_stmt>test_media_in_placeholder_selector <block_start>assert_rendering('''\ %foo {bar {@media screen {a: b}}} .baz {c: d} ''' '''\ .baz { c: d; } ''')<block_end>""" def test_extend_out_of_media(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} DEPRECATION WARNING on line 3 of test_extend_out_of_media_inline.scss: @extending an outer selector from within @media is deprecated. You may only @extend selectors within the same directive. This will be an error in Sass 3.3. It can only work once @extend is supported natively in the browser. WARN .foo { a: b; } CSS .foo {a: b} @media screen { .bar {@extend .foo} } SCSS def test_extend_out_of_unknown_directive(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} DEPRECATION WARNING on line 3 of test_extend_out_of_unknown_directive_inline.scss: @extending an outer selector from within @flooblehoof is deprecated. You may only @extend selectors within the same directive. This will be an error in Sass 3.3. It can only work once @extend is supported natively in the browser. 
WARN .foo { a: b; } @flooblehoof {} CSS .foo {a: b} @flooblehoof { .bar {@extend .foo} } SCSS def test_extend_out_of_nested_directives(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} DEPRECATION WARNING on line 4 of test_extend_out_of_nested_directives_inline.scss: @extending an outer selector from within @flooblehoof is deprecated. You may only @extend selectors within the same directive. This will be an error in Sass 3.3. It can only work once @extend is supported natively in the browser. WARN @media screen { .foo { a: b; } @flooblehoof {} } CSS @media screen { .foo {a: b} @flooblehoof { .bar {@extend .foo} } } SCSS """<def_stmt>test_extend_within_media <block_start>assert_rendering('''\ @media screen { .foo {a: b} .bar {@extend .foo} } ''' '''\ @media screen { .foo, .bar { a: b; } } ''')<block_end><def_stmt>test_extend_within_unknown_directive <block_start>assert_rendering('''\ @flooblehoof { .foo {a: b} .bar {@extend .foo} } ''' '''\ @flooblehoof { .foo, .bar { a: b; } } ''')<block_end><def_stmt>test_extend_within_nested_directives <block_start>assert_rendering('''\ @media screen { @flooblehoof { .foo {a: b} .bar {@extend .foo} } } ''' '''\ @media screen { @flooblehoof { .foo, .bar { a: b; } } } ''')<block_end><def_stmt>test_extend_within_disparate_media <block_start>assert_rendering('''\ @media screen {.foo {a: b}} @media screen {.bar {@extend .foo}} ''' '''\ @media screen { .foo, .bar { a: b; } } ''')<block_end><def_stmt>test_extend_within_disparate_unknown_directive <block_start>assert_rendering('''\ @flooblehoof {.foo {a: b}} @flooblehoof {.bar {@extend .foo}} ''' '''\ @flooblehoof { .foo, .bar { a: b; } } @flooblehoof {} ''')<block_end><def_stmt>test_extend_within_disparate_nested_directives <block_start>assert_rendering('''\ @media screen {@flooblehoof {.foo {a: b}}} @media screen {@flooblehoof {.bar {@extend .foo}}} ''' '''\ @media screen { @flooblehoof { .foo, .bar { a: b; } } } @media screen { @flooblehoof {} } ''')<block_end>""" def test_extend_within_and_without_media(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} DEPRECATION WARNING on line 4 of test_extend_within_and_without_media_inline.scss: @extending an outer selector from within @media is deprecated. You may only @extend selectors within the same directive. This will be an error in Sass 3.3. It can only work once @extend is supported natively in the browser. WARN .foo { a: b; } @media screen { .foo, .bar { c: d; } } CSS .foo {a: b} @media screen { .foo {c: d} .bar {@extend .foo} } SCSS def test_extend_within_and_without_unknown_directive(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} DEPRECATION WARNING on line 4 of test_extend_within_and_without_unknown_directive_inline.scss: @extending an outer selector from within @flooblehoof is deprecated. You may only @extend selectors within the same directive. This will be an error in Sass 3.3. It can only work once @extend is supported natively in the browser. WARN .foo { a: b; } @flooblehoof { .foo, .bar { c: d; } } CSS .foo {a: b} @flooblehoof { .foo {c: d} .bar {@extend .foo} } SCSS def test_extend_within_and_without_nested_directives(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} DEPRECATION WARNING on line 5 of test_extend_within_and_without_nested_directives_inline.scss: @extending an outer selector from within @flooblehoof is deprecated. You may only @extend selectors within the same directive. This will be an error in Sass 3.3. It can only work once @extend is supported natively in the browser. 
WARN @media screen { .foo { a: b; } @flooblehoof { .foo, .bar { c: d; } } } CSS @media screen { .foo {a: b} @flooblehoof { .foo {c: d} .bar {@extend .foo} } } SCSS """<def_stmt>test_extend_with_subject_transfers_subject_to_extender <block_start>assert_rendering('''\ foo bar! baz {a: b} .bip .bap {@extend bar} ''' '''\ foo bar! baz, foo .bip .bap! baz, .bip foo .bap! baz { a: b; } ''')<line_sep>assert_rendering('''\ foo.x bar.y! baz.z {a: b} .bip .bap {@extend .y} ''' '''\ foo.x bar.y! baz.z, foo.x .bip bar.bap! baz.z, .bip foo.x bar.bap! baz.z { a: b; } ''')<block_end><def_stmt>test_extend_with_subject_retains_subject_on_target <block_start>assert_rendering('''\ .foo! .bar {a: b} .bip .bap {@extend .bar} ''' '''\ .foo! .bar, .foo! .bip .bap, .bip .foo! .bap { a: b; } ''')<block_end><def_stmt>test_extend_with_subject_transfers_subject_to_target <block_start>assert_rendering('''\ a.foo .bar {a: b} .bip .bap! {@extend .foo} ''' '''\ a.foo .bar, .bip a.bap! .bar { a: b; } ''')<block_end><def_stmt>test_extend_with_subject_retains_subject_on_extender <block_start>assert_rendering('''\ .foo .bar {a: b} .bip! .bap {@extend .bar} ''' '''\ .foo .bar, .foo .bip! .bap, .bip! .foo .bap { a: b; } ''')<block_end><def_stmt>test_extend_with_subject_fails_with_conflicting_subject <block_start>assert_rendering('''\ x! .bar {a: b} y! .bap {@extend .bar} ''' '''\ x! .bar { a: b; } ''')<block_end>""" def test_extend_warns_when_extendee_doesnt_exist(): assert_warning(<<WARN) {assert_equal("", render(<<SCSS))} WARNING on line 1 of test_extend_warns_when_extendee_doesnt_exist_inline.scss: ".foo" failed to @extend ".bar". The selector ".bar" was not found. This will be an error in future releases of Sass. Use "@extend .bar !optional" if the extend should be able to fail. WARN .foo {@extend .bar} SCSS def test_extend_warns_when_extension_fails(): assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))} WARNING on line 2 of test_extend_warns_when_extension_fails_inline.scss: "b.foo" failed to @extend ".bar". No selectors matching ".bar" could be unified with "b.foo". This will be an error in future releases of Sass. Use "@extend .bar !optional" if the extend should be able to fail. 
WARN a.bar { a: b; } CSS a.bar {a: b} b.foo {@extend .bar} SCSS def test_extend_does_not_warn_when_one_extension_fails_but_others_dont(): assert_no_warning {assert_equal(<<CSS, render(<<SCSS))} a.bar { a: b; } .bar, b.foo { c: d; } CSS a.bar {a: b} .bar {c: d} b.foo {@extend .bar} SCSS def test_extend_does_not_warn_when_one_extension_fails_but_others_dont(): assert_no_warning {assert_equal(<<CSS, render(<<SCSS))} a.bar { a: b; } .bar, b.foo { c: d; } CSS a.bar {a: b} .bar {c: d} b.foo {@extend .bar} SCSS def test_optional_extend_does_not_warn_when_extendee_doesnt_exist(): assert_no_warning {assert_equal("", render(<<SCSS))} .foo {@extend .bar !optional} SCSS def test_optional_extend_does_not_warn_when_extension_fails(): assert_no_warning {assert_equal(<<CSS, render(<<SCSS))} a.bar { a: b; } CSS a.bar {a: b} b.foo {@extend .bar !optional} SCSS """<line_sep>### Regression Tests <def_stmt>test_nested_extend_specificity <block_start>assert_rendering('''\ %foo {a: b} a { :b {@extend %foo} :b:c {@extend %foo} } ''' '''\ a :b, a :b:c { a: b; } ''')<block_end><def_stmt>test_nested_double_extend_optimization <block_start>assert_rendering('''\ %foo %bar { a: b; } .parent1 { @extend %foo; .child { @extend %bar; } } .parent2 { @extend %foo; } ''' '''\ .parent1 .child { a: b; } ''')<block_end><def_stmt>test_extend_in_double_nested_media_query <block_start>assert_rendering('''\ @media all { @media (orientation: landscape) { %foo {color: blue} .bar {@extend %foo} } } ''' '''\ @media all and (orientation: landscape) { .bar { color: blue; } } ''')<block_end>""" def test_partially_failed_extend(): assert_no_warning {assert_equal(<<CSS, render(<<SCSS))} .rc, test { color: white; } .prices span.pill span.rc { color: red; } CSS test { @extend .rc; } .rc {color: white;} .prices span.pill span.rc {color: red;} SCSS """<def_stmt>test_newline_near_combinator <block_start>assert_rendering('''\ .a + .b x {a: b} .c y {@extend x} ''' '''\ .a + .b x, .a + .b .c y, .c .a + .b y { a: b; } ''')<block_end><def_stmt>test_duplicated_selector_with_newlines <block_start>assert_rendering('''\ .example-1-1, .example-1-2, .example-1-3 { a: b; } .my-page-1 .my-module-1-1 {@extend .example-1-2} ''' '''\ .example-1-1, .example-1-2, .my-page-1 .my-module-1-1, .example-1-3 { a: b; } ''')<block_end><def_stmt>test_nested_selector_with_child_selector_hack_extendee <block_start>assert_extends('> .foo' 'foo bar {@extend .foo}' '> .foo, > foo bar')<block_end><def_stmt>test_nested_selector_with_child_selector_hack_extender <block_start>assert_extends('.foo .bar' '> foo bar {@extend .bar}' '.foo .bar, > .foo foo bar, > foo .foo bar')<block_end><def_stmt>test_nested_selector_with_child_selector_hack_extender_and_extendee <block_start>assert_extends('> .foo' '> foo bar {@extend .foo}' '> .foo, > foo bar')<block_end><def_stmt>test_nested_selector_with_child_selector_hack_extender_and_sibling_selector_extendee <block_start>assert_extends('~ .foo' '> foo bar {@extend .foo}' '~ .foo')<block_end><def_stmt>test_nested_selector_with_child_selector_hack_extender_and_extendee_and_newline <block_start>assert_rendering('''\ > .foo {a: b} flip, > foo bar {@extend .foo} ''' '''\ > .foo, > flip, > foo bar { a: b; } ''')<block_end><def_stmt>test_extended_parent_and_child_redundancy_elimination <block_start>assert_rendering('''\ a { b {a: b} c {@extend b} } d {@extend a} ''' '''\ a b, d b, a c, d c { a: b; } ''')<block_end><def_stmt>test_extend_redundancy_elimination_when_it_would_reduce_specificity <block_start>assert_extends('a' 'a.foo {@extend a}' 'a, 
a.foo')<block_end><def_stmt>test_extend_redundancy_elimination_when_it_would_preserve_specificity <block_start>assert_extends('.bar a' 'a.foo {@extend a}' '.bar a')<block_end><def_stmt>test_extend_redundancy_elimination_never_eliminates_base_selector <block_start>assert_extends('a.foo' '.foo {@extend a}' 'a.foo, .foo')<block_end><def_stmt>test_extend_cross_branch_redundancy_elimination <block_start>assert_rendering('''\ %x c %y {a: b} a, b {@extend %x} a d {@extend %y} ''' '''\ a c d, b c a d { a: b; } ''')<line_sep>assert_rendering('''\ e %z {a: b} %x c %y {@extend %z} a, b {@extend %x} a d {@extend %y} ''' '''\ e a c d, a c e d, e b c a d, b c a e d { a: b; } ''')<block_end>""" def assert_extend_doesnt_match(extender, target, reason, line, syntax = :scss): warn = "\"#{extender}\" failed to @extend \"#{target}\"." reason = if reason == :not_found "The selector \"#{target}\" was not found." else "No selectors matching \"#{target}\" could be unified with \"#{extender}\"." assert_warning(<<WARNING) {yield} WARNING on line #{line} of #{filename_for_test syntax}: #{warn} #{reason} This will be an error in future releases of Sass. Use "@extend #{target} !optional" if the extend should be able to fail. WARNING """<def_stmt>assert_unification selector extension unified# Do some trickery so the first law of extend doesn't get in our way. <block_start>assert_extends("%-a {0}".format(selector) extension+" -a {@extend %-a}" ', '.join('-a '+s<for>s unified.split(', ')))<block_end><def_stmt>assert_extends selector extension result<block_start>assert_rendering("{0} {{a: b}}\n{1}\n".format(selector extension) "{0} {{\n a: b; }}\n".format(result))<block_end>
""" Tools for working with Doc type. Docs are namedtuples that represent a file to be transformed. The `content` field of a doc contains the file contents, read as a Python string with UTF-8 encoding. Most lettersmith plugins transform Docs or iterables of Docs. For working with non-text files, images, binary files, or text files with other encodings, see `lettersmith.file` which stores the raw bytes instead of reading them into a Python string. """<import_from_stmt>pathlib PurePath Path<import_stmt>json<import_from_stmt>collections namedtuple<import_from_stmt>functools wraps<import_stmt>frontmatter<import_stmt>yaml<import_from_stmt>lettersmith.util mix<import_from_stmt>lettersmith.date read_file_times EPOCH to_datetime<import_from_stmt>lettersmith path<as>pathtools<import_from_stmt>lettersmith lens<import_from_stmt>lettersmith.lens Lens lens_compose get put key over_with update <import_from_stmt>lettersmith.func compose<line_sep>Doc=namedtuple("Doc" ("id_path" "output_path" "input_path" "created" "modified" "title" "content" "meta" "template"))<line_sep>Doc.__doc__=""" Docs are namedtuples that represent a document to be transformed, and eventually written to disk. Docs contain a content field. This is a string that typically contains the contents of the file. """<def_stmt>create id_path output_path input_path=<none> created=EPOCH modified=EPOCH title="" content="" meta=<none> template=""<block_start>""" Create a Doc tuple, populating it with sensible defaults """<line_sep><return>Doc(id_path=str(id_path) output_path=str(output_path) input_path=str(input_path)<if>input_path<is><not><none><else><none> created=to_datetime(created) modified=to_datetime(modified) title=str(title) content=str(content) meta=meta<if>meta<is><not><none><else>{} template=str(template))<block_end><def_stmt>load pathlike<block_start>""" Loads a doc namedtuple from a file path. `content` field will contain contents of file. Typically, you decorate the doc later with meta and other fields. Returns a doc. """<line_sep>file_created,file_modified=read_file_times(pathlike)<with_stmt>open(pathlike 'r')<as>f<block_start>content=f.read()<block_end>title=pathtools.to_title(pathlike)<line_sep><return>create(id_path=pathlike output_path=pathlike input_path=pathlike created=file_created modified=file_modified title=title meta={} content=content)<block_end><def_stmt>writeable doc<block_start>""" Return a writeable tuple for doc. writeable tuple is any 2-tuple of `output_path`, `bytes`. `lettersmith.write` knows how to write these tuples to disk. 
"""<line_sep><return>doc.output_path doc.content.encode()<block_end>id_path=Lens(<lambda>doc:doc.id_path <lambda>doc id_path:doc._replace(id_path=id_path))<line_sep>output_path=Lens(<lambda>doc:doc.output_path <lambda>doc output_path:doc._replace(output_path=output_path))<line_sep>ext=lens_compose(output_path pathtools.ext)<line_sep>title=Lens(<lambda>doc:doc.title <lambda>doc title:doc._replace(title=title))<line_sep>content=Lens(<lambda>doc:doc.content <lambda>doc content:doc._replace(content=content))<line_sep>created=Lens(<lambda>doc:doc.created <lambda>doc created:doc._replace(created=created))<line_sep>modified=Lens(<lambda>doc:doc.modified <lambda>doc modified:doc._replace(modified=modified))<line_sep>meta=Lens(<lambda>doc:doc.meta <lambda>doc meta:doc._replace(meta=meta))<line_sep>template=Lens(<lambda>doc:doc.template <lambda>doc template:doc._replace(template=template))<line_sep>meta_summary=lens_compose(meta key("summary" ""))<def_stmt>update_meta doc patch<block_start>""" Mix keys from `patch` into `doc.meta`. """<line_sep><return>update(meta mix doc patch)<block_end><def_stmt>with_ext_html doc<block_start>""" Set doc extension to ".html" """<line_sep><return>put(ext doc ".html")<block_end>output_tld=compose(pathtools.tld output_path.get)<line_sep>id_tld=compose(pathtools.tld id_path.get)<line_sep>_infer_template=compose(pathtools.ext_html pathtools.to_slug id_tld)<def_stmt>autotemplate doc<block_start>""" Set template based on top-level directory in doc's id_path. E.g. if top-level-directory is "posts", template gets set to "posts.html". """<if_stmt>get(template doc)<ne>""<block_start><return>doc<block_end><else_stmt><block_start><return>put(template doc _infer_template(doc))<block_end><block_end><def_stmt>with_template t<block_start>""" Set template `t`, but only if doc doesn't have one already. """<def_stmt>with_template_on_doc doc<block_start><if_stmt>get(template doc)<ne>""<block_start><return>doc<block_end><else_stmt><block_start><return>put(template doc t)<block_end><block_end><return>with_template_on_doc<block_end><def_stmt>to_json doc<block_start>""" Serialize a doc as JSON-serializable data """<line_sep><return>{"@type":"doc" "id_path":doc.id_path "output_path":doc.output_path "input_path":doc.input_path "created":doc.created.timestamp() "modified":doc.modified.timestamp() "title":doc.title "content":doc.content "meta":doc.meta "template":doc.template}<block_end><def_stmt>uplift_meta doc<block_start>""" Reads "magic" fields in the meta and uplifts their values to doc properties. We use this to uplift... - title - created - modified - permalink - template ...in the frontmatter, overriding original or default values on doc. """<line_sep><return>doc._replace(title=doc.meta.get("title" doc.title) created=to_datetime(doc.meta.get("created" doc.created)) modified=to_datetime(doc.meta.get("modified" doc.modified)) output_path=doc.meta.get("permalink" doc.output_path) template=doc.meta.get("template" ""))<block_end><class_stmt>DocException(Exception)<block_start><pass><block_end><def_stmt>annotate_exceptions func<block_start>""" Decorates a mapping function for docs, giving it a more useful exception message. 
"""<line_sep>@wraps(func)<def_stmt>func_with_annotated_exceptions doc<block_start><try_stmt><block_start><return>func(doc)<block_end><except_stmt>Exception<as>e<block_start>msg=('Error encountered while mapping doc '<concat>'"{id_path}" with {module}.{func}.').format(id_path=doc.id_path func=func.__qualname__ module=func.__module__)<line_sep><raise>DocException(msg)<from>e<block_end><block_end><return>func_with_annotated_exceptions<block_end>@annotate_exceptions<def_stmt>parse_frontmatter doc<block_start>""" Parse frontmatter as YAML. Set frontmatter on meta field, and remaining content on content field. If there is no frontmatter, will set an empty object on meta field, and leave content as-is. """<line_sep>meta,content=frontmatter.parse(doc.content)<line_sep><return>doc._replace(meta=meta content=content)<block_end>uplift_frontmatter=compose(uplift_meta parse_frontmatter)<def_stmt>renderer render<block_start>""" Create a renderer for doc content using a string rendering function. Will also annotate any exceptions that happen during rendering, transforming them into DocExceptions that will record the doc's id_path and the render function where exception occurred. Can be used as a decorator. """<line_sep><return>annotate_exceptions(over_with(content render))<block_end>
# Time: O(n) # Space: O(1) <class_stmt>Solution(object)<block_start><def_stmt>minCostToMoveChips self chips<block_start>""" :type chips: List[int] :rtype: int """<line_sep>count=[0]<times>2<for_stmt>p chips<block_start>count[p%2]<augadd>1<block_end><return>min(count)<block_end><block_end>
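# Worked example (illustrative): moving a chip by 2 is free and moving it by 1
# costs 1, so every chip can reach any position of its own parity for free and
# the answer is simply the size of the smaller parity class.
if __name__ == "__main__":
    # positions [2, 2, 2, 3, 3]: three chips on even, two on odd -> cost 2
    assert Solution().minCostToMoveChips([2, 2, 2, 3, 3]) == 2
    # positions [1, 2, 3]: move the chip at 2 next to either odd chip -> cost 1
    assert Solution().minCostToMoveChips([1, 2, 3]) == 1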
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_from_stmt>typing Callable Dict List Type<import_stmt>beanmachine.ppl.compiler.bmg_nodes<as>bn<import_from_stmt>beanmachine.ppl.compiler.bm_graph_builder BMGraphBuilder<import_from_stmt>beanmachine.ppl.compiler.error_report ErrorReport<import_from_stmt>beanmachine.ppl.compiler.fix_matrix_scale matrix_scale_fixer<import_from_stmt>beanmachine.ppl.compiler.fix_problem ancestors_first_graph_fixer fixpoint_graph_fixer GraphFixer GraphFixerResult Inapplicable node_fixer_first_match NodeFixer NodeFixerResult sequential_graph_fixer <import_from_stmt>beanmachine.ppl.compiler.sizer is_scalar Sizer<line_sep># TODO Move this to a utils module <import_from_stmt>beanmachine.ppl.compiler.support _prod<import_from_stmt>torch Size tensor<line_sep># These graph fixers turn vectorized models into unvectorized models. # For example, the model # # @rv def flip(): # return Bernoulli(tensor([0.25, 0.75])) # # which we cannot represent in BMG is rewritten into the model: # # p = tensor([0.25, 0.75]) # @rv def f0: # return Bernoulli(p[0]) # @rv def f1: # return Bernoulli(p[1]) # @functional def flip(): # return tensor([f0()), f1())]) # # which we can represent in BMG. <def_stmt>_is_fixable_size s:Size<arrow>bool<block_start>dim=len(s)<if_stmt>dim<eq>1<block_start><return>s[0]<g>1<block_end><if_stmt>dim<eq>2<block_start><return>s[0]<g>1<or>s[1]<g>1<block_end><return><false><block_end><def_stmt>_is_indexable_node sizer:Sizer n:bn.BMGNode<arrow>bool<block_start><if_stmt>type(n)<not><in>_indexable_node_types<block_start><return><false><block_end><return>_is_fixable_size(sizer[n])<block_end><def_stmt>_inputs_are_devectorizable sizer:Sizer node:bn.BMGNode<arrow>bool# For a node to be devectorizable: # * All its inputs must be either indexable or scalars. # * At least one input must be indexable. <block_start><return>all(_is_indexable_node(sizer i)<or>is_scalar(sizer[i])<for>i node.inputs)<and>any(_is_indexable_node(sizer i)<for>i node.inputs)<block_end><def_stmt>_node_to_index_list bmg:BMGraphBuilder sizer:Sizer n:bn.BMGNode<arrow>List[bn.BMGNode]<block_start>size=sizer[n]<line_sep>dim=len(size)<line_sep>index_list=[]<line_sep># This code is a little confusing because BMG uses column-major matrices # and torch uses row-major tensors. The Sizer always gives the size # that a graph node would be in *torch*, so if we have a Size([2, 3]) # matrix node, that has two rows and three columns in torch, and would # be indexed first by row and then by column. But in BMG, that would # be two columns, three rows, and indexed by column first, then row. # # The practical upshot is: if we have, say, Size([3]) OR Size([1, 3]) # then either way, we will have a one-column, three row BMG node, and # therefore we only need a single level of indexing. <if_stmt>dim<eq>0# If we have just a single value then there's no indexing required. <block_start>index_list.append(n)<block_end><elif_stmt>dim<eq>1<block_start><for_stmt>i range(0 size[0])<block_start>ci=bmg.add_constant(i)<line_sep>ni=bmg.add_index(n ci)<line_sep>index_list.append(ni)<block_end><block_end><elif_stmt>size[0]<eq>1<block_start><assert_stmt>dim<eq>2<for_stmt>i range(0 size[1])<block_start>ci=bmg.add_constant(i)<line_sep>ni=bmg.add_index(n ci)<line_sep>index_list.append(ni)<block_end><block_end><else_stmt># We need two levels of indexing. 
<block_start><assert_stmt>dim<eq>2<for_stmt>i range(0 size[0])<block_start>ci=bmg.add_constant(i)<line_sep>ni=bmg.add_index(n ci)<for_stmt>j range(0 size[1])<block_start>cj=bmg.add_constant(j)<line_sep>nij=bmg.add_index(ni cj)<line_sep>index_list.append(nij)<block_end><block_end><block_end><return>index_list<block_end><def_stmt>_generate_arglists bmg:BMGraphBuilder sizer:Sizer node:bn.BMGNode# This code is a bit tricky to understand so lets work an example. # Suppose node has two inputs, call them X and Y. X has size [3], Y has # size [2, 3], and node has size [2, 3]. <block_start>final_size=sizer[node]# Size([2, 3]) final_length=_prod(final_size)# 2 x 3 = 6 input_nodes=[_node_to_index_list(bmg sizer n)<for>n node.inputs]<line_sep># input_nodes is [ # [ Index(X, 0), Index(X, 1), Index(X, 2)], # [ Index(Index(Y, 0), 0), Index(Index(Y, 0), 1), ...] # ] index_lists=[]<line_sep># Let's now look at what happens on the FIRST loop iteration: <for_stmt>i range(len(input_nodes))<block_start>input_node=input_nodes[i]<line_sep># First time through the loop input_node is [Index(X, 0), Index(X, 1), Index(X, 2)] input_length=len(input_node)# 3 input_size=sizer[node.inputs[i]]# Size([3]) t=(tensor(range(input_length))# tensor([0, 1, 2]) .reshape(input_size)# tensor([0, 1, 2]) .broadcast_to(final_size)# tensor([[0, 1, 2], [0, 1, 2]]) .reshape(final_length)# tensor([0, 1, 2, 0, 1, 2]) .tolist()# [0, 1, 2, 0, 1, 2] )<line_sep>index_lists.append(t)<block_end># When we're done both iterations we have two lists of the same length: # [0, 1, 2, 0, 1, 2] # [0, 1, 2, 3, 4, 5] # # Now make tuples out of each column. # # [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] index_tuples=list(zip(*index_lists))<line_sep># These pairs give the elements of X and Y needed to build devectorized nodes. # Now make actual argument lists for each tuple. <return>[[input_nodes[i][index_tuple[i]]<for>i range(len(index_tuple))]<for>index_tuple index_tuples]<block_end><def_stmt>_distribution_factories bmg:BMGraphBuilder<arrow>Dict[Type Callable]# These are all the distributions that we know how to devectorize, # and the factory methods we need to use to generate a new node # of the appropriate type. # TODO: categorical # TODO: categorical logit # TODO: dirichlet <block_start><return>{bn.BernoulliLogitNode:bmg.add_bernoulli_logit bn.BernoulliNode:bmg.add_bernoulli bn.BetaNode:bmg.add_beta bn.BinomialNode:bmg.add_binomial bn.BinomialLogitNode:bmg.add_binomial_logit bn.Chi2Node:bmg.add_chi2 bn.GammaNode:bmg.add_gamma bn.HalfCauchyNode:bmg.add_halfcauchy bn.HalfNormalNode:bmg.add_halfnormal bn.NormalNode:bmg.add_normal bn.PoissonNode:bmg.add_poisson bn.StudentTNode:bmg.add_studentt bn.UniformNode:bmg.add_uniform }<block_end>_distribution_types=list(_distribution_factories(BMGraphBuilder()).keys())<def_stmt>_is_fixable_sample sizer:Sizer n:bn.BMGNode<arrow>bool<block_start><if_stmt><not>isinstance(n bn.SampleNode)<block_start><return><false><block_end>dist=n.operand<if_stmt>type(dist)<not><in>_distribution_types<block_start><return><false><block_end><if_stmt><not>_is_fixable_size(sizer[dist])<block_start><return><false><block_end># Every input must be either a scalar or indexable, # and at least one input must be indexable. 
<if_stmt><not>_inputs_are_devectorizable(sizer dist)<block_start><return><false><block_end><return><true><block_end>_indexable_node_types=[bn.ColumnIndexNode bn.ConstantTensorNode bn.IndexNode bn.MatrixMultiplicationNode bn.MatrixScaleNode bn.SampleNode bn.TensorNode bn.ToMatrixNode bn.UntypedConstantNode ]<def_stmt>_vectorized_distribution_node_fixer bmg:BMGraphBuilder sizer:Sizer<arrow>NodeFixer<block_start>distribution_factories=_distribution_factories(bmg)<def_stmt>vect_dist_fixer node:bn.BMGNode<arrow>NodeFixerResult# The graph transformation we're doing here takes graphs of the form: # # indexable --> dist --> sample --> consumer # # where the "indexable" produces a matrix, the consumer takes a matrix, # but the distribution requires scalar inputs and produces a scalar # output. # # We transform it into the graph: # # --> index[0] --> dist --> sample --> # indexable to_matrix --> consumer # --> index[1] --> dist --> sample --> # ... # # And now everyone is happy; the operators get scalars and the # consumer gets a matrix. # # # TODO: Consider optimizing distributions where the tensor elements are all # the same; if we have Bernoulli([[0.5, 0.5], [0.5, 0.5]]) then that can be # represented in BMG as an IID_SAMPLE(2,2) from Bernoulli(0.5). We could # write another fixer which makes this transformation, or we could modify # this fixer. NOTE that not all inference algorithms might support # IID_SAMPLE nodes; look into this before attempting the optimization. <block_start><if_stmt><not>_is_fixable_sample(sizer node)<block_start><return>Inapplicable<block_end><assert_stmt>isinstance(node bn.SampleNode)<line_sep>dist=node.operand<line_sep># We need to generate n new distribution and sample nodes, each of # which takes some scalar indexed from its inputs. The factory method that # builds the distribution is in the distribution factories list. # _generate_arglists constructs the arguments to that factory method. arglists=_generate_arglists(bmg sizer dist)<line_sep>samples=[]<line_sep>factory=distribution_factories[type(dist)]<for_stmt>arglist arglists<block_start>b=factory(*arglist)<line_sep>s=bmg.add_sample(b)<line_sep>samples.append(s)<block_end>size=sizer[dist]<line_sep># We now have n new operator nodes; stick them into a tensor. We then # return that tensor. The caller will retarget the input edge of the # consumer from the original operator to the tensor, and the graph is # rewritten. t=bmg.add_tensor(size *samples)<line_sep><return>t<block_end><return>vect_dist_fixer<block_end><def_stmt>_operator_factories bmg:BMGraphBuilder<arrow>Dict[Type Callable]<block_start><return>{# Note that we expect devectorization to run *before* multiary # addition/multiplication rewriting, so we can assume that # all additions and multiplications are binary. bn.AdditionNode:bmg.add_addition bn.DivisionNode:bmg.add_division bn.Exp2Node:bmg.add_exp2 bn.ExpNode:bmg.add_exp bn.ExpM1Node:bmg.add_expm1 bn.LogisticNode:bmg.add_logistic bn.Log10Node:bmg.add_log10 bn.Log1pNode:bmg.add_log1p bn.Log2Node:bmg.add_log2 bn.Log1mexpNode:bmg.add_log1mexp bn.LogNode:bmg.add_log bn.MultiplicationNode:bmg.add_multiplication bn.NegateNode:bmg.add_negate bn.PhiNode:bmg.add_phi bn.PowerNode:bmg.add_power bn.SquareRootNode:bmg.add_squareroot }<line_sep># TODO: LogSumExp, all comparisons, all bitwise, floordiv, # shifts, mod, invert. Should we devectorize "not"? 
<block_end><def_stmt>_vectorized_operator_node_fixer bmg:BMGraphBuilder sizer:Sizer<arrow>NodeFixer<block_start>operator_factories=_operator_factories(bmg)<def_stmt>_is_fixable_operator sizer:Sizer operator:bn.BMGNode<arrow>bool# * The operator must be on the list of devectorizable operators # in operator_factories above. # * The sizer must judge that the operator in its current # place in the graph produces a 1-d or 2-d tensor, not a scalar. # * Every input must be either a scalar or indexable, # * At least one input must be indexable. # * All inputs of a multiplication must be non-scalars. # (We rewrite scalar-matrix multiplications in a different fixer.) <block_start><if_stmt>type(operator)<not><in>operator_factories<block_start><return><false><block_end><if_stmt><not>_is_fixable_size(sizer[operator])<block_start><return><false><block_end><if_stmt><not>_inputs_are_devectorizable(sizer operator)<block_start><return><false><block_end><if_stmt>isinstance(operator bn.MultiplicationNode)<and><not>all(_is_indexable_node(sizer i)<for>i operator.inputs)<block_start><return><false><block_end><return><true><block_end><def_stmt>vect_op_node_fixer operator:bn.BMGNode<arrow>NodeFixerResult# The graph transformation we're doing here takes graphs of the form: # # indexable --> operator --> consumer # # where the "indexable" produces a matrix, the consumer takes a matrix, # but the BMG operator only operates on scalars. # # We transform it into the graph: # # --> index[0] --> operator --> # indexable to_matrix --> consumer # --> index[1] --> operator --> # ... # # And now everyone is happy; the operators get scalars and the # consumer gets a matrix. # # Obviously this increases the number of nodes in the graph by O(n) in # the size of the indexible matrix but until we have more vectorized BMG # operators we cannot do much better. (Also, we can often optimize away # some of the indexing operations in the arithmetic graph rewriter.) # <block_start><if_stmt><not>_is_fixable_operator(sizer operator)<block_start><return>Inapplicable<block_end># We need to generate n new operator nodes, each of which takes # some scalar indexed from its operands. The factory method that # builds those operator nodes is in the operator factories list; # _generate_arglists constructs the arguments to that factory method. arglists=_generate_arglists(bmg sizer operator)<line_sep>results=[]<line_sep>factory=operator_factories[type(operator)]<for_stmt>arglist arglists<block_start>r=factory(*arglist)<line_sep>results.append(r)<block_end>size=sizer[operator]<line_sep># We now have n new operator nodes; stick them into a tensor. We then # return that tensor. The caller will retarget the input edge of the # consumer from the original operator to the tensor, and the graph is # rewritten. t=bmg.add_tensor(size *results)<line_sep><return>t<block_end><return>vect_op_node_fixer<block_end><def_stmt>vectorized_operator_fixer bmg:BMGraphBuilder<arrow>GraphFixer<block_start><def_stmt>vop_fixer <arrow>GraphFixerResult<block_start>sizer=Sizer()<line_sep>dist_fixer=_vectorized_distribution_node_fixer(bmg sizer)<line_sep>oper_fixer=_vectorized_operator_node_fixer(bmg sizer)<line_sep>scale_fixer=matrix_scale_fixer(bmg sizer)<line_sep>node_fixer=node_fixer_first_match([dist_fixer oper_fixer scale_fixer])<line_sep>vof=ancestors_first_graph_fixer(bmg sizer node_fixer)<line_sep>made_progress,errors=vof()<line_sep># If we changed something then we might have a leaf sample node; # we can remove it. 
<if_stmt>made_progress<block_start><for_stmt>n bmg.all_nodes()<block_start><if_stmt>_is_fixable_sample(sizer n)<block_start><assert_stmt>n.is_leaf<line_sep>bmg.remove_leaf(n)<block_end><block_end><block_end><return>made_progress errors<block_end><return>vop_fixer<block_end><def_stmt>vectorized_observation_fixer bmg:BMGraphBuilder<arrow>GraphFixer<block_start><def_stmt>vobs_fixer <arrow>GraphFixerResult<block_start>made_change=<false><line_sep># We might have an illegal observation. Fix it. <for_stmt>o bmg.all_observations()<block_start>observed=o.observed<if_stmt><not>isinstance(observed bn.TensorNode)<block_start><continue><block_end><if_stmt><not>_is_fixable_size(observed._size)<block_start><continue><block_end># TODO: What if the observation is of a different size than the # tensor node we've just generated? That should be an error, but instead # we just crash here. Figure out where to put an error detection pass # which prevents this crash and reports the error. dim=len(observed._size)<if_stmt>dim<eq>1<block_start><for_stmt>i range(0 observed._size[0])<block_start>s=observed.inputs[i]<assert_stmt>isinstance(s bn.SampleNode)<line_sep>bmg.add_observation(s o.value[i])<block_end><block_end><else_stmt><block_start><assert_stmt>dim<eq>2<for_stmt>i range(0 observed._size[0])<block_start><for_stmt>j range(0 observed._size[1])<block_start>s=observed.inputs[i<times>observed._size[1]+j]<assert_stmt>isinstance(s bn.SampleNode)<line_sep>bmg.add_observation(s o.value[i][j])<block_end><block_end><block_end>bmg.remove_leaf(o)<line_sep>made_change=<true><block_end><return>made_change ErrorReport()<block_end><return>vobs_fixer<block_end><def_stmt>vectorized_model_fixer bmg:BMGraphBuilder<arrow>GraphFixer<block_start>vector_ops=vectorized_operator_fixer(bmg)<line_sep>vector_obs=vectorized_observation_fixer(bmg)<line_sep><return>fixpoint_graph_fixer(sequential_graph_fixer([vector_ops vector_obs]))<block_end>
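The index-broadcasting trick inside _generate_arglists can be illustrated with plain tensors, outside of the BMG graph builder. The sketch below mirrors the worked example in the comments (X of size [3], Y of size [2, 3]); the helper name and the stand-in sizes are assumptions for illustration only.

import torch

def broadcast_index_lists(input_sizes, final_size):
    # For each input, lay out its element indices, broadcast them to the
    # final size, and flatten. Column i of the zipped result says which
    # element of each input feeds the i-th devectorized node.
    final_length = int(torch.Size(final_size).numel())
    lists = []
    for size in input_sizes:
        n = int(torch.Size(size).numel())
        idx = torch.arange(n).reshape(size).broadcast_to(final_size).reshape(final_length)
        lists.append(idx.tolist())
    return list(zip(*lists))

# X has size [3], Y has size [2, 3]; the devectorized result has size [2, 3].
pairs = broadcast_index_lists([[3], [2, 3]], [2, 3])
assert pairs == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)]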
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<def_stmt>_get_operations_by_names graph names<block_start><return>[graph.get_operation_by_name(name)<for>name names]<block_end><def_stmt>_get_tensors_by_names graph names<block_start><return>[graph.get_tensor_by_name(name)<for>name names]<block_end><def_stmt>perform_test sess init_op summary_writer=<none> epoch=<none> feed_dict_fn=<none> feed_dict_args={} custom_tensors=[]<block_start>tf.keras.backend.set_learning_phase(<false>)<line_sep>sess.run(init_op)<if_stmt>len(custom_tensors)<eq>0# Retrieve all default tensors and operations. <block_start>graph=tf.get_default_graph()<line_sep>reset_tp,reset_fp,reset_fn,reset_tn,reset_brier,reset_auc=_get_operations_by_names(graph ['tp/reset' 'fp/reset' 'fn/reset' 'tn/reset' 'brier/reset' 'auc/reset'])<line_sep>update_tp,update_fp,update_fn,update_tn,update_brier,update_auc,brier,auc,confusion_matrix,summaries_op=_get_tensors_by_names(graph ['tp/true_positives/AssignAdd:0' 'fp/false_positives/AssignAdd:0' 'fn/false_negatives/AssignAdd:0' 'tn/true_negatives/AssignAdd:0' 'brier/mean_squared_error/update_op:0' 'auc/auc/update_op:0' 'brier/mean_squared_error/value:0' 'auc/auc/value:0' 'confusion_matrix/Cast:0' 'Merge/MergeSummary:0'])<line_sep># Reset all streaming variables. sess.run([reset_tp reset_fp reset_fn reset_tn reset_brier reset_auc])<line_sep># Create an array with tensors to run for each batch. tensors=[update_tp update_fp update_fn update_tn update_brier update_auc]<block_end><else_stmt><block_start>tensors=custom_tensors<block_end><try_stmt><block_start>batch_results=[]<while_stmt><true><block_start><if_stmt>feed_dict_fn<is><not><none><block_start>feed_dict=feed_dict_fn(**feed_dict_args)<block_end><else_stmt><block_start>feed_dict=<none><block_end># Retrieve the validation set confusion metrics. batch_results.append(sess.run(tensors feed_dict))<block_end><block_end><except_stmt>tf.errors.OutOfRangeError<block_start><pass><block_end># Yield the result if custom tensors were defined. <if_stmt>len(custom_tensors)<g>0<block_start><return>[np.vstack(x)<for>x zip(*batch_results)]<block_end># Retrieve confusion matrix and estimated roc auc score. test_conf_matrix,test_brier,test_auc,summaries=sess.run([confusion_matrix brier auc summaries_op])<line_sep># Write summary. <if_stmt>summary_writer<is><not><none><block_start>summary_writer.add_summary(summaries epoch)<block_end># Print total roc auc score for validation. print(f"Brier score: {test_brier:6.4}, AUC: {test_auc:10.8}")<line_sep># Print confusion matrix. print(f"Confusion matrix:")<line_sep>print(test_conf_matrix[0])<line_sep><return>test_auc<block_end>
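The aggregation step in the custom_tensors path above relies on zip/vstack to regroup per-batch results into per-tensor arrays; the toy arrays below are made-up stand-ins to show the reshaping, not data from any real run.

import numpy as np

# Each element of batch_results is what sess.run returned for one batch:
# one array per requested tensor. zip(*batch_results) regroups them per
# tensor across batches, and vstack concatenates along the batch axis.
batch_results = [
    [np.array([[0.1], [0.9]]), np.array([[0], [1]])],  # batch 1: (scores, labels)
    [np.array([[0.4], [0.6]]), np.array([[1], [0]])],  # batch 2: (scores, labels)
]
scores, labels = [np.vstack(x) for x in zip(*batch_results)]
assert scores.shape == (4, 1) and labels.shape == (4, 1)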
<import_stmt>requests<import_stmt>time<import_stmt>configuration<import_from_stmt>mamba description context it<import_from_stmt>expects expect be_true have_length equal be_a have_property be_none<line_sep>headers={'content-type':'application/json' 'accept':'application/json'}<with_stmt>description('Zipkin tracing functionality')<block_start><with_stmt>before.all# Read config file <block_start>configuration.setenv(self)<block_end><with_stmt>context('Deploy Zipkin and make sure port forwarded to localhost')<block_start><with_stmt>it('Bookinfo Zipkin tracing feature')<block_start><for_stmt>_ range(10)<block_start>r=requests.get(self.url)<line_sep>expect(r.status_code).to(equal(200))<block_end>r1=requests.get(self.zipkin)<line_sep>expect(r1.status_code).to(equal(200))<line_sep>expect('productpage'<in>r1.text).to(be_true)<line_sep>configuration.generate_request(self)<block_end><block_end><block_end>
<if_stmt>__name__<eq>"__main__"<block_start>f=open('val_files.txt' 'w')<for_stmt>i range(108)<block_start>f.writelines(['2011_09_26/2011_09_26_drive_0001_sync ' str(i).zfill(10) ' l\n'])<block_end>f.close()<line_sep>print('done')<block_end>
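Each line the loop above writes pairs the drive folder with a zero-padded frame index; a minimal reproduction of the formatting (the index 42 is arbitrary):

line = '2011_09_26/2011_09_26_drive_0001_sync ' + str(42).zfill(10) + ' l\n'
assert line == '2011_09_26/2011_09_26_drive_0001_sync 0000000042 l\n'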
<import_stmt>os<import_stmt>bddl<import_stmt>igibson<import_from_stmt>igibson object_states<import_from_stmt>igibson.examples.behavior behavior_demo_replay<line_sep>bddl.set_backend("iGibson")<def_stmt>robot_states_callback igbhvr_act_inst _<block_start>window1=(igbhvr_act_inst.object_scope["window.n.01_1"] "kitchen")<line_sep>window2=(igbhvr_act_inst.object_scope["window.n.01_2"] "living room")<line_sep>windows=[window1 window2]<for_stmt>window,roomname windows<block_start>print("%s window is inFOV: %r, inSameRoom: %r, inReach: %r"%(roomname window.states[object_states.InFOVOfRobot].get_value() window.states[object_states.InSameRoomAsRobot].get_value() window.states[object_states.InReachOfRobot].get_value() ))<block_end>rag=igbhvr_act_inst.object_scope["rag.n.01_1"]<line_sep>print("Rag is in hand: %r"%rag.states[object_states.InHandOfRobot].get_value())<line_sep>agent=igbhvr_act_inst.object_scope["agent.n.01_1"]<line_sep>print("Agent is in kitchen: %r, living room: %r, bedroom: %r."%(agent.states[object_states.IsInKitchen].get_value() agent.states[object_states.IsInLivingRoom].get_value() agent.states[object_states.IsInBedroom].get_value() ))<block_end><def_stmt>main <block_start>DEMO_FILE=os.path.join(igibson.ig_dataset_path "tests" "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5")<line_sep>behavior_demo_replay.replay_demo(DEMO_FILE disable_save=<true> step_callbacks=[robot_states_callback] mode="headless")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
""" Wrapper around yaml to ensure that everything is ordered correctly. This is based on the answer at http://stackoverflow.com/a/16782282 """<import_from_future_stmt> absolute_import print_function<import_from_stmt>collections OrderedDict<import_stmt>yaml<def_stmt>represent_ordereddict dumper data<block_start>value=[]<for_stmt>item_key,item_value data.items()<block_start>node_key=dumper.represent_data(item_key)<line_sep>node_value=dumper.represent_data(item_value)<line_sep>value.append((node_key node_value))<block_end><return>yaml.nodes.MappingNode(u'tag:yaml.org,2002:map' value)<block_end>yaml.add_representer(OrderedDict represent_ordereddict)<line_sep>dump=yaml.dump<line_sep>load=yaml.load<line_sep>dict=OrderedDict<line_sep>
<import_stmt>os<import_from_stmt>contextlib suppress<import_from_stmt>torchelie.hyper HyperparamSearch UniformSampler<def_stmt>beale x y<block_start><return>(1.5-x+x<times>y)<power>2+(2.25-x+x<times>y<power>2)<power>2+(2.625-x+x<times>y<power>3)<power>2<block_end><def_stmt>sphere x y<block_start><return>x<power>2+y<power>2<block_end><def_stmt>rosen x y<block_start><return>100<times>(y-x<power>2)<power>2+(1-x)<power>2<block_end>hpsearch=HyperparamSearch(x=UniformSampler(-4.5 4.5) y=UniformSampler(-4.5 4.5))<with_stmt>suppress(FileNotFoundError)<block_start>os.remove('hpsearch.json')<block_end>print(beale(3 0.5))<for_stmt>_ range(30)<block_start>hps=hpsearch.sample(algorithm='gp' target='out')<line_sep>out=-beale(**hps.params)<line_sep>print(hps '\t' out)<line_sep>hpsearch.log_result(hps {'out':out})<block_end>
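A quick sanity check of the three benchmark functions used above, re-defined here so the snippet runs standalone; since the script maximizes -beale, the search should be drawn toward Beale's global minimum at (3, 0.5).

# Known optima of the benchmark functions: beale at (3, 0.5), sphere at
# (0, 0), and the Rosenbrock function at (1, 1), each with value 0.
def beale(x, y):
    return (1.5 - x + x * y) ** 2 + (2.25 - x + x * y ** 2) ** 2 + (2.625 - x + x * y ** 3) ** 2

def sphere(x, y):
    return x ** 2 + y ** 2

def rosen(x, y):
    return 100 * (y - x ** 2) ** 2 + (1 - x) ** 2

assert abs(beale(3, 0.5)) < 1e-12
assert sphere(0, 0) == 0
assert rosen(1, 1) == 0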
<import_from_stmt>ray.rllib.utils.schedules.polynomial_schedule PolynomialSchedule<class_stmt>LinearSchedule(PolynomialSchedule)<block_start>""" Linear interpolation between `initial_p` and `final_p`. Simply uses Polynomial with power=1.0. final_p + (initial_p - final_p) * (1 - `t`/t_max) """<def_stmt>__init__ self **kwargs<block_start>super().__init__(power=1.0 **kwargs)<block_end><block_end>
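A standalone check of the formula in the docstring above; it does not import RLlib, and the 100-step horizon and the clipping of t at t_max are assumptions made only for the illustration.

# value(t) = final_p + (initial_p - final_p) * (1 - t / t_max)
def linear_value(t, t_max, initial_p, final_p):
    frac = min(t, t_max) / t_max
    return final_p + (initial_p - final_p) * (1.0 - frac)

assert abs(linear_value(0, 100, 1.0, 0.1) - 1.0) < 1e-9
assert abs(linear_value(50, 100, 1.0, 0.1) - 0.55) < 1e-9
assert abs(linear_value(100, 100, 1.0, 0.1) - 0.1) < 1e-9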