""" Serializers suitable for transports which support multiple fields per message These serializers handle moving data to/from a dictionary format. The format looks like this:: # Message metadata first. Each value is implicitly a utf8 string id: 'ZOCTLh1CEeimW3gxwcOTbg==' api_name: 'my_company.auth' procedure_name: 'check_password' return_path: 'redis+key://my_company.auth.check_password:result:ZOCTLh1CEeimW3gxwcOTbg==' # kwargs follow, each encoded with the provided encoder (in this case JSON) kw:username: '"admin"' kw:password: '"<PASSWORD>"' """<import_from_stmt>typing TYPE_CHECKING<import_from_stmt>lightbus.serializers.base decode_bytes sanity_check_metadata MessageSerializer MessageDeserializer <if_stmt>TYPE_CHECKING# pylint: disable=unused-import,cyclic-import <block_start><import_from_stmt>lightbus Message<block_end><class_stmt>ByFieldMessageSerializer(MessageSerializer)<block_start><def_stmt>__call__ self message:"Message"<arrow>dict<block_start>"""Takes a message object and returns a serialised dictionary representation See the module-level docs (above) for further details """<line_sep>serialized=message.get_metadata()<for_stmt>k,v message.get_kwargs().items()<block_start>serialized[":{}".format(k)]=self.encoder(v)<block_end><return>serialized<block_end><block_end><class_stmt>ByFieldMessageDeserializer(MessageDeserializer)<block_start><def_stmt>__call__ self serialized:dict * native_id=<none> **extra<block_start>"""Takes a dictionary of serialised fields and returns a Message object See the module-level docs (above) for further details """<line_sep>metadata={}<line_sep>kwargs={}<for_stmt>k,v serialized.items()<block_start>k=decode_bytes(k)<line_sep>v=decode_bytes(v)<if_stmt><not>k<block_start><continue><block_end># kwarg fields start with a ':', everything else is metadata <if_stmt>k[0]<eq>":"# kwarg values need decoding <block_start>kwargs[k[1:]]=self.decoder(v)<block_end><else_stmt># metadata args are implicitly strings, so we don't need to decode them <block_start>metadata[k]=v<block_end><block_end>sanity_check_metadata(self.message_class metadata)<if_stmt>"native_id"<in>metadata<block_start>native_id=metadata.pop("native_id")<block_end><return>self.message_class.from_dict(metadata=metadata kwargs=kwargs native_id=native_id **extra)<block_end><block_end>
<import_from_stmt>parsl python_app<import_from_stmt>parsl.dataflow.error DependencyError<line_sep>@python_app<def_stmt>fails <block_start><raise>ValueError("Deliberate failure")<block_end>@python_app<def_stmt>depends parent<block_start><return>1<block_end><def_stmt>test_depfail_once <block_start>"""Test the simplest dependency failure case"""<line_sep>f1=fails()<line_sep>f2=depends(f1)<assert_stmt>isinstance(f1.exception() Exception)<assert_stmt><not>isinstance(f1.exception() DependencyError)<assert_stmt>isinstance(f2.exception() DependencyError)<block_end><def_stmt>test_depfail_chain <block_start>"""Test that dependency failures chain"""<line_sep>f1=fails()<line_sep>f2=depends(f1)<line_sep>f3=depends(f2)<line_sep>f4=depends(f3)<assert_stmt>isinstance(f1.exception() Exception)<assert_stmt><not>isinstance(f1.exception() DependencyError)<assert_stmt>isinstance(f2.exception() DependencyError)<assert_stmt>isinstance(f3.exception() DependencyError)<assert_stmt>isinstance(f4.exception() DependencyError)<block_end><def_stmt>test_depfail_branches <block_start>"""Test that dependency failures propagate in the presence of multiple downstream tasks."""<line_sep>f1=fails()<line_sep>f2=depends(f1)<line_sep>f3=depends(f1)<assert_stmt>isinstance(f1.exception() Exception)<assert_stmt><not>isinstance(f1.exception() DependencyError)<assert_stmt>isinstance(f2.exception() DependencyError)<assert_stmt>isinstance(f3.exception() DependencyError)<block_end>
""" Created on Feb 2, 2016 @author: <NAME> """<import_from_future_stmt> unicode_literals absolute_import<import_from_stmt>multiprocessing Process<import_stmt>subprocess<import_stmt>unittest<import_stmt>ssl<import_stmt>os<import_stmt>sys<import_from_stmt>py4j.java_gateway JavaGateway CallbackServerParameters set_default_callback_accept_timeout GatewayParameters <import_from_stmt>py4j.tests.java_gateway_test PY4J_JAVA_PATH safe_shutdown sleep <line_sep>set_default_callback_accept_timeout(0.125)<def_stmt>start_example_tls_server <block_start>subprocess.call(["java" "-cp" PY4J_JAVA_PATH "py4j.examples.ExampleSSLApplication"])<block_end><def_stmt>start_example_tls_process <block_start>p=Process(target=start_example_tls_server)<line_sep>p.start()<line_sep>sleep()<line_sep><return>p<block_end><class_stmt>Adder(object)<block_start><def_stmt>doOperation self i j<block_start><return>i+j<block_end><class_stmt>Java<block_start>implements=["py4j.examples.Operator"]<block_end><block_end><if_stmt>sys.version_info<ge>(2 7)# ssl.SSLContext introduced in Python 2.7 <block_start><class_stmt>TestIntegration(unittest.TestCase)<block_start>"""Tests cases borrowed from other files, but executed over a TLS connection. """<def_stmt>setUp self<block_start>key_file=os.path.join(os.path.dirname(os.path.realpath(__file__)) "selfsigned.pem")<line_sep>client_ssl_context=ssl.SSLContext(ssl.PROTOCOL_TLSv1)<line_sep>client_ssl_context.verify_mode=ssl.CERT_REQUIRED<line_sep>client_ssl_context.check_hostname=<true><line_sep>client_ssl_context.load_verify_locations(cafile=key_file)<line_sep>server_ssl_context=ssl.SSLContext(ssl.PROTOCOL_TLSv1)<line_sep>server_ssl_context.load_cert_chain(key_file password='password')<line_sep>callback_server_parameters=CallbackServerParameters(ssl_context=server_ssl_context)<line_sep># address must match cert, because we're checking hostnames gateway_parameters=GatewayParameters(address='localhost' ssl_context=client_ssl_context)<line_sep>self.p=start_example_tls_process()<line_sep>self.gateway=JavaGateway(gateway_parameters=gateway_parameters callback_server_parameters=callback_server_parameters)<line_sep># It seems SecureServerSocket may need a little more time to # initialize on some platforms/slow machines. sleep(0.500)<block_end><def_stmt>tearDown self<block_start>safe_shutdown(self)<line_sep>self.p.join()<line_sep>sleep()<block_end><def_stmt>testUnicode self<block_start>sleep()<line_sep>sb=self.gateway.jvm.java.lang.StringBuffer()<line_sep>sb.append("\r\n\tHello\r\n\t")<line_sep>self.assertEqual("\r\n\tHello\r\n\t" sb.toString())<block_end><def_stmt>testMethodConstructor self<block_start>sleep()<line_sep>adder=Adder()<line_sep>oe1=self.gateway.jvm.py4j.examples.OperatorExample()<line_sep># Test method oe1.randomBinaryOperator(adder)<line_sep># Test constructor oe2=self.gateway.jvm.py4j.examples.OperatorExample(adder)<line_sep>self.assertTrue(oe2<is><not><none>)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
"""Pipeline functionality shared amongst multiple analysis types. """<import_stmt>os<import_stmt>collections<import_from_stmt>contextlib closing<import_stmt>pysam<import_from_stmt>bcbio broad<import_from_stmt>bcbio.pipeline.alignment get_genome_ref<import_from_stmt>bcbio.utils file_exists safe_makedir save_diskspace<import_from_stmt>bcbio.distributed.transaction file_transaction<line_sep># ## Split/Combine helpers <def_stmt>combine_bam in_files out_file config<block_start>"""Parallel target to combine multiple BAM files. """<line_sep>runner=broad.runner_from_config(config)<line_sep>runner.run_fn("picard_merge" in_files out_file)<for_stmt>in_file in_files<block_start>save_diskspace(in_file "Merged into {0}".format(out_file) config)<block_end>runner.run_fn("picard_index" out_file)<line_sep><return>out_file<block_end><def_stmt>process_bam_by_chromosome output_ext file_key default_targets=<none> dir_ext_fn=<none><block_start>"""Provide targets to process a BAM file by individual chromosome regions. output_ext: extension to supply to output files file_key: the key of the BAM file in the input data map default_targets: a list of extra chromosome targets to process, beyond those specified in the BAM file. Useful for retrieval of non-mapped reads. dir_ext_fn: A function to retrieve a directory naming extension from input data map. """<if_stmt>default_targets<is><none><block_start>default_targets=[]<block_end><def_stmt>_do_work data<block_start>bam_file=data[file_key]<line_sep>out_dir=os.path.dirname(bam_file)<if_stmt>dir_ext_fn<block_start>out_dir=os.path.join(out_dir dir_ext_fn(data))<block_end>out_file=os.path.join(out_dir "{base}{ext}".format(base=os.path.splitext(os.path.basename(bam_file))[0] ext=output_ext))<line_sep>part_info=[]<if_stmt><not>file_exists(out_file)<block_start>work_dir=safe_makedir("{base}-split".format(base=os.path.splitext(out_file)[0]))<with_stmt>closing(pysam.Samfile(bam_file "rb"))<as>work_bam<block_start><for_stmt>chr_ref list(work_bam.references)+default_targets<block_start>chr_out=os.path.join(work_dir "{base}-{ref}{ext}".format(base=os.path.splitext(os.path.basename(bam_file))[0] ref=chr_ref ext=output_ext))<line_sep>part_info.append((chr_ref chr_out))<block_end><block_end><block_end><return>out_file part_info<block_end><return>_do_work<block_end><def_stmt>write_nochr_reads in_file out_file<block_start>"""Write a BAM file of reads that are not on a reference chromosome. This is useful for maintaining non-mapped reads in parallel processes that split processing by chromosome. """<if_stmt><not>file_exists(out_file)<block_start><with_stmt>closing(pysam.Samfile(in_file "rb"))<as>in_bam<block_start><with_stmt>file_transaction(out_file)<as>tx_out_file<block_start><with_stmt>closing(pysam.Samfile(tx_out_file "wb" template=in_bam))<as>out_bam<block_start><for_stmt>read in_bam<block_start><if_stmt>read.tid<l>0<block_start>out_bam.write(read)<block_end><block_end><block_end><block_end><block_end><block_end><return>out_file<block_end><def_stmt>subset_bam_by_region in_file region out_file_base=<none><block_start>"""Subset BAM files based on specified chromosome region. 
"""<if_stmt>out_file_base<is><not><none><block_start>base,ext=os.path.splitext(out_file_base)<block_end><else_stmt><block_start>base,ext=os.path.splitext(in_file)<block_end>out_file="%s-subset%s%s"%(base region ext)<if_stmt><not>file_exists(out_file)<block_start><with_stmt>closing(pysam.Samfile(in_file "rb"))<as>in_bam<block_start>target_tid=in_bam.gettid(region)<assert_stmt>region<is><not><none> "Did not find reference region %s in %s"%(region in_file)<with_stmt>file_transaction(out_file)<as>tx_out_file<block_start><with_stmt>closing(pysam.Samfile(tx_out_file "wb" template=in_bam))<as>out_bam<block_start><for_stmt>read in_bam<block_start><if_stmt>read.tid<eq>target_tid<block_start>out_bam.write(read)<block_end><block_end><block_end><block_end><block_end><block_end><return>out_file<block_end><def_stmt>subset_variant_regions variant_regions region out_file<block_start>"""Return BED file subset by a specified chromosome region. variant_regions is a BED file, region is a chromosome name. """<if_stmt>region<is><none><block_start><return>variant_regions<block_end><elif_stmt>variant_regions<is><none><block_start><return>region<block_end><elif_stmt>region.find(":")<g>0<block_start><raise>ValueError("Partial chromosome regions not supported")<block_end><else_stmt># create an ordered subset file for processing <block_start>subset_file="{0}-regions.bed".format(os.path.splitext(out_file)[0])<line_sep>items=[]<with_stmt>open(variant_regions)<as>in_handle<block_start><for_stmt>line in_handle<block_start><if_stmt>line.startswith(region)<and>line.split("\t")[0]<eq>region<block_start>start=int(line.split("\t")[1])<line_sep>items.append((start line))<block_end><block_end><block_end><if_stmt>len(items)<g>0<block_start><if_stmt><not>os.path.exists(subset_file)<block_start><with_stmt>open(subset_file "w")<as>out_handle<block_start>items.sort()<for_stmt>_,line items<block_start>out_handle.write(line)<block_end><block_end><block_end><return>subset_file<block_end><else_stmt><block_start><return>region<block_end><block_end><block_end># ## Retrieving file information from configuration variables <def_stmt>configured_ref_file name config sam_ref<block_start>"""Full path to a reference file specified in the configuration. Resolves non-absolute paths relative to the base genome reference directory. """<line_sep>ref_file=config["algorithm"].get(name <none>)<if_stmt>ref_file<block_start><if_stmt><not>os.path.isabs(ref_file)<block_start>base_dir=os.path.dirname(os.path.dirname(sam_ref))<line_sep>ref_file=os.path.join(base_dir ref_file)<block_end><block_end><return>ref_file<block_end><def_stmt>configured_vrn_files config sam_ref<block_start>"""Full path to all configured files for variation assessment. """<line_sep>names=["dbsnp" "train_hapmap" "train_1000g_omni" "train_indels"]<line_sep>VrnFiles=collections.namedtuple("VrnFiles" names)<line_sep><return>apply(VrnFiles [configured_ref_file(n config sam_ref)<for>n names])<block_end><def_stmt>ref_genome_info info config dirs<block_start>"""Retrieve reference genome information from configuration variables. """<line_sep>genome_build=info.get("genome_build" <none>)<line_sep>(_ sam_ref)=get_genome_ref(genome_build config["algorithm"]["aligner"] dirs["galaxy"])<line_sep><return>genome_build sam_ref<block_end>
# Copyright 2008 Lime Nest LLC # Copyright 2008 Lime Spot LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>setuptools setup<import_stmt>hgvers<line_sep>setup(name="urllib2_kerberos" version=hgvers.version py_modules=['urllib2_kerberos'] # install_requires = ['kerberos'], author="<NAME>" author_email="<EMAIL>" description="Kerberos over HTTP Negotiate/SPNEGO support for urllib2" license="Apache 2.0" url="http://limedav.com/hg/urllib2_kerberos/" keywords="urllib2 kerberos http negotiate spnego" classifiers=['Development Status :: 3 - Alpha' 'Intended Audience :: Developers' 'License :: OSI Approved :: Apache Software License' 'Natural Language :: English' 'Operating System :: POSIX :: Linux' 'Programming Language :: Python' 'Topic :: Internet :: WWW/HTTP' 'Topic :: Software Development :: Libraries' 'Topic :: Software Development :: Libraries :: Python Modules' 'Topic :: System :: Systems Administration :: Authentication/Directory'])<line_sep>
# Copyright 2009-2011 <NAME>. # This program is distributed under the LGPL2.1 license. <import_stmt>wx<import_from_stmt>python_toolbox.wx_tools.widgets.cute_button CuteButton<class_stmt>CuteBitmapButton(wx.BitmapButton CuteButton)<block_start><def_stmt>__init__ self parent id=-1 bitmap=wx.NullBitmap pos=wx.DefaultPosition size=wx.DefaultSize style=wx.BU_AUTODRAW validator=wx.DefaultValidator name=wx.ButtonNameStr bitmap_disabled=<none> tool_tip=<none> help_text=<none><block_start>wx.BitmapButton.__init__(self parent=parent id=id bitmap=bitmap pos=pos size=size style=style validator=validator name=name)<if_stmt>bitmap_disabled<is><not><none><block_start>self.SetBitmapDisabled(bitmap_disabled)<block_end>self.set_tool_tip_and_help_text(tool_tip help_text)<block_end><block_end>
<import_from_stmt>datetime datetime<import_stmt>math<import_stmt>numpy<as>np<import_stmt>random<import_from_stmt>utils is_good is_get_good send_mail test_bern_get test_bern_post query<line_sep>FROM_GMAIL_ADDR='YOUR_GMAIL_ADDR'<line_sep>FROM_GMAIL_ACCOUNT_PASSWORD='<PASSWORD>'<line_sep>TO_EMAIL_ADDR='TO_EMAIL_ADDR'<def_stmt>check_bern from_gmail to_email from_google_account from_google_password<block_start>results=list()<line_sep># 0. raw text results.append(is_good())<line_sep># 1. pmid, json results.append(is_get_good(29446767 'json' 3 10))<line_sep># 2. pmid, pubtator results.append(is_get_good(29446767 'pubtator' 3 10))<line_sep># 3. mutiple pmid results.append(is_get_good([29446767 25681199] 'json' 4 32))<line_sep>acceptables=['success' 'tmtool error']<line_sep>problems=list()<for_stmt>ridx,r enumerate(results)<block_start><if_stmt>r<in>acceptables<block_start><continue><block_end>problems.append('{}: {}'.format(ridx r))<block_end><if_stmt>len(problems)<eq>0<block_start>print(datetime.now() 'No problem')<block_end><else_stmt><block_start>problems_total=', '.join(problems)<line_sep>print(datetime.now() 'Found' problems_total)<line_sep>send_mail(from_gmail to_email '[BERN] Error(s) {}'.format(problems_total) '\n'.join(problems) from_google_account from_google_password)<block_end><block_end><def_stmt>benchmark tries batch_size=<none> log_interval=100<block_start>mutation_times=list()<line_sep>ner_times=list()<line_sep>normalization_times=list()<line_sep>total_times=list()<line_sep>pmids=random.sample(range(0 31113013) tries)<line_sep>print('pmids[:10]' pmids[:min(10 tries)])<if_stmt>batch_size<is><not><none><block_start>batch_pmids=list()<line_sep>num_batches=math.ceil(len(pmids)/batch_size)<for_stmt>i range(num_batches)# last <block_start><if_stmt>i<eq>num_batches-1<block_start>batch_pmids.append(pmids[i<times>batch_size:])<block_end><else_stmt><block_start>batch_pmids.append(pmids[i<times>batch_size:(i+1)<times>batch_size])<block_end><block_end>pmids=batch_pmids<block_end>num_na=0<line_sep>num_not_list=0<line_sep>num_not_dict=0<line_sep>ooi_list=list()<line_sep>num_error_dict=dict()<with_stmt>open('benchmark.tsv' 'w' encoding='utf-8')<as>f<block_start><for_stmt>pidx,pmid enumerate(pmids)<block_start>res_dict_list=query(pmid)<if_stmt>type(res_dict_list)<is><not>list<block_start>print('not list' pmid sep='\t')<line_sep>num_not_list<augadd>1<line_sep><continue><block_end><if_stmt>type(res_dict_list[0])<is><not>dict<block_start>print('not dict' pmid sep='\t')<line_sep>num_not_dict<augadd>1<line_sep><continue><block_end><if_stmt>'text'<in>res_dict_list[0]<block_start><if_stmt>'out of index range'<in>res_dict_list[0]['text']<block_start>ooi_list.append(pmid)<line_sep>print('out of index range' pmid sep='\t')<block_end><elif_stmt>'BioC.key'<in>res_dict_list[0]['text']<block_start>num_na<augadd>1<line_sep># print(res_dict_list[0]['text'], pmid, sep='\t') <block_end><elif_stmt>'error: '<in>res_dict_list[0]['text']<and>'elapsed_time'<not><in>res_dict_list[0]<block_start><if_stmt>res_dict_list[0]['text']<in>num_error_dict<block_start>num_error_dict[res_dict_list[0]['text']]<augadd>1<block_end><else_stmt><block_start>num_error_dict[res_dict_list[0]['text']]=1<block_end><block_end><block_end><if_stmt>'elapsed_time'<not><in>res_dict_list[0]# print('no elapsed_time', pmid, sep='\t') 
<block_start><continue><block_end>elapsed_time_dict=res_dict_list[0]['elapsed_time']<line_sep>mutation_times.append(elapsed_time_dict['tmtool'])<line_sep>ner_times.append(elapsed_time_dict['ner'])<line_sep>normalization_times.append(elapsed_time_dict['normalization'])<line_sep>total_times.append(elapsed_time_dict['total'])<line_sep>valid_results=len(mutation_times)<if_stmt>pidx<g>0<and>(pidx+1)%log_interval<eq>0<block_start>print(datetime.now() '{}/{}'.format(pidx+1 tries) '#valid_results' valid_results '#N/A' num_na '#not_list' num_not_list '#not_dict' num_not_dict '#ooi' len(ooi_list) ooi_list '#err' num_error_dict)<block_end><if_stmt>valid_results<g>0<and>valid_results%log_interval<eq>0<block_start>print(datetime.now() '#valid_results' valid_results)<line_sep>mutation_res='\t'.join(['{:.3f}'.format(v)<for>v get_stats(mutation_times batch_size=batch_size)])<line_sep>ner_res='\t'.join(['{:.3f}'.format(v)<for>v get_stats(ner_times batch_size=batch_size)])<line_sep>normalization_res='\t'.join(['{:.3f}'.format(v)<for>v get_stats(normalization_times batch_size=batch_size)])<line_sep>total_res='\t'.join(['{:.3f}'.format(v)<for>v get_stats(total_times batch_size=batch_size)])<line_sep>print(valid_results 'mutation' mutation_res sep='\t')<line_sep>print(valid_results 'ner' ner_res sep='\t')<line_sep>print(valid_results 'normalization' normalization_res sep='\t')<line_sep>print(valid_results 'total' total_res sep='\t')<line_sep>f.write('{}\t{}\t{}\n'.format(valid_results 'mutation NER' mutation_res))<line_sep>f.write('{}\t{}\t{}\n'.format(valid_results 'NER' ner_res))<line_sep>f.write('{}\t{}\t{}\n'.format(valid_results 'normalization' normalization_res))<line_sep>f.write('{}\t{}\t{}\n'.format(valid_results 'total' total_res))<line_sep>f.flush()<block_end><block_end><block_end>print('#valid_results' len(mutation_times))<line_sep>print('mutation' '\t'.join(['{:.3f}'.format(v)<for>v get_stats(mutation_times batch_size=batch_size)]) sep='\t')<line_sep>print('ner' '\t'.join(['{:.3f}'.format(v)<for>v get_stats(ner_times batch_size=batch_size)]) sep='\t')<line_sep>print('normalization' '\t'.join(['{:.3f}'.format(v)<for>v get_stats(normalization_times batch_size=batch_size)]) sep='\t')<line_sep>print('total' '\t'.join(['{:.3f}'.format(v)<for>v get_stats(total_times batch_size=batch_size)]) sep='\t')<block_end><def_stmt>get_stats lst batch_size=<none><block_start><if_stmt><not>lst<block_start><return><none><block_end><if_stmt>batch_size<is><none><block_start><return>sum(lst)/len(lst) np.std(lst) min(lst) max(lst)<block_end><else_stmt><block_start><return>(sum(lst)/len(lst))/batch_size np.std(lst) min(lst)/batch_size max(lst)/batch_size<block_end><block_end><def_stmt>stress_test num_threads wait_seconds num_try<block_start>test_bern_get(num_threads wait_seconds num_try)<line_sep>test_bern_post('CLAPO syndrome: identification of somatic activating '<concat>'PIK3CA mutations and delineation of the natural history '<concat>'and phenotype. Purpose CLAPO syndrome is a rare vascular '<concat>'disorder characterized by capillary malformation of the '<concat>'lower lip, lymphatic malformation predominant on the face'<concat>' and neck, asymmetry and partial/generalized overgrowth. '<concat>'Here we tested the hypothesis that, although the genetic '<concat>'cause is not known, the tissue distribution of the '<concat>'clinical manifestations in CLAPO seems to follow a '<concat>'pattern of somatic mosaicism. 
Methods We clinically '<concat>'evaluated a cohort of 13 patients with CLAPO and screened'<concat>' 20 DNA blood/tissue samples from 9 patients using '<concat>'high-throughput, deep sequencing. Results We identified '<concat>'five activating mutations in the PIK3CA gene in affected '<concat>'tissues from 6 of the 9 patients studied; one of the '<concat>'variants (NM_006218.2:c.248T>C; p.Phe83Ser) has not been '<concat>'previously described in developmental disorders. '<concat>'Conclusion We describe for the first time the presence '<concat>'of somatic activating PIK3CA mutations in patients with '<concat>'CLAPO. We also report an update of the phenotype and '<concat>'natural history of the syndrome.' num_threads wait_seconds num_try)<block_end><if_stmt>__name__<eq>'__main__'<block_start>check_bern(FROM_GMAIL_ADDR TO_EMAIL_ADDR FROM_GMAIL_ADDR FROM_GMAIL_ACCOUNT_PASSWORD)<block_end>
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. """ Utility functions for experiment config classes, internal part. If you are implementing a config class for a training service, it's unlikely you will need these. """<import_stmt>dataclasses<import_stmt>importlib<import_stmt>json<import_stmt>os.path<import_from_stmt>pathlib Path<import_stmt>socket<import_stmt>typeguard<import_stmt>nni.runtime.config<import_from_stmt>.public is_missing<line_sep>__all__=['get_base_path' 'set_base_path' 'unset_base_path' 'resolve_path' 'case_insensitive' 'camel_case' 'is_instance' 'validate_type' 'is_path_like' 'guess_config_type' 'guess_list_config_type' 'training_service_config_factory' 'load_training_service_config' 'get_ipv4_address']<line_sep>## handle relative path ## _current_base_path=<none><def_stmt>get_base_path <block_start><if_stmt>_current_base_path<is><none><block_start><return>Path()<block_end><return>_current_base_path<block_end><def_stmt>set_base_path path<block_start><global>_current_base_path<assert_stmt>_current_base_path<is><none><line_sep>_current_base_path=path<block_end><def_stmt>unset_base_path <block_start><global>_current_base_path<line_sep>_current_base_path=<none><block_end><def_stmt>resolve_path path base_path<block_start><if_stmt>path<is><none><block_start><return><none><block_end># Path.resolve() does not work on Windows when file not exist, so use os.path instead path=os.path.expanduser(path)<if_stmt><not>os.path.isabs(path)<block_start>path=os.path.join(base_path path)<block_end><return>str(os.path.realpath(path))<block_end># it should be already str, but official doc does not specify it's type ## field name case convertion ## <def_stmt>case_insensitive key<block_start><return>key.lower().replace('_' '')<block_end><def_stmt>camel_case key<block_start>words=key.strip('_').split('_')<line_sep><return>words[0]+''.join(word.title()<for>word words[1:])<block_end>## type hint utils ## <def_stmt>is_instance value type_hint<block_start><try_stmt><block_start>typeguard.check_type('_' value type_hint)<block_end><except_stmt>TypeError<block_start><return><false><block_end><return><true><block_end><def_stmt>validate_type config<block_start>class_name=type(config).__name__<for_stmt>field dataclasses.fields(config)<block_start>value=getattr(config field.name)<line_sep>#check existense <if_stmt>is_missing(value)<block_start><raise>ValueError(f'{class_name}: {field.name} is not set')<block_end><if_stmt><not>is_instance(value field.type)<block_start><raise>ValueError(f'{class_name}: type of {field.name} ({repr(value)}) is not {field.type}')<block_end><block_end><block_end><def_stmt>is_path_like type_hint# only `PathLike` and `Any` accepts `Path`; check `int` to make sure it's not `Any` <block_start><return>is_instance(Path() type_hint)<and><not>is_instance(1 type_hint)<block_end>## type inference ## <def_stmt>guess_config_type obj type_hint<block_start>ret=guess_list_config_type([obj] type_hint _hint_list_item=<true>)<line_sep><return>ret[0]<if>ret<else><none><block_end><def_stmt>guess_list_config_type objs type_hint _hint_list_item=<false># avoid circular import <block_start><import_from_stmt>..base ConfigBase<import_from_stmt>..training_service TrainingServiceConfig<line_sep># because __init__ of subclasses might be complex, we first create empty objects to determine type candidate_classes=[]<for_stmt>cls _all_subclasses(ConfigBase)<block_start><if_stmt>issubclass(cls TrainingServiceConfig)# training service configs are specially handled 
<block_start><continue><block_end>empty_list=[cls.__new__(cls)]<if_stmt>_hint_list_item<block_start>good_type=is_instance(empty_list[0] type_hint)<block_end><else_stmt><block_start>good_type=is_instance(empty_list type_hint)<block_end><if_stmt>good_type<block_start>candidate_classes.append(cls)<block_end><block_end><if_stmt><not>candidate_classes# it does not accept config type <block_start><return><none><block_end><if_stmt>len(candidate_classes)<eq>1# the type is confirmed, raise error if cannot convert to this type <block_start><return>[candidate_classes[0](**obj)<for>obj objs]<block_end># multiple candidates available, call __init__ to further verify candidate_configs=[]<for_stmt>cls candidate_classes<block_start><try_stmt><block_start>configs=[cls(**obj)<for>obj objs]<block_end><except_stmt>Exception<block_start><continue><block_end>candidate_configs.append(configs)<block_end><if_stmt><not>candidate_configs<block_start><return><none><block_end><if_stmt>len(candidate_configs)<eq>1<block_start><return>candidate_configs[0]<block_end># still have multiple candidates, choose the common base class <for_stmt>base candidate_configs<block_start>base_class=type(base[0])<line_sep>is_base=all(isinstance(configs[0] base_class)<for>configs candidate_configs)<if_stmt>is_base<block_start><return>base<block_end><block_end><return><none><block_end># cannot detect the type, give up <def_stmt>_all_subclasses cls<block_start>subclasses=set(cls.__subclasses__())<line_sep><return>subclasses.union(*[_all_subclasses(subclass)<for>subclass subclasses])<block_end><def_stmt>training_service_config_factory platform<block_start>cls=_get_ts_config_class(platform)<if_stmt>cls<is><none><block_start><raise>ValueError(f'Bad training service platform: {platform}')<block_end><return>cls()<block_end><def_stmt>load_training_service_config config<block_start><if_stmt>isinstance(config dict)<and>'platform'<in>config<block_start>cls=_get_ts_config_class(config['platform'])<if_stmt>cls<is><not><none><block_start><return>cls(**config)<block_end><block_end><return>config<block_end># not valid json, don't touch <def_stmt>_get_ts_config_class platform<block_start><import_from_stmt>..training_service TrainingServiceConfig# avoid circular import # import all custom config classes so they can be found in TrainingServiceConfig.__subclasses__() custom_ts_config_path=nni.runtime.config.get_config_file('training_services.json')<with_stmt>custom_ts_config_path.open()<as>config_file<block_start>custom_ts_config=json.load(config_file)<block_end><for_stmt>custom_ts_pkg custom_ts_config.keys()<block_start>pkg=importlib.import_module(custom_ts_pkg)<line_sep>_config_class=pkg.nni_training_service_info.config_class<block_end><for_stmt>cls TrainingServiceConfig.__subclasses__()<block_start><if_stmt>cls.platform<eq>platform<block_start><return>cls<block_end><block_end><return><none><block_end>## misc ## <def_stmt>get_ipv4_address <block_start>s=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>s.connect(('192.0.2.0' 80))<line_sep>addr=s.getsockname()[0]<line_sep>s.close()<line_sep><return>addr<block_end>
""" Export native Fairseq Transformer models to protobuf/hdf5 format. Refer to the `examples/training/fairseq` directory for more training details. """<import_from_stmt>collections OrderedDict<import_stmt>torch<import_from_stmt>export.proto.transformer_pb2 Transformer<import_from_stmt>lightseq.training.ops.pytorch.export gather_token_embedding fill_pb_layer export_ls_config <import_from_stmt>lightseq.training.ops.pytorch.util get_pos_embedding<import_stmt>lightseq.inference<as>lsi<import_from_stmt>export.util parse_args save_model<line_sep>enc_layer_mapping_dict=OrderedDict({"multihead_norm_scale":"self_attn_layer_norm weight" "multihead_norm_bias":"self_attn_layer_norm bias" "multihead_project_kernel_qkv":"self_attn q_proj weight&&self_attn k_proj weight&&self_attn v_proj weight&&expression_.transpose(0, 1)" "multihead_project_bias_qkv":"self_attn q_proj bias&&self_attn k_proj bias&&self_attn v_proj bias" "multihead_project_kernel_output":"self_attn out_proj weight&&expression_.transpose(0, 1)" "multihead_project_bias_output":"self_attn out_proj bias" "ffn_norm_scale":"final_layer_norm weight" "ffn_norm_bias":"final_layer_norm bias" "ffn_first_kernel":"fc1 weight&&expression_.transpose(0, 1)" "ffn_first_bias":"fc1 bias" "ffn_second_kernel":"fc2 weight&&expression_.transpose(0, 1)" "ffn_second_bias":"fc2 bias" })<line_sep>dec_layer_mapping_dict=OrderedDict({"self_norm_scale":"self_attn_layer_norm weight" "self_norm_bias":"self_attn_layer_norm bias" "self_project_kernel_qkv":"self_attn q_proj weight&&self_attn k_proj weight&&self_attn v_proj weight&&expression_.transpose(0, 1)" "self_project_bias_qkv":"self_attn q_proj bias&&self_attn k_proj bias&&self_attn v_proj bias" "self_project_kernel_output":"self_attn out_proj weight&&expression_.transpose(0, 1)" "self_project_bias_output":"self_attn out_proj bias" "encdec_norm_scale":"encoder_attn_layer_norm weight" "encdec_norm_bias":"encoder_attn_layer_norm bias" "encdec_project_kernel_q":"encoder_attn q_proj weight&&expression_.transpose(0, 1)" "encdec_project_bias_q":"encoder_attn q_proj bias" "encdec_project_kernel_output":"encoder_attn out_proj weight&&expression_.transpose(0, 1)" "encdec_project_bias_output":"encoder_attn out_proj bias" "ffn_norm_scale":"final_layer_norm weight" "ffn_norm_bias":"final_layer_norm bias" "ffn_first_kernel":"fc1 weight&&expression_.transpose(0, 1)" "ffn_first_bias":"fc1 bias" "ffn_second_kernel":"fc2 weight&&expression_.transpose(0, 1)" "ffn_second_bias":"fc2 bias" })<line_sep>src_emb_mapping_dict=OrderedDict({"norm_scale":"layer_norm weight" "norm_bias":"layer_norm bias" })<line_sep>trg_emb_mapping_dict=OrderedDict({"norm_scale":"layer_norm weight" "norm_bias":"layer_norm bias" })<def_stmt>_get_encode_output_mapping_dict dec_layer_num<block_start>encode_output_kernel_pattern=["encoder_attn {0} k_proj weight&&encoder_attn {0} v_proj weight".format(ele)<for>ele range(dec_layer_num)]<line_sep>encode_output_bias_pattern=["encoder_attn {0} k_proj bias&&encoder_attn {0} v_proj bias".format(ele)<for>ele range(dec_layer_num)]<line_sep><return>{"encode_output_project_kernel_kv":"&&".join(encode_output_kernel_pattern+["expression_.transpose(0, 1)"]) "encode_output_project_bias_kv":"&&".join(encode_output_bias_pattern) }<block_end><def_stmt>export_native_fs_transformer model_path pb_path hdf5_path hdf5 max_step=300 bos_id=2 eos_id=2 pad_id=1 <block_start>transformer=Transformer()<line_sep># load var names reloaded=torch.load(model_path 
"cpu")<line_sep>args=reloaded["args"]<line_sep>model_dict=reloaded["model"]<line_sep>trg_emb_mapping_dict["shared_bias"]=("expression_np.zeros(%d)"%model_dict["decoder.embed_tokens.weight"].numpy().shape[0])<line_sep>encoder_state_dict={}<line_sep>decoder_state_dict={}<for_stmt>k model_dict<block_start><if_stmt>k.startswith("encoder.")<block_start>encoder_state_dict[k]=model_dict[k]<block_end><if_stmt>k.startswith("decoder.")<block_start>decoder_state_dict[k]=model_dict[k]<block_end><block_end>dec_var_name_list=list(decoder_state_dict.keys())<line_sep>enc_var_name_list=list(encoder_state_dict.keys())<line_sep>enc_tensor_names={}<for_stmt>name enc_var_name_list<block_start>name_split=name.split(".")<if_stmt>len(name_split)<le>2<or><not>name_split[2].isdigit()<block_start><continue><block_end>layer_id=int(name_split[2])<line_sep>enc_tensor_names.setdefault(layer_id []).append(name)<block_end><for_stmt>layer_id sorted(enc_tensor_names.keys())<block_start>fill_pb_layer(enc_tensor_names[layer_id] encoder_state_dict transformer.encoder_stack.add() enc_layer_mapping_dict )<block_end># fill each decoder layer's params dec_tensor_names={}<for_stmt>name dec_var_name_list<block_start>name_split=name.split(".")<if_stmt>len(name_split)<le>2<or><not>name.split(".")[2].isdigit()<block_start><continue><block_end>layer_id=int(name.split(".")[2])<line_sep>dec_tensor_names.setdefault(layer_id []).append(name)<block_end><for_stmt>layer_id sorted(dec_tensor_names.keys())<block_start>fill_pb_layer(dec_tensor_names[layer_id] decoder_state_dict transformer.decoder_stack.add() dec_layer_mapping_dict )<block_end>fill_pb_layer(enc_var_name_list encoder_state_dict transformer.src_embedding src_emb_mapping_dict )<line_sep># encoder token embedding src_tb,_=gather_token_embedding(enc_var_name_list encoder_state_dict "embed_tokens")<line_sep>transformer.src_embedding.token_embedding[:]=src_tb.flatten().tolist()<line_sep># encoder position embedding pos_emb=<none><if_stmt>"encoder.embed_positions.weight"<in>encoder_state_dict<block_start>pos_emb=encoder_state_dict["encoder.embed_positions.weight"].numpy()<line_sep>transformer.src_embedding.position_embedding[:]=pos_emb.flatten().tolist()<block_end><else_stmt><block_start>pos_emb=get_pos_embedding(max_step+pad_id+1 src_tb.shape[-1]).numpy()<line_sep>pos_emb_list=(pos_emb[pad_id+1:max_step+pad_id+1 :].reshape([-1]).tolist())<line_sep>transformer.src_embedding.position_embedding[:]=pos_emb_list<block_end>print("encoder.embed_positions.weight -> src_embedding.position_embedding, shape: {}, conversion finished!".format(pos_emb.shape))<line_sep># fill trg_embedding encode_output_mapping_dict=_get_encode_output_mapping_dict(len(dec_tensor_names))<line_sep>trg_emb_mapping_dict.update(encode_output_mapping_dict)<line_sep>fill_pb_layer(dec_var_name_list decoder_state_dict transformer.trg_embedding trg_emb_mapping_dict )<line_sep># decoder token embedding trg_tb,_=gather_token_embedding(dec_var_name_list decoder_state_dict "embed_tokens")<line_sep>transformer.trg_embedding.token_embedding[:]=trg_tb.transpose().flatten().tolist()<line_sep>print("token_embedding.weight -> trg_embedding.token_embedding, shape: {}, conversion finished!".format(trg_tb.transpose().shape))<line_sep># decoder position embedding 
pos_emb=<none><if_stmt>"decoder.embed_positions.weight"<in>decoder_state_dict<block_start>pos_emb=decoder_state_dict["decoder.embed_positions.weight"].numpy()<line_sep>transformer.trg_embedding.position_embedding[:]=pos_emb.flatten().tolist()<block_end><else_stmt><block_start>pos_emb=get_pos_embedding(max_step+pad_id+1 trg_tb.shape[-1]).numpy()<line_sep>pos_emb_list=(pos_emb[pad_id+1:max_step+pad_id+1 :].reshape([-1]).tolist())<line_sep>transformer.trg_embedding.position_embedding[:]=pos_emb_list<block_end>print("decoder.embed_positions.weight -> trg_embedding.position_embedding, shape: {}, conversion finished!".format(pos_emb.shape))<line_sep># fill in conf export_ls_config(transformer args.encoder_attention_heads pad_id bos_id eos_id args.encoder_layers args.decoder_layers save_pb=<true> )<line_sep>save_path=save_model(transformer pb_path hdf5_path hdf5)<line_sep><return>save_path<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_args()<line_sep>model_name=".".join(args.model.split(".")[:-1])<line_sep>pb_path=f"{model_name}.pb"<line_sep>hdf5_path=f"{model_name}.hdf5"<line_sep>path=export_native_fs_transformer(args.model pb_path hdf5_path args.hdf5)<line_sep>src=[[63 47 65 1507 88 74 10 2057 362 9 284 6 2 1 1 1]]<line_sep>model=lsi.Transformer(path 8)<line_sep>output=model.infer(src)<line_sep># Expected result: [23, 550, 34, 118, 148, 2939, 4, 42, 32, 37, 6, 224, 10, 179, 5, 2] print("results:" output)<block_end>
# -*- coding: utf-8 -*- """ Logic handling user specific input forms such as logins and registration. """<import_from_stmt>flask_wtf FlaskForm<import_from_stmt>wtforms TextField PasswordField BooleanField<import_from_stmt>flask_wtf.recaptcha RecaptchaField<import_from_stmt>wtforms.validators Required EqualTo Email<class_stmt>LoginForm(FlaskForm)<block_start>email=TextField('Email address' [Required() Email()])<line_sep>password=PasswordField('Password' [Required()])<block_end><class_stmt>RegisterForm(FlaskForm)<block_start>username=TextField('NickName' [Required()])<line_sep>email=TextField('Email address' [Required() Email()])<line_sep>password=PasswordField('Password' [Required()])<line_sep>confirm=PasswordField('Repeat Password' [Required() EqualTo('password' message='Passwords must match')])<line_sep>accept_tos=BooleanField('I accept the Terms of Service.' [Required()])<line_sep>recaptcha=RecaptchaField()<block_end>
# Copyright Pincer 2021-Present # Full MIT License can be found in `LICENSE` at the project root. <import_from_future_stmt> annotations<import_from_stmt>dataclasses dataclass<import_from_stmt>typing TYPE_CHECKING<import_from_stmt>...utils.api_object APIObject<import_from_stmt>...utils.types MISSING<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>typing Optional<import_from_stmt>...utils.types APINullable<import_from_stmt>...utils.snowflake Snowflake<block_end>@dataclass(repr=<false>)<class_stmt>Attachment(APIObject)<block_start>"""Represents a Discord Attachment object Attributes ---------- id: :class:`~pincer.utils.snowflake.Snowflake` Attachment id filename: :class:`str` Name of file attached content_type: :class:`int` The attachment's data type size: :class:`str` Size of file in bytes url: :class:`str` Source url of file proxy_url: APINullable[:class:`str`] A proxied url of file height: APINullable[Optional[:class:`int`]] Height of file (if image) width: APINullable[Optional[:class:`int`]] Width of file (if image) """<line_sep>id:Snowflake<line_sep>filename:str<line_sep>size:int<line_sep>url:str<line_sep>proxy_url:str<line_sep>content_type:APINullable[str]=MISSING<line_sep>height:APINullable[Optional[int]]=MISSING<line_sep>width:APINullable[Optional[int]]=MISSING<block_end>
<import_stmt>asyncio<import_stmt>contextlib<import_from_stmt>pathlib Path<import_from_stmt>typing Iterator Optional<import_stmt>pytest<import_from_stmt>_pytest.monkeypatch MonkeyPatch<import_from_stmt>pytest_subprocess FakeProcess<import_from_stmt>prisma Client<import_from_stmt>prisma.utils temp_env_update<import_from_stmt>prisma.binaries platform<import_from_stmt>prisma.binaries BINARIES ENGINE_VERSION<import_from_stmt>prisma.engine errors utils<import_from_stmt>prisma.engine.query QueryEngine<import_from_stmt>prisma._compat get_running_loop<import_from_stmt>.utils Testdir<line_sep>QUERY_ENGINE=next(# pragma: no branch b<for>b BINARIES<if>b.name<eq>'query-engine')<line_sep>@contextlib.contextmanager<def_stmt>no_event_loop <arrow>Iterator[<none>]<block_start><try_stmt><block_start>current:Optional[asyncio.AbstractEventLoop]=get_running_loop()<block_end><except_stmt>RuntimeError<block_start>current=<none><block_end><try_stmt><block_start>asyncio.set_event_loop(<none>)<line_sep><yield><block_end><finally_stmt><block_start>asyncio.set_event_loop(current)<block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_engine_connects <arrow><none><block_start>"""Can connect to engine"""<line_sep>db=Client()<line_sep><await>db.connect()<with_stmt>pytest.raises(errors.AlreadyConnectedError)<block_start><await>db.connect()<block_end><await>db.disconnect()<block_end><def_stmt>test_stopping_engine_on_closed_loop <arrow><none><block_start>"""Stopping the engine with no event loop available does not raise an error"""<with_stmt>no_event_loop()<block_start>engine=QueryEngine(dml='')<line_sep>engine.stop()<block_end><block_end><def_stmt>test_engine_binary_does_not_exist monkeypatch:MonkeyPatch<arrow><none><block_start>"""No query engine binary found raises an error"""<def_stmt>mock_exists path:Path<arrow>bool<block_start><return><false><block_end>monkeypatch.setattr(Path 'exists' mock_exists raising=<true>)<with_stmt>pytest.raises(errors.BinaryNotFoundError)<as>exc<block_start>utils.ensure()<block_end><assert_stmt>exc.match(r'Expected .* or .* but neither were found\.\nTry running prisma py fetch')<block_end><def_stmt>test_mismatched_version_error fake_process:FakeProcess<arrow><none><block_start>"""Mismatched query engine versions raises an error"""<line_sep>fake_process.register_subprocess([QUERY_ENGINE.path '--version'] # type: ignore[list-item] stdout='query-engine unexpected-hash' )<with_stmt>pytest.raises(errors.MismatchedVersionsError)<as>exc<block_start>utils.ensure()<block_end><assert_stmt>exc.match(f'Expected query engine version `{ENGINE_VERSION}` but got `unexpected-hash`')<block_end><def_stmt>test_ensure_local_path testdir:Testdir fake_process:FakeProcess<arrow><none><block_start>"""Query engine in current directory required to be the expected version"""<line_sep>fake_engine=testdir.path/platform.check_for_extension(f'prisma-query-engine-{platform.binary_platform()}')<line_sep>fake_engine.touch()<line_sep>fake_process.register_subprocess([fake_engine '--version'] # type: ignore[list-item] stdout='query-engine a-different-hash' )<with_stmt>pytest.raises(errors.MismatchedVersionsError)<block_start>path=utils.ensure()<block_end>fake_process.register_subprocess([fake_engine '--version'] # type: ignore[list-item] stdout=f'query-engine {ENGINE_VERSION}' )<line_sep>path=utils.ensure()<assert_stmt>path<eq>fake_engine<block_end><def_stmt>test_ensure_env_override testdir:Testdir fake_process:FakeProcess<arrow><none><block_start>"""Query engine path in environment variable can be any 
version"""<line_sep>fake_engine=testdir.path/'my-query-engine'<line_sep>fake_engine.touch()<line_sep>fake_process.register_subprocess([fake_engine '--version'] # type: ignore[list-item] stdout='query-engine a-different-hash' )<with_stmt>temp_env_update({'PRISMA_QUERY_ENGINE_BINARY':str(fake_engine)})<block_start>path=utils.ensure()<block_end><assert_stmt>path<eq>fake_engine<block_end><def_stmt>test_ensure_env_override_does_not_exist <arrow><none><block_start>"""Query engine path in environment variable not found raises an error"""<with_stmt>temp_env_update({'PRISMA_QUERY_ENGINE_BINARY':'foo'})<block_start><with_stmt>pytest.raises(errors.BinaryNotFoundError)<as>exc<block_start>utils.ensure()<block_end><block_end><assert_stmt>exc.match(r'PRISMA_QUERY_ENGINE_BINARY was provided, but no query engine was found at foo')<block_end>
<import_stmt>csv<import_stmt>os<line_sep>""" evaluates results by calculating: - ICR(pairwise intercoder reliability AB, BC, AC) - GP(precision_generalized) Results are global and per category """<line_sep>filename='evaluation_data_how.csv'<line_sep># change csv column index, if necessary here category_index=2<line_sep>coder_a_index=5<line_sep>coder_b_index=6<line_sep>coder_c_index=7<line_sep># measure_agreement function to keep code more readable <def_stmt>measure_agreement a b<block_start><if_stmt>a<eq>b<block_start><return>1<block_end><else_stmt><block_start><return>0<block_end><block_end># convert ICR rating from 0 to 2 to GP scala 0 - 1 # (done on purpose in easy to read way, aka without normalization ) <def_stmt>to_precision_generalized a<block_start><if_stmt>a<eq>0# not relevant: <block_start><return>0<block_end><elif_stmt>a<eq>1# partial relevant <block_start><return>0.5<block_end><else_stmt># relevant <block_start><return>1<block_end><block_end><with_stmt>open(os.path.dirname(__file__)+'/'+filename 'r')<as>csvfile<block_start>reader=csv.reader(csvfile)<line_sep>is_header=<true><line_sep>ICR=0<line_sep>ICR_cat={}<line_sep>generalized_precision=0<line_sep>generalized_precision_cat={}<line_sep>aggrement=[]<for_stmt>line reader<block_start><if_stmt>is_header<block_start>is_header=<false><block_end><else_stmt><block_start>category=line[category_index]<line_sep>coder_a=int(line[coder_a_index])<line_sep>coder_b=int(line[coder_b_index])<line_sep>coder_c=int(line[coder_c_index])<line_sep># measure pairwise agreement AB, AC, CB ab=measure_agreement(coder_a coder_b)<line_sep>ac=measure_agreement(coder_a coder_c)<line_sep>cb=measure_agreement(coder_c coder_b)<line_sep># measure agreement of the pairs # inter-rater reliability is based on agreement between pairs of raters. line_agreement=(ab+ac+cb)/3<line_sep># irc global ICR=ICR+line_agreement<line_sep># irc per category ICR_cat[category]=ICR_cat.get(category 0)+line_agreement<line_sep># gp global tmp_gp=to_precision_generalized(coder_a)+to_precision_generalized(coder_b)+to_precision_generalized(coder_c)<line_sep>generalized_precision=generalized_precision+tmp_gp<line_sep># gp per category generalized_precision_cat[category]=generalized_precision_cat.get(category 0)+tmp_gp<line_sep># saved, for possible output aggrement.append((category ab ac cb line_agreement tmp_gp))<block_end><block_end>line_count=len(aggrement)<line_sep>cat_count=len(ICR_cat)<line_sep>line_count_cat=line_count/cat_count<line_sep># for GP: summarize all ratings dividing by the number of all ratings rating_count=line_count<times>3# each doc was rated by 3 coder rating_count_cat=rating_count/cat_count<line_sep># output print('Global ICR: '+str(ICR/line_count))<line_sep>print('Global GP: '+str(generalized_precision/rating_count))<for_stmt>cat ICR_cat<block_start>val=ICR_cat[cat]<line_sep>print(cat+' ICR: '+str(val/line_count_cat))<line_sep>val=generalized_precision_cat[cat]<line_sep>print(cat+' GP: '+str(val/rating_count_cat))<block_end><block_end>
r""" Visualization of normal modes from an elastic network model =========================================================== The *elastic network model* (ENM) is a fast method to estimate movements in a protein structure, without the need to run time-consuming MD simulations. A protein is modelled as *mass-and-spring* model, with the masses being the :math:`C_\alpha` atoms and the springs being the non-covalent bonds between adjacent residues. Via *normal mode analysis* distinct movements/oscillations can be extracted from the model. An *anisotropic network model* (ANM), is an ENM that includes directional information. Hence, the normal mode analysis yields eigenvectors, where each atom is represented by three vector components (*x*, *y*, *z*). Thus these vectors can be used for 3D representation. In the case of this example a normal mode analysis on an ANM was already conducted. This script merely takes the structure and obtained eigenvectors to add a smooth oscillation of the chosen normal mode to the structure. The newly created structure has multiple models, where each model depicts a different time in the oscillation period. Then the multi-model structure can be used to create a video of the oscillation using a molecular visualization program. The file containing the eigenvectors can be downloaded via this :download:`link </examples/download/glycosylase_anm_vectors.csv>`. """<line_sep># Code source: <NAME> # License: BSD 3 clause <import_from_stmt>tempfile NamedTemporaryFile<import_from_stmt>os.path join<import_stmt>numpy<as>np<import_from_stmt>numpy newaxis<import_stmt>biotite.structure<as>struc<import_stmt>biotite.structure.io<as>strucio<import_stmt>biotite.structure.io.mmtf<as>mmtf<import_stmt>biotite.database.rcsb<as>rcsb<line_sep># A CSV file containing the eigenvectors for the CA atoms VECTOR_FILE="../../download/glycosylase_anm_vectors.csv"<line_sep># The corresponding structure PDB_ID="1MUG"<line_sep># The normal mode to be visualized # '-1' is the last (and most significant) one MODE=-1<line_sep># The amount of frames (models) per oscillation FRAMES=60<line_sep># The maximum oscillation amplitude for an atom # (The length of the ANM's eigenvectors make only sense when compared # relative to each other, the absolute values have no significance) MAX_AMPLITUDE=5<line_sep># Load structure mmtf_file=mmtf.MMTFFile.read(rcsb.fetch(PDB_ID "mmtf"))<line_sep>structure=mmtf.get_structure(mmtf_file model=1)<line_sep># Filter first peptide chain protein_chain=structure[struc.filter_amino_acids(structure)&(structure.chain_id<eq>structure.chain_id[0])]<line_sep># Filter CA atoms ca=protein_chain[protein_chain.atom_name<eq>"CA"]<line_sep># Load eigenvectors for CA atoms # The first axis indicates the mode, # the second axis indicates the vector component vectors=np.loadtxt(VECTOR_FILE delimiter=",").transpose()<line_sep># Discard the last 6 modes, as these are movements of the entire system: # A system with N atoms has only 3N - 6 degrees of freedom # ^^^ vectors=vectors[:-6]<line_sep># Extract vectors for given mode and reshape to (n,3) array mode_vectors=vectors[MODE].reshape((-1 3))<line_sep># Rescale, so that the largest vector has the length 'MAX_AMPLITUDE' vector_lenghts=np.sqrt(np.sum(mode_vectors<power>2 axis=-1))<line_sep>scale=MAX_AMPLITUDE/np.max(vector_lenghts)<line_sep>mode_vectors<augmul>scale<line_sep># Stepwise application of eigenvectors as smooth sine oscillation time=np.linspace(0 2<times>np.pi FRAMES endpoint=<false>)<line_sep>deviation=np.sin(time)[: newaxis 
newaxis]<times>mode_vectors<line_sep># Apply oscillation of CA atom to all atoms in the corresponding residue oscillation=np.zeros((FRAMES len(protein_chain) 3))<line_sep>residue_starts=struc.get_residue_starts(protein_chain # The last array element will be the length of the atom array, # i.e. no valid index add_exclusive_stop=<true>)<for_stmt>i range(len(residue_starts)-1)<block_start>res_start=residue_starts[i]<line_sep>res_stop=residue_starts[i+1]<line_sep>oscillation[: res_start:res_stop :]=protein_chain.coord[res_start:res_stop :]+deviation[: i:i+1 :]<block_end># An atom array stack containing all frames oscillating_structure=struc.from_template(protein_chain oscillation)<line_sep># Save as PDB for rendering a video with PyMOL temp=NamedTemporaryFile(suffix=".pdb")<line_sep>strucio.save_structure(temp.name oscillating_structure)<line_sep># biotite_static_image = normal_modes.gif temp.close()<line_sep>
""" Negating an image with math_img =============================== The goal of this example is to illustrate the use of the function :func:`nilearn.image.math_img` on T-maps. We compute a negative image by multiplying its voxel values with -1. """<import_from_stmt>nilearn datasets plotting image<line_sep>############################################################################### # Retrieve the data: the localizer dataset with contrast maps. motor_images=datasets.fetch_neurovault_motor_task()<line_sep>stat_img=motor_images.images[0]<line_sep>############################################################################### # Multiply voxel values by -1. negative_stat_img=image.math_img("-img" img=stat_img)<line_sep>plotting.plot_stat_map(stat_img cut_coords=(36 -27 66) threshold=3 title="t-map" vmax=9)<line_sep>plotting.plot_stat_map(negative_stat_img cut_coords=(36 -27 66) threshold=3 title="Negative t-map" vmax=9)<line_sep>plotting.show()<line_sep>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['GetUserAssignedIdentityResult' 'AwaitableGetUserAssignedIdentityResult' 'get_user_assigned_identity' 'get_user_assigned_identity_output' ]<line_sep>@pulumi.output_type<class_stmt>GetUserAssignedIdentityResult<block_start>""" A collection of values returned by getUserAssignedIdentity. """<def_stmt>__init__ __self__ client_id=<none> id=<none> location=<none> name=<none> principal_id=<none> resource_group_name=<none> tags=<none> tenant_id=<none><block_start><if_stmt>client_id<and><not>isinstance(client_id str)<block_start><raise>TypeError("Expected argument 'client_id' to be a str")<block_end>pulumi.set(__self__ "client_id" client_id)<if_stmt>id<and><not>isinstance(id str)<block_start><raise>TypeError("Expected argument 'id' to be a str")<block_end>pulumi.set(__self__ "id" id)<if_stmt>location<and><not>isinstance(location str)<block_start><raise>TypeError("Expected argument 'location' to be a str")<block_end>pulumi.set(__self__ "location" location)<if_stmt>name<and><not>isinstance(name str)<block_start><raise>TypeError("Expected argument 'name' to be a str")<block_end>pulumi.set(__self__ "name" name)<if_stmt>principal_id<and><not>isinstance(principal_id str)<block_start><raise>TypeError("Expected argument 'principal_id' to be a str")<block_end>pulumi.set(__self__ "principal_id" principal_id)<if_stmt>resource_group_name<and><not>isinstance(resource_group_name str)<block_start><raise>TypeError("Expected argument 'resource_group_name' to be a str")<block_end>pulumi.set(__self__ "resource_group_name" resource_group_name)<if_stmt>tags<and><not>isinstance(tags dict)<block_start><raise>TypeError("Expected argument 'tags' to be a dict")<block_end>pulumi.set(__self__ "tags" tags)<if_stmt>tenant_id<and><not>isinstance(tenant_id str)<block_start><raise>TypeError("Expected argument 'tenant_id' to be a str")<block_end>pulumi.set(__self__ "tenant_id" tenant_id)<block_end>@[email protected](name="clientId")<def_stmt>client_id self<arrow>str<block_start>""" The Client ID of the User Assigned Identity. """<line_sep><return>pulumi.get(self "client_id")<block_end>@[email protected]<def_stmt>id self<arrow>str<block_start>""" The provider-assigned unique ID for this managed resource. """<line_sep><return>pulumi.get(self "id")<block_end>@[email protected]<def_stmt>location self<arrow>str<block_start>""" The Azure location where the User Assigned Identity exists. """<line_sep><return>pulumi.get(self "location")<block_end>@[email protected]<def_stmt>name self<arrow>str<block_start><return>pulumi.get(self "name")<block_end>@[email protected](name="principalId")<def_stmt>principal_id self<arrow>str<block_start>""" The Service Principal ID of the User Assigned Identity. """<line_sep><return>pulumi.get(self "principal_id")<block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>str<block_start><return>pulumi.get(self "resource_group_name")<block_end>@[email protected]<def_stmt>tags self<arrow>Mapping[str str]<block_start>""" A mapping of tags assigned to the User Assigned Identity. 
"""<line_sep><return>pulumi.get(self "tags")<block_end>@[email protected](name="tenantId")<def_stmt>tenant_id self<arrow>str<block_start>""" The Tenant ID of the User Assigned Identity. """<line_sep><return>pulumi.get(self "tenant_id")<block_end><block_end><class_stmt>AwaitableGetUserAssignedIdentityResult(GetUserAssignedIdentityResult)# pylint: disable=using-constant-test <block_start><def_stmt>__await__ self<block_start><if_stmt><false><block_start><yield>self<block_end><return>GetUserAssignedIdentityResult(client_id=self.client_id id=self.id location=self.location name=self.name principal_id=self.principal_id resource_group_name=self.resource_group_name tags=self.tags tenant_id=self.tenant_id)<block_end><block_end><def_stmt>get_user_assigned_identity name:Optional[str]=<none> resource_group_name:Optional[str]=<none> opts:Optional[pulumi.InvokeOptions]=<none><arrow>AwaitableGetUserAssignedIdentityResult<block_start>""" Use this data source to access information about an existing User Assigned Identity. ## Example Usage ### Reference An Existing) ```python import pulumi import pulumi_azure as azure example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity", resource_group_name="name_of_resource_group") pulumi.export("uaiClientId", example.client_id) pulumi.export("uaiPrincipalId", example.principal_id) pulumi.export("uaiTenantId", example.tenant_id) ``` :param str name: The name of the User Assigned Identity. :param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists. """<line_sep>__args__=dict()<line_sep>__args__['name']=name<line_sep>__args__['resourceGroupName']=resource_group_name<if_stmt>opts<is><none><block_start>opts=pulumi.InvokeOptions()<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end>__ret__=pulumi.runtime.invoke('azure:authorization/getUserAssignedIdentity:getUserAssignedIdentity' __args__ opts=opts typ=GetUserAssignedIdentityResult).value<line_sep><return>AwaitableGetUserAssignedIdentityResult(client_id=__ret__.client_id id=__ret__.id location=__ret__.location name=__ret__.name principal_id=__ret__.principal_id resource_group_name=__ret__.resource_group_name tags=__ret__.tags tenant_id=__ret__.tenant_id)<block_end>@_utilities.lift_output_func(get_user_assigned_identity)<def_stmt>get_user_assigned_identity_output name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> opts:Optional[pulumi.InvokeOptions]=<none><arrow>pulumi.Output[GetUserAssignedIdentityResult]<block_start>""" Use this data source to access information about an existing User Assigned Identity. ## Example Usage ### Reference An Existing) ```python import pulumi import pulumi_azure as azure example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity", resource_group_name="name_of_resource_group") pulumi.export("uaiClientId", example.client_id) pulumi.export("uaiPrincipalId", example.principal_id) pulumi.export("uaiTenantId", example.tenant_id) ``` :param str name: The name of the User Assigned Identity. :param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists. """<line_sep><ellipsis><block_end>
""" Interface for system identification data (Actuator and Drives). """<import_from_future_stmt> print_function<import_stmt>os<import_stmt>sys<import_stmt>zipfile<import_stmt>warnings<import_stmt>numpy<as>np<import_stmt>scipy.io<as>sio<import_from_stmt>six.moves urllib<import_from_stmt>six.moves cPickle<as>pkl<line_sep>SOURCE_URLS={'actuator':'https://www.cs.cmu.edu/~mshediva/data/actuator.mat' 'drives':'https://www.cs.cmu.edu/~mshediva/data/NonlinearData.zip' }<def_stmt>maybe_download data_path dataset_name verbose=1<block_start>source_url=SOURCE_URLS[dataset_name]<line_sep>datadir_path=os.path.join(data_path 'sysid')<line_sep>dataset_path=os.path.join(datadir_path dataset_name+'.mat')<line_sep># Create directories (if necessary) <if_stmt><not>os.path.isdir(datadir_path)<block_start>os.makedirs(datadir_path)<block_end># Download & extract the data (if necessary) <if_stmt><not>os.path.isfile(dataset_path)<block_start><if_stmt>dataset_name<eq>'actuator'<block_start>urllib.request.urlretrieve(source_url dataset_path)<block_end><if_stmt>dataset_name<eq>'drives'<block_start><assert_stmt>source_url.endswith('.zip')<line_sep>archive_path=os.path.join(datadir_path 'tmp.zip')<line_sep>urllib.request.urlretrieve(source_url archive_path)<with_stmt>zipfile.ZipFile(archive_path 'r')<as>zfp<block_start>zfp.extract('DATAPRBS.MAT' datadir_path)<block_end>os.rename(os.path.join(datadir_path 'DATAPRBS.MAT') dataset_path)<line_sep>os.remove(archive_path)<block_end><if_stmt>verbose<block_start>print("Successfully downloaded `%s` dataset from %s."%(dataset_name source_url))<block_end><block_end><return>dataset_path<block_end><def_stmt>load_data dataset_name t_step=1 start=0. stop=100. use_targets=<true> batch_size=<none> verbose=1<block_start>"""Load the system identification data. Arguments: ---------- t_step : uint (default: 1) Take data points t_step apart from each other in time. start : float in [0., 100.) (default: 0.) stop : float in (0., 100.] (default: 100.) use_targets : bool (default: True) batch_size : uint or None (default: None) verbose : uint (default: 1) """<if_stmt>dataset_name<not><in>{'actuator' 'drives'}<block_start><raise>ValueError("Unknown dataset: %s"%dataset_name)<block_end><if_stmt>'DATA_PATH'<not><in>os.environ<block_start>warnings.warn("Cannot find DATA_PATH variable in the environment. "<concat>"Using <current_working_directory>/data/ instead.")<line_sep>DATA_PATH=os.path.join(os.getcwd() 'data')<block_end><else_stmt><block_start>DATA_PATH=os.environ['DATA_PATH']<block_end>dataset_path=maybe_download(DATA_PATH dataset_name verbose=verbose)<if_stmt><not>os.path.exists(dataset_path)<block_start><raise>Exception("Cannot find data: %s"%dataset_path)<block_end><if_stmt>verbose<block_start>sys.stdout.write('Loading data...')<line_sep>sys.stdout.flush()<block_end>data_mat=sio.loadmat(dataset_path)<if_stmt>dataset_name<eq>'actuator'<block_start>X,Y=data_mat['u'] data_mat['p']<block_end><if_stmt>dataset_name<eq>'drives'<block_start>X,Y=data_mat['u1'] data_mat['z1']<block_end>start=int((start/100.)<times>len(X))<line_sep>stop=int((stop/100.)<times>len(X))<line_sep>X=X[start:stop:t_step :]<line_sep>Y=Y[start:stop:t_step :]<if_stmt>use_targets<block_start>X=np.hstack([X Y])<block_end><if_stmt>batch_size<block_start>nb_examples=(len(X)<floordiv>batch_size)<times>batch_size<line_sep>X=X[:nb_examples]<line_sep>Y=Y[:nb_examples]<block_end><if_stmt>verbose<block_start>sys.stdout.write('Done.\n')<line_sep>print('# of loaded points: %d'%len(X))<block_end><return>X Y<block_end>
<import_stmt>sublime<import_stmt>sublime_plugin<import_stmt>logging<line_sep>LOG=<false><line_sep>TRACE=9<line_sep>BASIC_FORMAT="[%(asctime)s - %(levelname)s - %(filename)s %(funcName)s] %(message)s"<line_sep>logging.addLevelName(TRACE "TRACE")<class_stmt>CustomLogger(logging.Logger)<block_start><def_stmt>isEnabledFor self level<block_start><if_stmt><not>LOG<block_start><return><block_end><return>level<ge>self.getEffectiveLevel()<block_end><def_stmt>trace self msg="" *args **kwargs<block_start><if_stmt>self.isEnabledFor(TRACE)<block_start>self._log(TRACE msg args **kwargs)<block_end><block_end><block_end><def_stmt>getLogger name level=logging.DEBUG<block_start>log=CustomLogger(name level)<line_sep># Set stream handler h=logging.StreamHandler()<line_sep>h.setFormatter(logging.Formatter(BASIC_FORMAT))<line_sep>log.addHandler(h)<line_sep><return>log<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkcbn.endpoint endpoint_data<class_stmt>ListTransitRouterRouteTablesRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'Cbn' '2017-09-12' 'ListTransitRouterRouteTables' 'cbn')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_ResourceOwnerId self<block_start><return>self.get_query_params().get('ResourceOwnerId')<block_end><def_stmt>set_ResourceOwnerId self ResourceOwnerId<block_start>self.add_query_param('ResourceOwnerId' ResourceOwnerId)<block_end><def_stmt>get_TransitRouterRouteTableNamess self<block_start><return>self.get_query_params().get('TransitRouterRouteTableNames')<block_end><def_stmt>set_TransitRouterRouteTableNamess self TransitRouterRouteTableNamess<block_start><for_stmt>depth1 range(len(TransitRouterRouteTableNamess))<block_start><if_stmt>TransitRouterRouteTableNamess[depth1]<is><not><none><block_start>self.add_query_param('TransitRouterRouteTableNames.'+str(depth1+1) TransitRouterRouteTableNamess[depth1])<block_end><block_end><block_end><def_stmt>get_TransitRouterRouteTableType self<block_start><return>self.get_query_params().get('TransitRouterRouteTableType')<block_end><def_stmt>set_TransitRouterRouteTableType self TransitRouterRouteTableType<block_start>self.add_query_param('TransitRouterRouteTableType' TransitRouterRouteTableType)<block_end><def_stmt>get_TransitRouterRouteTableStatus self<block_start><return>self.get_query_params().get('TransitRouterRouteTableStatus')<block_end><def_stmt>set_TransitRouterRouteTableStatus self TransitRouterRouteTableStatus<block_start>self.add_query_param('TransitRouterRouteTableStatus' TransitRouterRouteTableStatus)<block_end><def_stmt>get_TransitRouterRouteTableIdss self<block_start><return>self.get_query_params().get('TransitRouterRouteTableIds')<block_end><def_stmt>set_TransitRouterRouteTableIdss self TransitRouterRouteTableIdss<block_start><for_stmt>depth1 range(len(TransitRouterRouteTableIdss))<block_start><if_stmt>TransitRouterRouteTableIdss[depth1]<is><not><none><block_start>self.add_query_param('TransitRouterRouteTableIds.'+str(depth1+1) TransitRouterRouteTableIdss[depth1])<block_end><block_end><block_end><def_stmt>get_NextToken self<block_start><return>self.get_query_params().get('NextToken')<block_end><def_stmt>set_NextToken self NextToken<block_start>self.add_query_param('NextToken' NextToken)<block_end><def_stmt>get_ResourceOwnerAccount 
self<block_start><return>self.get_query_params().get('ResourceOwnerAccount')<block_end><def_stmt>set_ResourceOwnerAccount self ResourceOwnerAccount<block_start>self.add_query_param('ResourceOwnerAccount' ResourceOwnerAccount)<block_end><def_stmt>get_OwnerAccount self<block_start><return>self.get_query_params().get('OwnerAccount')<block_end><def_stmt>set_OwnerAccount self OwnerAccount<block_start>self.add_query_param('OwnerAccount' OwnerAccount)<block_end><def_stmt>get_OwnerId self<block_start><return>self.get_query_params().get('OwnerId')<block_end><def_stmt>set_OwnerId self OwnerId<block_start>self.add_query_param('OwnerId' OwnerId)<block_end><def_stmt>get_TransitRouterId self<block_start><return>self.get_query_params().get('TransitRouterId')<block_end><def_stmt>set_TransitRouterId self TransitRouterId<block_start>self.add_query_param('TransitRouterId' TransitRouterId)<block_end><def_stmt>get_MaxResults self<block_start><return>self.get_query_params().get('MaxResults')<block_end><def_stmt>set_MaxResults self MaxResults<block_start>self.add_query_param('MaxResults' MaxResults)<block_end><block_end>
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>typing NamedTuple<line_sep># from kfp.components import InputPath, OutputPath # An example of how the model eval info could be used to make decisions about whether or not # to deploy the model. <def_stmt>eval_metrics metrics:str thresholds:str<arrow>NamedTuple('Outputs' [('deploy' str)])<block_start><import_stmt>json<import_stmt>logging<def_stmt>regression_threshold_check metrics_info# ... <block_start><for_stmt>k,v thresholds_dict.items()<block_start>logging.info('k {}, v {}'.format(k v))<if_stmt>k<in>['root_mean_squared_error' 'mae']<block_start><if_stmt>metrics_info[k][-1]<g>v<block_start>logging.info('{} > {}; returning False'.format(metrics_info[k][0] v))<line_sep><return>('False' )<block_end><block_end><block_end><return>('deploy' )<block_end>logging.getLogger().setLevel(logging.INFO)# TODO: make level configurable thresholds_dict=json.loads(thresholds)<line_sep>logging.info('thresholds dict: {}'.format(thresholds_dict))<line_sep>logging.info('metrics: %s' metrics)<line_sep>metrics_dict=json.loads(metrics)<line_sep>logging.info("got metrics info: %s" metrics_dict)<line_sep>res=regression_threshold_check(metrics_dict)<line_sep>logging.info('deploy decision: %s' res)<line_sep><return>res<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>kfp<line_sep>kfp.components.func_to_container_op(eval_metrics output_component_file='../../eval_metrics_component.yaml' base_image='gcr.io/deeplearning-platform-release/tf2-cpu.2-3:latest')<block_end>
<import_stmt>json<import_from_stmt>zerver.lib.test_classes WebhookTestCase<class_stmt>JsonHookTests(WebhookTestCase)<block_start>STREAM_NAME="json"<line_sep>URL_TEMPLATE="/api/v1/external/json?api_key={api_key}&stream={stream}"<line_sep>WEBHOOK_DIR_NAME="json"<def_stmt>test_json_github_push__1_commit_message self<arrow><none><block_start>""" Tests if json github push 1 commit is handled correctly """<with_stmt>open("zerver/webhooks/json/fixtures/json_github_push__1_commit.json")<as>f<block_start>original_fixture=json.load(f)<block_end>expected_topic="JSON"<line_sep>expected_message="""```json {original_fixture} ```""".format(original_fixture=json.dumps(original_fixture indent=2))<line_sep>self.check_webhook("json_github_push__1_commit" expected_topic expected_message)<block_end><def_stmt>test_json_pingdom_http_up_to_down_message self<arrow><none><block_start>""" Tests if json pingdom http up to down is handled correctly """<with_stmt>open("zerver/webhooks/json/fixtures/json_pingdom_http_up_to_down.json")<as>f<block_start>original_fixture=json.load(f)<block_end>expected_topic="JSON"<line_sep>expected_message="""```json {original_fixture} ```""".format(original_fixture=json.dumps(original_fixture indent=2))<line_sep>self.check_webhook("json_pingdom_http_up_to_down" expected_topic expected_message)<block_end><def_stmt>test_json_sentry_event_for_exception_js_message self<arrow><none><block_start>""" Tests if json sentry event for exception js is handled correctly """<with_stmt>open("zerver/webhooks/json/fixtures/json_sentry_event_for_exception_js.json")<as>f<block_start>original_fixture=json.load(f)<block_end>expected_topic="JSON"<line_sep>expected_message="""```json {original_fixture} ```""".format(original_fixture=json.dumps(original_fixture indent=2))<line_sep>self.check_webhook("json_sentry_event_for_exception_js" expected_topic expected_message)<block_end><block_end>
# -*- coding: utf-8 -*- # # SPDX-FileCopyrightText: © 2018 The glucometerutils Authors # SPDX-License-Identifier: MIT """Tests for the common routines."""<line_sep># pylint: disable=protected-access,missing-docstring <import_stmt>datetime<import_stmt>construct<import_from_stmt>absl.testing absltest<import_from_stmt>glucometerutils.support construct_extras<line_sep>_TEST_DATE1=datetime.datetime(1970 1 2 0 0)<line_sep>_TEST_DATE2=datetime.datetime(1971 1 1 0 0)<line_sep>_TEST_DATE3=datetime.datetime(1970 1 1 0 0)<line_sep>_NEW_EPOCH=31536000# datetime.datetime(1971, 1, 1, 0, 0) <class_stmt>TestTimestamp(absltest.TestCase)<block_start><def_stmt>test_build_unix_epoch self<block_start>self.assertEqual(construct_extras.Timestamp(construct.Int32ul).build(_TEST_DATE1) b"\x80\x51\x01\x00" )<block_end><def_stmt>test_parse_unix_epoch self<block_start>self.assertEqual(construct_extras.Timestamp(construct.Int32ul).parse(b"\x803\xe1\x01") _TEST_DATE2 )<block_end><def_stmt>test_build_custom_epoch self<block_start>self.assertEqual(construct_extras.Timestamp(construct.Int32ul epoch=_NEW_EPOCH).build(_TEST_DATE2) b"\x00\x00\x00\x00" )<block_end><def_stmt>test_parse_custom_epoch self<block_start>self.assertEqual(construct_extras.Timestamp(construct.Int32ul epoch=_NEW_EPOCH).parse(b"\x00\x00\x00\x00") _TEST_DATE2 )<block_end><def_stmt>test_build_custom_epoch_negative_failure self<block_start><with_stmt>self.assertRaises(construct.core.FormatFieldError)<block_start>construct_extras.Timestamp(construct.Int32ul epoch=_NEW_EPOCH).build(_TEST_DATE1)<block_end><block_end><def_stmt>test_build_custom_epoch_negative_success self<block_start>self.assertEqual(construct_extras.Timestamp(construct.Int32sl epoch=_NEW_EPOCH).build(_TEST_DATE1) b"\x00\x1e\x20\xfe" )<block_end><def_stmt>test_build_varint self<block_start>self.assertEqual(construct_extras.Timestamp(construct.VarInt).build(_TEST_DATE3) b"\x00")<block_end><def_stmt>test_invalid_value self<block_start><with_stmt>self.assertRaises(AssertionError)<block_start>construct_extras.Timestamp(construct.Int32ul).build("foo")<block_end><block_end><block_end>
# Add taintlib to PATH so it can be imported during runtime without any hassle <import_stmt>sys<import_stmt>os<line_sep>sys.path.append(os.path.dirname(os.path.dirname((__file__))))<import_from_stmt>taintlib *<line_sep># This has no runtime impact, but allows autocomplete to work <import_from_stmt>typing Iterable TYPE_CHECKING<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>..taintlib *<block_end># Actual tests <import_stmt>pathlib<line_sep># pathlib was added in 3.4 <def_stmt>test_basic <block_start>print("\n# test_basic")<line_sep>ts=TAINTED_STRING<line_sep>tainted_path=pathlib.Path(ts)<line_sep>tainted_pure_path=pathlib.PurePath(ts)<line_sep>tainted_pure_posix_path=pathlib.PurePosixPath(ts)<line_sep>tainted_pure_windows_path=pathlib.PureWindowsPath(ts)<line_sep>ensure_tainted(tainted_path # $ tainted tainted_pure_path # $ tainted tainted_pure_posix_path # $ tainted tainted_pure_windows_path # $ tainted pathlib.Path("foo")/ts # $ tainted ts/pathlib.Path("foo") # $ tainted tainted_path.joinpath("foo" "bar") # $ tainted pathlib.Path("foo").joinpath(tainted_path "bar") # $ tainted pathlib.Path("foo").joinpath("bar" tainted_path) # $ tainted str(tainted_path) # $ tainted # TODO: Tainted methods and attributes # https://docs.python.org/3.8/library/pathlib.html#methods-and-properties )<if_stmt>os.name<eq>"posix"<block_start>tainted_posix_path=pathlib.PosixPath(ts)<line_sep>ensure_tainted(tainted_posix_path # $ tainted )<block_end><if_stmt>os.name<eq>"nt"<block_start>tainted_windows_path=pathlib.WindowsPath(ts)<line_sep>ensure_tainted(tainted_windows_path # $ tainted )<block_end><block_end># Make tests runable test_basic()<line_sep>
""" Test suite for the public fingerprinting function. """<import_from_stmt>hypothesis given settings<import_stmt>hypothesis.strategies<as>st<import_from_stmt>umash C FFI<import_from_stmt>umash_reference umash UmashKey<line_sep>U64S=st.integers(min_value=0 max_value=2<power>64-1)<line_sep>FIELD=2<power>61-1<def_stmt>repeats min_size<block_start>"""Repeats one byte n times."""<line_sep><return>st.builds(<lambda>count binary:binary<times>count st.integers(min_value=min_size max_value=1024) st.binary(min_size=1 max_size=1) )<block_end>@given(seed=U64S multipliers=st.lists(st.integers(min_value=0 max_value=FIELD-1) min_size=2 max_size=2) key=st.lists(U64S min_size=C.UMASH_OH_PARAM_COUNT+C.UMASH_OH_TWISTING_COUNT max_size=C.UMASH_OH_PARAM_COUNT+C.UMASH_OH_TWISTING_COUNT ) data=st.binary()|repeats(1) )<def_stmt>test_public_umash_fprint seed multipliers key data<block_start>"""Compare umash_fprint with two calls to the reference."""<line_sep>expected=[umash(UmashKey(poly=multipliers[0] oh=key) seed data secondary=<false>) umash(UmashKey(poly=multipliers[1] oh=key) seed data secondary=<true>) ]<line_sep>n_bytes=len(data)<line_sep>block=FFI.new("char[]" n_bytes)<line_sep>FFI.memmove(block data n_bytes)<line_sep>params=FFI.new("struct umash_params[1]")<for_stmt>i,multiplier enumerate(multipliers)<block_start>params[0].poly[i][0]=(multiplier<power>2)%FIELD<line_sep>params[0].poly[i][1]=multiplier<block_end><for_stmt>i,param enumerate(key)<block_start>params[0].oh[i]=param<block_end>actual=C.umash_fprint(params seed block n_bytes)<assert_stmt>[actual.hash[0] actual.hash[1]]<eq>expected<block_end>@settings(deadline=<none>)@given(seed=U64S multipliers=st.lists(st.integers(min_value=0 max_value=FIELD-1) min_size=2 max_size=2) key=st.lists(U64S min_size=C.UMASH_OH_PARAM_COUNT+C.UMASH_OH_TWISTING_COUNT max_size=C.UMASH_OH_PARAM_COUNT+C.UMASH_OH_TWISTING_COUNT ) byte=st.binary(min_size=1 max_size=1) )<def_stmt>test_public_umash_fprint_repeated seed multipliers key byte<block_start>"""Compare umash_fprint with two calls to the reference, for n repetitions of the input byte."""<line_sep>params=FFI.new("struct umash_params[1]")<for_stmt>i,multiplier enumerate(multipliers)<block_start>params[0].poly[i][0]=(multiplier<power>2)%FIELD<line_sep>params[0].poly[i][1]=multiplier<block_end><for_stmt>i,param enumerate(key)<block_start>params[0].oh[i]=param<block_end><for_stmt>i range(520)<block_start>data=byte<times>i<line_sep>expected=[umash(UmashKey(poly=multipliers[0] oh=key) seed data secondary=<false>) umash(UmashKey(poly=multipliers[1] oh=key) seed data secondary=<true>) ]<line_sep>n_bytes=len(data)<line_sep>block=FFI.new("char[]" n_bytes)<line_sep>FFI.memmove(block data n_bytes)<line_sep>actual=C.umash_fprint(params seed block n_bytes)<assert_stmt>[actual.hash[0] actual.hash[1]]<eq>expected<block_end><block_end>
<import_stmt>torch.nn<as>nn<import_stmt>torch<import_from_stmt>at_learner_core.models.wrappers.losses get_loss<import_from_stmt>at_learner_core.models.wrappers.simple_classifier_wrapper SimpleClassifierWrapper<import_from_stmt>at_learner_core.models.architectures get_backbone<class_stmt>RGBSimpleWrapper(SimpleClassifierWrapper)<block_start><def_stmt>__init__ self wrapper_config<block_start>super().__init__(wrapper_config)<block_end><def_stmt>_init_modules self wrapper_config<block_start>self.backbone,feature_size=get_backbone(wrapper_config.backbone pretrained=wrapper_config.pretrained get_feature_size=<true>)<line_sep>self.classifier=nn.Linear(feature_size wrapper_config.nclasses)<block_end><def_stmt>predict self x<block_start>features=self.backbone(x['data'])<line_sep>output=self.classifier(features)<if_stmt>isinstance(self.loss (nn.BCELoss nn.BCEWithLogitsLoss))<block_start>output=torch.sigmoid(output)<block_end><elif_stmt>isinstance(self.loss nn.CrossEntropyLoss)<block_start>output=nn.functional.softmax(output dim=0)[: 1]<block_end>output_dict={'output':output.detach().cpu()}<line_sep><return>output_dict<block_end><block_end><class_stmt>RGBSimpleInferenceWrapper(RGBSimpleWrapper)<block_start><def_stmt>forward self x<block_start>features=self.backbone(x)<line_sep>output=self.classifier(features)<line_sep>output=torch.sigmoid(output)<line_sep><return>output<block_end><block_end>
# -*- coding: utf-8 -*- """ Name : c10_16_straddle.py Book : Python for Finance (2nd ed.) Publisher: Packt Publishing Ltd. Author : <NAME> Date : 6/6/2017 email : <EMAIL> <EMAIL> """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<line_sep>sT=np.arange(30 80 5)<line_sep>x=50<line_sep>c=2<line_sep>p=1<line_sep>straddle=(abs(sT-x)+sT-x)/2-c+(abs(x-sT)+x-sT)/2-p<line_sep>y0=np.zeros(len(sT))<line_sep>plt.ylim(-6 20)<line_sep>plt.xlim(40 70)<line_sep>plt.plot(sT y0)<line_sep>plt.plot(sT straddle 'r')<line_sep>plt.plot([x x] [-6 4] 'g-.')<line_sep>plt.title("Profit-loss for a Straddle")<line_sep>plt.xlabel('Stock price')<line_sep>plt.ylabel('Profit (loss)')<line_sep>plt.annotate('Point 1='+str(x-c-p) xy=(x-p-c 0) xytext=(x-p-c 10) arrowprops=dict(facecolor='red' shrink=0.01) )<line_sep>plt.annotate('Point 2='+str(x+c+p) xy=(x+p+c 0) xytext=(x+p+c 13) arrowprops=dict(facecolor='blue' shrink=0.01) )<line_sep>plt.annotate('exercise price' xy=(x+1 -5))<line_sep>plt.annotate('Buy a call and buy a put with the same exercise price' xy=(45 16))<line_sep>plt.show()<line_sep>
<import_stmt>torch<import_from_stmt>..abstract ExtendedTorchModule<class_stmt>AbstractRecurrentCell(ExtendedTorchModule)<block_start><def_stmt>__init__ self Op input_size hidden_size writer=<none> **kwargs<block_start>super().__init__('recurrent' writer=writer **kwargs)<line_sep>self.input_size=input_size<line_sep>self.hidden_size=hidden_size<line_sep>self.op=Op(input_size+hidden_size hidden_size writer=self.writer **kwargs)<block_end><def_stmt>reset_parameters self<block_start>self.op.reset_parameters()<block_end><def_stmt>forward self x_t h_tm1<block_start><return>self.op(torch.cat((x_t h_tm1) dim=1))<block_end><def_stmt>extra_repr self<block_start><return>'input_size={}, hidden_size={}'.format(self.input_size self.hidden_size)<block_end><block_end>
<import_stmt>pytest<import_from_stmt>wemake_python_styleguide.visitors.ast.complexity.jones JonesComplexityVisitor JonesScoreViolation <line_sep>module_without_nodes=''<line_sep>module_with_nodes=""" some_value = 1 + 2 other = some_value if some_value > 2 else some_value * 8 + 34 """<line_sep>module_with_function=""" def some_function(param): return param + param * 2 some_function(12 + 6) """<line_sep>module_with_class=""" class SomeClass(object): def execute(self): return self some = SomeClass() print(some.execute()) """<line_sep>@pytest.mark.parametrize('code' [module_without_nodes module_with_nodes module_with_function module_with_class ])<def_stmt>test_module_score assert_errors parse_ast_tree code default_options mode <block_start>"""Testing that regular nodes do not raise violations."""<line_sep>tree=parse_ast_tree(mode(code))<line_sep>visitor=JonesComplexityVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [])<block_end>@pytest.mark.parametrize(('code' 'score') [(module_without_nodes 0) (module_with_nodes 8.5) (module_with_function 6) (module_with_class 2) ])<def_stmt>test_module_score_error assert_errors assert_error_text parse_ast_tree code score options mode <block_start>"""Testing that regular nodes do raise violations."""<line_sep>tree=parse_ast_tree(mode(code))<line_sep>option_values=options(max_jones_score=-1)<line_sep>visitor=JonesComplexityVisitor(option_values tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [JonesScoreViolation])<line_sep>assert_error_text(visitor str(score) option_values.max_jones_score)<block_end>
<import_stmt>weakref<import_from_stmt>netaddr IPAddress IPNetwork IPRange<def_stmt>test_ip_classes_are_weak_referencable <block_start>weakref.ref(IPAddress('10.0.0.1'))<line_sep>weakref.ref(IPNetwork('10.0.0.1/8'))<line_sep>weakref.ref(IPRange('10.0.0.1' '10.0.0.10'))<block_end>
<import_stmt>time<line_sep>__all__=['dt_to_timestamp']<def_stmt>dt_to_timestamp dt<block_start>timestamp=int(time.mktime(dt.timetuple()))<line_sep><return>timestamp<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_stmt>logging<import_from_stmt>typing Any Dict<import_from_stmt>flask g<import_from_stmt>flask_appbuilder.models.sqla Model<import_from_stmt>flask_appbuilder.security.sqla.models User<import_from_stmt>superset security_manager<import_from_stmt>superset.dashboards.filter_sets.commands.base BaseFilterSetCommand<import_from_stmt>superset.dashboards.filter_sets.commands.exceptions DashboardIdInconsistencyError FilterSetCreateFailedError UserIsNotDashboardOwnerError <import_from_stmt>superset.dashboards.filter_sets.consts DASHBOARD_ID_FIELD DASHBOARD_OWNER_TYPE OWNER_ID_FIELD OWNER_TYPE_FIELD <import_from_stmt>superset.dashboards.filter_sets.dao FilterSetDAO<line_sep>logger=logging.getLogger(__name__)<class_stmt>CreateFilterSetCommand(BaseFilterSetCommand)# pylint: disable=C0103 <block_start><def_stmt>__init__ self user:User dashboard_id:int data:Dict[str Any]<block_start>super().__init__(user dashboard_id)<line_sep>self._properties=data.copy()<block_end><def_stmt>run self<arrow>Model<block_start>self.validate()<line_sep>self._properties[DASHBOARD_ID_FIELD]=self._dashboard.id<line_sep>filter_set=FilterSetDAO.create(self._properties commit=<true>)<line_sep><return>filter_set<block_end><def_stmt>validate self<arrow><none><block_start>self._validate_filterset_dashboard_exists()<if_stmt>self._properties[OWNER_TYPE_FIELD]<eq>DASHBOARD_OWNER_TYPE<block_start>self._validate_owner_id_is_dashboard_id()<line_sep>self._validate_user_is_the_dashboard_owner()<block_end><else_stmt><block_start>self._validate_owner_id_exists()<block_end><block_end><def_stmt>_validate_owner_id_exists self<arrow><none><block_start>owner_id=self._properties[OWNER_ID_FIELD]<if_stmt><not>(g.user.id<eq>owner_id<or>security_manager.get_user_by_id(owner_id))<block_start><raise>FilterSetCreateFailedError(str(self._dashboard_id) "owner_id does not exists")<block_end><block_end><def_stmt>_validate_user_is_the_dashboard_owner self<arrow><none><block_start><if_stmt><not>self.is_user_dashboard_owner()<block_start><raise>UserIsNotDashboardOwnerError(str(self._dashboard_id))<block_end><block_end><def_stmt>_validate_owner_id_is_dashboard_id self<arrow><none><block_start><if_stmt>(self._properties.get(OWNER_ID_FIELD self._dashboard_id)<ne>self._dashboard_id)<block_start><raise>DashboardIdInconsistencyError(str(self._dashboard_id))<block_end><block_end><block_end>
expected_output={"controller_config":{"group_name":"default" "ipv4":"10.9.3.4" "mac_address":"AAAA.BBFF.8888" "multicast_ipv4":"0.0.0.0" "multicast_ipv6":"::" "pmtu":"N/A" "public_ip":"N/A" "status":"N/A" } "mobility_summary":{"domain_id":"0x34ac" "dscp_value":"48" "group_name":"default" "keepalive":"10/3" "mac_addr":"687d.b4ff.b9e9" "mgmt_ipv4":"10.20.30.40" "mgmt_ipv6":"" "mgmt_vlan":"143" "multi_ipv4":"0.0.0.0" "multi_ipv6":"::" } }<line_sep>
<import_from_stmt>.services ProductCommandService <line_sep>
<import_stmt>importlib.util<def_stmt>is_installed module_name:str<block_start><return>importlib.util.find_spec(module_name)<is><not><none><block_end>
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>numpy<as>np<import_stmt>coords<import_from_stmt>go Position PlayerMove LibertyTracker WHITE BLACK<import_stmt>go<import_stmt>sgf_wrapper<import_from_stmt>tests test_utils<line_sep>EMPTY_ROW='.'<times>go.N+'\n'<line_sep>TEST_BOARD=test_utils.load_board(''' .X.....OO X........ '''+EMPTY_ROW<times>7)<line_sep>NO_HANDICAP_SGF="(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]HA[0]RE[W+1.5]GM[1];B[fd];W[cf];B[eg];W[dd];B[dc];W[cc];B[de];W[cd];B[ed];W[he];B[ce];W[be];B[df];W[bf];B[hd];W[ge];B[gd];W[gg];B[db];W[cb];B[cg];W[bg];B[gh];W[fh];B[hh];W[fg];B[eh];W[ei];B[di];W[fi];B[hg];W[dh];B[ch];W[ci];B[bh];W[ff];B[fe];W[hf];B[id];W[bi];B[ah];W[ef];B[dg];W[ee];B[di];W[ig];B[ai];W[ih];B[fb];W[hi];B[ag];W[ab];B[bd];W[bc];B[ae];W[ad];B[af];W[bd];B[ca];W[ba];B[da];W[ie])"<def_stmt>coords_from_gtp_set string<block_start><return>frozenset(map(coords.from_gtp string.split()))<block_end><class_stmt>TestBasicFunctions(test_utils.MinigoUnitTest)<block_start><def_stmt>test_load_board self<block_start>self.assertEqualNPArray(go.EMPTY_BOARD np.zeros([go.N go.N]))<line_sep>self.assertEqualNPArray(go.EMPTY_BOARD test_utils.load_board('. \n'<times>go.N<power>2))<block_end><def_stmt>test_neighbors self<block_start>corner=coords.from_gtp('A1')<line_sep>neighbors=[go.EMPTY_BOARD[c]<for>c go.NEIGHBORS[corner]]<line_sep>self.assertEqual(len(neighbors) 2)<line_sep>side=coords.from_gtp('A2')<line_sep>side_neighbors=[go.EMPTY_BOARD[c]<for>c go.NEIGHBORS[side]]<line_sep>self.assertEqual(len(side_neighbors) 3)<block_end><def_stmt>test_is_koish self<block_start>self.assertEqual(go.is_koish(TEST_BOARD coords.from_gtp('A9')) BLACK)<line_sep>self.assertEqual(go.is_koish(TEST_BOARD coords.from_gtp('B8')) <none>)<line_sep>self.assertEqual(go.is_koish(TEST_BOARD coords.from_gtp('B9')) <none>)<line_sep>self.assertEqual(go.is_koish(TEST_BOARD coords.from_gtp('E5')) <none>)<block_end><def_stmt>test_is_eyeish self<block_start>board=test_utils.load_board(''' .XX...XXX X.X...X.X XX.....X. ........X XXXX..... OOOX....O X.OXX.OO. .XO.X.O.O XXO.X.OO. 
''')<line_sep>B_eyes=coords_from_gtp_set('A2 A9 B8 J7 H8')<line_sep>W_eyes=coords_from_gtp_set('H2 J1 J3')<line_sep>not_eyes=coords_from_gtp_set('B3 E5')<for_stmt>be B_eyes<block_start>self.assertEqual(go.is_eyeish(board be) BLACK str(be))<block_end><for_stmt>we W_eyes<block_start>self.assertEqual(go.is_eyeish(board we) WHITE str(we))<block_end><for_stmt>ne not_eyes<block_start>self.assertEqual(go.is_eyeish(board ne) <none> str(ne))<block_end><block_end><block_end><class_stmt>TestLibertyTracker(test_utils.MinigoUnitTest)<block_start><def_stmt>test_lib_tracker_init self<block_start>board=test_utils.load_board('X........'+EMPTY_ROW<times>8)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>self.assertEqual(len(lib_tracker.groups) 1)<line_sep>self.assertNotEqual(lib_tracker.group_index[coords.from_gtp('A9')] go.MISSING_GROUP_ID)<line_sep>self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')] 2)<line_sep>sole_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('A9')]]<line_sep>self.assertEqual(sole_group.stones coords_from_gtp_set('A9'))<line_sep>self.assertEqual(sole_group.liberties coords_from_gtp_set('B9 A8'))<line_sep>self.assertEqual(sole_group.color BLACK)<block_end><def_stmt>test_place_stone self<block_start>board=test_utils.load_board('X........'+EMPTY_ROW<times>8)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>lib_tracker.add_stone(BLACK coords.from_gtp('B9'))<line_sep>self.assertEqual(len(lib_tracker.groups) 1)<line_sep>self.assertNotEqual(lib_tracker.group_index[coords.from_gtp('A9')] go.MISSING_GROUP_ID)<line_sep>self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')] 3)<line_sep>self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('B9')] 3)<line_sep>sole_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('A9')]]<line_sep>self.assertEqual(sole_group.stones coords_from_gtp_set('A9 B9'))<line_sep>self.assertEqual(sole_group.liberties coords_from_gtp_set('C9 A8 B8'))<line_sep>self.assertEqual(sole_group.color BLACK)<block_end><def_stmt>test_place_stone_opposite_color self<block_start>board=test_utils.load_board('X........'+EMPTY_ROW<times>8)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>lib_tracker.add_stone(WHITE coords.from_gtp('B9'))<line_sep>self.assertEqual(len(lib_tracker.groups) 2)<line_sep>self.assertNotEqual(lib_tracker.group_index[coords.from_gtp('A9')] go.MISSING_GROUP_ID)<line_sep>self.assertNotEqual(lib_tracker.group_index[coords.from_gtp('B9')] go.MISSING_GROUP_ID)<line_sep>self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('A9')] 1)<line_sep>self.assertEqual(lib_tracker.liberty_cache[coords.from_gtp('B9')] 2)<line_sep>black_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('A9')]]<line_sep>white_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('B9')]]<line_sep>self.assertEqual(black_group.stones coords_from_gtp_set('A9'))<line_sep>self.assertEqual(black_group.liberties coords_from_gtp_set('A8'))<line_sep>self.assertEqual(black_group.color BLACK)<line_sep>self.assertEqual(white_group.stones coords_from_gtp_set('B9'))<line_sep>self.assertEqual(white_group.liberties coords_from_gtp_set('C9 B8'))<line_sep>self.assertEqual(white_group.color WHITE)<block_end><def_stmt>test_merge_multiple_groups self<block_start>board=test_utils.load_board(''' .X....... X.X...... .X....... 
'''+EMPTY_ROW<times>6)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>lib_tracker.add_stone(BLACK coords.from_gtp('B8'))<line_sep>self.assertEqual(len(lib_tracker.groups) 1)<line_sep>self.assertNotEqual(lib_tracker.group_index[coords.from_gtp('B8')] go.MISSING_GROUP_ID)<line_sep>sole_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('B8')]]<line_sep>self.assertEqual(sole_group.stones coords_from_gtp_set('B9 A8 B8 C8 B7'))<line_sep>self.assertEqual(sole_group.liberties coords_from_gtp_set('A9 C9 D8 A7 C7 B6'))<line_sep>self.assertEqual(sole_group.color BLACK)<line_sep>liberty_cache=lib_tracker.liberty_cache<for_stmt>stone sole_group.stones<block_start>self.assertEqual(liberty_cache[stone] 6 str(stone))<block_end><block_end><def_stmt>test_capture_stone self<block_start>board=test_utils.load_board(''' .X....... XO....... .X....... '''+EMPTY_ROW<times>6)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>captured=lib_tracker.add_stone(BLACK coords.from_gtp('C8'))<line_sep>self.assertEqual(len(lib_tracker.groups) 4)<line_sep>self.assertEqual(lib_tracker.group_index[coords.from_gtp('B8')] go.MISSING_GROUP_ID)<line_sep>self.assertEqual(captured coords_from_gtp_set('B8'))<block_end><def_stmt>test_capture_many self<block_start>board=test_utils.load_board(''' .XX...... XOO...... .XX...... '''+EMPTY_ROW<times>6)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>captured=lib_tracker.add_stone(BLACK coords.from_gtp('D8'))<line_sep>self.assertEqual(len(lib_tracker.groups) 4)<line_sep>self.assertEqual(lib_tracker.group_index[coords.from_gtp('B8')] go.MISSING_GROUP_ID)<line_sep>self.assertEqual(captured coords_from_gtp_set('B8 C8'))<line_sep>left_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('A8')]]<line_sep>self.assertEqual(left_group.stones coords_from_gtp_set('A8'))<line_sep>self.assertEqual(left_group.liberties coords_from_gtp_set('A9 B8 A7'))<line_sep>right_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('D8')]]<line_sep>self.assertEqual(right_group.stones coords_from_gtp_set('D8'))<line_sep>self.assertEqual(right_group.liberties coords_from_gtp_set('D9 C8 E8 D7'))<line_sep>top_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('B9')]]<line_sep>self.assertEqual(top_group.stones coords_from_gtp_set('B9 C9'))<line_sep>self.assertEqual(top_group.liberties coords_from_gtp_set('A9 D9 B8 C8'))<line_sep>bottom_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('B7')]]<line_sep>self.assertEqual(bottom_group.stones coords_from_gtp_set('B7 C7'))<line_sep>self.assertEqual(bottom_group.liberties coords_from_gtp_set('B8 C8 A7 D7 B6 C6'))<line_sep>liberty_cache=lib_tracker.liberty_cache<for_stmt>stone top_group.stones<block_start>self.assertEqual(liberty_cache[stone] 4 str(stone))<block_end><for_stmt>stone left_group.stones<block_start>self.assertEqual(liberty_cache[stone] 3 str(stone))<block_end><for_stmt>stone right_group.stones<block_start>self.assertEqual(liberty_cache[stone] 4 str(stone))<block_end><for_stmt>stone bottom_group.stones<block_start>self.assertEqual(liberty_cache[stone] 6 str(stone))<block_end><for_stmt>stone captured<block_start>self.assertEqual(liberty_cache[stone] 0 str(stone))<block_end><block_end><def_stmt>test_capture_multiple_groups self<block_start>board=test_utils.load_board(''' .OX...... OXX...... XX....... 
'''+EMPTY_ROW<times>6)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>captured=lib_tracker.add_stone(BLACK coords.from_gtp('A9'))<line_sep>self.assertEqual(len(lib_tracker.groups) 2)<line_sep>self.assertEqual(captured coords_from_gtp_set('B9 A8'))<line_sep>corner_stone=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('A9')]]<line_sep>self.assertEqual(corner_stone.stones coords_from_gtp_set('A9'))<line_sep>self.assertEqual(corner_stone.liberties coords_from_gtp_set('B9 A8'))<line_sep>surrounding_stones=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('C9')]]<line_sep>self.assertEqual(surrounding_stones.stones coords_from_gtp_set('C9 B8 C8 A7 B7'))<line_sep>self.assertEqual(surrounding_stones.liberties coords_from_gtp_set('B9 D9 A8 D8 C7 A6 B6'))<line_sep>liberty_cache=lib_tracker.liberty_cache<for_stmt>stone corner_stone.stones<block_start>self.assertEqual(liberty_cache[stone] 2 str(stone))<block_end><for_stmt>stone surrounding_stones.stones<block_start>self.assertEqual(liberty_cache[stone] 7 str(stone))<block_end><block_end><def_stmt>test_same_friendly_group_neighboring_twice self<block_start>board=test_utils.load_board(''' XX....... X........ '''+EMPTY_ROW<times>7)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>captured=lib_tracker.add_stone(BLACK coords.from_gtp('B8'))<line_sep>self.assertEqual(len(lib_tracker.groups) 1)<line_sep>sole_group_id=lib_tracker.group_index[coords.from_gtp('A9')]<line_sep>sole_group=lib_tracker.groups[sole_group_id]<line_sep>self.assertEqual(sole_group.stones coords_from_gtp_set('A9 B9 A8 B8'))<line_sep>self.assertEqual(sole_group.liberties coords_from_gtp_set('C9 C8 A7 B7'))<line_sep>self.assertEqual(captured set())<block_end><def_stmt>test_same_opponent_group_neighboring_twice self<block_start>board=test_utils.load_board(''' XX....... X........ 
'''+EMPTY_ROW<times>7)<line_sep>lib_tracker=LibertyTracker.from_board(board)<line_sep>captured=lib_tracker.add_stone(WHITE coords.from_gtp('B8'))<line_sep>self.assertEqual(len(lib_tracker.groups) 2)<line_sep>black_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('A9')]]<line_sep>self.assertEqual(black_group.stones coords_from_gtp_set('A9 B9 A8'))<line_sep>self.assertEqual(black_group.liberties coords_from_gtp_set('C9 A7'))<line_sep>white_group=lib_tracker.groups[lib_tracker.group_index[coords.from_gtp('B8')]]<line_sep>self.assertEqual(white_group.stones coords_from_gtp_set('B8'))<line_sep>self.assertEqual(white_group.liberties coords_from_gtp_set('C8 B7'))<line_sep>self.assertEqual(captured set())<block_end><block_end><class_stmt>TestPosition(test_utils.MinigoUnitTest)<block_start><def_stmt>test_passing self<block_start>start_position=Position(board=TEST_BOARD n=0 komi=6.5 caps=(1 2) ko=coords.from_gtp('A1') recent=tuple() to_play=BLACK )<line_sep>expected_position=Position(board=TEST_BOARD n=1 komi=6.5 caps=(1 2) ko=<none> recent=(PlayerMove(BLACK <none>) ) to_play=WHITE )<line_sep>pass_position=start_position.pass_move()<line_sep>self.assertEqualPositions(pass_position expected_position)<block_end><def_stmt>test_flipturn self<block_start>start_position=Position(board=TEST_BOARD n=0 komi=6.5 caps=(1 2) ko=coords.from_gtp('A1') recent=tuple() to_play=BLACK )<line_sep>expected_position=Position(board=TEST_BOARD n=0 komi=6.5 caps=(1 2) ko=<none> recent=tuple() to_play=WHITE )<line_sep>flip_position=start_position.flip_playerturn()<line_sep>self.assertEqualPositions(flip_position expected_position)<block_end><def_stmt>test_is_move_suicidal self<block_start>board=test_utils.load_board(''' ...O.O... ....O.... XO.....O. OXO...OXO O.XO.OX.O OXO...OOX XO....... ......XXO .....XOO. ''')<line_sep>position=Position(board=board to_play=BLACK )<line_sep>suicidal_moves=coords_from_gtp_set('E9 H5')<line_sep>nonsuicidal_moves=coords_from_gtp_set('B5 J1 A9')<for_stmt>move suicidal_moves# sanity check my coordinate input <block_start>self.assertEqual(position.board[move] go.EMPTY)<line_sep>self.assertTrue(position.is_move_suicidal(move) str(move))<block_end><for_stmt>move nonsuicidal_moves# sanity check my coordinate input <block_start>self.assertEqual(position.board[move] go.EMPTY)<line_sep>self.assertFalse(position.is_move_suicidal(move) str(move))<block_end><block_end><def_stmt>test_legal_moves self<block_start>board=test_utils.load_board(''' .O.O.XOX. O..OOOOOX ......O.O OO.....OX XO.....X. .O....... OX.....OO XX...OOOX .....O.X. ''')<line_sep>position=Position(board=board to_play=BLACK)<line_sep>illegal_moves=coords_from_gtp_set('A9 E9 J9')<line_sep>legal_moves=coords_from_gtp_set('A4 G1 J1 H7')|{<none>}<for_stmt>move illegal_moves<block_start><with_stmt>self.subTest(type='illegal' move=move)<block_start>self.assertFalse(position.is_move_legal(move))<block_end><block_end><for_stmt>move legal_moves<block_start><with_stmt>self.subTest(type='legal' move=move)<block_start>self.assertTrue(position.is_move_legal(move))<block_end><block_end># check that the bulk legal test agrees with move-by-move illegal test. 
bulk_legality=position.all_legal_moves()<for_stmt>i,bulk_legal enumerate(bulk_legality)<block_start><with_stmt>self.subTest(type='bulk' move=coords.from_flat(i))<block_start>self.assertEqual(bulk_legal position.is_move_legal(coords.from_flat(i)))<block_end><block_end># flip the colors and check that everything is still (il)legal position=Position(board=-board to_play=WHITE)<for_stmt>move illegal_moves<block_start><with_stmt>self.subTest(type='illegal' move=move)<block_start>self.assertFalse(position.is_move_legal(move))<block_end><block_end><for_stmt>move legal_moves<block_start><with_stmt>self.subTest(type='legal' move=move)<block_start>self.assertTrue(position.is_move_legal(move))<block_end><block_end>bulk_legality=position.all_legal_moves()<for_stmt>i,bulk_legal enumerate(bulk_legality)<block_start><with_stmt>self.subTest(type='bulk' move=coords.from_flat(i))<block_start>self.assertEqual(bulk_legal position.is_move_legal(coords.from_flat(i)))<block_end><block_end><block_end><def_stmt>test_move self<block_start>start_position=Position(board=TEST_BOARD n=0 komi=6.5 caps=(1 2) ko=<none> recent=tuple() to_play=BLACK )<line_sep>expected_board=test_utils.load_board(''' .XX....OO X........ '''+EMPTY_ROW<times>7)<line_sep>expected_position=Position(board=expected_board n=1 komi=6.5 caps=(1 2) ko=<none> recent=(PlayerMove(BLACK coords.from_gtp('C9')) ) to_play=WHITE )<line_sep>actual_position=start_position.play_move(coords.from_gtp('C9'))<line_sep>self.assertEqualPositions(actual_position expected_position)<line_sep>expected_board2=test_utils.load_board(''' .XX....OO X.......O '''+EMPTY_ROW<times>7)<line_sep>expected_position2=Position(board=expected_board2 n=2 komi=6.5 caps=(1 2) ko=<none> recent=(PlayerMove(BLACK coords.from_gtp('C9')) PlayerMove(WHITE coords.from_gtp('J8'))) to_play=BLACK )<line_sep>actual_position2=actual_position.play_move(coords.from_gtp('J8'))<line_sep>self.assertEqualPositions(actual_position2 expected_position2)<block_end><def_stmt>test_move_with_capture self<block_start>start_board=test_utils.load_board(EMPTY_ROW<times>5+''' XXXX..... XOOX..... O.OX..... OOXX..... ''')<line_sep>start_position=Position(board=start_board n=0 komi=6.5 caps=(1 2) ko=<none> recent=tuple() to_play=BLACK )<line_sep>expected_board=test_utils.load_board(EMPTY_ROW<times>5+''' XXXX..... X..X..... .X.X..... ..XX..... ''')<line_sep>expected_position=Position(board=expected_board n=1 komi=6.5 caps=(7 2) ko=<none> recent=(PlayerMove(BLACK coords.from_gtp('B2')) ) to_play=WHITE )<line_sep>actual_position=start_position.play_move(coords.from_gtp('B2'))<line_sep>self.assertEqualPositions(actual_position expected_position)<block_end><def_stmt>test_ko_move self<block_start>start_board=test_utils.load_board(''' .OX...... OX....... '''+EMPTY_ROW<times>7)<line_sep>start_position=Position(board=start_board n=0 komi=6.5 caps=(1 2) ko=<none> recent=tuple() to_play=BLACK )<line_sep>expected_board=test_utils.load_board(''' X.X...... OX....... 
'''+EMPTY_ROW<times>7)<line_sep>expected_position=Position(board=expected_board n=1 komi=6.5 caps=(2 2) ko=coords.from_gtp('B9') recent=(PlayerMove(BLACK coords.from_gtp('A9')) ) to_play=WHITE )<line_sep>actual_position=start_position.play_move(coords.from_gtp('A9'))<line_sep>self.assertEqualPositions(actual_position expected_position)<line_sep># Check that retaking ko is illegal until two intervening moves <with_stmt>self.assertRaises(go.IllegalMove)<block_start>actual_position.play_move(coords.from_gtp('B9'))<block_end>pass_twice=actual_position.pass_move().pass_move()<line_sep>ko_delayed_retake=pass_twice.play_move(coords.from_gtp('B9'))<line_sep>expected_position=Position(board=start_board n=4 komi=6.5 caps=(2 3) ko=coords.from_gtp('A9') recent=(PlayerMove(BLACK coords.from_gtp('A9')) PlayerMove(WHITE <none>) PlayerMove(BLACK <none>) PlayerMove(WHITE coords.from_gtp('B9'))) to_play=BLACK )<line_sep>self.assertEqualPositions(ko_delayed_retake expected_position)<block_end><def_stmt>test_is_game_over self<block_start>root=go.Position()<line_sep>self.assertFalse(root.is_game_over())<line_sep>first_pass=root.play_move(<none>)<line_sep>self.assertFalse(first_pass.is_game_over())<line_sep>second_pass=first_pass.play_move(<none>)<line_sep>self.assertTrue(second_pass.is_game_over())<block_end><def_stmt>test_scoring self<block_start>board=test_utils.load_board(''' .XX...... OOXX..... OOOX...X. OXX...... OOXXXXXX. OOOXOXOXX .O.OOXOOX .O.O.OOXX ......OOO ''')<line_sep>position=Position(board=board n=54 komi=6.5 caps=(2 5) ko=<none> recent=tuple() to_play=BLACK )<line_sep>expected_score=1.5<line_sep>self.assertEqual(position.score() expected_score)<line_sep>board=test_utils.load_board(''' XXX...... OOXX..... OOOX...X. OXX...... OOXXXXXX. OOOXOXOXX .O.OOXOOX .O.O.OOXX ......OOO ''')<line_sep>position=Position(board=board n=55 komi=6.5 caps=(2 5) ko=<none> recent=tuple() to_play=WHITE )<line_sep>expected_score=2.5<line_sep>self.assertEqual(position.score() expected_score)<block_end><def_stmt>test_replay_position self<block_start>sgf_positions=list(sgf_wrapper.replay_sgf(NO_HANDICAP_SGF))<line_sep>initial=sgf_positions[0]<line_sep>self.assertEqual(initial.result go.WHITE)<line_sep>final=sgf_positions[-1].position.play_move(sgf_positions[-1].next_move)<line_sep># sanity check to ensure we're working with the right position final_board=test_utils.load_board(''' .OXX..... O.OX.X... .OOX..... OOOOXXXXX XOXXOXOOO XOOXOO.O. XOXXXOOXO XXX.XOXXO X..XOO.O. ''')<line_sep>expected_final_position=go.Position(final_board n=62 komi=6.5 caps=(3 2) ko=<none> recent=tuple() to_play=go.BLACK)<line_sep>self.assertEqualPositions(expected_final_position final)<line_sep>self.assertEqual(final.n len(final.recent))<line_sep>replayed_positions=list(go.replay_position(final 1))<for_stmt>sgf_pos,replay_pos zip(sgf_positions replayed_positions)<block_start>self.assertEqualPositions(sgf_pos.position replay_pos.position)<block_end><block_end><block_end>
<import_stmt>panel<as>pn<import_stmt>numpy<as>np<import_stmt>holoviews<as>hv<line_sep>LOGO="https://panel.holoviz.org/_static/logo_horizontal.png"<def_stmt>test_vanilla_with_sidebar <block_start>"""Returns an app that uses the vanilla template in various ways. Inspect the app and verify that the issues of [Issue 1641]\ (https://github.com/holoviz/panel/issues/1641) have been solved - Navbar is "sticky"/ fixed to the top - Navbar supports adding header items to left, center and right - There is a nice padding/ margin everywhere - Independent scroll for sidebar and main - Only vertical scrollbars """<line_sep>vanilla=pn.template.VanillaTemplate(title="Vanilla Template" logo=LOGO )<line_sep>xs=np.linspace(0 np.pi)<line_sep>freq=pn.widgets.FloatSlider(name="Frequency" start=0 end=10 value=2)<line_sep>phase=pn.widgets.FloatSlider(name="Phase" start=0 end=np.pi)<line_sep>@pn.depends(freq=freq phase=phase)<def_stmt>sine freq phase<block_start><return>hv.Curve((xs np.sin(xs<times>freq+phase))).opts(responsive=<true> min_height=400)<block_end>@pn.depends(freq=freq phase=phase)<def_stmt>cosine freq phase<block_start><return>hv.Curve((xs np.cos(xs<times>freq+phase))).opts(responsive=<true> min_height=400)<block_end>vanilla.sidebar.append(freq)<line_sep>vanilla.sidebar.append(phase)<line_sep>vanilla.sidebar.append(pn.pane.Markdown(test_vanilla_with_sidebar.__doc__))<line_sep>vanilla.sidebar.append(pn.pane.Markdown("## Sidebar Item\n"<times>50))<line_sep>vanilla.main.append(pn.Row(pn.Card(hv.DynamicMap(sine) title="Sine") pn.Card(hv.DynamicMap(cosine) title="Cosine") ))<line_sep>vanilla.main.append(pn.Row(pn.Card(hv.DynamicMap(sine) title="Sine") pn.Card(hv.DynamicMap(cosine) title="Cosine") ))<line_sep>vanilla.header[:]=[pn.Row(pn.widgets.Button(name="Left" sizing_mode="fixed" width=50) pn.layout.HSpacer() pn.widgets.Button(name="Center" sizing_mode="fixed" width=50) pn.layout.HSpacer() pn.widgets.Button(name="Right" sizing_mode="fixed" width=50) )]<line_sep>vanilla.main_max_width="600px"<line_sep><return>vanilla<block_end><def_stmt>test_vanilla_with_no_sidebar <block_start>"""Returns an app that uses the vanilla template in various ways. 
Inspect the app and verify that the issues of [Issue 1641]\ (https://github.com/holoviz/panel/issues/1641) have been solved - Navbar is "sticky"/ fixed to the top - Navbar supports adding header items to left, center and right - There is a nice padding/ margin everywhere - Independent scroll for sidebar and main - Only vertical scrollbars """<line_sep>vanilla=pn.template.VanillaTemplate(title="Vanilla Template" logo=LOGO favicon="https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel/2781d86d4ed141889d633748879a120d7d8e777a/assets/images/favicon.ico" )<line_sep>xs=np.linspace(0 np.pi)<line_sep>freq=pn.widgets.FloatSlider(name="Frequency" start=0 end=10 value=2)<line_sep>phase=pn.widgets.FloatSlider(name="Phase" start=0 end=np.pi)<line_sep>@pn.depends(freq=freq phase=phase)<def_stmt>sine freq phase<block_start><return>hv.Curve((xs np.sin(xs<times>freq+phase))).opts(responsive=<true> min_height=400)<block_end>@pn.depends(freq=freq phase=phase)<def_stmt>cosine freq phase<block_start><return>hv.Curve((xs np.cos(xs<times>freq+phase))).opts(responsive=<true> min_height=400)<block_end>vanilla.main.append(freq)<line_sep>vanilla.main.append(phase)<line_sep>vanilla.main.append(pn.pane.Markdown(test_vanilla_with_no_sidebar.__doc__))<line_sep>vanilla.main.append(pn.Row(pn.Card(hv.DynamicMap(sine) title="Sine") pn.Card(hv.DynamicMap(cosine) title="Cosine") ))<line_sep>vanilla.main.append(pn.Row(pn.Card(hv.DynamicMap(sine) title="Sine") pn.Card(hv.DynamicMap(cosine) title="Cosine") ))<line_sep>vanilla.header[:]=[pn.Row(pn.widgets.Button(name="Left" sizing_mode="fixed" width=50) pn.layout.HSpacer() pn.widgets.Button(name="Center" sizing_mode="fixed" width=50) pn.layout.HSpacer() pn.widgets.Button(name="Right" sizing_mode="fixed" width=50) )]<line_sep>vanilla.main_max_width="600px"<line_sep><return>vanilla<block_end><if_stmt>__name__.startswith("bokeh")<block_start>pn.extension(sizing_mode="stretch_width")<line_sep>test_vanilla_with_sidebar().servable()<line_sep># test_vanilla_with_no_sidebar().servable() <block_end>
<import_stmt>numpy<as>np<import_from_stmt>.base_signal BaseSignal<line_sep>__all__=['CAR']<class_stmt>CAR(BaseSignal)<block_start>"""Signal generator for continuously autoregressive (CAR) signals. Parameters ---------- ar_param : number (default 1.0) Parameter of the AR(1) process sigma : number (default 0.5) Standard deviation of the signal start_value : number (default 0.01) Starting value of the AR process """<def_stmt>__init__ self ar_param=1.0 sigma=0.5 start_value=0.01<block_start>self.vectorizable=<false><line_sep>self.ar_param=ar_param<line_sep>self.sigma=sigma<line_sep>self.start_value=start_value<line_sep>self.previous_value=<none><line_sep>self.previous_time=<none><block_end><def_stmt>sample_next self time samples errors<block_start>"""Sample a single time point Parameters ---------- time : number Time at which a sample was required Returns ------- float sampled signal for time t """<if_stmt>self.previous_value<is><none><block_start>output=self.start_value<block_end><else_stmt><block_start>time_diff=time-self.previous_time<line_sep>noise=np.random.normal(loc=0.0 scale=1.0 size=1)<line_sep>output=(np.power(self.ar_param time_diff))<times>self.previous_value+self.sigma<times>np.sqrt(1-np.power(self.ar_param time_diff))<times>noise<block_end>self.previous_time=time<line_sep>self.previous_value=output<line_sep><return>output<block_end><block_end>
<import_from_stmt>fontTools.misc sstruct<import_from_stmt>fontTools.misc.fixedTools floatToFixedToStr strToFixedToFloat<import_from_stmt>fontTools.misc.textTools safeEval num2binary binary2num<import_from_stmt>fontTools.misc.timeTools timestampFromString timestampToString timestampNow<import_from_stmt>fontTools.misc.timeTools epoch_diff<as>mac_epoch_diff# For backward compat <import_from_stmt>fontTools.misc.arrayTools intRect unionRect<import_from_stmt>. DefaultTable<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<line_sep>headFormat=""" > # big endian tableVersion: 16.16F fontRevision: 16.16F checkSumAdjustment: I magicNumber: I flags: H unitsPerEm: H created: Q modified: Q xMin: h yMin: h xMax: h yMax: h macStyle: H lowestRecPPEM: H fontDirectionHint: h indexToLocFormat: h glyphDataFormat: h """<class_stmt>table__h_e_a_d(DefaultTable.DefaultTable)<block_start>dependencies=['maxp' 'loca' 'CFF ' 'CFF2']<def_stmt>decompile self data ttFont<block_start>dummy,rest=sstruct.unpack2(headFormat data self)<if_stmt>rest# this is quite illegal, but there seem to be fonts out there that do this <block_start>log.warning("extra bytes at the end of 'head' table")<assert_stmt>rest<eq>b"\0\0"<block_end># For timestamp fields, ignore the top four bytes. Some fonts have # bogus values there. Since till 2038 those bytes only can be zero, # ignore them. # # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810 <for_stmt>stamp 'created' 'modified'<block_start>value=getattr(self stamp)<if_stmt>value<g>0xFFFFFFFF<block_start>log.warning("'%s' timestamp out of range; ignoring top bytes" stamp)<line_sep>value<augand>0xFFFFFFFF<line_sep>setattr(self stamp value)<block_end><if_stmt>value<l>0x7C259DC0# January 1, 1970 00:00:00 <block_start>log.warning("'%s' timestamp seems very low; regarding as unix timestamp" stamp)<line_sep>value<augadd>0x7C259DC0<line_sep>setattr(self stamp value)<block_end><block_end><block_end><def_stmt>compile self ttFont<block_start><if_stmt>ttFont.recalcBBoxes# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc(). 
<block_start><if_stmt>'CFF '<in>ttFont<block_start>topDict=ttFont['CFF '].cff.topDictIndex[0]<line_sep>self.xMin,self.yMin,self.xMax,self.yMax=intRect(topDict.FontBBox)<block_end><elif_stmt>'CFF2'<in>ttFont<block_start>topDict=ttFont['CFF2'].cff.topDictIndex[0]<line_sep>charStrings=topDict.CharStrings<line_sep>fontBBox=<none><for_stmt>charString charStrings.values()<block_start>bounds=charString.calcBounds(charStrings)<if_stmt>bounds<is><not><none><block_start><if_stmt>fontBBox<is><not><none><block_start>fontBBox=unionRect(fontBBox bounds)<block_end><else_stmt><block_start>fontBBox=bounds<block_end><block_end><block_end><if_stmt>fontBBox<is><not><none><block_start>self.xMin,self.yMin,self.xMax,self.yMax=intRect(fontBBox)<block_end><block_end><block_end><if_stmt>ttFont.recalcTimestamp<block_start>self.modified=timestampNow()<block_end>data=sstruct.pack(headFormat self)<line_sep><return>data<block_end><def_stmt>toXML self writer ttFont<block_start>writer.comment("Most of this table will be recalculated by the compiler")<line_sep>writer.newline()<line_sep>_,names,fixes=sstruct.getformat(headFormat)<for_stmt>name names<block_start>value=getattr(self name)<if_stmt>name<in>fixes<block_start>value=floatToFixedToStr(value precisionBits=fixes[name])<block_end><elif_stmt>name<in>("created" "modified")<block_start>value=timestampToString(value)<block_end><elif_stmt>name<in>("magicNumber" "checkSumAdjustment")<block_start><if_stmt>value<l>0<block_start>value=value+0x100000000<block_end>value=hex(value)<if_stmt>value[-1:]<eq>"L"<block_start>value=value[:-1]<block_end><block_end><elif_stmt>name<in>("macStyle" "flags")<block_start>value=num2binary(value 16)<block_end>writer.simpletag(name value=value)<line_sep>writer.newline()<block_end><block_end><def_stmt>fromXML self name attrs content ttFont<block_start>value=attrs["value"]<line_sep>fixes=sstruct.getformat(headFormat)[2]<if_stmt>name<in>fixes<block_start>value=strToFixedToFloat(value precisionBits=fixes[name])<block_end><elif_stmt>name<in>("created" "modified")<block_start>value=timestampFromString(value)<block_end><elif_stmt>name<in>("macStyle" "flags")<block_start>value=binary2num(value)<block_end><else_stmt><block_start>value=safeEval(value)<block_end>setattr(self name value)<block_end><block_end>
<import_stmt>ibis<def_stmt>test_connection_pool_size hdfs env test_data_db<block_start>client=ibis.impala.connect(port=env.impala_port hdfs_client=hdfs host=env.impala_host database=test_data_db )<line_sep># the client cursor may or may not be GC'd, so the connection # pool will contain either zero or one cursor <assert_stmt>len(client.con.connection_pool)<in>(0 1)<block_end><def_stmt>test_connection_pool_size_after_close hdfs env test_data_db<block_start>client=ibis.impala.connect(port=env.impala_port hdfs_client=hdfs host=env.impala_host database=test_data_db )<line_sep>client.close()<assert_stmt><not>client.con.connection_pool<block_end>
<class_stmt>Foo<block_start><class_stmt>Bar<block_start><def_stmt>baz self<block_start><pass><block_end><block_end><block_end>
#// #//------------------------------------------------------------------------------ #// Copyright 2011 Mentor Graphics Corporation #// Copyright 2011 Cadence Design Systems, Inc. #// Copyright 2011 Synopsys, Inc. #// Copyright 2019-2020 <NAME> (tpoikela) #// All Rights Reserved Worldwide #// #// Licensed under the Apache License, Version 2.0 (the #// "License"); you may not use this file except in #// compliance with the License. You may obtain a copy of #// the License at #// #// http://www.apache.org/licenses/LICENSE-2.0 #// #// Unless required by applicable law or agreed to in #// writing, software distributed under the License is #// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR #// CONDITIONS OF ANY KIND, either express or implied. See #// the License for the specific language governing #// permissions and limitations under the License. #//------------------------------------------------------------------------------ <import_from_stmt>uvm.reg UVMReg UVMRegField<import_from_stmt>uvm.base sv<import_from_stmt>uvm.macros uvm_object_utils<import_from_stmt>uvm.reg.uvm_mem UVMMem<import_from_stmt>uvm.reg.uvm_reg_block UVMRegBlock<import_from_stmt>uvm.reg.uvm_reg_model UVM_NO_COVERAGE UVM_LITTLE_ENDIAN<class_stmt>dut_ID(UVMReg)<block_start><def_stmt>__init__ self name="dut_ID"<block_start>super().__init__(name 32 UVM_NO_COVERAGE)<line_sep>self.REVISION_ID=<none><line_sep>self.CHIP_ID=<none><line_sep>self.PRODUCT_ID=<none><block_end><def_stmt>build self<block_start>self.REVISION_ID=UVMRegField.type_id.create("REVISION_ID")<line_sep>self.CHIP_ID=UVMRegField.type_id.create("CHIP_ID")<line_sep>self.PRODUCT_ID=UVMRegField.type_id.create("PRODUCT_ID")<line_sep>self.REVISION_ID.configure(self 8 0 "RO" 0 0x03 1 0 1)<line_sep>self.CHIP_ID.configure(self 8 8 "RO" 0 0x5A 1 0 1)<line_sep>self.PRODUCT_ID.configure(self 10 16 "RO" 0 0x176 1 0 1)<block_end><block_end>uvm_object_utils(dut_ID)<class_stmt>dut_DATA(UVMReg)<block_start><def_stmt>__init__ self name="dut_DATA"<block_start>super().__init__(name 32 UVM_NO_COVERAGE)<line_sep>self.value=<none><block_end><def_stmt>build self<block_start>self.value=UVMRegField.type_id.create("value")<line_sep>self.value.configure(self 32 0 "RW" 1 0x0 1 0 1)<block_end><block_end>uvm_object_utils(dut_DATA)<class_stmt>dut_SOCKET(UVMReg)# rand UVMRegField IP # rand UVMRegField PORT <block_start><def_stmt>__init__ self name="dut_ADDR"<block_start>super().__init__(name 64 UVM_NO_COVERAGE)<line_sep>self.IP=<none><line_sep>self.PORT=<none><block_end><def_stmt>build self<block_start>self.IP=UVMRegField.type_id.create("value")<line_sep>self.PORT=UVMRegField.type_id.create("value")<line_sep>self.IP.configure(self 48 0 "RW" 0 0x0 1 0 1)<line_sep>self.PORT.configure(self 16 48 "RW" 0 0x0 1 0 1)<line_sep>self.rand('IP')<line_sep>self.rand('PORT')<block_end><block_end>uvm_object_utils(dut_SOCKET)<class_stmt>dut_RAM(UVMMem)<block_start><def_stmt>__init__ self name="dut_RAM"<block_start>super().__init__(name 8 32 "RW" UVM_NO_COVERAGE)<block_end><block_end>uvm_object_utils(dut_RAM)<class_stmt>dut_regmodel(UVMRegBlock)# rand dut_ID ID # rand dut_DATA DATA # rand dut_SOCKET SOCKET[256] # rand dut_RAM RAM <block_start><def_stmt>__init__ self name="slave"<block_start>super().__init__(name UVM_NO_COVERAGE)<line_sep>self.SOCKET=[]<line_sep>self.nsockets=16<block_end><def_stmt>build self# create <block_start>self.ID=dut_ID.type_id.create("ID")<line_sep>self.DATA=dut_DATA.type_id.create("DATA")<for_stmt>i 
range(self.nsockets)<block_start>socket=dut_SOCKET.type_id.create(sv.sformatf("SOCKET[%0d]" i))<line_sep>self.SOCKET.append(socket)<block_end>self.RAM=dut_RAM.type_id.create("DMA_RAM")<line_sep># configure/build registers self.ID.configure(self <none> "ID")<line_sep>self.ID.build()<line_sep>self.DATA.configure(self <none> "DATA")<line_sep>self.DATA.build()<for_stmt>i range(len(self.SOCKET))<block_start>self.SOCKET[i].configure(self <none> sv.sformatf("SOCKET[%0d]" i))<line_sep>self.SOCKET[i].build()<block_end>self.RAM.configure(self "DMA")<line_sep># define default map/add register to map self.default_map=self.create_map("default_map" 0x0 4 UVM_LITTLE_ENDIAN 1)<line_sep>self.default_map.add_reg(self.ID 0x0 "RW")<line_sep>self.default_map.add_reg(self.DATA 0x24 "RW")<for_stmt>i range(len(self.SOCKET))<block_start>self.default_map.add_reg(self.SOCKET[i] 0x1000+16<times>i "RW")<block_end>self.default_map.add_mem(self.RAM 0x2000 "RW")<block_end><block_end>uvm_object_utils(dut_regmodel)<line_sep>
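# A minimal bring-up sketch (hedged, not part of the original example) following the usual UVM register-model flow for the block above; the helper and instance names are placeholders, and the surrounding env/sequencer wiring is omitted.
<def_stmt>build_regmodel <block_start>regmodel=dut_regmodel.type_id.create("regmodel" <none>)<line_sep>regmodel.build()<line_sep>regmodel.lock_model()<line_sep># lock_model() freezes the structure and computes the address map
<return>regmodel<block_end>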
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. <import_stmt>datetime<import_stmt>os<import_stmt>sys<import_stmt>torch<line_sep>sys.path.insert(0 os.path.abspath(".."))<line_sep># -- Project information ----------------------------------------------------- project="Snorkel"<line_sep>copyright=f"{datetime.datetime.now().year}, Snorkel Team"<line_sep>author="Snorkel Team"<line_sep>master_doc="index"<line_sep>html_logo="_static/octopus.png"<line_sep>VERSION={}<with_stmt>open("../snorkel/version.py" "r")<as>version_file<block_start>exec(version_file.read() VERSION)<block_end># The full version, including alpha/beta/rc tags release=VERSION["VERSION"]<line_sep># -- General configuration --------------------------------------------------- # Mock imports for troublesome modules (i.e. any that use C code) autosummary_mock_imports=["dask" "pyspark" "spacy"]<line_sep># Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions=["sphinx.ext.autodoc" "sphinx.ext.autosummary" "sphinx.ext.napoleon" "sphinx_autodoc_typehints" "sphinx.ext.linkcode" ]<line_sep># Add any paths that contain templates here, relative to this directory. templates_path=["_templates"]<line_sep># List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns=["_build" "Thumbs.db" ".DS_Store"]<line_sep>autosummary_generate=<true><line_sep># -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme="sphinx_rtd_theme"<line_sep>html_theme_options={"navigation_depth":-1 "titles_only":<true>}<line_sep># Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path=["_static"]<line_sep># -- Options for napoleon extension ------------------------------------------- napoleon_google_docstring=<false><line_sep>napoleon_numpy_docstring=<true><line_sep>napoleon_include_init_with_doc=<false><line_sep>napoleon_include_private_with_doc=<false><line_sep>napoleon_include_special_with_doc=<false><line_sep>napoleon_use_admonition_for_examples=<false><line_sep>napoleon_use_admonition_for_notes=<false><line_sep>napoleon_use_admonition_for_references=<false><line_sep>napoleon_use_ivar=<false><line_sep>napoleon_use_param=<true><line_sep>napoleon_use_rtype=<true><line_sep># -- Options for autodoc extension ------------------------------------------- # This value selects what content will be inserted into the main body of an autoclass # directive # # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#directive-autoclass autoclass_content="class"<line_sep># Default options to an ..autoXXX directive. 
autodoc_default_options={"members":<none> "inherited-members":<none> "show-inheritance":<none> "special-members":"__call__" }<line_sep># Subclasses should show parent classes docstrings if they don't override them. autodoc_inherit_docstrings=<true><line_sep># -- Options for linkcode extension ------------------------------------------ <def_stmt>linkcode_resolve domain info<block_start><if_stmt>domain<ne>"py"<block_start><return><none><block_end><if_stmt><not>info["module"]<block_start><return><none><block_end>module_path=info["module"].replace("." "/")<line_sep># If only one `.`, assume it's a package <if_stmt>info["module"].count(".")<eq>1<block_start><return>f"https://github.com/snorkel-team/snorkel/tree/master/{module_path}"<block_end># Otherwise, it's a module <else_stmt><block_start><return>f"https://github.com/snorkel-team/snorkel/blob/master/{module_path}.py"<block_end><block_end># -- Exclude PyTorch methods ------------------------------------------------- <def_stmt>skip_torch_module_member app what name obj skip options<block_start>skip_torch="Module."<in>str(obj)<and>name<in>dir(torch.nn.Module)<if_stmt>name<eq>"dump_patches"# Special handling for documented attrib <block_start>skip_torch=<true><block_end><return>skip<or>skip_torch<block_end># -- Run setup --------------------------------------------------------------- <def_stmt>setup app<block_start>app.connect("autodoc-skip-member" skip_torch_module_member)<block_end>
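# A small worked illustration of the linkcode_resolve() rule above (module names are assumed, not taken from the Snorkel docs):
# info["module"] == "snorkel.labeling" contains one dot, so it maps to the package URL https://github.com/snorkel-team/snorkel/tree/master/snorkel/labeling,
# while "snorkel.labeling.lf" maps to the module URL https://github.com/snorkel-team/snorkel/blob/master/snorkel/labeling/lf.py.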
<import_stmt>argparse<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>models<import_from_stmt>importlib import_module<import_from_stmt>pytorch3d.structures Meshes<import_from_stmt>skimage measure<import_from_stmt>nerf.nerf_helpers export_obj batchify<import_from_stmt>lightning_modules PathParser<def_stmt>create_mesh vertices faces_idx# We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale <block_start>vertices=vertices-vertices.mean(0)<line_sep>scale=max(vertices.abs().max(0)[0])<line_sep>vertices=vertices/scale<line_sep># We construct a Meshes structure for the target mesh target_mesh=Meshes(verts=[vertices] faces=[faces_idx])<line_sep><return>target_mesh<block_end><def_stmt>extract_radiance model args device nums<block_start><assert_stmt>(isinstance(nums tuple)<or>isinstance(nums list)<or>isinstance(nums int)) "Nums arg should be either iterable or int."<if_stmt>isinstance(nums int)<block_start>nums=(nums )<times>3<block_end><else_stmt><block_start><assert_stmt>(len(nums)<eq>3) "Nums arg should be of length 3, number of axes for 3D"<block_end># Create sample tiles tiles=[torch.linspace(-args.limit args.limit num)<for>num nums]<line_sep># Generate 3D samples samples=torch.stack(torch.meshgrid(*tiles) -1).view(-1 3).float()<line_sep>radiance_samples=[]<for_stmt>(samples ) batchify(samples batch_size=args.batch_size device=device)# Query radiance batch <block_start>radiance_batch=model.sample_points(samples samples)<line_sep># Accumulate radiance radiance_samples.append(radiance_batch.cpu())<block_end># Radiance 3D grid (rgb + density) radiance=torch.cat(radiance_samples 0).view(*nums 4).contiguous().numpy()<line_sep><return>radiance<block_end><def_stmt>extract_iso_level density args# Density boundaries <block_start>min_a,max_a,std_a=density.min() density.max() density.std()<line_sep># Adaptive iso level iso_value=min(max(args.iso_level min_a+std_a) max_a-std_a)<line_sep>print(f"Min density {min_a}, Max density: {max_a}, Mean density {density.mean()}")<line_sep>print(f"Querying based on iso level: {iso_value}")<line_sep><return>iso_value<block_end><def_stmt>extract_geometry model device args# Sample points based on the grid <block_start>radiance=extract_radiance(model args device args.res)<line_sep># Density grid density=radiance[<ellipsis> 3]<line_sep># Adaptive iso level iso_value=extract_iso_level(density args)<line_sep># Extracting iso-surface triangulated results=measure.marching_cubes(density iso_value)<line_sep># Use contiguous tensors vertices,triangles,normals,_=[torch.from_numpy(np.ascontiguousarray(result))<for>result results]<line_sep># Use contiguous tensors normals=torch.from_numpy(np.ascontiguousarray(normals))<line_sep>vertices=torch.from_numpy(np.ascontiguousarray(vertices))<line_sep>triangles=torch.from_numpy(np.ascontiguousarray(triangles))<line_sep># Normalize vertices, to the (-limit, limit) vertices=args.limit<times>(vertices/(args.res/2.)-1.)<line_sep><return>vertices triangles normals density<block_end><def_stmt>extract_geometry_with_super_sampling model device args<block_start><raise>NotImplementedError<try_stmt><block_start>mcubes=import_module("marching_cubes")<block_end><except_stmt>ModuleNotFoundError<block_start>print(""" Run the following instructions within your environment: https://github.com/JustusThies/PyMarchingCubes#installation """)<line_sep># Close process exit(-1)<block_end># Sampling resolution 
per axis nums=np.array([args.res+(args.res-1)<times>args.super_sampling args.res args.res])<line_sep># Radiance per axis, super sampling across each axis radiances=[]<for_stmt>i range(0 3)# Roll such that each axis is rich <block_start>radiance_axis=extract_radiance(model args device np.roll(nums i))[<ellipsis> 3]<line_sep>radiances.append(radiance_axis)<block_end># accumulate density=np.stack(radiances axis=0)<line_sep># Adaptive iso level iso_value=extract_iso_level(density args)<line_sep>vertices,triangles=mcubes.marching_cubes_super_sampling(*radiances iso_value)<line_sep>vertices=np.ascontiguousarray(vertices)<line_sep>mcubes.export_obj(vertices triangles os.path.join(args.save_dir "mesh.obj"))<block_end><def_stmt>export_marching_cubes model args cfg device# Mesh Extraction <block_start><if_stmt>args.super_sampling<ge>1<block_start>print("Generating mesh geometry...")<line_sep># Extract model geometry with super sampling across each axis extract_geometry_with_super_sampling(model device args)<line_sep><return><block_end># Cached mesh path containing data mesh_cache_path=os.path.join(args.save_dir args.cache_name)<line_sep>cached_mesh_exists=os.path.exists(mesh_cache_path)<line_sep>cache_new_mesh=args.use_cached_mesh<and><not>cached_mesh_exists<if_stmt>cache_new_mesh<block_start>print(f"Cached mesh does not exist - {mesh_cache_path}")<block_end><if_stmt>args.use_cached_mesh<and>cached_mesh_exists<block_start>print("Loading cached mesh geometry...")<line_sep>vertices,triangles,normals,density=torch.load(mesh_cache_path)<block_end><else_stmt><block_start>print("Generating mesh geometry...")<line_sep># Extract model geometry vertices,triangles,normals,density=extract_geometry(model device args)<if_stmt>cache_new_mesh<or>args.override_cache_mesh<block_start>torch.save((vertices triangles normals density) mesh_cache_path)<line_sep>print(f"Cached mesh geometry saved to {mesh_cache_path}")<block_end><block_end># Extracting the mesh appearance # Ray targets and directions targets,directions=vertices -normals<line_sep>diffuse=[]<if_stmt>args.no_view_dependence<block_start>print("Diffuse map query directly without specific-views...")<line_sep># Query directly without specific-views batch_generator=batchify(targets directions batch_size=args.batch_size device=device)<for_stmt>(pos_batch dir_batch) batch_generator# Diffuse batch queried <block_start>diffuse_batch=model.sample_points(pos_batch dir_batch)[<ellipsis> :3]<line_sep># Accumulate diffuse diffuse.append(diffuse_batch.cpu())<block_end><block_end><else_stmt><block_start>print("Diffuse map query with view dependence...")<line_sep>ray_bounds=torch.tensor([0. 
args.view_disparity_max_bound] dtype=directions.dtype)<line_sep># Query with view dependence # Move ray origins slightly towards negative sdf ray_origins=targets-args.view_disparity<times>directions<line_sep>print("Started ray-casting")<line_sep>batch_generator=batchify(ray_origins directions batch_size=args.batch_size device=device)<for_stmt>(ray_origins ray_directions) batch_generator# View dependent diffuse batch queried <block_start>output_bundle=model.query((ray_origins ray_directions ray_bounds))<line_sep># Accumulate diffuse diffuse.append(output_bundle.rgb_map.cpu())<block_end><block_end># Query the whole diffuse map diffuse=torch.cat(diffuse dim=0).numpy()<line_sep># Target mesh path mesh_path=os.path.join(args.save_dir args.mesh_name)<line_sep># Export model export_obj(vertices triangles diffuse normals mesh_path)<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--log-checkpoint" type=str default=<none> help="Training log path with the config and checkpoints to load existent configuration." )<line_sep>parser.add_argument("--checkpoint" type=str default="model_last.ckpt" help="Load existent configuration from the latest checkpoint by default." )<line_sep>parser.add_argument("--save-dir" type=str default="." help="Save mesh to this directory, if specified." )<line_sep>parser.add_argument("--mesh-name" type=str default="mesh.obj" help="Mesh name to be generated." )<line_sep>parser.add_argument("--iso-level" type=float default=32 help="Iso-level value for triangulation" )<line_sep>parser.add_argument("--limit" type=float default=1.2 help="Limits in -xyz to xyz for marching cubes 3D grid." )<line_sep>parser.add_argument("--res" type=int default=128 help="Sampling resolution for marching cubes, increase it for higher level of detail." )<line_sep>parser.add_argument("--super-sampling" type=int default=0 help="Add super sampling along the edges." )<line_sep>parser.add_argument("--batch-size" type=int default=1024 help="Higher batch size results in faster processing but needs more device memory." )<line_sep>parser.add_argument("--no-view-dependence" action="store_true" default=<false> help="Disable view dependent appearance, use sampled diffuse color based on the grid")<line_sep>parser.add_argument("--view-disparity" type=float default=1e-2 help="Ray origins offset from target based on the inverse normal for the view dependent appearance." )<line_sep>parser.add_argument("--view-disparity-max-bound" type=float default=4e0 help="Far max possible bound, usually set to (cfg.far - cfg.near), lower it for better "<concat>"appearance estimation when using higher resolution e.g. at least view_disparity * 2.0." )<line_sep>parser.add_argument("--use-cached-mesh" action="store_true" default=<false> help="Use the cached mesh." )<line_sep>parser.add_argument("--override-cache-mesh" action="store_true" default=<false> help="Caches the mesh, useful for rapid configuration appearance tweaking." )<line_sep>parser.add_argument("--cache-name" type=str default="mesh_cache.pt" help="Mesh cache name, allows for multiple unique meshes of different resolutions." 
)<line_sep>config_args=parser.parse_args()<line_sep># Existent log path path_parser=PathParser()<line_sep>cfg,_=path_parser.parse(<none> config_args.log_checkpoint <none> config_args.checkpoint)<line_sep># Available device device="cuda"<if>torch.cuda.is_available()<else>"cpu"<line_sep># Load model checkpoint print(f"Loading model from {path_parser.checkpoint_path}")<line_sep>model=getattr(models cfg.experiment.model).load_from_checkpoint(path_parser.checkpoint_path)<line_sep>model=model.eval().to(device)<with_stmt>torch.no_grad()# Perform marching cubes and export the mesh <block_start>export_marching_cubes(model config_args cfg device)<block_end><block_end>
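# A typical command line might look like the following (the script name and paths are assumptions, not taken from the project):
#   python export_mesh.py --log-checkpoint logs/lego --checkpoint model_last.ckpt --res 256 --batch-size 4096 --save-dir out --mesh-name lego.obj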
# -*- coding: utf-8 -*- # Copyright 2018 Yelp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for AMI utilities"""<import_from_stmt>mrjob.ami describe_base_emr_images<import_from_stmt>tests.mock_boto3 MockBoto3TestCase<class_stmt>DescribeBaseEMRImagesTestCase(MockBoto3TestCase)# a valid base EMR image. we can make variants of this for testing <block_start>BASE_EMR_IMAGE={'Architecture':'x86_64' 'BlockDeviceMappings':[{'DeviceName':'/dev/xvda' 'Ebs':{'DeleteOnTermination':<true> 'Encrypted':<false> 'SnapshotId':'snap-0ceb5dfba7c0cbd4c' 'VolumeSize':8 'VolumeType':'standard'}}] 'CreationDate':'2018-08-11T02:33:53.000Z' 'Description':'Amazon Linux AMI 2018.03.0.20180811 x86_64 HVM EBS' 'EnaSupport':<true> 'Hypervisor':'xen' 'ImageId':'ami-09c6e771' 'ImageLocation':'amazon/amzn-ami-hvm-2018.03.0.20180811-x86_64-ebs' 'ImageOwnerAlias':'amazon' 'ImageType':'machine' 'Name':'amzn-ami-hvm-2018.03.0.20180811-x86_64-ebs' 'OwnerId':'137112412989' 'Public':<true> 'RootDeviceName':'/dev/xvda' 'RootDeviceType':'ebs' 'SriovNetSupport':'simple' 'State':'available' 'VirtualizationType':'hvm' }<def_stmt>make_image self **kwargs<block_start>"""Return a copy of BASE_EMR_IMAGE with the given fields added. You can blank out fields by setting them to None."""<line_sep>image=dict(self.BASE_EMR_IMAGE **kwargs)<line_sep><return>{k:v<for>k,v image.items()<if>v<is><not><none>}<block_end><def_stmt>test_no_images self<block_start>self.assertEqual(describe_base_emr_images(self.client('ec2')) [])<block_end><def_stmt>test_base_emr_image self<block_start>image=self.make_image()<line_sep>self.add_mock_ec2_image(image)<line_sep>self.assertEqual(describe_base_emr_images(self.client('ec2')) [image])<block_end><def_stmt>test_most_recent_image_first self<block_start>image_old=self.make_image(ImageId='ami-old' CreationDate='2010-06-06T00:00:00.000Z')<line_sep>image_new=self.make_image(ImageId='ami-new' CreationDate='2015-05-06T00:00:00.000Z')<line_sep>self.add_mock_ec2_image(image_old)<line_sep>self.add_mock_ec2_image(image_new)<line_sep>self.assertEqual(describe_base_emr_images(self.client('ec2')) [image_new image_old])<block_end><def_stmt>test_filter_and_sort self<block_start>image_old=self.make_image(ImageId='ami-old' CreationDate='2010-06-06T00:00:00.000Z')<line_sep>image_new=self.make_image(ImageId='ami-new' CreationDate='2015-05-06T00:00:00.000Z')<line_sep>image_null={}<line_sep>self.add_mock_ec2_image(image_null)<line_sep>self.add_mock_ec2_image(image_old)<line_sep>self.add_mock_ec2_image(image_null)<line_sep>self.add_mock_ec2_image(image_new)<line_sep>self.add_mock_ec2_image(image_null)<line_sep>self.assertEqual(describe_base_emr_images(self.client('ec2')) [image_new image_old])<block_end><def_stmt>assert_rejects_image self **kwargs<block_start>image=self.make_image(**kwargs)<line_sep>self.add_mock_ec2_image(image)<line_sep>self.assertNotIn(image describe_base_emr_images(self.client('ec2')))<block_end><def_stmt>test_owner_must_be_amazon self<block_start>self.assert_rejects_image(ImageOwnerAlias='aws-marketplace' 
OwnerId='679593333241')<block_end><def_stmt>test_architecture_must_be_x86_64 self<block_start>self.assert_rejects_image(Architecture='i386')<block_end><def_stmt>test_root_device_type_must_be_ebs self<block_start>self.assert_rejects_image(RootDeviceType='instance-store')<block_end><def_stmt>test_virtualization_type_must_be_hvm self<block_start>self.assert_rejects_image(VirtualizationType='paravirtual')<block_end><def_stmt>test_amazon_linux_1_only self<block_start>self.assert_rejects_image(Name='amzn2-ami-hvm-2017.12.0.20180109-x86_64-ebs')<block_end><def_stmt>test_stable_amazon_linux_versions_only self# no "testing" or "rc," only dots and numbers, please <block_start>self.assert_rejects_image(Name='amzn-ami-hvm-2017.03.rc-1.20170327-x86_64-ebs')<block_end><def_stmt>test_one_volume_only self<block_start>self.assert_rejects_image(BlockDeviceMappings=[self.BASE_EMR_IMAGE['BlockDeviceMappings'][0] {'DeviceName':'xvdca' 'VirtualName':'ephemeral0' }])<block_end><def_stmt>test_dont_crash_on_missing_name self# shouldn't happen in practice, but just in case <block_start>self.assert_rejects_image(Name=<none>)<block_end><def_stmt>test_dont_crash_on_missing_block_device_mappings self# shouldn't happen in practice, but just in case <block_start>self.assert_rejects_image(BlockDeviceMappings=<none>)<block_end><def_stmt>test_dont_crash_on_missing_creation_date self<block_start>self.assert_rejects_image(CreationDate=<none>)<block_end><block_end>
""" EOS core package. """<import_from_stmt>.base Base EOSException<import_from_stmt>.profiler Profiler<import_from_stmt>.symfony Symfony<import_from_stmt>.engine Engine<import_from_stmt>.cookies RememberMe<import_from_stmt>.eos EOS<line_sep>
""" .. module:: clusters .. moduleauthor:: <NAME> <<EMAIL>> """<import_from_stmt>flask request current_app<import_from_stmt>flask_restful Resource<import_from_stmt>requests.exceptions ConnectionError<import_from_stmt>elastichq.model ClusterDTO<import_from_stmt>. api<import_from_stmt>..common.api_response APIResponse<import_from_stmt>..common.exceptions BadRequest request_wrapper<import_from_stmt>..common.status_codes HTTP_Status<import_from_stmt>..service ClusterService ConnectionNotAuthorized ConnectionService<class_stmt>ClusterConnection(Resource)<block_start>""" Manages cluster connection pool. """<line_sep>@request_wrapper<def_stmt>post self<block_start>""" Creates a connection to a given host/port. Accepts a JSON POST BODY. This will add the connection, if it doesn't already exist, to the pool of connections and save the details in the database. .. :quickref: ClusterConnection; Creates a connection to the cluster. **Example request**: .. sourcecode:: http POST /api/clusters/_connect/ HTTP/1.1 Accept: application/json .. code-block:: json { "ip": "127.0.0.1", "port": "9200", "use_ssl": false } **Request Structure** - *(dict) --* - **ip** *(string) --* IP address or host name - **port** *(string) --* ES REST API port - **use_ssl** *(boolean) --* Whether to use HTTPS or not. **Example response**: .. sourcecode:: http HTTP/1.1 201 Content-Type: application/json .. code-block:: json { "data": [ { "cluster_name": "", "cluster_ip": "", "cluster_port": "9200", "cluster_scheme": "http", "cluster_connected": true, "cluster_host": "http://10.0.0.0:9200", "cluster_version": "2.3.5" } ], "status_code": 200, "message": null, "response_time": 92 } **Response Structure** - *(dict) --* - **cluster_name** *(string) --* cluster name - **cluster_ip** *(string) --* IP or host - **cluster_port** *(string) --* - **cluster_scheme** *(string) --* - **cluster_connected** *(boolean) --* Whether there was a successful connection. 
- **cluster_host** *(string) --* The complete connection url - **cluster_version** *(string) --* Elasticsearch version :reqheader Accept: application/json :resheader Content-Type: application/json :status 201: connection created :status 400: bad request :status 500: server error """<line_sep>json_data=request.get_json(force=<true>)<line_sep>params=request.values.to_dict()<line_sep>params.update(json_data)<if_stmt>params.get('ip' <none>)<is><none><block_start><raise>BadRequest(message='Missing required parameters.')<block_end>scheme='http'<if_stmt>params.get('use_ssl' <false>)<is><true><block_start>scheme='https'<block_end><try_stmt><block_start>enable_ssl=current_app.config.get('ENABLE_SSL' <false>)<line_sep>ca_certs=current_app.config.get('CA_CERTS' <none>)<line_sep>verify_certs=current_app.config.get('VERIFY_CERTS' <none>)<line_sep>client_key=current_app.config.get('CLIENT_KEY' <none>)<line_sep>client_cert=current_app.config.get('CLIENT_CERT' <none>)<line_sep>print(client_key)<line_sep>print(client_cert)<line_sep>response=ConnectionService().create_connection(ip=params['ip'] port=params.get('port' "9200") scheme=scheme username=params.get('username' <none>) password=params.get('password' <none>) fail_on_exception=<true> enable_ssl=enable_ssl ca_certs=ca_certs verify_certs=verify_certs client_key=client_key client_cert=client_cert)<line_sep>schema=ClusterDTO(many=<false>)<line_sep>result=schema.dump(response)<line_sep><return>APIResponse(result.data HTTP_Status.CREATED <none>)<block_end><except_stmt>ConnectionNotAuthorized<as>cna<block_start><return>APIResponse([] HTTP_Status.UNAUTHORIZED <none>)<block_end><except_stmt>ConnectionError<as>ce<block_start><return>APIResponse([] HTTP_Status.NOT_FOUND <none>)<block_end><block_end>@request_wrapper<def_stmt>delete self cluster_name<block_start>""" Deletes a connection from the connection pool and the database, by cluster name. :note: This method does NOT delete your Elasticsearch Cluster, just the connection from HQ to it. **Example request**: .. sourcecode:: http DELETE /clusters/_connect/<CLUSTER_NAME> HTTP/1.1 Accept: application/json :type cluster_name: string :param cluster_name: Name of cluster connection to remove. :returns: List of active clusters. :status 200: Ok :status 400: bad request :status 500: server error """<line_sep>response=ConnectionService().delete_connection(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterList(Resource)<block_start>""" Retrieves a list of all active and inactive cluster connections. """<line_sep>@request_wrapper<def_stmt>get self<block_start>"""Returns a collection of clusters. **Example request**: .. sourcecode:: http GET /api/clusters/ HTTP/1.1 Accept: application/json **Example response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json .. code-block:: json { "status_code": 200, "response_time": 1648, "message": null, "data": [ { "cluster_name": "", "cluster_ip": "", "cluster_port": "9200", "cluster_scheme": "http", "cluster_connected": true, "cluster_host": "http://10.0.0.0:9200", "cluster_version": "2.3.5", "cluster_health": { } } ] } **Response Structure** - *(dict) --* - **cluster_name** *(string) --* cluster name - **cluster_ip** *(string) --* IP or host - **cluster_port** *(string) --* - **cluster_scheme** *(string) --* - **cluster_connected** *(boolean) --* Whether there was a successful connection. 
- **cluster_host** *(string) --* The complete connection url - **cluster_version** *(string) --* Elasticsearch version :resheader Content-Type: application/json :status 200: OK :status 500: server error """<line_sep>response=ClusterService().get_clusters()<line_sep>schema=ClusterDTO(many=<true>)<line_sep>result=schema.dump(response)<line_sep><return>APIResponse(result.data HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterHealth(Resource)<block_start>""" Wrapper around the Cluster health API https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html """<line_sep>@request_wrapper<def_stmt>get self cluster_name<block_start>""" Returns cluster health for one cluster **Example request**: .. sourcecode:: http GET /api/clusters/<cluster_name>/_health HTTP/1.1 :type cluster_name: string :param cluster_name: Name of cluster **Example response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json .. code-block:: json { "status_code": 200, "data": [ { "active_primary_shards": 10, "relocating_shards": 0, "cluster_name": "es_v2", "active_shards": 10, "task_max_waiting_in_queue_millis": 0, "number_of_pending_tasks": 0, "timed_out": false, "number_of_nodes": 1, "unassigned_shards": 10, "number_of_in_flight_fetch": 0, "initializing_shards": 0, "delayed_unassigned_shards": 0, "active_shards_percent_as_number": 50, "status": "yellow", "number_of_data_nodes": 1 } ], "response_time": 38, "message": null } :resheader Content-Type: application/json :status 200: OK :status 500: server error """<line_sep>response=ClusterService().get_cluster_health(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterState(Resource)<block_start>@request_wrapper<def_stmt>get self cluster_name<block_start>""" Wrapper around https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html """<line_sep>response=ClusterService().get_cluster_state(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterSummary(Resource)<block_start>""" Brief summary for a given cluster name """<line_sep>@request_wrapper<def_stmt>get self cluster_name<block_start>""" Given a cluster_name, returns summary information from several ES Cluster APIs. 
:param cluster_name: :return: """<line_sep>response=ClusterService().get_cluster_summary(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterStats(Resource)<block_start>@request_wrapper<def_stmt>get self cluster_name<block_start>""" Wrapper around https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html :param cluster_name: :return: """<line_sep>response=ClusterService().get_cluster_stats(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterPendingTasks(Resource)<block_start>@request_wrapper<def_stmt>get self cluster_name<block_start>response=ClusterService().get_cluster_pending_tasks(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end><class_stmt>ClusterSettings(Resource)<block_start>@request_wrapper<def_stmt>get self cluster_name<block_start>response=ClusterService().get_cluster_settings(cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end>@request_wrapper<def_stmt>put self cluster_name<block_start>json_data=request.get_json(force=<true>)<line_sep>response=ClusterService().put_cluster_settings(json_data cluster_name)<line_sep><return>APIResponse(response HTTP_Status.OK <none>)<block_end><block_end>api.add_resource(ClusterConnection '/clusters/_connect' '/clusters/<string:cluster_name>/_connect' endpoint='clusters' methods=['POST' 'DELETE'])<line_sep>api.add_resource(ClusterList '/clusters' endpoint='clusters_list' methods=['GET'])<line_sep>api.add_resource(ClusterStats '/clusters/<string:cluster_name>/_stats' endpoint='clusters_stats' methods=['GET'])<line_sep>api.add_resource(ClusterHealth '/clusters/<string:cluster_name>/_health' endpoint='clusters_health' methods=['GET'])<line_sep>api.add_resource(ClusterSummary '/clusters/<string:cluster_name>/_summary' endpoint='clusters_summary' methods=['GET'])<line_sep>api.add_resource(ClusterState '/clusters/<string:cluster_name>/_state' endpoint='clusters_state' methods=['GET'])<line_sep>api.add_resource(ClusterPendingTasks '/clusters/<string:cluster_name>/_pending_tasks' endpoint='clusters_pending_tasks' methods=['GET'])<line_sep>api.add_resource(ClusterSettings '/clusters/<string:cluster_name>/_settings' endpoint='clusters_settings' methods=['GET' 'PUT'])<line_sep>
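# A client-side sketch (hedged): exercising the ClusterConnection POST documented above with the requests library; the ElasticHQ host and port below are assumptions.
<import_stmt>requests<line_sep>
payload={"ip":"127.0.0.1" "port":"9200" "use_ssl":<false>}<line_sep>
resp=requests.post("http://localhost:5000/api/clusters/_connect" json=payload)<line_sep>
print(resp.status_code resp.json())<line_sep>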
# Copyright 2015 Spotify AB. All rights reserved. # # The contents of this file are licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """Tests for IOSDriver."""<import_stmt>unittest<import_from_stmt>napalm.ios ios<import_from_stmt>napalm.base.test.base TestConfigNetworkDriver TestGettersNetworkDriver<import_stmt>re<class_stmt>TestConfigIOSDriver(unittest.TestCase TestConfigNetworkDriver)<block_start>"""Configuration Tests for IOSDriver. Core file operations: load_replace_candidate Tested load_merge_candidate Tested compare_config Tested commit_config Tested discard_config Tested rollback Tested Internal methods: _enable_confirm Tested _disable_confirm Tested _gen_rollback_cfg Tested as part of rollback _check_file_exists Tested Misc methods: open Tested close Skipped normalize_compare_config Tested (indirectly) scp_file Tested gen_full_path Tested """<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>"""Executed when the class is instantiated."""<line_sep>ip_addr="127.0.0.1"<line_sep>username="vagrant"<line_sep>password="<PASSWORD>"<line_sep>cls.vendor="ios"<line_sep>optional_args={"port":12204 "dest_file_system":"bootflash:"}<line_sep>cls.device=ios.IOSDriver(ip_addr username password optional_args=optional_args)<line_sep>cls.device.open()<line_sep># Setup initial state cls.device.load_replace_candidate(filename="%s/initial.conf"%cls.vendor)<line_sep>cls.device.commit_config()<block_end><def_stmt>test_ios_only_confirm self<block_start>"""Test _disable_confirm() and _enable_confirm(). 
_disable_confirm() changes router config so it doesn't prompt for confirmation _enable_confirm() reenables this """<line_sep># Set initial device configuration self.device.load_replace_candidate(filename="%s/initial.conf"%self.vendor)<line_sep>self.device.commit_config()<line_sep># Verify initial state output=self.device.device.send_command("show run | inc file prompt")<line_sep>output=output.strip()<line_sep>self.assertEqual(output "")<line_sep># Disable confirmation self.device._disable_confirm()<line_sep>output=self.device.device.send_command("show run | inc file prompt")<line_sep>output=output.strip()<line_sep>self.assertEqual(output "file prompt quiet")<line_sep># Reenable confirmation self.device._enable_confirm()<line_sep>output=self.device.device.send_command("show run | inc file prompt")<line_sep>output=output.strip()<line_sep>self.assertEqual(output "")<block_end><def_stmt>test_ios_only_gen_full_path self<block_start>"""Test gen_full_path() method."""<line_sep>output=self.device._gen_full_path(self.device.candidate_cfg)<line_sep>self.assertEqual(output self.device.dest_file_system+"/candidate_config.txt")<line_sep>output=self.device._gen_full_path(self.device.rollback_cfg)<line_sep>self.assertEqual(output self.device.dest_file_system+"/rollback_config.txt")<line_sep>output=self.device._gen_full_path(self.device.merge_cfg)<line_sep>self.assertEqual(output self.device.dest_file_system+"/merge_config.txt")<line_sep>output=self.device._gen_full_path(filename="running-config" file_system="system:")<line_sep>self.assertEqual(output "system:/running-config")<block_end><def_stmt>test_ios_only_check_file_exists self<block_start>"""Test _check_file_exists() method."""<line_sep>self.device.load_replace_candidate(filename="%s/initial.conf"%self.vendor)<line_sep>valid_file=self.device._check_file_exists(self.device.dest_file_system+"/candidate_config.txt")<line_sep>self.assertTrue(valid_file)<line_sep>invalid_file=self.device._check_file_exists(self.device.dest_file_system+"/bogus_999.txt")<line_sep>self.assertFalse(invalid_file)<block_end><block_end><class_stmt>TestGetterIOSDriver(unittest.TestCase TestGettersNetworkDriver)<block_start>"""Getters Tests for IOSDriver. 
Get operations: get_lldp_neighbors get_facts get_interfaces get_bgp_neighbors get_interfaces_counters """<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>"""Executed when the class is instantiated."""<line_sep>cls.mock=<true><line_sep>username="vagrant"<line_sep>ip_addr="192.168.0.234"<line_sep>password="<PASSWORD>"<line_sep>cls.vendor="ios"<line_sep>optional_args={}<line_sep>optional_args["dest_file_system"]="flash:"<line_sep>cls.device=ios.IOSDriver(ip_addr username password optional_args=optional_args)<if_stmt>cls.mock<block_start>cls.device.device=FakeIOSDevice()<block_end><else_stmt><block_start>cls.device.open()<block_end><block_end><def_stmt>test_ios_only_bgp_time_conversion self<block_start>"""Verify time conversion static method."""<line_sep>test_cases={"1w0d":604800 "00:14:23":863 "00:13:40":820 "00:00:21":21 "00:00:13":13 "00:00:49":49 "1d11h":126000 "1d17h":147600 "8w5d":5270400 "1y28w":48470400 "never":-1 }<for_stmt>bgp_time,result test_cases.items()<block_start>self.assertEqual(self.device.bgp_time_conversion(bgp_time) result)<block_end><block_end><block_end><class_stmt>FakeIOSDevice<block_start>"""Class to fake a IOS Device."""<line_sep>@staticmethod<def_stmt>read_txt_file filename<block_start>"""Read a txt file and return its content."""<with_stmt>open(filename)<as>data_file<block_start><return>data_file.read()<block_end><block_end><def_stmt>send_command_expect self command **kwargs<block_start>"""Fake execute a command in the device by just returning the content of a file."""<line_sep>cmd=re.sub(r"[\[\]\*\^\+\s\|]" "_" command)<line_sep>output=self.read_txt_file("ios/mock_data/{}.txt".format(cmd))<line_sep><return>str(output)<block_end><def_stmt>send_command self command **kwargs<block_start>"""Fake execute a command in the device by just returning the content of a file."""<line_sep><return>self.send_command_expect(command)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
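# A quick arithmetic cross-check (hedged, not part of the napalm tests) of three expected values from the bgp_time_conversion table above: "1d11h", "8w5d" and "1y28w".
DAY,WEEK,YEAR=86400 7<times>86400 365<times>86400<line_sep>
<assert_stmt>1<times>DAY+11<times>3600<eq>126000<assert_stmt>8<times>WEEK+5<times>DAY<eq>5270400<assert_stmt>1<times>YEAR+28<times>WEEK<eq>48470400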
<import_stmt>os<import_stmt>sys<line_sep>invalid_files=[]<line_sep>copyright_headers=['# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.' '# Copyright (c) Microsoft Corporation. All rights reserved.\n#\n# MIT License' ]<line_sep>whitelist=['nni/version.py' 'nni/algorithms/hpo/bohb_advisor/config_generator.py' ]<line_sep><for_stmt>root,dirs,files os.walk('nni')<block_start><for_stmt>file files<block_start><if_stmt><not>file.endswith('.py')<block_start><continue><block_end>full_path=os.path.join(root file)<if_stmt>full_path<in>whitelist<block_start><continue><block_end>content=open(full_path).read()<if_stmt><not>content.strip()# empty file <block_start><continue><block_end><if_stmt><not>any(content.startswith(header)<for>header copyright_headers)<block_start>invalid_files.append(full_path)<block_end><block_end><block_end><if_stmt>invalid_files<block_start>print("The following files don't have a copyright text header.\n")<for_stmt>file invalid_files<block_start>print(' '+file)<block_end>print('\nPlease add the following text at the beginning of each file.\n')<line_sep>print('# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.')<line_sep>sys.exit(1)<block_end>
<import_stmt>sys<line_sep>sys.path.append("../")<import_from_stmt>appJar gui<def_stmt>getQuestions fileName="questions.txt"<block_start>questions=[]<line_sep>data=<none><with_stmt>open(fileName "r")<as>questionsFile<block_start><while_stmt><true><block_start>line=questionsFile.readline().strip()<if_stmt>line<eq>"EOF"<block_start><break># end of file reached <block_end><elif_stmt>line.startswith("-")<block_start><continue><block_end># ignore these lines <elif_stmt>line.startswith("#")# start of question # we need to add our last question <block_start><if_stmt>data<is><not><none><block_start>questions.append(data)<block_end>data={"question":"" "options":[] "answer":""}<line_sep>question=line[5:].strip()<line_sep>nextLine=questionsFile.readline().strip()<if_stmt><not>nextLine.startswith("------")<block_start>question<augadd>" "+nextLine<block_end>data["question"]=question<block_end><elif_stmt>line.startswith("*")# answer option <block_start>option=line[1:]<line_sep>data["options"].append(option)<block_end><elif_stmt>line.startswith("Answer:")# answer <block_start>answer=line[8:]<line_sep>data["answer"]=answer<block_end><block_end><block_end># keep the question that was being built when EOF was reached <if_stmt>data<is><not><none><block_start>questions.append(data)<block_end><return>questions<block_end><def_stmt>checkAnswer question<block_start>selection=app.getRadioButton(question)<line_sep>answer=questions[int(question[1:])-1]["answer"]<if_stmt>selection<eq>answer<block_start>app.infoBox("CORRECT" "You got it!")<block_end><else_stmt><block_start>app.infoBox("WRONG" "Try again!")<block_end><block_end><with_stmt>gui("QUIZ")<as>app<block_start>questions=getQuestions()<with_stmt>app.pagedWindow("QUIZ DEMO")<block_start>count=0<for_stmt>q questions<block_start>count<augadd>1<line_sep>title="Q"+str(count)<with_stmt>app.page()<block_start>app.setSticky("EW")<line_sep>app.addLabel(title title+": "+q["question"])<line_sep>app.setLabelBg(title "green")<with_stmt>app.labelFrame(title hideTitle=<true>)<block_start><for_stmt>o q["options"]<block_start>app.addRadioButton(title o)<block_end><block_end>app.addNamedButton("CHECK" title checkAnswer)<block_end><block_end><block_end><block_end>
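# One plausible questions.txt layout accepted by getQuestions() above (the file itself is assumed, not part of the original demo; the prefix widths "#Q1: " = 5 characters and "Answer: " = 8 characters are inferred from line[5:] and line[8:]):
#   #Q1: Which planet is known as the red planet?
#   ----------------------------------------------
#   *Venus
#   *Mars
#   *Jupiter
#   Answer: Mars
#   ----------------------------------------------
#   EOF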
<import_from_stmt>datetime date datetime timedelta<import_from_stmt>freezegun freeze_time<import_from_stmt>data.logs_model.inmemory_model InMemoryModel<import_from_stmt>data.logs_model.combined_model CombinedLogsModel<import_from_stmt>test.fixtures *<line_sep>@pytest.fixture()<def_stmt>first_model <block_start><return>InMemoryModel()<block_end>@pytest.fixture()<def_stmt>second_model <block_start><return>InMemoryModel()<block_end>@pytest.fixture()<def_stmt>combined_model first_model second_model initialized_db<block_start><return>CombinedLogsModel(first_model second_model)<block_end><def_stmt>test_log_action first_model second_model combined_model initialized_db<block_start>day=date(2019 1 1)<line_sep># Write to the combined model. <with_stmt>freeze_time(day)<block_start>combined_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<block_end>simple_repo=model.repository.get_repository("devtable" "simple")<line_sep># Make sure it is found in the first model but not the second. <assert_stmt>combined_model.count_repository_actions(simple_repo day)<eq>1<assert_stmt>first_model.count_repository_actions(simple_repo day)<eq>1<assert_stmt>second_model.count_repository_actions(simple_repo day)<eq>0<block_end><def_stmt>test_count_repository_actions first_model second_model combined_model initialized_db<block_start>today=date(2019 1 1)<line_sep># Write to the combined model. <with_stmt>freeze_time(today)# Write to each model. <block_start>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>second_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>second_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep># Ensure the counts match as expected. simple_repo=model.repository.get_repository("devtable" "simple")<assert_stmt>first_model.count_repository_actions(simple_repo today)<eq>3<assert_stmt>second_model.count_repository_actions(simple_repo today)<eq>2<assert_stmt>combined_model.count_repository_actions(simple_repo today)<eq>5<block_end><block_end><def_stmt>test_yield_logs_for_export first_model second_model combined_model initialized_db<block_start>now=datetime.now()<with_stmt>freeze_time(now)# Write to each model. <block_start>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>second_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>second_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<block_end>later=now+timedelta(minutes=60)<line_sep># Ensure the full set of logs is yielded. 
first_logs=list(first_model.yield_logs_for_export(now later))[0]<line_sep>second_logs=list(second_model.yield_logs_for_export(now later))[0]<line_sep>combined=list(combined_model.yield_logs_for_export(now later))<line_sep>full_combined=[]<for_stmt>subset combined<block_start>full_combined.extend(subset)<block_end><assert_stmt>len(full_combined)<eq>len(first_logs)+len(second_logs)<assert_stmt>full_combined<eq>(first_logs+second_logs)<block_end><def_stmt>test_lookup_logs first_model second_model combined_model initialized_db<block_start>now=datetime.now()<with_stmt>freeze_time(now)# Write to each model. <block_start>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>first_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>second_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<line_sep>second_model.log_action("push_repo" namespace_name="devtable" repository_name="simple" ip="1.2.3.4")<block_end>later=now+timedelta(minutes=60)<def_stmt>_collect_logs model<block_start>page_token=<none><line_sep>all_logs=[]<while_stmt><true><block_start>paginated_logs=model.lookup_logs(now later page_token=page_token)<line_sep>page_token=paginated_logs.next_page_token<line_sep>all_logs.extend(paginated_logs.logs)<if_stmt>page_token<is><none><block_start><break><block_end><block_end><return>all_logs<block_end>first_logs=_collect_logs(first_model)<line_sep>second_logs=_collect_logs(second_model)<line_sep>combined=_collect_logs(combined_model)<assert_stmt>len(combined)<eq>len(first_logs)+len(second_logs)<assert_stmt>combined<eq>(first_logs+second_logs)<block_end>
<import_stmt>unittest<import_from_stmt>asciimatics.event MouseEvent<import_from_stmt>asciimatics.paths Path DynamicPath<class_stmt>TestPaths(unittest.TestCase)<block_start><def_stmt>assert_path_equals self path oracle<block_start>path.reset()<line_sep>positions=[]<while_stmt><not>path.is_finished()<block_start>positions.append(path.next_pos())<block_end>self.assertEqual(positions oracle)<block_end><def_stmt>test_jump_and_wait self<block_start>""" Check basic movement of cursor works. """<line_sep>path=Path()<line_sep>path.jump_to(10 10)<line_sep>path.wait(3)<line_sep>self.assert_path_equals(path [(10 10) (10 10) (10 10) (10 10)])<block_end><def_stmt>test_straight_lines self<block_start>""" Check a path works in straight lines. """<line_sep># Horizontal path=Path()<line_sep>path.jump_to(10 10)<line_sep>path.move_straight_to(15 10 5)<line_sep>self.assert_path_equals(path [(10 10) (11 10) (12 10) (13 10) (14 10) (15 10)])<line_sep># Vertical path=Path()<line_sep>path.jump_to(5 5)<line_sep>path.move_straight_to(5 10 5)<line_sep>self.assert_path_equals(path [(5 5) (5 6) (5 7) (5 8) (5 9) (5 10)])<line_sep># Diagonal spaced path=Path()<line_sep>path.jump_to(5 5)<line_sep>path.move_straight_to(15 15 5)<line_sep>self.assert_path_equals(path [(5 5) (7 7) (9 9) (11 11) (13 13) (15 15)])<block_end><def_stmt>test_spline self<block_start>""" Check a path works with a spline curve. """<line_sep>path=Path()<line_sep>path.jump_to(0 10)<line_sep>path.move_round_to([(0 10) (20 0) (40 10) (20 20) (0 10)] 20)<line_sep>self.assert_path_equals(path [(0 10) (0 10) (0 10) (0 10) (0 10) (5 7) (10 4) (15 1) (20 0) (25 1) (30 3) (35 7) (40 10) (35 12) (30 16) (25 18) (20 20) (15 18) (10 15) (5 12) (0 10)])<block_end><def_stmt>test_dynamic_path self<block_start>""" Check a dynamic path works as expected. """<class_stmt>TestPath(DynamicPath)<block_start><def_stmt>process_event self event# Assume that we're always passing in a MouseEvent. <block_start>self._x=event.x<line_sep>self._y=event.y<block_end><block_end># Initial path should start at specified location. path=TestPath(<none> 0 0)<line_sep>self.assertEqual(path.next_pos() (0 0))<line_sep>self.assertFalse(path.is_finished())<line_sep># Process event should move location. path.process_event(MouseEvent(10 5 0))<line_sep>self.assertEqual(path.next_pos() (10 5))<line_sep># Reset should return to original location. path.reset()<line_sep>self.assertEqual(path.next_pos() (0 0))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>unittest<import_from_stmt>tests.test_utils get_sample_pdf_with_labels get_sample_pdf get_sample_sdf get_sample_pdf_with_extra_cols get_sample_pdf_with_no_text_col get_sample_spark_dataframe<import_from_stmt>nlu *<class_stmt>TestStopwords(unittest.TestCase)<block_start><def_stmt>test_stopwords_pipe self<block_start>pipe=nlu.load('stopwords' verbose=<true>)<line_sep>df=pipe.predict('HELLO WORLD! How are YOU!?!@' output_level='sentence' drop_irrelevant_cols=<false> metadata=<true> )<for_stmt>c df.columns<block_start>print(df[c])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
'''Autogenerated by xml_generate script, do not edit!'''<import_from_stmt>OpenGL platform<as>_p arrays<line_sep># Code generation uses this <import_from_stmt>OpenGL.raw.GL _types<as>_cs<line_sep># End users want this... <import_from_stmt>OpenGL.raw.GL._types *<import_from_stmt>OpenGL.raw.GL _errors<import_from_stmt>OpenGL.constant Constant<as>_C<import_stmt>ctypes<line_sep>_EXTENSION_NAME='GL_AMD_debug_output'<def_stmt>_f function<block_start><return>_p.createFunction(function _p.PLATFORM.GL 'GL_AMD_debug_output' error_checker=_errors._error_checker)<block_end>GL_DEBUG_CATEGORY_API_ERROR_AMD=_C('GL_DEBUG_CATEGORY_API_ERROR_AMD' 0x9149)<line_sep>GL_DEBUG_CATEGORY_APPLICATION_AMD=_C('GL_DEBUG_CATEGORY_APPLICATION_AMD' 0x914F)<line_sep>GL_DEBUG_CATEGORY_DEPRECATION_AMD=_C('GL_DEBUG_CATEGORY_DEPRECATION_AMD' 0x914B)<line_sep>GL_DEBUG_CATEGORY_OTHER_AMD=_C('GL_DEBUG_CATEGORY_OTHER_AMD' 0x9150)<line_sep>GL_DEBUG_CATEGORY_PERFORMANCE_AMD=_C('GL_DEBUG_CATEGORY_PERFORMANCE_AMD' 0x914D)<line_sep>GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD=_C('GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD' 0x914E)<line_sep>GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD=_C('GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD' 0x914C)<line_sep>GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD=_C('GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD' 0x914A)<line_sep>GL_DEBUG_LOGGED_MESSAGES_AMD=_C('GL_DEBUG_LOGGED_MESSAGES_AMD' 0x9145)<line_sep>GL_DEBUG_SEVERITY_HIGH_AMD=_C('GL_DEBUG_SEVERITY_HIGH_AMD' 0x9146)<line_sep>GL_DEBUG_SEVERITY_LOW_AMD=_C('GL_DEBUG_SEVERITY_LOW_AMD' 0x9148)<line_sep>GL_DEBUG_SEVERITY_MEDIUM_AMD=_C('GL_DEBUG_SEVERITY_MEDIUM_AMD' 0x9147)<line_sep>GL_MAX_DEBUG_LOGGED_MESSAGES_AMD=_C('GL_MAX_DEBUG_LOGGED_MESSAGES_AMD' 0x9144)<line_sep>GL_MAX_DEBUG_MESSAGE_LENGTH_AMD=_C('GL_MAX_DEBUG_MESSAGE_LENGTH_AMD' 0x9143)<line_sep>@_f@_p.types(<none> _cs.GLDEBUGPROCAMD ctypes.c_void_p)<def_stmt>glDebugMessageCallbackAMD callback userParam<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLenum _cs.GLenum _cs.GLsizei arrays.GLuintArray _cs.GLboolean)<def_stmt>glDebugMessageEnableAMD category severity count ids enabled<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLenum _cs.GLenum _cs.GLuint _cs.GLsizei arrays.GLcharArray)<def_stmt>glDebugMessageInsertAMD category severity id length buf<block_start><pass><block_end>@_f@_p.types(_cs.GLuint _cs.GLuint _cs.GLsizei arrays.GLuintArray arrays.GLuintArray arrays.GLuintArray arrays.GLsizeiArray arrays.GLcharArray)<def_stmt>glGetDebugMessageLogAMD count bufsize categories severities ids lengths message<block_start><pass><block_end>
<import_stmt>unittest<import_from_stmt>unittest.mock Mock patch<import_stmt>numpy<as>np<import_stmt>numpy.typing<as>npt<import_from_stmt>nuplan.common.actor_state.state_representation Point2D StateSE2<import_from_stmt>nuplan.common.geometry.transform rotate rotate_2d rotate_angle transform translate translate_laterally translate_longitudinally translate_longitudinally_and_laterally <class_stmt>TestTransform(unittest.TestCase)<block_start>"""Tests for transform functions"""<def_stmt>test_rotate_2d self<arrow><none><block_start>"""Tests rotation of 2D point"""<line_sep># Setup point=Point2D(1 0)<line_sep>rotation_matrix=np.array([[0 1] [-1 0]] dtype=np.float32)# type: npt.NDArray[np.float32] # Function call result=rotate_2d(point rotation_matrix)<line_sep># Checks self.assertEqual(result Point2D(0 1))<block_end><def_stmt>test_translate self<arrow><none><block_start>"""Tests translate"""<line_sep># Setup pose=StateSE2(3 5 np.pi/4)<line_sep>translation=np.array([1 2] dtype=np.float32)# type: npt.NDArray[np.float32] # Function call result=translate(pose translation)<line_sep># Checks self.assertEqual(result StateSE2(4 7 np.pi/4))<block_end><def_stmt>test_rotate self<arrow><none><block_start>"""Tests rotation of SE2 pose by rotation matrix"""<line_sep># Setup pose=StateSE2(1 2 np.pi/4)<line_sep>rotation_matrix=np.array([[0 1] [-1 0]] dtype=np.float32)# type: npt.NDArray[np.float32] # Function call result=rotate(pose rotation_matrix)<line_sep># Checks self.assertAlmostEqual(result.x -2)<line_sep>self.assertAlmostEqual(result.y 1)<line_sep>self.assertAlmostEqual(result.heading -np.pi/4)<block_end><def_stmt>test_rotate_angle self<arrow><none><block_start>"""Tests rotation of SE2 pose by angle (in radian)"""<line_sep># Setup pose=StateSE2(1 2 np.pi/4)<line_sep>angle=-np.pi/2<line_sep># Function call result=rotate_angle(pose angle)<line_sep># Checks self.assertAlmostEqual(result.x -2)<line_sep>self.assertAlmostEqual(result.y 1)<line_sep>self.assertAlmostEqual(result.heading -np.pi/4)<block_end><def_stmt>test_transform self<arrow><none><block_start>"""Tests transformation of SE2 pose"""<line_sep># Setup pose=StateSE2(1 2 0)<line_sep>transform_matrix=np.array([[-3 -2 5] [0 -1 4] [0 0 1]] dtype=np.float32)<line_sep># type: npt.NDArray[np.float32] # Function call result=transform(pose transform_matrix)<line_sep># Checks self.assertAlmostEqual(result.x 2)<line_sep>self.assertAlmostEqual(result.y 0)<line_sep>self.assertAlmostEqual(result.heading np.pi places=4)<block_end>@patch("nuplan.common.geometry.transform.translate")<def_stmt>test_translate_longitudinally self mock_translate:Mock<arrow><none><block_start>"""Tests longitudinal translation"""<line_sep># Setup pose=StateSE2(1 2 np.arctan(1/3))<line_sep># Function call result=translate_longitudinally(pose np.sqrt(10))<line_sep># Checks np.testing.assert_array_almost_equal(mock_translate.call_args.args[1] np.array([3 1]))<line_sep>self.assertEqual(result mock_translate.return_value)<block_end>@patch("nuplan.common.geometry.transform.translate")<def_stmt>test_translate_laterally self mock_translate:Mock<arrow><none><block_start>"""Tests lateral translation"""<line_sep># Setup pose=StateSE2(1 2 np.arctan(1/3))<line_sep># Function call result=translate_laterally(pose np.sqrt(10))<line_sep># Checks np.testing.assert_array_almost_equal(mock_translate.call_args.args[1] np.array([-1 3]))<line_sep>self.assertEqual(result mock_translate.return_value)<block_end>@patch("nuplan.common.geometry.transform.translate")<def_stmt>test_translate_longitudinally_and_laterally 
self mock_translate:Mock<arrow><none><block_start>"""Tests longitudinal and lateral translation"""<line_sep># Setup pose=StateSE2(1 2 np.arctan(1/3))<line_sep># Function call result=translate_longitudinally_and_laterally(pose np.sqrt(10) np.sqrt(10))<line_sep># Checks np.testing.assert_array_almost_equal(mock_translate.call_args.args[1] np.array([2 4]))<line_sep>self.assertEqual(result mock_translate.return_value)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_stmt>datetime<import_stmt>arrow<import_from_stmt>sqlalchemy.sql func<import_from_stmt>flask current_app<as>app<import_from_stmt>werkzeug.security generate_password_hash check_password_hash<import_from_stmt>config.extensions db<import_from_stmt>utils.constants USER_ROLES INVITE_EXPIRY<import_from_stmt>utils.custom_exception MultipleCompaniesFound<class_stmt>User(db.Model)<block_start>id=db.Column(db.Integer primary_key=<true>)<line_sep>first_name=db.Column(db.String(100) nullable=<false>)<line_sep>last_name=db.Column(db.String(100) nullable=<false>)<line_sep>email=db.Column(db.String(100) unique=<true> nullable=<false>)<line_sep>role_id=db.Column(db.Integer db.ForeignKey('role.id') nullable=<false>)<line_sep>role=db.relationship('Role' backref=db.backref('users' lazy=<true>))<line_sep>company_id=db.Column(db.Integer db.ForeignKey('company.id') nullable=<false>)<line_sep>company=db.relationship('Company' backref=db.backref('users' lazy=<true>))<line_sep>notification=db.relationship('Notification' uselist=<false> back_populates='user')<line_sep>api_key=db.Column(db.String(100) unique=<true> nullable=<false>)<line_sep>phone_number=db.Column(db.String(100) nullable=<true>)<line_sep>password_hash=db.Column(db.String(255) nullable=<false>)<line_sep>created_at=db.Column(db.DateTime(timezone=<true>) default=func.now())<line_sep>updated_at=db.Column(db.DateTime(timezone=<true>) onupdate=func.now())<line_sep>isActive=db.Column(db.Boolean default=<true> server_default='t' nullable=<false>)<def_stmt>set_password self password<block_start>password_hash=generate_password_hash(password)<line_sep>self.password_hash=password_hash<block_end><def_stmt>check_password self password<block_start><return>check_password_hash(self.password_hash password)<block_end>@property<def_stmt>full_name self<block_start><return>self.first_name+" "+self.last_name<block_end><def_stmt>get_identity self<block_start>""" Use this to generate access token. """<line_sep>user_identity={"id":self.id "first_name":self.first_name "last_name":self.last_name "email":self.email "company":self.company.name "role":self.role.name "api_key":self.api_key "isActive":self.isActive "company_license_exists":<true> }<line_sep><return>user_identity<block_end><def_stmt>save self commit=<true><block_start>db.session.add(self)<line_sep># add a notification for the user. notification=Notification(user=self)<line_sep>db.session.add(notification)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>delete self commit=<true><block_start>db.session.delete(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. 
<raise><block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>'<User {}>'.format(self.email)<block_end><block_end><class_stmt>Role(db.Model)<block_start>id=db.Column(db.Integer primary_key=<true>)<line_sep>name=db.Column(db.String(100) unique=<true> nullable=<false>)<line_sep>created_at=db.Column(db.DateTime(timezone=<true>) default=func.now())<line_sep>updated_at=db.Column(db.DateTime(timezone=<true>) onupdate=func.now())<line_sep>@staticmethod<def_stmt>get_all_roles <block_start><return>Role.query.all()<block_end><def_stmt>save self commit=<true><block_start>db.session.add(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>'<Role {}>'.format(self.name)<block_end><block_end><class_stmt>Company(db.Model)<block_start>id=db.Column(db.Integer primary_key=<true>)<line_sep>name=db.Column(db.String(255) unique=<true> nullable=<false>)<line_sep>created_at=db.Column(db.DateTime(timezone=<true>) default=func.now())<line_sep>updated_at=db.Column(db.DateTime(timezone=<true>) onupdate=func.now())<def_stmt>pre_save_checks self# There should be only one company <block_start>count=Company.query.count()<if_stmt>count<eq>0<and>self.id<is><none><block_start><return><true><block_end><return><false><block_end><def_stmt>pre_save self<block_start>""" Use this method to modify the data before storing in the database. """<line_sep>self.name=self.name.lower()<block_end><def_stmt>save self commit=<true><block_start><if_stmt><not>self.pre_save_checks()<block_start>companies=Company.query.all()<line_sep>companies=[company.name<for>company companies]<line_sep>app.logger.error("Multiple companies found: [{}]".format(companies))<line_sep><raise>MultipleCompaniesFound()<block_end>self.pre_save()<line_sep>db.session.add(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>delete self commit=<true><block_start>db.session.delete(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>get_admins self<block_start>""" Get all admins of a company. 
"""<line_sep>admins=[]<for_stmt>user self.users<block_start><if_stmt>user.isActive<and>user.role.name<eq>USER_ROLES.ADMIN_USER<block_start>admins.append(user)<block_end><block_end><return>admins<block_end><def_stmt>__repr__ self<block_start><return>'<Company {}>'.format(self.name)<block_end><block_end><class_stmt>Invite(db.Model)<block_start>id=db.Column(db.Integer primary_key=<true>)<line_sep>email=db.Column(db.String(100) nullable=<false>)<line_sep>code=db.Column(db.String(255) unique=<true> nullable=<false>)<line_sep>created_by=db.relationship('User' backref=db.backref('invites' lazy=<true>))<line_sep>user_id=db.Column(db.Integer db.ForeignKey('user.id') nullable=<false>)<line_sep>company=db.relationship('Company' backref=db.backref('invites' lazy=<true>))<line_sep>company_id=db.Column(db.Integer db.ForeignKey('company.id') nullable=<false>)<line_sep>role=db.relationship('Role' backref=db.backref('invites' lazy=<true>))<line_sep>role_id=db.Column(db.Integer db.ForeignKey('role.id') nullable=<false>)<line_sep>accepted=db.Column(db.Boolean default=<false>)<line_sep>created_at=db.Column(db.DateTime(timezone=<true>) default=func.now())<line_sep>updated_at=db.Column(db.DateTime(timezone=<true>) onupdate=func.now())<def_stmt>save self commit=<true><block_start>db.session.add(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>delete self commit=<true><block_start>db.session.delete(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>is_expired self<block_start><if_stmt>arrow.get(self.created_at).datetime+datetime.timedelta(seconds=INVITE_EXPIRY)<l>arrow.now().datetime<block_start><return><true><block_end><return><false><block_end><def_stmt>__repr__ self<block_start><return>'<Invite {}>'.format(self.id)<block_end><block_end><class_stmt>PasswordReset(db.Model)<block_start>id=db.Column(db.Integer primary_key=<true>)<line_sep>email=db.Column(db.String(100) nullable=<false>)<line_sep>code=db.Column(db.String(255) unique=<true> nullable=<false>)<line_sep>expiry_timedelta=db.Column(db.Integer nullable=<false>)<line_sep>created_at=db.Column(db.DateTime(timezone=<true>) default=func.now())<line_sep>updated_at=db.Column(db.DateTime(timezone=<true>) onupdate=func.now())<def_stmt>save self commit=<true><block_start>db.session.add(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>delete self commit=<true><block_start>db.session.delete(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. 
<raise><block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>'<PasswordReset {}>'.format(self.id)<block_end><block_end><class_stmt>Notification(db.Model)<block_start>id=db.Column(db.Integer primary_key=<true>)<line_sep>last_seen=db.Column(db.DateTime(timezone=<true>) nullable=<false> default=func.now())<line_sep># One to one mapping with user user_id=db.Column(db.Integer db.ForeignKey('user.id'))<line_sep>user=db.relationship('User' back_populates='notification')<line_sep>created_at=db.Column(db.DateTime(timezone=<true>) default=func.now())<line_sep>updated_at=db.Column(db.DateTime(timezone=<true>) onupdate=func.now())<def_stmt>save self commit=<true><block_start>db.session.add(self)<if_stmt>commit<block_start><try_stmt><block_start>db.session.commit()<block_end><except_stmt><block_start>db.session.rollback()<line_sep># Exception block is just for rolling back the transaction # So re raise it. <raise><block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>'<Notification {}>'.format(self.id)<block_end><block_end>
<import_stmt>os<import_from_stmt>setuptools setup find_packages<def_stmt>read fname<block_start><return>open(os.path.join(os.path.dirname(__file__) fname)).read()<block_end>setup(name="SwampDragon" version="0.4.2.2" author="<NAME>" author_email="<EMAIL>" description=("SwampDragon is a powerful platform making it easy to build real-time web applications, combining the power of Django and Tornado") license="BSD" keywords="SwampDragon, websockets, realtime, sockjs, django, tornado, framework" url="http://swampdragon.net" packages=find_packages() long_description=read('README.txt') include_package_data=<true> entry_points={'console_scripts':['dragon-admin = swampdragon.core:run' ]} install_requires=["Django>=1.6,<1.10" "Tornado >= 3.2.2" "sockjs-tornado >= 1.0.0" "tornado-redis >= 2.4.18" "redis >= 2.8" "python-dateutil >= 2.2"] classifiers=["Development Status :: 4 - Beta" "License :: OSI Approved :: BSD License" "Programming Language :: Python :: 2.7" "Programming Language :: Python :: 3" "Programming Language :: Python :: 3.3" "Programming Language :: Python :: 3.4" ] )<line_sep>
<import_stmt>os<import_from_stmt>uuid uuid4<import_stmt>numpy.testing<as>npt<import_stmt>numpy<as>np<import_from_stmt>cesium time_series<import_from_stmt>cesium.time_series TimeSeries<def_stmt>sample_time_series size=51 channels=1<block_start>times=np.array([np.sort(np.random.random(size))<for>i range(channels)]).squeeze()<line_sep>values=np.array([np.random.normal(size=size)<for>i range(channels)]).squeeze()<line_sep>errors=np.array([np.random.exponential(size=size)<for>i range(channels)]).squeeze()<line_sep><return>times values errors<block_end><def_stmt>test__compatible_shapes <block_start>compat=time_series._compatible_shapes<assert_stmt>compat(np.arange(5) np.arange(5))<assert_stmt><not>compat(np.arange(5) np.arange(6))<assert_stmt>compat([np.arange(5)]<times>5 [np.arange(5)]<times>5)<assert_stmt><not>compat([np.arange(5)]<times>5 [np.arange(5)]<times>6)<assert_stmt><not>compat([np.arange(5)]<times>5 [np.arange(6)]<times>5)<assert_stmt><not>compat(np.arange(5) [np.arange(6)]<times>5)<assert_stmt>compat([[0 1] [0 1]] [[0 1] [0 1]])<assert_stmt><not>compat([[0 1] [0 1]] [[0] [0 1]])<assert_stmt>compat([0 1] np.arange(2))<block_end><def_stmt>assert_ts_equal ts1 ts2<block_start><for_stmt>x1,x2 zip((ts1.time ts1.measurement ts1.error) (ts2.time ts2.measurement ts2.error))<block_start><assert_stmt>type(x1)<eq>type(x2)<if_stmt>isinstance(x1 np.ndarray)<block_start><assert_stmt>np.array_equal(x1 x2)<block_end><else_stmt><block_start><assert_stmt>all(np.array_equal(x1_i x2_i)<for>x1_i,x2_i zip(x1 x2))<block_end><block_end><assert_stmt>ts1.label<eq>ts2.label<assert_stmt>ts1.meta_features<eq>ts2.meta_features<assert_stmt>ts1.name<eq>ts2.name<block_end><def_stmt>test_time_series_init_1d <block_start>t,m,e=sample_time_series(channels=1)<line_sep>ts=TimeSeries(t m e)<assert_stmt>ts.time.shape<eq>t.shape<and>np.allclose(ts.time t)<assert_stmt>ts.measurement.shape<eq>m.shape<and>np.allclose(ts.measurement m)<assert_stmt>ts.error.shape<eq>e.shape<and>np.allclose(ts.error e)<assert_stmt>ts.n_channels<eq>1<block_end><def_stmt>test_time_series_init_2d <block_start>n_channels=3<line_sep>t,m,e=sample_time_series(channels=n_channels)<line_sep>ts=TimeSeries(t m e)<assert_stmt>ts.time.shape<eq>t.shape<and>np.allclose(ts.time t)<assert_stmt>ts.measurement.shape<eq>m.shape<and>np.allclose(ts.measurement m)<assert_stmt>ts.error.shape<eq>e.shape<and>np.allclose(ts.error e)<assert_stmt>ts.n_channels<eq>n_channels<line_sep>ts=TimeSeries(t[0] m e[0])<assert_stmt>ts.time.shape<eq>m.shape<and>np.allclose(ts.time[0] t[0])<assert_stmt>ts.measurement.shape<eq>m.shape<and>np.allclose(ts.measurement m)<assert_stmt>ts.error.shape<eq>m.shape<and>np.allclose(ts.error[0] e[0])<assert_stmt>ts.n_channels<eq>n_channels<block_end><def_stmt>test_time_series_init_ragged <block_start>n_channels=3<line_sep>t,m,e=sample_time_series(channels=n_channels)<line_sep>t=[t[i][0:i+2]<for>i range(len(t))]<line_sep>m=[m[i][0:i+2]<for>i range(len(m))]<line_sep>e=[e[i][0:i+2]<for>i range(len(e))]<line_sep>ts=TimeSeries(t m e)<assert_stmt>all(np.allclose(ts.time[i] t[i])<for>i range(len(t)))<assert_stmt>all(np.allclose(ts.measurement[i] m[i])<for>i range(len(t)))<assert_stmt>all(np.allclose(ts.error[i] e[i])<for>i range(len(t)))<assert_stmt>ts.n_channels<eq>n_channels<block_end><def_stmt>test_time_series_default_values <block_start>n_channels=3<line_sep>t,m,e=sample_time_series(channels=n_channels)<line_sep>ts=TimeSeries(<none> m[0] <none>)<line_sep>npt.assert_allclose(ts.time np.linspace(0. 
time_series.DEFAULT_MAX_TIME m.shape[1]))<line_sep>npt.assert_allclose(ts.error np.repeat(time_series.DEFAULT_ERROR_VALUE m.shape[1]))<assert_stmt>ts.n_channels<eq>1<line_sep>ts=TimeSeries(<none> m <none>)<line_sep>npt.assert_allclose(ts.time[0] np.linspace(0. time_series.DEFAULT_MAX_TIME m.shape[1]))<line_sep>npt.assert_allclose(ts.error[0] np.repeat(time_series.DEFAULT_ERROR_VALUE m.shape[1]))<assert_stmt>ts.n_channels<eq>n_channels<line_sep>t=[t[i][0:i+2]<for>i range(len(t))]<line_sep>m=[m[i][0:i+2]<for>i range(len(m))]<line_sep>e=[e[i][0:i+2]<for>i range(len(e))]<line_sep>ts=TimeSeries(<none> m <none>)<for_stmt>i range(n_channels)<block_start>npt.assert_allclose(ts.time[i] np.linspace(0. time_series.DEFAULT_MAX_TIME len(m[i])))<line_sep>npt.assert_allclose(ts.error[i] np.repeat(time_series.DEFAULT_ERROR_VALUE len(m[i])))<block_end><assert_stmt>ts.n_channels<eq>n_channels<block_end><def_stmt>test_channels_iterator <block_start>n_channels=3<line_sep>t,m,e=sample_time_series(channels=n_channels)<line_sep>ts=TimeSeries(t[0] m[0] e[0])<for_stmt>t_i,m_i,e_i ts.channels()<block_start>npt.assert_allclose(t_i t[0])<line_sep>npt.assert_allclose(m_i m[0])<line_sep>npt.assert_allclose(e_i e[0])<block_end>ts=TimeSeries(t m e)<for_stmt>(t_i m_i e_i),i zip(ts.channels() range(n_channels))<block_start>npt.assert_allclose(t_i t[i])<line_sep>npt.assert_allclose(m_i m[i])<line_sep>npt.assert_allclose(e_i e[i])<block_end>t=[t[i][0:i+2]<for>i range(len(t))]<line_sep>m=[m[i][0:i+2]<for>i range(len(m))]<line_sep>e=[e[i][0:i+2]<for>i range(len(e))]<line_sep>ts=TimeSeries(t m e)<for_stmt>(t_i m_i e_i),i zip(ts.channels() range(n_channels))<block_start>npt.assert_allclose(t_i t[i])<line_sep>npt.assert_allclose(m_i m[i])<line_sep>npt.assert_allclose(e_i e[i])<block_end><block_end><def_stmt>test_time_series_npz tmpdir<block_start>n_channels=3<line_sep>t,m,e=sample_time_series(channels=n_channels)<line_sep>ts=TimeSeries(t[0] m[0] e[0])<line_sep>ts_path=os.path.join(str(tmpdir) str(uuid4())+'.npz')<line_sep>ts.save(ts_path)<line_sep>ts_loaded=time_series.load(ts_path)<line_sep>assert_ts_equal(ts ts_loaded)<line_sep>ts=TimeSeries(t[0] m e[0])<line_sep>ts_path=os.path.join(str(tmpdir) str(uuid4())+'.npz')<line_sep>ts.save(ts_path)<line_sep>ts_loaded=time_series.load(ts_path)<line_sep>assert_ts_equal(ts ts_loaded)<line_sep>t=[t[i][0:i+2]<for>i range(len(t))]<line_sep>m=[m[i][0:i+2]<for>i range(len(m))]<line_sep>e=[e[i][0:i+2]<for>i range(len(e))]<line_sep>ts=TimeSeries(t m e)<line_sep>ts_path=os.path.join(str(tmpdir) str(uuid4())+'.npz')<line_sep>ts.save(ts_path)<line_sep>ts_loaded=time_series.load(ts_path)<line_sep>assert_ts_equal(ts ts_loaded)<block_end><def_stmt>test_time_series_sort <block_start>t,m,e=sample_time_series(channels=1)<line_sep>t[:2]=t[1::-1]<line_sep>ts=TimeSeries(t m e)<line_sep>npt.assert_allclose(ts.time np.sort(t))<line_sep>npt.assert_allclose(ts.measurement m[np.argsort(t)])<line_sep>npt.assert_allclose(ts.error e[np.argsort(t)])<line_sep>n_channels=3<line_sep>t,m,e=sample_time_series(channels=n_channels)<line_sep>t[: :2]=t[: 1::-1]<line_sep>ts=TimeSeries(t m e)<for_stmt>i range(len(m))<block_start>npt.assert_allclose(ts.time[i] np.sort(t[i]))<line_sep>npt.assert_allclose(ts.measurement[i] m[i][np.argsort(t[i])])<line_sep>npt.assert_allclose(ts.error[i] e[i][np.argsort(t[i])])<block_end>ts=TimeSeries(t[0] m e[0])<for_stmt>i range(len(m))<block_start>npt.assert_allclose(ts.time[i] np.sort(t[0]))<line_sep>npt.assert_allclose(ts.measurement[i] 
m[i][np.argsort(t[0])])<line_sep>npt.assert_allclose(ts.error[i] e[0][np.argsort(t[0])])<block_end><block_end>
<import_stmt>disnake<import_from_stmt>disnake.ext commands<class_stmt>MessageCommands(commands.Cog)<block_start><def_stmt>__init__ self bot<block_start>self.bot:commands.Bot=bot<block_end>@commands.message_command(name="Reverse")<async_keyword><def_stmt>reverse self inter:disnake.MessageCommandInteraction<block_start><await>inter.response.send_message(inter.target.content[::-1])<block_end><block_end><def_stmt>setup bot<block_start>bot.add_cog(MessageCommands(bot))<line_sep>print(f"> Extension {__name__} is ready\n")<block_end>
# Copyright 2018/2019 The RLgraph authors, All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== <import_from_future_stmt> absolute_import division print_function<import_from_stmt>rlgraph.components.common.container_merger ContainerMerger<import_from_stmt>rlgraph.components.component Component<import_from_stmt>rlgraph.components.explorations.exploration Exploration<import_from_stmt>rlgraph.components.neural_networks.preprocessor_stack PreprocessorStack<import_from_stmt>rlgraph.components.policies.policy Policy<import_from_stmt>rlgraph.utils.decorators rlgraph_api<class_stmt>ActorComponent(Component)<block_start>""" A Component that incorporates an entire pipeline from env state to an action choice. Includes preprocessor, policy and exploration sub-components. """<def_stmt>__init__ self preprocessor_spec policy_spec exploration_spec=<none> **kwargs<block_start>""" Args: preprocessor_spec (Union[list,dict,PreprocessorSpec]): - A dict if the state from the Env will come in as a ContainerSpace (e.g. Dict). In this case, each each key in this dict specifies, which value in the incoming dict should go through which PreprocessorStack. - A list with layer specs. - A PreprocessorStack object. policy_spec (Union[dict,Policy]): A specification dict for a Policy object or a Policy object directly. exploration_spec (Union[dict,Exploration]): A specification dict for an Exploration object or an Exploration object directly. """<line_sep>super(ActorComponent self).__init__(scope=kwargs.pop("scope" "actor-component") **kwargs)<line_sep>self.preprocessor=PreprocessorStack.from_spec(preprocessor_spec)<line_sep>self.policy=Policy.from_spec(policy_spec)<line_sep>self.exploration=Exploration.from_spec(exploration_spec)<line_sep>self.tuple_merger=ContainerMerger(is_tuple=<true> merge_tuples_into_one=<true>)<line_sep>self.add_components(self.policy self.exploration self.preprocessor self.tuple_merger)<block_end>@rlgraph_api<def_stmt>get_preprocessed_state_and_action self states other_nn_inputs=<none> time_percentage=<none> use_exploration=<true><block_start>""" API-method to get the preprocessed state and an action based on a raw state from an Env. Args: states (DataOp): The states coming directly from the environment. other_nn_inputs (Optional[DataOpTuple]): Inputs to the NN that don't have to be pushed through the preprocessor. time_percentage (SingleDataOp): The current consumed time (0.0 to 1.0) with respect to a max timestep value. use_exploration (Optional[DataOp]): Whether to use exploration or not. Returns: dict (3x DataOp): `preprocessed_state` (DataOp): The preprocessed states. `action` (DataOp): The chosen action. #`last_internal_states` (DataOp): If RNN-based, the last internal states after passing through #states. Or None. 
"""<line_sep>preprocessed_states=self.preprocessor.preprocess(states)<line_sep>nn_inputs=preprocessed_states<if_stmt>other_nn_inputs<is><not><none># TODO: Do this automatically when using the `+` operator on DataOpRecords. <block_start>nn_inputs=self.tuple_merger.merge(nn_inputs other_nn_inputs)<block_end>out=self.policy.get_action(nn_inputs)<line_sep>actions=self.exploration.get_action(out["action"] time_percentage use_exploration)<line_sep><return>dict(preprocessed_state=preprocessed_states action=actions nn_outputs=out["nn_outputs"])<block_end>@rlgraph_api<def_stmt>get_preprocessed_state_action_and_action_probs self states other_nn_inputs=<none> time_percentage=<none> use_exploration=<true><block_start>""" API-method to get the preprocessed state, one action and all possible action's probabilities based on a raw state from an Env. Args: states (DataOp): The states coming directly from the environment. other_nn_inputs (DataOp): Inputs to the NN that don't have to be pushed through the preprocessor. time_percentage (SingleDataOp): The current consumed time (0.0 to 1.0) with respect to a max timestep value. use_exploration (Optional[DataOp]): Whether to use exploration or not. Returns: dict (4x DataOp): `preprocessed_state` (DataOp): The preprocessed states. `action` (DataOp): The chosen action. `action_probs` (DataOp): The different action probabilities. #`last_internal_states` (DataOp): If RNN-based, the last internal states after passing through #states. Or None. """<line_sep>preprocessed_states=self.preprocessor.preprocess(states)<line_sep>nn_inputs=preprocessed_states<line_sep># merge preprocessed_states + other_nn_inputs <if_stmt>other_nn_inputs<is><not><none># TODO: Do this automatically when using the `+` operator on DataOpRecords. <block_start>nn_inputs=self.tuple_merger.merge(nn_inputs other_nn_inputs)<block_end># TODO: Dynamic Batching problem. State-value is not really needed, but dynamic batching will require us to # TODO: run through the exact same partial-graph as the learner (which does need the extra state-value output). # if isinstance(self.policy, SharedValueFunctionPolicy): # out = self.policy.get_state_values_logits_probabilities_log_probs(preprocessed_states, internal_states) # else: # out = self.policy.get_logits_parameters_log_probs(preprocessed_states, internal_states) # action_sample = self.policy.get_action_from_logits_and_parameters(out["logits"], out["parameters"]) out=self.policy.get_action_and_log_likelihood(nn_inputs)<line_sep>actions=self.exploration.get_action(out["action"] time_percentage use_exploration)<line_sep><return>dict(preprocessed_state=preprocessed_states action=actions action_probs=out["action_probabilities"] nn_outputs=out["nn_outputs"])<block_end><block_end>
<import_from_stmt>django.shortcuts render<import_from_stmt>.models Member<def_stmt>index request#member_one = Member(name='Anthony', location='Las Vegas') #member_one.save() <block_start># Fetch the existing row with pk=1 in order to clone it member_three=Member.objects.get(pk=1)<line_sep>#member_two = member_one # Clearing both pk and id makes the next save() INSERT a new row (a copy) instead of updating row 1 member_three.pk=<none><line_sep>member_three.id=<none><line_sep>member_three.name='Anthony'<line_sep>member_three.location='Las Vegas'<line_sep>member_three.save()<line_sep># Re-fetch the original row so the template can show both the original and the copy member_one=Member.objects.get(pk=1)<line_sep>context={'member_one':member_one 'member_three':member_three}<line_sep><return>render(request 'index.html' context)<block_end>
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_stmt>pytest<import_from_stmt>playwright._impl._path_utils get_file_dirname<import_from_stmt>playwright.async_api Error Page<import_from_stmt>tests.server Server<line_sep>_dirname=get_file_dirname()<line_sep>FILE_TO_UPLOAD=_dirname/".."/"assets/file-to-upload.txt"<async_keyword><def_stmt>test_locators_click_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/button.html")<line_sep>button=page.locator("button")<line_sep><await>button.click()<assert_stmt><await>page.evaluate("window['result']")<eq>"Clicked"<block_end><async_keyword><def_stmt>test_locators_click_should_work_with_node_removed page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/button.html")<line_sep><await>page.evaluate("delete window['Node']")<line_sep>button=page.locator("button")<line_sep><await>button.click()<assert_stmt><await>page.evaluate("window['result']")<eq>"Clicked"<block_end><async_keyword><def_stmt>test_locators_click_should_work_for_text_nodes page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/button.html")<line_sep><await>page.evaluate("""() => { window['double'] = false; const button = document.querySelector('button'); button.addEventListener('dblclick', event => { window['double'] = true; }); }""")<line_sep>button=page.locator("button")<line_sep><await>button.dblclick()<assert_stmt><await>page.evaluate("double")<is><true><assert_stmt><await>page.evaluate("result")<eq>"Clicked"<block_end><async_keyword><def_stmt>test_locators_should_have_repr page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/button.html")<line_sep>button=page.locator("button")<line_sep><await>button.click()<assert_stmt>(str(button)<eq>f"<Locator frame=<Frame name= url='{server.PREFIX}/input/button.html'> selector='button'>")<block_end><async_keyword><def_stmt>test_locators_get_attribute_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/dom.html")<line_sep>button=page.locator("#outer")<assert_stmt><await>button.get_attribute("name")<eq>"value"<assert_stmt><await>button.get_attribute("foo")<is><none><block_end><async_keyword><def_stmt>test_locators_input_value_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/dom.html")<line_sep><await>page.fill("#textarea" "input value")<line_sep>text_area=page.locator("#textarea")<assert_stmt><await>text_area.input_value()<eq>"input value"<block_end><async_keyword><def_stmt>test_locators_inner_html_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/dom.html")<line_sep>locator=page.locator("#outer")<assert_stmt><await>locator.inner_html()<eq>'<div id="inner">Text,\nmore text</div>'<block_end><async_keyword><def_stmt>test_locators_inner_text_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/dom.html")<line_sep>locator=page.locator("#inner")<assert_stmt><await>locator.inner_text()<eq>"Text, more 
text"<block_end><async_keyword><def_stmt>test_locators_text_content_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/dom.html")<line_sep>locator=page.locator("#inner")<assert_stmt><await>locator.text_content()<eq>"Text,\nmore text"<block_end><async_keyword><def_stmt>test_locators_is_hidden_and_is_visible_should_work page:Page<block_start><await>page.set_content("<div>Hi</div><span></span>")<line_sep>div=page.locator("div")<assert_stmt><await>div.is_visible()<is><true><assert_stmt><await>div.is_hidden()<is><false><line_sep>span=page.locator("span")<assert_stmt><await>span.is_visible()<is><false><assert_stmt><await>span.is_hidden()<is><true><block_end><async_keyword><def_stmt>test_locators_is_enabled_and_is_disabled_should_work page:Page<block_start><await>page.set_content(""" <button disabled>button1</button> <button>button2</button> <div>div</div> """)<line_sep>div=page.locator("div")<assert_stmt><await>div.is_enabled()<is><true><assert_stmt><await>div.is_disabled()<is><false><line_sep>button1=page.locator(':text("button1")')<assert_stmt><await>button1.is_enabled()<is><false><assert_stmt><await>button1.is_disabled()<is><true><line_sep>button1=page.locator(':text("button2")')<assert_stmt><await>button1.is_enabled()<is><true><assert_stmt><await>button1.is_disabled()<is><false><block_end><async_keyword><def_stmt>test_locators_is_editable_should_work page:Page<block_start><await>page.set_content(""" <input id=input1 disabled><textarea></textarea><input id=input2> """)<line_sep>input1=page.locator("#input1")<assert_stmt><await>input1.is_editable()<is><false><line_sep>input2=page.locator("#input2")<assert_stmt><await>input2.is_editable()<is><true><block_end><async_keyword><def_stmt>test_locators_is_checked_should_work page:Page<block_start><await>page.set_content(""" <input type='checkbox' checked><div>Not a checkbox</div> """)<line_sep>element=page.locator("input")<assert_stmt><await>element.is_checked()<is><true><line_sep><await>element.evaluate("e => e.checked = false")<assert_stmt><await>element.is_checked()<is><false><block_end><async_keyword><def_stmt>test_locators_all_text_contents_should_work page:Page<block_start><await>page.set_content(""" <div>A</div><div>B</div><div>C</div> """)<line_sep>element=page.locator("div")<assert_stmt><await>element.all_text_contents()<eq>["A" "B" "C"]<block_end><async_keyword><def_stmt>test_locators_all_inner_texts page:Page<block_start><await>page.set_content(""" <div>A</div><div>B</div><div>C</div> """)<line_sep>element=page.locator("div")<assert_stmt><await>element.all_inner_texts()<eq>["A" "B" "C"]<block_end><async_keyword><def_stmt>test_locators_should_query_existing_element page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/playground.html")<line_sep><await>page.set_content("""<html><body><div class="second"><div class="inner">A</div></div></body></html>""")<line_sep>html=page.locator("html")<line_sep>second=html.locator(".second")<line_sep>inner=second.locator(".inner")<assert_stmt>(<await>page.evaluate("e => e.textContent" <await>inner.element_handle())<eq>"A")<block_end><async_keyword><def_stmt>test_locators_evaluate_handle_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/dom.html")<line_sep>outer=page.locator("#outer")<line_sep>inner=outer.locator("#inner")<line_sep>check=inner.locator("#check")<line_sep>text=<await>inner.evaluate_handle("e => e.firstChild")<line_sep><await>page.evaluate("1 + 1")<assert_stmt>(str(outer)<eq>f"<Locator frame=<Frame name= 
url='{server.PREFIX}/dom.html'> selector='#outer'>")<assert_stmt>(str(inner)<eq>f"<Locator frame=<Frame name= url='{server.PREFIX}/dom.html'> selector='#outer >> #inner'>")<assert_stmt>str(text)<eq>"JSHandle@#text=Text,↵more text"<assert_stmt>(str(check)<eq>f"<Locator frame=<Frame name= url='{server.PREFIX}/dom.html'> selector='#outer >> #inner >> #check'>")<block_end><async_keyword><def_stmt>test_locators_should_query_existing_elements page:Page<block_start><await>page.set_content("""<html><body><div>A</div><br/><div>B</div></body></html>""")<line_sep>html=page.locator("html")<line_sep>elements=<await>html.locator("div").element_handles()<assert_stmt>len(elements)<eq>2<line_sep>result=[]<for_stmt>element elements<block_start>result.append(<await>page.evaluate("e => e.textContent" element))<block_end><assert_stmt>result<eq>["A" "B"]<block_end><async_keyword><def_stmt>test_locators_return_empty_array_for_non_existing_elements page:Page<block_start><await>page.set_content("""<html><body><div>A</div><br/><div>B</div></body></html>""")<line_sep>html=page.locator("html")<line_sep>elements=<await>html.locator("abc").element_handles()<assert_stmt>len(elements)<eq>0<assert_stmt>elements<eq>[]<block_end><async_keyword><def_stmt>test_locators_evaluate_all_should_work page:Page<block_start><await>page.set_content("""<html><body><div class="tweet"><div class="like">100</div><div class="like">10</div></div></body></html>""")<line_sep>tweet=page.locator(".tweet .like")<line_sep>content=<await>tweet.evaluate_all("nodes => nodes.map(n => n.innerText)")<assert_stmt>content<eq>["100" "10"]<block_end><async_keyword><def_stmt>test_locators_evaluate_all_should_work_with_missing_selector page:Page<block_start><await>page.set_content("""<div class="a">not-a-child-div</div><div id="myId"></div""")<line_sep>tweet=page.locator("#myId .a")<line_sep>nodes_length=<await>tweet.evaluate_all("nodes => nodes.length")<assert_stmt>nodes_length<eq>0<block_end><async_keyword><def_stmt>test_locators_hover_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/scrollable.html")<line_sep>button=page.locator("#button-6")<line_sep><await>button.hover()<assert_stmt>(<await>page.evaluate("document.querySelector('button:hover').id")<eq>"button-6")<block_end><async_keyword><def_stmt>test_locators_fill_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/textarea.html")<line_sep>button=page.locator("input")<line_sep><await>button.fill("some value")<assert_stmt><await>page.evaluate("result")<eq>"some value"<block_end><async_keyword><def_stmt>test_locators_check_should_work page:Page<block_start><await>page.set_content("<input id='checkbox' type='checkbox'></input>")<line_sep>button=page.locator("input")<line_sep><await>button.check()<assert_stmt><await>page.evaluate("checkbox.checked")<is><true><block_end><async_keyword><def_stmt>test_locators_uncheck_should_work page:Page<block_start><await>page.set_content("<input id='checkbox' type='checkbox' checked></input>")<line_sep>button=page.locator("input")<line_sep><await>button.uncheck()<assert_stmt><await>page.evaluate("checkbox.checked")<is><false><block_end><async_keyword><def_stmt>test_locators_select_option_should_work page:Page 
server:Server<block_start><await>page.goto(server.PREFIX+"/input/select.html")<line_sep>select=page.locator("select")<line_sep><await>select.select_option("blue")<assert_stmt><await>page.evaluate("result.onInput")<eq>["blue"]<assert_stmt><await>page.evaluate("result.onChange")<eq>["blue"]<block_end><async_keyword><def_stmt>test_locators_focus_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/button.html")<line_sep>button=page.locator("button")<assert_stmt><await>button.evaluate("button => document.activeElement === button")<is><false><line_sep><await>button.focus()<assert_stmt><await>button.evaluate("button => document.activeElement === button")<is><true><block_end><async_keyword><def_stmt>test_locators_dispatch_event_should_work page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/button.html")<line_sep>button=page.locator("button")<line_sep><await>button.dispatch_event("click")<assert_stmt><await>page.evaluate("result")<eq>"Clicked"<block_end><async_keyword><def_stmt>test_locators_should_upload_a_file page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/input/fileupload.html")<line_sep>input=page.locator("input[type=file]")<line_sep>file_path=os.path.relpath(FILE_TO_UPLOAD os.getcwd())<line_sep><await>input.set_input_files(file_path)<assert_stmt>(<await>page.evaluate("e => e.files[0].name" <await>input.element_handle())<eq>"file-to-upload.txt")<block_end><async_keyword><def_stmt>test_locators_should_press page:Page<block_start><await>page.set_content("<input type='text' />")<line_sep><await>page.locator("input").press("h")<assert_stmt><await>page.eval_on_selector("input" "input => input.value")<eq>"h"<block_end><async_keyword><def_stmt>test_locators_should_scroll_into_view page:Page server:Server<block_start><await>page.goto(server.PREFIX+"/offscreenbuttons.html")<for_stmt>i range(11)<block_start>button=page.locator(f"#btn{i}")<line_sep>before=<await>button.evaluate("button => button.getBoundingClientRect().right - window.innerWidth")<assert_stmt>before<eq>10<times>i<line_sep><await>button.scroll_into_view_if_needed()<line_sep>after=<await>button.evaluate("button => button.getBoundingClientRect().right - window.innerWidth")<assert_stmt>after<le>0<line_sep><await>page.evaluate("window.scrollTo(0, 0)")<block_end><block_end><async_keyword><def_stmt>test_locators_should_select_textarea page:Page server:Server browser_name:str<block_start><await>page.goto(server.PREFIX+"/input/textarea.html")<line_sep>textarea=page.locator("textarea")<line_sep><await>textarea.evaluate("textarea => textarea.value = 'some value'")<line_sep><await>textarea.select_text()<if_stmt>browser_name<eq>"firefox"<block_start><assert_stmt><await>textarea.evaluate("el => el.selectionStart")<eq>0<assert_stmt><await>textarea.evaluate("el => el.selectionEnd")<eq>10<block_end><else_stmt><block_start><assert_stmt><await>page.evaluate("window.getSelection().toString()")<eq>"some value"<block_end><block_end><async_keyword><def_stmt>test_locators_should_type page:Page<block_start><await>page.set_content("<input type='text' />")<line_sep><await>page.locator("input").type("hello")<assert_stmt><await>page.eval_on_selector("input" "input => input.value")<eq>"hello"<block_end><async_keyword><def_stmt>test_locators_should_screenshot page:Page server:Server assert_to_be_golden<block_start><await>page.set_viewport_size({"width":500 "height":500 })<line_sep><await>page.goto(server.PREFIX+"/grid.html")<line_sep><await>page.evaluate("window.scrollBy(50, 
100)")<line_sep>element=page.locator(".box:nth-of-type(3)")<line_sep>assert_to_be_golden(<await>element.screenshot() "screenshot-element-bounding-box.png")<block_end><async_keyword><def_stmt>test_locators_should_return_bounding_box page:Page server:Server<block_start><await>page.set_viewport_size({"width":500 "height":500 })<line_sep><await>page.goto(server.PREFIX+"/grid.html")<line_sep>element=page.locator(".box:nth-of-type(13)")<line_sep>box=<await>element.bounding_box()<assert_stmt>box<eq>{"x":100 "y":50 "width":50 "height":50 }<block_end><async_keyword><def_stmt>test_locators_should_respect_first_and_last page:Page<block_start><await>page.set_content(""" <section> <div><p>A</p></div> <div><p>A</p><p>A</p></div> <div><p>A</p><p>A</p><p>A</p></div> </section>""")<assert_stmt><await>page.locator("div >> p").count()<eq>6<assert_stmt><await>page.locator("div").locator("p").count()<eq>6<assert_stmt><await>page.locator("div").first.locator("p").count()<eq>1<assert_stmt><await>page.locator("div").last.locator("p").count()<eq>3<block_end><async_keyword><def_stmt>test_locators_should_respect_nth page:Page<block_start><await>page.set_content(""" <section> <div><p>A</p></div> <div><p>A</p><p>A</p></div> <div><p>A</p><p>A</p><p>A</p></div> </section>""")<assert_stmt><await>page.locator("div >> p").nth(0).count()<eq>1<assert_stmt><await>page.locator("div").nth(1).locator("p").count()<eq>2<assert_stmt><await>page.locator("div").nth(2).locator("p").count()<eq>3<block_end><async_keyword><def_stmt>test_locators_should_throw_on_capture_without_nth page:Page<block_start><await>page.set_content(""" <section><div><p>A</p></div></section> """)<with_stmt>pytest.raises(Error match="Can't query n-th element")<block_start><await>page.locator("*css=div >> p").nth(1).click()<block_end><block_end><async_keyword><def_stmt>test_locators_should_throw_due_to_strictness page:Page<block_start><await>page.set_content(""" <div>A</div><div>B</div> """)<with_stmt>pytest.raises(Error match="strict mode violation")<block_start><await>page.locator("div").is_visible()<block_end><block_end><async_keyword><def_stmt>test_locators_should_throw_due_to_strictness_2 page:Page<block_start><await>page.set_content(""" <select><option>One</option><option>Two</option></select> """)<with_stmt>pytest.raises(Error match="strict mode violation")<block_start><await>page.locator("option").evaluate("e => {}")<block_end><block_end><async_keyword><def_stmt>test_locators_set_checked page:Page<block_start><await>page.set_content("`<input id='checkbox' type='checkbox'></input>`")<line_sep>locator=page.locator("input")<line_sep><await>locator.set_checked(<true>)<assert_stmt><await>page.evaluate("checkbox.checked")<line_sep><await>locator.set_checked(<false>)<assert_stmt><await>page.evaluate("checkbox.checked")<is><false><block_end><async_keyword><def_stmt>test_locators_wait_for page:Page<arrow><none><block_start><await>page.set_content("<div></div>")<line_sep>locator=page.locator("div")<line_sep>task=locator.wait_for()<line_sep><await>page.eval_on_selector("div" "div => div.innerHTML = '<span>target</span>'")<line_sep><await>task<assert_stmt><await>locator.text_content()<eq>"target"<block_end>
<import_from_stmt>overrides overrides<import_stmt>pytest<import_from_stmt>allennlp_demo.bidaf_elmo.api BidafElmoModelEndpoint<import_from_stmt>allennlp_demo.common.testing RcModelEndpointTestCase<class_stmt>TestBidafElmoModelEndpoint(RcModelEndpointTestCase)<block_start>endpoint=BidafElmoModelEndpoint()<line_sep>@pytest.mark.skip("Takes too long")@overrides<def_stmt>test_interpret self<block_start><pass><block_end>@pytest.mark.skip("Takes too long")@overrides<def_stmt>test_attack self<block_start><pass><block_end><block_end>
"""OpenType Layout-related functionality."""<line_sep>
# -*- coding: utf-8 -*- # # This file is part of PyBuilder # # Copyright 2011-2020 PyBuilder Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>os.path normcase<as>nc join<as>jp<import_from_stmt>pybuilder.core Project Logger Dependency RequirementsFile <import_from_stmt>pybuilder.install_utils install_dependencies<import_from_stmt>pybuilder.pip_utils PIP_MODULE_STANZA<import_from_stmt>pybuilder.plugins.python.install_dependencies_plugin initialize_install_dependencies_plugin<import_from_stmt>test_utils Mock ANY patch<line_sep>__author__="<NAME>"<class_stmt>InstallDependencyTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.project=Project("unittest" ".")<line_sep>self.project.set_property("dir_install_logs" "any_directory")<line_sep>self.project.set_property("dir_target" "/any_target_directory")<line_sep>self.logger=Mock(Logger)<line_sep>self.pyb_env=Mock()<line_sep>self.pyb_env.executable=["exec"]<line_sep>self.pyb_env.site_paths=[]<line_sep>self.pyb_env.env_dir="a"<line_sep>self.pyb_env.execute_command.return_value=0<line_sep>initialize_install_dependencies_plugin(self.project)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_requirements_file_dependency self *_<block_start>dependency=RequirementsFile("requirements.txt")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "-r" "requirements.txt"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_without_version self *_<block_start>dependency=Dependency("spam")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch" constraints_file_name="constraint_file")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "-c" nc(jp(self.pyb_env.env_dir "constraint_file")) "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_without_version_on_windows_derivate self *_<block_start>dependency=Dependency("spam")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env 
"install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_insecurely_when_property_is_set self *_<block_start>dependency=Dependency("spam")<line_sep>self.project.set_property("install_dependencies_insecure_installation" ["spam"])<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "--allow-unverified" "spam" "--allow-external" "spam" "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_securely_when_property_is_not_set_to_dependency self *_<block_start>dependency=Dependency("spam")<line_sep>self.project.set_property("install_dependencies_insecure_installation" ["some-other-dependency"])<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch" constraints_file_name="constraint_file")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "-c" ANY "--allow-unverified" "some-other-dependency" "--allow-external" "some-other-dependency" "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<line_sep># some-other-dependency might be a dependency of "spam" # so we always have to put the insecure dependencies in the command line :-( <block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_using_custom_index_url self *_<block_start>self.project.set_property("install_dependencies_index_url" "some_index_url")<line_sep>dependency=Dependency("spam")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "--index-url" "some_index_url" "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_use_extra_index_url_when_index_url_is_not_set self *_<block_start>self.project.set_property("install_dependencies_extra_index_url" "some_extra_index_url")<line_sep>dependency=Dependency("spam")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "--extra-index-url" "some_extra_index_url" "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> 
no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_use_index_and_extra_index_url_when_index_and_extra_index_url_are_set self *_<block_start>self.project.set_property("install_dependencies_index_url" "some_index_url")<line_sep>self.project.set_property("install_dependencies_extra_index_url" "some_extra_index_url")<line_sep>dependency=Dependency("spam")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "--index-url" "some_index_url" "--extra-index-url" "some_extra_index_url" "spam"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_with_version self *_<block_start>dependency=Dependency("spam" "0.1.2")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "spam>=0.1.2"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_with_version_and_operator self *_<block_start>dependency=Dependency("spam" "==0.1.2")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "spam==0.1.2"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end><def_stmt>test_should_install_dependency_with_wrong_version_and_operator self<block_start>self.assertRaises(ValueError Dependency "spam" "~=1")<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_with_url self *_<block_start>dependency=Dependency("spam" url="some_url")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env "install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "--force-reinstall" "some_url"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end>@patch("pybuilder.install_utils.tail_log")@patch("pybuilder.install_utils.open")@patch("pybuilder.install_utils.create_constraint_file")@patch("pybuilder.install_utils.get_packages_info" return_value={})<def_stmt>test_should_install_dependency_with_url_even_if_version_is_given self *_<block_start>dependency=Dependency("spam" version="0.1.2" url="some_url")<line_sep>install_dependencies(self.logger self.project dependency self.pyb_env 
"install_batch")<line_sep>self.pyb_env.execute_command.assert_called_with(self.pyb_env.executable+PIP_MODULE_STANZA+["install" "--force-reinstall" "some_url"] cwd=ANY env=ANY error_file_name=ANY outfile_name=ANY shell=<false> no_path_search=<true>)<block_end><block_end>
# Copyright (c) 2020, Xilinx # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of FINN nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. <import_stmt>pytest<import_stmt>brevitas.onnx<as>bo<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>torch<import_stmt>torchvision.datasets<as>datasets<import_stmt>torchvision.transforms<as>transforms<import_stmt>finn.core.onnx_exec<as>oxe<import_stmt>finn.transformation.streamline.absorb<as>absorb<import_stmt>finn.util.imagenet<as>imagenet_util<import_from_stmt>finn.core.modelwrapper ModelWrapper<import_from_stmt>finn.transformation.fold_constants FoldConstants<import_from_stmt>finn.transformation.general GiveReadableTensorNames GiveUniqueNodeNames GiveUniqueParameterTensors RemoveStaticGraphInputs <import_from_stmt>finn.transformation.infer_data_layouts InferDataLayouts<import_from_stmt>finn.transformation.infer_datatypes InferDataTypes<import_from_stmt>finn.transformation.infer_shapes InferShapes<import_from_stmt>finn.transformation.insert_topk InsertTopK<import_from_stmt>finn.transformation.merge_onnx_models MergeONNXModels<import_from_stmt>finn.util.basic make_build_dir<import_from_stmt>finn.util.pytorch NormalizePreProc<import_from_stmt>finn.util.test get_test_model_trained<line_sep># normalization (preprocessing) settings for MobileNet-v1 w4a4 mean=[0.485 0.456 0.406]<line_sep>std=0.226<line_sep>ch=3<def_stmt>test_brevitas_mobilenet_preproc <block_start><if_stmt>"IMAGENET_VAL_PATH"<not><in>os.environ.keys()<block_start>pytest.skip("Can't do validation without IMAGENET_VAL_PATH")<block_end>n_images=1000<line_sep># Brevitas-style: use torchvision pipeline std_arr=[std std std]<line_sep>normalize=transforms.Normalize(mean=mean std=std_arr)<line_sep>val_loader=torch.utils.data.DataLoader(datasets.ImageFolder(os.environ["IMAGENET_VAL_PATH"]+"/../" transforms.Compose([transforms.Resize(256) transforms.CenterCrop(224) transforms.ToTensor() normalize ]) ) batch_size=1 shuffle=<false> num_workers=0 )<line_sep># FINN-style: load_resize_crop then normalization as PyTorch graph preproc=NormalizePreProc(mean std 
ch)<line_sep>finn_loader=imagenet_util.get_val_images(n_images)<line_sep>val_loader=iter(val_loader)<for_stmt>i range(n_images)<block_start>(img_path finn_target)=next(finn_loader)<line_sep>finn_img=imagenet_util.load_resize_crop(img_path)<line_sep>finn_img=preproc.forward(torch.from_numpy(finn_img).float())<line_sep>(pyt_img pyt_target)=next(val_loader)<assert_stmt>finn_img.shape<eq>pyt_img.shape<assert_stmt>(finn_img<eq>pyt_img).all()<block_end><block_end>@pytest.mark.slow# marked as XFAIL until Brevitas export issues are resolved: # https://github.com/Xilinx/brevitas/issues/173 @pytest.mark.xfail<def_stmt>test_brevitas_compare_exported_mobilenet <block_start><if_stmt>"IMAGENET_VAL_PATH"<not><in>os.environ.keys()<block_start>pytest.skip("Can't do validation without IMAGENET_VAL_PATH")<block_end>n_images=10<line_sep>debug_mode=<false><line_sep>export_onnx_path=make_build_dir("test_brevitas_mobilenet-v1_")<line_sep># export preprocessing preproc_onnx=export_onnx_path+"/quant_mobilenet_v1_4b_preproc.onnx"<line_sep>preproc=NormalizePreProc(mean std ch)<line_sep>bo.export_finn_onnx(preproc (1 3 224 224) preproc_onnx)<line_sep>preproc_model=ModelWrapper(preproc_onnx)<line_sep>preproc_model=preproc_model.transform(InferShapes())<line_sep>preproc_model=preproc_model.transform(GiveUniqueNodeNames())<line_sep>preproc_model=preproc_model.transform(GiveUniqueParameterTensors())<line_sep>preproc_model=preproc_model.transform(GiveReadableTensorNames())<line_sep># export the actual MobileNet-v1 finn_onnx=export_onnx_path+"/quant_mobilenet_v1_4b.onnx"<line_sep>mobilenet=get_test_model_trained("mobilenet" 4 4)<if_stmt>debug_mode<block_start>dbg_hook=bo.enable_debug(mobilenet)<block_end>bo.export_finn_onnx(mobilenet (1 3 224 224) finn_onnx)<line_sep>model=ModelWrapper(finn_onnx)<line_sep>model=model.transform(InferShapes())<line_sep>model=model.transform(FoldConstants())<line_sep>model=model.transform(RemoveStaticGraphInputs())<line_sep>model=model.transform(InsertTopK())<line_sep># get initializer from Mul that will be absorbed into topk a0=model.get_initializer(model.get_nodes_by_op_type("Mul")[-1].input[1])<line_sep>model=model.transform(absorb.AbsorbScalarMulAddIntoTopK())<line_sep>model=model.transform(InferShapes())<line_sep>model=model.transform(InferDataTypes())<line_sep>model=model.transform(InferDataLayouts())<line_sep>model=model.transform(GiveUniqueNodeNames())<line_sep>model=model.transform(GiveUniqueParameterTensors())<line_sep>model=model.transform(GiveReadableTensorNames())<line_sep>model.save(export_onnx_path+"/quant_mobilenet_v1_4b_wo_preproc.onnx")<line_sep># create merged preprocessing + MobileNet-v1 model model=model.transform(MergeONNXModels(preproc_model))<line_sep>model.save(export_onnx_path+"/quant_mobilenet_v1_4b.onnx")<with_stmt>open(export_onnx_path+"/mobilenet_validation.csv" "w" newline="")<as>csvfile<block_start>writer=csv.writer(csvfile)<line_sep>writer.writerow(["goldenID" "brevitasTop5" "brevitasTop5[%]" "finnTop5" "finnTop5[%]" "top5equal" "top5%equal" ])<line_sep>csvfile.flush()<line_sep>workload=imagenet_util.get_val_images(n_images interleave_classes=<true>)<line_sep>all_inds_ok=<true><line_sep>all_probs_ok=<true><for_stmt>(img_path target_id) workload<block_start>img_np=imagenet_util.load_resize_crop(img_path)<line_sep>img_torch=torch.from_numpy(img_np).float()<line_sep># do forward pass in PyTorch/Brevitas 
input_tensor=preproc.forward(img_torch)<line_sep>expected=mobilenet.forward(input_tensor).detach().numpy()<line_sep>expected_topk=expected.flatten()<line_sep>expected_top5=np.argsort(expected_topk)[-5:]<line_sep>expected_top5=np.flip(expected_top5)<line_sep>expected_top5_prob=[]<for_stmt>index expected_top5<block_start>expected_top5_prob.append(expected_topk[index])<block_end>idict={model.graph.input[0].name:img_np}<line_sep>odict=oxe.execute_onnx(model idict return_full_exec_context=<true>)<line_sep>produced=odict[model.graph.output[0].name]<line_sep>produced_prob=odict["TopK_0_out0"]<times>a0<line_sep>inds_ok=(produced.flatten()<eq>expected_top5).all()<line_sep>probs_ok=np.isclose(produced_prob.flatten() expected_top5_prob).all()<line_sep>all_inds_ok=all_inds_ok<and>inds_ok<line_sep>all_probs_ok=all_probs_ok<and>probs_ok<line_sep>writer.writerow([str(target_id) str(expected_top5) str(expected_top5_prob) str(produced.flatten()) str(produced_prob.flatten()) str(inds_ok) str(probs_ok) ])<line_sep>csvfile.flush()<if_stmt>((<not>inds_ok)<or>(<not>probs_ok))<and>debug_mode<block_start>print("Results differ for %s"%img_path)<line_sep># check all tensors at debug markers names_brevitas=set(dbg_hook.values.keys())<line_sep>names_finn=set(odict.keys())<line_sep>names_common=names_brevitas.intersection(names_finn)<for_stmt>dbg_name names_common<block_start><if_stmt><not>np.isclose(dbg_hook.values[dbg_name].detach().numpy() odict[dbg_name] atol=1e-3 ).all()<block_start>print("Tensor %s differs between Brevitas and FINN"%dbg_name)<block_end><block_end><block_end><block_end><assert_stmt>all_inds_ok<and>all_probs_ok<block_end><block_end>
# # Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ # Written by <NAME> <<EMAIL>> # """Implementations of feature maps to be used with linear attention and causal linear attention."""<import_from_stmt>.base elu_feature_map ActivationFunctionFeatureMap<import_from_stmt>.fourier_features RandomFourierFeatures Favor SmoothedRandomFourierFeatures GeneralizedRandomFeatures<line_sep>
__author__='sibirrer'<import_stmt>numpy<as>np<import_from_stmt>lenstronomy.Util.package_util exporter<line_sep>export,__all__=exporter()<line_sep>@export<class_stmt>Slit(object)<block_start>""" Slit aperture description """<def_stmt>__init__ self length width center_ra=0 center_dec=0 angle=0<block_start>""" :param length: length of slit :param width: width of slit :param center_ra: center of slit :param center_dec: center of slit :param angle: orientation angle of slit, angle=0 corresponds length in RA direction """<line_sep>self._length=length<line_sep>self._width=width<line_sep>self._center_ra,self._center_dec=center_ra center_dec<line_sep>self._angle=angle<block_end><def_stmt>aperture_select self ra dec<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise """<line_sep><return>slit_select(ra dec self._length self._width self._center_ra self._center_dec self._angle) 0<block_end>@property<def_stmt>num_segments self<block_start>""" number of segments with separate measurements of the velocity dispersion :return: int """<line_sep><return>1<block_end><block_end>@export<def_stmt>slit_select ra dec length width center_ra=0 center_dec=0 angle=0<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :param length: length of slit :param width: width of slit :param center_ra: center of slit :param center_dec: center of slit :param angle: orientation angle of slit, angle=0 corresponds length in RA direction :return: bool, True if photon/ray is within the slit, False otherwise """<line_sep>ra_=ra-center_ra<line_sep>dec_=dec-center_dec<line_sep>x=np.cos(angle)<times>ra_+np.sin(angle)<times>dec_<line_sep>y=-np.sin(angle)<times>ra_+np.cos(angle)<times>dec_<if_stmt>abs(x)<l>length/2.<and>abs(y)<l>width/2.<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end>@export<class_stmt>Frame(object)<block_start>""" rectangular box with a hole in the middle (also rectangular), effectively a frame """<def_stmt>__init__ self width_outer width_inner center_ra=0 center_dec=0 angle=0<block_start>""" :param width_outer: width of box to the outer parts :param width_inner: width of inner removed box :param center_ra: center of slit :param center_dec: center of slit :param angle: orientation angle of slit, angle=0 corresponds length in RA direction """<line_sep>self._width_outer=width_outer<line_sep>self._width_inner=width_inner<line_sep>self._center_ra,self._center_dec=center_ra center_dec<line_sep>self._angle=angle<block_end><def_stmt>aperture_select self ra dec<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise """<line_sep><return>frame_select(ra dec self._width_outer self._width_inner self._center_ra self._center_dec self._angle) 0<block_end>@property<def_stmt>num_segments self<block_start>""" number of segments with separate measurements of the velocity dispersion :return: int """<line_sep><return>1<block_end><block_end>@export<def_stmt>frame_select ra dec width_outer width_inner center_ra=0 center_dec=0 angle=0<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :param width_outer: width of box to the outer parts :param width_inner: width of inner removed box :param center_ra: center of slit :param center_dec: center of slit 
:param angle: orientation angle of slit, angle=0 corresponds length in RA direction :return: bool, True if photon/ray is within the box with a hole, False otherwise """<line_sep>ra_=ra-center_ra<line_sep>dec_=dec-center_dec<line_sep>x=np.cos(angle)<times>ra_+np.sin(angle)<times>dec_<line_sep>y=-np.sin(angle)<times>ra_+np.cos(angle)<times>dec_<if_stmt>abs(x)<l>width_outer/2.<and>abs(y)<l>width_outer/2.<block_start><if_stmt>abs(x)<l>width_inner/2.<and>abs(y)<l>width_inner/2.<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><return><false><block_end>@export<class_stmt>Shell(object)<block_start>""" Shell aperture """<def_stmt>__init__ self r_in r_out center_ra=0 center_dec=0<block_start>""" :param r_in: innermost radius to be selected :param r_out: outermost radius to be selected :param center_ra: center of the sphere :param center_dec: center of the sphere """<line_sep>self._r_in,self._r_out=r_in r_out<line_sep>self._center_ra,self._center_dec=center_ra center_dec<block_end><def_stmt>aperture_select self ra dec<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise """<line_sep><return>shell_select(ra dec self._r_in self._r_out self._center_ra self._center_dec) 0<block_end>@property<def_stmt>num_segments self<block_start>""" number of segments with separate measurements of the velocity dispersion :return: int """<line_sep><return>1<block_end><block_end>@export<def_stmt>shell_select ra dec r_in r_out center_ra=0 center_dec=0<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :param r_in: innermost radius to be selected :param r_out: outermost radius to be selected :param center_ra: center of the sphere :param center_dec: center of the sphere :return: boolean, True if within the radial range, False otherwise """<line_sep>x=ra-center_ra<line_sep>y=dec-center_dec<line_sep>R=np.sqrt(x<power>2+y<power>2)<if_stmt>(R<ge>r_in)<and>(R<l>r_out)<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end>@export<class_stmt>IFUShells(object)<block_start>""" class for an Integral Field Unit spectrograph with azimuthal shells where the kinematics are measured """<def_stmt>__init__ self r_bins center_ra=0 center_dec=0<block_start>""" :param r_bins: array of radial bins to average the dispersion spectra in ascending order. It starts with the inner-most edge to the outermost edge. :param center_ra: center of the sphere :param center_dec: center of the sphere """<line_sep>self._r_bins=r_bins<line_sep>self._center_ra,self._center_dec=center_ra center_dec<block_end><def_stmt>aperture_select self ra dec<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :return: bool, True if photon/ray is within the slit, False otherwise, index of shell """<line_sep><return>shell_ifu_select(ra dec self._r_bins self._center_ra self._center_dec)<block_end>@property<def_stmt>num_segments self<block_start>""" number of segments with separate measurements of the velocity dispersion :return: int """<line_sep><return>len(self._r_bins)-1<block_end><block_end>@export<def_stmt>shell_ifu_select ra dec r_bin center_ra=0 center_dec=0<block_start>""" :param ra: angular coordinate of photon/ray :param dec: angular coordinate of photon/ray :param r_bin: array of radial bins to average the dispersion spectra in ascending order. 
It starts with the inner-most edge to the outermost edge. :param center_ra: center of the sphere :param center_dec: center of the sphere :return: boolean, True if within the radial range, False otherwise """<line_sep>x=ra-center_ra<line_sep>y=dec-center_dec<line_sep>R=np.sqrt(x<power>2+y<power>2)<for_stmt>i range(0 len(r_bin)-1)<block_start><if_stmt>(R<ge>r_bin[i])<and>(R<l>r_bin[i+1])<block_start><return><true> i<block_end><block_end><return><false> <none><block_end>
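As a quick, hedged illustration of the selection helpers defined above (not part of the library itself), the snippet below calls slit_select and shell_ifu_select with made-up coordinates; it assumes the functions in this module are in scope.

# A slit of length 2.0 and width 0.5 centered on the origin, with no rotation.
print(slit_select(ra=0.8, dec=0.1, length=2.0, width=0.5))  # True: |x| < 1.0 and |y| < 0.25
print(slit_select(ra=0.8, dec=0.4, length=2.0, width=0.5))  # False: the point falls outside the slit width

# Three concentric IFU shells with radial bin edges at 0, 1, 2 and 3.
print(shell_ifu_select(ra=2.5, dec=0.0, r_bin=[0.0, 1.0, 2.0, 3.0]))  # (True, 2): radius 2.5 lands in the third shell
print(shell_ifu_select(ra=5.0, dec=0.0, r_bin=[0.0, 1.0, 2.0, 3.0]))  # (False, None): outside the outermost edge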
# -*- coding: utf-8 -*- # # Copyright (C) 2019 <NAME> <<EMAIL>> # All rights reserved. # # This code is licensed under the MIT License. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files(the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and / or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions : # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # You will need an API Key for this plugin to work. # From the Settings -> API Keys you can click "Create API Key" if you don't # have one already. The key must have at least the "Mail Send" permission # to work. # # The schema to use the plugin looks like this: # {schema}://{apikey}:{from_email} # # Your {from_email} must be comprised of your Sendgrid Authenticated # Domain. The same domain must have 'Link Branding' turned on as well or it # will not work. This can be seen from Settings -> Sender Authentication. # If your (SendGrid) verified domain is example.com, then your schema may # look something like this: # Simple API Reference: # - https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html # - https://sendgrid.com/docs/ui/sending-email/\ # how-to-send-an-email-with-dynamic-transactional-templates/ <import_stmt>requests<import_from_stmt>json dumps<import_from_stmt>.NotifyBase NotifyBase<import_from_stmt>..common NotifyFormat<import_from_stmt>..common NotifyType<import_from_stmt>..utils parse_list<import_from_stmt>..utils is_email<import_from_stmt>..utils validate_regex<import_from_stmt>..AppriseLocale gettext_lazy<as>_<line_sep># Extend HTTP Error Messages SENDGRID_HTTP_ERROR_MAP={401:'Unauthorized - You do not have authorization to make the request.' 413:'Payload Too Large - The JSON payload you have included in your '<concat>'request is too large.' 429:'Too Many Requests - The number of requests you have made exceeds '<concat>'SendGrid’s rate limitations.' }<class_stmt>NotifySendGrid(NotifyBase)<block_start>""" A wrapper for Notify SendGrid Notifications """<line_sep># The default descriptive name associated with the Notification service_name='SendGrid'<line_sep># The services URL service_url='https://sendgrid.com'<line_sep># The default secure protocol secure_protocol='sendgrid'<line_sep># A URL that takes you to the setup/help of the specific protocol setup_url='https://github.com/caronc/apprise/wiki/Notify_sendgrid'<line_sep># Default to HTML notify_format=NotifyFormat.HTML<line_sep># The default Email API URL to use notify_url='https://api.sendgrid.com/v3/mail/send'<line_sep># Allow 300 requests per minute. # 60/300 = 0.2 request_rate_per_sec=0.2<line_sep># The default subject to use if one isn't specified. 
default_empty_subject='<no subject>'<line_sep># Define object templates templates=('{schema}://{apikey}:{from_email}' '{schema}://{apikey}:{from_email}/{targets}' )<line_sep># Define our template arguments template_tokens=dict(NotifyBase.template_tokens **{'apikey':{'name':_('API Key') 'type':'string' 'private':<true> 'required':<true> 'regex':(r'^[A-Z0-9._-]+$' 'i') } 'from_email':{'name':_('Source Email') 'type':'string' 'required':<true> } 'target_email':{'name':_('Target Email') 'type':'string' 'map_to':'targets' } 'targets':{'name':_('Targets') 'type':'list:string' } })<line_sep># Define our template arguments template_args=dict(NotifyBase.template_args **{'to':{'alias_of':'targets' } 'cc':{'name':_('Carbon Copy') 'type':'list:string' } 'bcc':{'name':_('Blind Carbon Copy') 'type':'list:string' } 'template':{# Template ID # The template ID is 64 characters with one dash (d-uuid) 'name':_('Template') 'type':'string' } })<line_sep># Support Template Dynamic Variables (Substitutions) template_kwargs={'template_data':{'name':_('Template Data') 'prefix':'+' } }<def_stmt>__init__ self apikey from_email targets=<none> cc=<none> bcc=<none> template=<none> template_data=<none> **kwargs<block_start>""" Initialize Notify SendGrid Object """<line_sep>super(NotifySendGrid self).__init__(**kwargs)<line_sep># API Key (associated with project) self.apikey=validate_regex(apikey *self.template_tokens['apikey']['regex'])<if_stmt><not>self.apikey<block_start>msg='An invalid SendGrid API Key '<concat>'({}) was specified.'.format(apikey)<line_sep>self.logger.warning(msg)<line_sep><raise>TypeError(msg)<block_end>result=is_email(from_email)<if_stmt><not>result<block_start>msg='Invalid ~From~ email specified: {}'.format(from_email)<line_sep>self.logger.warning(msg)<line_sep><raise>TypeError(msg)<block_end># Store email address self.from_email=result['full_email']<line_sep># Acquire Targets (To Emails) self.targets=list()<line_sep># Acquire Carbon Copies self.cc=set()<line_sep># Acquire Blind Carbon Copies self.bcc=set()<line_sep># Now our dynamic template (if defined) self.template=template<line_sep># Now our dynamic template data (if defined) self.template_data=template_data<if>isinstance(template_data dict)<else>{}<line_sep># Validate recipients (to:) and drop bad ones: <for_stmt>recipient parse_list(targets)<block_start>result=is_email(recipient)<if_stmt>result<block_start>self.targets.append(result['full_email'])<line_sep><continue><block_end>self.logger.warning('Dropped invalid email '<concat>'({}) specified.'.format(recipient) )<block_end># Validate recipients (cc:) and drop bad ones: <for_stmt>recipient parse_list(cc)<block_start>result=is_email(recipient)<if_stmt>result<block_start>self.cc.add(result['full_email'])<line_sep><continue><block_end>self.logger.warning('Dropped invalid Carbon Copy email '<concat>'({}) specified.'.format(recipient) )<block_end># Validate recipients (bcc:) and drop bad ones: <for_stmt>recipient parse_list(bcc)<block_start>result=is_email(recipient)<if_stmt>result<block_start>self.bcc.add(result['full_email'])<line_sep><continue><block_end>self.logger.warning('Dropped invalid Blind Carbon Copy email '<concat>'({}) specified.'.format(recipient) )<block_end><if_stmt>len(self.targets)<eq>0# Notify ourselves <block_start>self.targets.append(self.from_email)<block_end><return><block_end><def_stmt>url self privacy=<false> *args **kwargs<block_start>""" Returns the URL built dynamically based on specified arguments. 
"""<line_sep># Our URL parameters params=self.url_parameters(privacy=privacy *args **kwargs)<if_stmt>len(self.cc)<g>0# Handle our Carbon Copy Addresses <block_start>params['cc']=','.join(self.cc)<block_end><if_stmt>len(self.bcc)<g>0# Handle our Blind Carbon Copy Addresses <block_start>params['bcc']=','.join(self.bcc)<block_end><if_stmt>self.template# Handle our Template ID if if was specified <block_start>params['template']=self.template<block_end># Append our template_data into our parameter list params.update({'+{}'.format(k):v<for>k,v self.template_data.items()})<line_sep># a simple boolean check as to whether we display our target emails # or not has_targets=<not>(len(self.targets)<eq>1<and>self.targets[0]<eq>self.from_email)<line_sep><return>'{schema}://{apikey}:{from_email}/{targets}?{params}'.format(schema=self.secure_protocol apikey=self.pprint(self.apikey privacy safe='') # never encode email since it plays a huge role in our hostname from_email=self.from_email targets=''<if><not>has_targets<else>'/'.join([NotifySendGrid.quote(x safe='')<for>x self.targets]) params=NotifySendGrid.urlencode(params) )<block_end><def_stmt>send self body title='' notify_type=NotifyType.INFO **kwargs<block_start>""" Perform SendGrid Notification """<line_sep>headers={'User-Agent':self.app_id 'Content-Type':'application/json' 'Authorization':'Bearer {}'.format(self.apikey) }<line_sep># error tracking (used for function return) has_error=<false><line_sep># A Simple Email Payload Template _payload={'personalizations':[{# Placeholder 'to':[{'email':<none>}] }] 'from':{'email':self.from_email } # A subject is a requirement, so if none is specified we must # set a default with at least 1 character or SendGrid will deny # our request 'subject':title<if>title<else>self.default_empty_subject 'content':[{'type':'text/plain'<if>self.notify_format<eq>NotifyFormat.TEXT<else>'text/html' 'value':body }] }<if_stmt>self.template<block_start>_payload['template_id']=self.template<if_stmt>self.template_data<block_start>_payload['personalizations'][0]['dynamic_template_data']={k:v<for>k,v self.template_data.items()}<block_end><block_end>targets=list(self.targets)<while_stmt>len(targets)<g>0<block_start>target=targets.pop(0)<line_sep># Create a copy of our template payload=_payload.copy()<line_sep># the cc, bcc, to field must be unique or SendMail will fail, the # below code prepares this by ensuring the target isn't in the cc # list or bcc list. 
It also makes sure the cc list does not contain # any of the bcc entries cc=(self.cc-self.bcc-set([target]))<line_sep>bcc=(self.bcc-set([target]))<line_sep># Set our target payload['personalizations'][0]['to'][0]['email']=target<if_stmt>len(cc)<block_start>payload['personalizations'][0]['cc']=[{'email':email}<for>email cc]<block_end><if_stmt>len(bcc)<block_start>payload['personalizations'][0]['bcc']=[{'email':email}<for>email bcc]<block_end>self.logger.debug('SendGrid POST URL: %s (cert_verify=%r)'%(self.notify_url self.verify_certificate ))<line_sep>self.logger.debug('SendGrid Payload: %s'%str(payload))<line_sep># Always call throttle before any remote server i/o is made self.throttle()<try_stmt><block_start>r=requests.post(self.notify_url data=dumps(payload) headers=headers verify=self.verify_certificate timeout=self.request_timeout )<if_stmt>r.status_code<not><in>(requests.codes.ok requests.codes.accepted)# We had a problem <block_start>status_str=NotifySendGrid.http_response_code_lookup(r.status_code SENDGRID_HTTP_ERROR_MAP)<line_sep>self.logger.warning('Failed to send SendGrid notification to {}: '<concat>'{}{}error={}.'.format(target status_str ', '<if>status_str<else>'' r.status_code))<line_sep>self.logger.debug('Response Details:\r\n{}'.format(r.content))<line_sep># Mark our failure has_error=<true><line_sep><continue><block_end><else_stmt><block_start>self.logger.info('Sent SendGrid notification to {}.'.format(target))<block_end><block_end><except_stmt>requests.RequestException<as>e<block_start>self.logger.warning('A Connection error occurred sending SendGrid '<concat>'notification to {}.'.format(target))<line_sep>self.logger.debug('Socket Exception: %s'%str(e))<line_sep># Mark our failure has_error=<true><line_sep><continue><block_end><block_end><return><not>has_error<block_end>@staticmethod<def_stmt>parse_url url<block_start>""" Parses the URL and returns enough arguments that can allow us to re-instantiate this object. """<line_sep>results=NotifyBase.parse_url(url)<if_stmt><not>results# We're done early as we couldn't load the results <block_start><return>results<block_end># Our URL looks like this: # {schema}://{apikey}:{from_email}/{targets} # # which actually equates to: # {schema}://{user}:{password}@{host}/{email1}/{email2}/etc.. 
# ^ ^ ^ # | | | # apikey -from addr- <if_stmt><not>results.get('user')# An API Key was not properly specified <block_start><return><none><block_end><if_stmt><not>results.get('password')# A From Email was not correctly specified <block_start><return><none><block_end># Prepare our API Key results['apikey']=NotifySendGrid.unquote(results['user'])<line_sep># Prepare our From Email Address results['from_email']='{}@{}'.format(NotifySendGrid.unquote(results['password']) NotifySendGrid.unquote(results['host']) )<line_sep># Acquire our targets results['targets']=NotifySendGrid.split_path(results['fullpath'])<line_sep># The 'to' makes it easier to use yaml configuration <if_stmt>'to'<in>results['qsd']<and>len(results['qsd']['to'])<block_start>results['targets']<augadd>NotifySendGrid.parse_list(results['qsd']['to'])<block_end># Handle Carbon Copy Addresses <if_stmt>'cc'<in>results['qsd']<and>len(results['qsd']['cc'])<block_start>results['cc']=NotifySendGrid.parse_list(results['qsd']['cc'])<block_end># Handle Blind Carbon Copy Addresses <if_stmt>'bcc'<in>results['qsd']<and>len(results['qsd']['bcc'])<block_start>results['bcc']=NotifySendGrid.parse_list(results['qsd']['bcc'])<block_end># Handle the Template ID <if_stmt>'template'<in>results['qsd']<and>len(results['qsd']['template'])<block_start>results['template']=NotifySendGrid.unquote(results['qsd']['template'])<block_end># Add any template substitutions results['template_data']=results['qsd+']<line_sep><return>results<block_end><block_end>
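A hedged usage sketch of the URL schema documented at the top of this plugin: the API key, domain and addresses below are placeholders, and the standard Apprise entry points (Apprise().add() and notify()) are assumed.

import apprise

a = apprise.Apprise()
# {schema}://{apikey}:{from_email}/{targets}, plus optional cc=/bcc=/template= query arguments
a.add('sendgrid://MY_API_KEY:noreply@example.com/user@example.com?cc=manager@example.com')
a.notify(title='Weekly report', body='<b>All systems nominal</b>')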
#process a filtered list of files by calling repeatedly a #console app (no console window opens) in two parallel threads with a timeout #<NAME> 2015 <import_stmt>os<import_stmt>threading<import_stmt>subprocess<def_stmt>my_thread <block_start><global>files path timeout options<line_sep>myname=threading.currentThread().getName()<while_stmt>files#create command to run <block_start>nextfile=files.pop()<line_sep>#print name of thread and command being run print('Thread {0} starts processing {1}'.format(myname nextfile))<line_sep>f=path+nextfile+options<try_stmt>#timeout interrupts frozen command, shell=True doesn't open a console <block_start>subprocess.check_call(args=f shell=<true> timeout=timeout)<block_end><except_stmt>subprocess.TimeoutExpired<block_start>print('Thread {0} Processing {1} took too long'.format(myname nextfile))<block_end><except_stmt>subprocess.CalledProcessError<as>e<block_start>print('Thread {0} Processing {1} returned error {2}:{3}'.format(myname nextfile e.returncode e.output))<block_end><except_stmt>Exception<as>e<block_start>print('Thread {0} Processing {1} returned error {2}'.format(myname nextfile type(e).__name__))<block_end><block_end>print('thread {0} stopped'.format(myname))<block_end>timeout=150<line_sep>#the path to the console app exe_path='\"C:/Program files/Calibre2/ebook-convert.exe" '<line_sep>file_path='./'# so it can be called from a console opened in the folder where the files are options='\" .epub > nul'<line_sep>#filter the files in file_path extensions=['mobi' 'lit' 'prc' 'azw' 'rtf' 'odf']<line_sep>files=[fn<for>fn os.listdir(file_path)<if>any([fn.endswith(ext)<for>ext extensions])]<line_sep>path=exe_path+' \"'+file_path<line_sep>#runs the same thread function twice, each with a name t1=threading.Thread(target=my_thread name='uno')<line_sep>t1.start()<line_sep>t2=threading.Thread(target=my_thread name='dos')<line_sep>t2.start()<line_sep>
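A hedged alternative sketch (not from the original script): the same two-worker fan-out can be written with concurrent.futures so the pool owns the threads; exe_path, file_path, options and timeout are the placeholders defined above.

from concurrent.futures import ThreadPoolExecutor
import subprocess

def convert(fn):
    # Build the same quoted command line as the script above.
    cmd = exe_path + ' "' + file_path + fn + options
    try:
        subprocess.check_call(args=cmd, shell=True, timeout=timeout)
    except subprocess.SubprocessError as e:
        print('Converting {0} failed: {1}'.format(fn, type(e).__name__))

with ThreadPoolExecutor(max_workers=2) as pool:
    pool.map(convert, list(files))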
""" File to define utilities for Game handling. The GameState data structures serve as the states that preserve the information of an environment and is used within the Coach classes to handle environment data. """<import_from_stmt>dataclasses dataclass<import_stmt>typing<import_stmt>gym<import_from_stmt>gym Env spaces<import_from_stmt>gym.envs.atari AtariEnv<import_stmt>numpy<as>np<line_sep>@dataclass<class_stmt>GameState<block_start>canonical_state:typing.Any# s_t observation:np.ndarray# o_t action:int# a_t player:int# player_t done:bool# I(s_t = s_T) <block_end>@dataclass<class_stmt>GymState(GameState)<block_start>env:Env# Class for the (stateful) logic of Gym Environments at t. <block_end>@dataclass<class_stmt>AtariState(GymState)<block_start>env:AtariEnv<block_end># Class for the (stateful) logic of Gym Atari Environments at t. <class_stmt>DiscretizeAction(gym.ActionWrapper)<block_start>""" Factorizes a continuous action space of an environment into n discrete actions. """<def_stmt>__init__ self env n:int<arrow><none><block_start>""" Factorize the given environment's action space (a single continuous action) to n discrete actions. :param env: Gym.Env Environment object from OpenAI Gym. :param n: int Number of actions to factorize. """<assert_stmt>isinstance(env.action_space spaces.Box) ("expected Box action space, got {}".format(type(env.action_space)))<assert_stmt>env.action_space.is_bounded() "expected bounded Box action space"<line_sep># We could support multiple dimensions, but that quickly becomes unmanagble with # the single dimension spaces.Discrete. We can add a version using # spaces.MultiDiscrete for that use case. dims=np.product(env.action_space.shape)<assert_stmt>dims<eq>1 f"expected 1d Box action space, got {dims}d space"<line_sep>super(DiscretizeAction self).__init__(env)<line_sep>self.action_space=spaces.Discrete(n)<block_end><def_stmt>action self action:int<arrow>float<block_start>""" Linearly scale the action integer between the continuous range. Example if range: [-1, 1] and n = 3, then a'=0 -> a=-1, a=1 -> a'=0, a=2 -> a'=1 :param action: int Action bin to perform in the environment. :return: float Action cast to the original, continuous, action space. """<line_sep>low=self.env.action_space.low<line_sep>high=self.env.action_space.high<line_sep>action=low+(high-low)<times>action/(self.action_space.n-1)<line_sep><return>action<block_end><def_stmt>reverse_action self action:float<arrow>int<block_start>""" Yield the closest bin action to the given continuous action. TODO """<line_sep><pass><block_end><block_end>
<import_stmt>sys<import_stmt>time<import_stmt>operator<import_from_stmt>datetime timedelta<import_stmt>numpy<as>np<import_stmt>collections<import_stmt>torch<import_stmt>torch.nn<as>nn<class_stmt>SMAQueue<block_start>""" Queue of fixed size with mean, max, min operations """<def_stmt>__init__ self size<block_start>self.queue=collections.deque()<line_sep>self.size=size<block_end><def_stmt>__iadd__ self other<block_start><if_stmt>isinstance(other (list tuple))<block_start>self.queue.extend(other)<block_end><else_stmt><block_start>self.queue.append(other)<block_end><while_stmt>len(self.queue)<g>self.size<block_start>self.queue.popleft()<block_end><return>self<block_end><def_stmt>__len__ self<block_start><return>len(self.queue)<block_end><def_stmt>__repr__ self<block_start><return>"SMAQueue(size=%d)"%self.size<block_end><def_stmt>__str__ self<block_start><return>"SMAQueue(size=%d, len=%d)"%(self.size len(self.queue))<block_end><def_stmt>min self<block_start><if_stmt><not>self.queue<block_start><return><none><block_end><return>np.min(self.queue)<block_end><def_stmt>mean self<block_start><if_stmt><not>self.queue<block_start><return><none><block_end><return>np.mean(self.queue)<block_end><def_stmt>max self<block_start><if_stmt><not>self.queue<block_start><return><none><block_end><return>np.max(self.queue)<block_end><block_end><class_stmt>SpeedMonitor<block_start><def_stmt>__init__ self batch_size autostart=<true><block_start>self.batch_size=batch_size<line_sep>self.start_ts=<none><line_sep>self.batches=<none><if_stmt>autostart<block_start>self.reset()<block_end><block_end><def_stmt>epoch self<block_start><if_stmt>self.epoches<is><not><none><block_start>self.epoches<augadd>1<block_end><block_end><def_stmt>batch self<block_start><if_stmt>self.batches<is><not><none><block_start>self.batches<augadd>1<block_end><block_end><def_stmt>reset self<block_start>self.start_ts=time.time()<line_sep>self.batches=0<line_sep>self.epoches=0<block_end><def_stmt>seconds self<block_start>""" Seconds since last reset :return: """<line_sep><return>time.time()-self.start_ts<block_end><def_stmt>samples_per_sec self<block_start>""" Calculate samples per second since last reset() call :return: float count samples per second or None if not started """<if_stmt>self.start_ts<is><none><block_start><return><none><block_end>secs=self.seconds()<if_stmt>abs(secs)<l>1e-5<block_start><return>0.0<block_end><return>(self.batches+1)<times>self.batch_size/secs<block_end><def_stmt>epoch_time self<block_start>""" Calculate average epoch time :return: timedelta object """<if_stmt>self.start_ts<is><none><block_start><return><none><block_end>s=self.seconds()<if_stmt>self.epoches<g>0<block_start>s<augdiv>self.epoches+1<block_end><return>timedelta(seconds=s)<block_end><def_stmt>batch_time self<block_start>""" Calculate average batch time :return: timedelta object """<if_stmt>self.start_ts<is><none><block_start><return><none><block_end>s=self.seconds()<if_stmt>self.batches<g>0<block_start>s<augdiv>self.batches+1<block_end><return>timedelta(seconds=s)<block_end><block_end><class_stmt>WeightedMSELoss(nn.Module)<block_start><def_stmt>__init__ self size_average=<true><block_start>super(WeightedMSELoss self).__init__()<line_sep>self.size_average=size_average<block_end><def_stmt>forward self input target weights=<none><block_start><if_stmt>weights<is><none><block_start><return>nn.MSELoss(self.size_average)(input target)<block_end>loss_rows=(input-target)<power>2<if_stmt>len(loss_rows.size())<ne>1<block_start>loss_rows=torch.sum(loss_rows 
dim=1)<block_end>res=(weights<times>loss_rows).sum()<if_stmt>self.size_average<block_start>res<augdiv>len(weights)<block_end><return>res<block_end><block_end><class_stmt>SegmentTree(object)<block_start><def_stmt>__init__ self capacity operation neutral_element<block_start>"""Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as a regular array, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. Parameters ---------- capacity: int Total size of the array - must be a power of two. operation: lambda obj, obj -> obj an operation for combining elements (e.g. sum, max); must form a mathematical group together with the set of possible values for array elements. neutral_element: obj neutral element for the operation above. e.g. float('-inf') for max and 0 for sum. """<assert_stmt>capacity<g>0<and>capacity&(capacity-1)<eq>0 "capacity must be positive and a power of 2."<line_sep>self._capacity=capacity<line_sep>self._value=[neutral_element<for>_ range(2<times>capacity)]<line_sep>self._operation=operation<block_end><def_stmt>_reduce_helper self start end node node_start node_end<block_start><if_stmt>start<eq>node_start<and>end<eq>node_end<block_start><return>self._value[node]<block_end>mid=(node_start+node_end)<floordiv>2<if_stmt>end<le>mid<block_start><return>self._reduce_helper(start end 2<times>node node_start mid)<block_end><else_stmt><block_start><if_stmt>mid+1<le>start<block_start><return>self._reduce_helper(start end 2<times>node+1 mid+1 node_end)<block_end><else_stmt><block_start><return>self._operation(self._reduce_helper(start mid 2<times>node node_start mid) self._reduce_helper(mid+1 end 2<times>node+1 mid+1 node_end))<block_end><block_end><block_end><def_stmt>reduce self start=0 end=<none><block_start>"""Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation(arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequence Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements. """<if_stmt>end<is><none><block_start>end=self._capacity<block_end><if_stmt>end<l>0<block_start>end<augadd>self._capacity<block_end>end<augsub>1<line_sep><return>self._reduce_helper(start end 1 0 self._capacity-1)<block_end><def_stmt>__setitem__ self idx val# index of the leaf <block_start>idx<augadd>self._capacity<line_sep>self._value[idx]=val<line_sep>idx<augfloordiv>2<while_stmt>idx<ge>1<block_start>self._value[idx]=self._operation(self._value[2<times>idx] self._value[2<times>idx+1])<line_sep>idx<augfloordiv>2<block_end><block_end><def_stmt>__getitem__ self idx<block_start><assert_stmt>0<le>idx<l>self._capacity<line_sep><return>self._value[self._capacity+idx]<block_end><block_end><class_stmt>SumSegmentTree(SegmentTree)<block_start><def_stmt>__init__ self capacity<block_start>super(SumSegmentTree self).__init__(capacity=capacity operation=operator.add neutral_element=0.0)<block_end><def_stmt>sum self start=0 end=<none><block_start>"""Returns arr[start] + ... + arr[end]"""<line_sep><return>super(SumSegmentTree self).reduce(start end)<block_end><def_stmt>find_prefixsum_idx self prefixsum<block_start>"""Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... 
+ arr[i - 1]) <= prefixsum If array values are probabilities, this function allows sampling indexes according to the discrete probability efficiently. Parameters ---------- prefixsum: float upper bound on the sum of array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint """<assert_stmt>0<le>prefixsum<le>self.sum()+1e-5<line_sep>idx=1<while_stmt>idx<l>self._capacity# while non-leaf <block_start><if_stmt>self._value[2<times>idx]<g>prefixsum<block_start>idx=2<times>idx<block_end><else_stmt><block_start>prefixsum<augsub>self._value[2<times>idx]<line_sep>idx=2<times>idx+1<block_end><block_end><return>idx-self._capacity<block_end><block_end><class_stmt>MinSegmentTree(SegmentTree)<block_start><def_stmt>__init__ self capacity<block_start>super(MinSegmentTree self).__init__(capacity=capacity operation=min neutral_element=float('inf'))<block_end><def_stmt>min self start=0 end=<none><block_start>"""Returns min(arr[start], ..., arr[end])"""<line_sep><return>super(MinSegmentTree self).reduce(start end)<block_end><block_end><class_stmt>TBMeanTracker<block_start>""" TensorBoard value tracker: batches a fixed amount of historical values and writes their mean into TB Designed and tested with pytorch-tensorboard in mind """<def_stmt>__init__ self writer batch_size<block_start>""" :param writer: writer with close() and add_scalar() methods :param batch_size: integer size of batch to track """<assert_stmt>isinstance(batch_size int)<assert_stmt>writer<is><not><none><line_sep>self.writer=writer<line_sep>self.batch_size=batch_size<block_end><def_stmt>__enter__ self<block_start>self._batches=collections.defaultdict(list)<line_sep><return>self<block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start>self.writer.close()<block_end>@staticmethod<def_stmt>_as_float value<block_start><assert_stmt>isinstance(value (float int np.ndarray np.generic torch.autograd.Variable))<or>torch.is_tensor(value)<line_sep>tensor_val=<none><if_stmt>isinstance(value torch.autograd.Variable)<block_start>tensor_val=value.data<block_end><elif_stmt>torch.is_tensor(value)<block_start>tensor_val=value<block_end><if_stmt>tensor_val<is><not><none><block_start><return>tensor_val.float().mean().item()<block_end><elif_stmt>isinstance(value np.ndarray)<block_start><return>float(np.mean(value))<block_end><else_stmt><block_start><return>float(value)<block_end><block_end><def_stmt>track self param_name value iter_index<block_start><assert_stmt>isinstance(param_name str)<assert_stmt>isinstance(iter_index int)<line_sep>data=self._batches[param_name]<line_sep>data.append(self._as_float(value))<if_stmt>len(data)<ge>self.batch_size<block_start>self.writer.add_scalar(param_name np.mean(data) iter_index)<line_sep>data.clear()<block_end><block_end><block_end><class_stmt>RewardTracker<block_start><def_stmt>__init__ self writer min_ts_diff=1.0<block_start>""" Constructs RewardTracker :param writer: writer to use for writing stats :param min_ts_diff: minimal time difference to track speed """<line_sep>self.writer=writer<line_sep>self.min_ts_diff=min_ts_diff<block_end><def_stmt>__enter__ self<block_start>self.ts=time.time()<line_sep>self.ts_frame=0<line_sep>self.total_rewards=[]<line_sep><return>self<block_end><def_stmt>__exit__ self *args<block_start>self.writer.close()<block_end><def_stmt>reward self reward frame 
epsilon=<none><block_start>self.total_rewards.append(reward)<line_sep>mean_reward=np.mean(self.total_rewards[-100:])<line_sep>ts_diff=time.time()-self.ts<if_stmt>ts_diff<g>self.min_ts_diff<block_start>speed=(frame-self.ts_frame)/ts_diff<line_sep>self.ts_frame=frame<line_sep>self.ts=time.time()<line_sep>epsilon_str=""<if>epsilon<is><none><else>", eps %.2f"%epsilon<line_sep>print("%d: done %d episodes, mean reward %.3f, speed %.2f f/s%s"%(frame len(self.total_rewards) mean_reward speed epsilon_str))<line_sep>sys.stdout.flush()<line_sep>self.writer.add_scalar("speed" speed frame)<block_end><if_stmt>epsilon<is><not><none><block_start>self.writer.add_scalar("epsilon" epsilon frame)<block_end>self.writer.add_scalar("reward_100" mean_reward frame)<line_sep>self.writer.add_scalar("reward" reward frame)<line_sep><return>mean_reward<if>len(self.total_rewards)<g>30<else><none><block_end><block_end>
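A hedged sketch of how the SumSegmentTree above behaves, e.g. when used for prioritized sampling; the priorities are arbitrary example values, and the capacity must be a power of two as required by the constructor.

tree = SumSegmentTree(capacity=4)
for i, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
    tree[i] = priority

print(tree.sum())                    # 10.0 -> total over all leaves
print(tree.sum(0, 2))                # 3.0  -> arr[0] + arr[1]
print(tree.find_prefixsum_idx(2.5))  # 1    -> cumulative sum through index 0 (1.0) <= 2.5 < 3.0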
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test data for firewall rules scanners."""<line_sep>FAKE_FIREWALL_RULE_FOR_TEST_PROJECT={'name':'policy1' 'full_name':('organization/org/folder/folder1/'<concat>'project/project0/firewall/policy1/') 'network':'network1' 'direction':'ingress' 'allowed':[{'IPProtocol':'tcp' 'ports':['1' '3389']}] 'sourceRanges':['0.0.0.0/0'] 'targetTags':['linux'] }<line_sep>FAKE_FIREWALL_RULE_FOR_PROJECT1={'name':'policy1' 'full_name':('organization/org/folder/test_instances/'<concat>'project/project1/firewall/policy1/') 'network':'network1' 'direction':'ingress' 'allowed':[{'IPProtocol':'tcp' 'ports':['22']}] 'sourceRanges':['172.16.58.3'] 'targetTags':['test'] }<line_sep>
<import_stmt>itertools<import_stmt>json<import_stmt>os<import_stmt>numpy<as>np<import_stmt>pybullet<as>p<import_from_stmt>bddl.object_taxonomy ObjectTaxonomy<import_from_stmt>pynput keyboard<import_stmt>igibson<import_from_stmt>igibson.objects.articulated_object URDFObject<import_from_stmt>igibson.objects.visual_marker VisualMarker<import_from_stmt>igibson.scenes.empty_scene EmptyScene<import_from_stmt>igibson.simulator Simulator<import_from_stmt>igibson.utils.assets_utils download_assets<line_sep>download_assets()<line_sep>ABILITY_NAME="cleaningTool"<line_sep>SYNSETS=["alarm.n.02" "printer.n.03" "facsimile.n.02" "scanner.n.02" "modem.n.01" ]<line_sep>CATEGORIES=["broom" "carpet_sweeper" "scraper" "scrub_brush" "toothbrush" "vacuum" ]<line_sep>MODE="synset"# "ability, "category" LINK_NAME="toggle_button"<line_sep>IS_CUBOID=<false><line_sep>SKIP_EXISTING=<false><line_sep>OBJECT_TAXONOMY=ObjectTaxonomy()<def_stmt>get_categories <block_start>dir=os.path.join(igibson.ig_dataset_path "objects")<line_sep><return>[cat<for>cat os.listdir(dir)<if>os.path.isdir(get_category_directory(cat))]<block_end><def_stmt>get_category_directory category<block_start><return>os.path.join(igibson.ig_dataset_path "objects" category)<block_end><def_stmt>get_obj folder<block_start><return>URDFObject(os.path.join(folder os.path.basename(folder)+".urdf") name="obj" model_path=folder)<block_end><def_stmt>get_metadata_filename objdir<block_start><return>os.path.join(objdir "misc" "metadata.json")<block_end><def_stmt>get_corner_positions base rotation size<block_start>quat=p.getQuaternionFromEuler(rotation)<line_sep>options=[-1 1]<line_sep>outputs=[]<for_stmt>pos itertools.product(options options options)<block_start>res=p.multiplyTransforms(base quat np.array(pos)<times>size/2.0 [0 0 0 1])<line_sep>outputs.append(res)<block_end><return>outputs<block_end><def_stmt>main # Collect the relevant categories. <block_start>categories=CATEGORIES<if_stmt>MODE<eq>"ability"<block_start>categories=[]<for_stmt>cat get_categories()# Check that the category has this label. <block_start>klass=OBJECT_TAXONOMY.get_class_name_from_igibson_category(cat)<if_stmt><not>klass<block_start><continue><block_end><if_stmt><not>OBJECT_TAXONOMY.has_ability(klass ABILITY_NAME)<block_start><continue><block_end>categories.append(cat)<block_end><block_end><elif_stmt>MODE<eq>"synset"<block_start>categories=[]<for_stmt>synset SYNSETS<block_start>categories.extend(OBJECT_TAXONOMY.get_igibson_categories(synset))<block_end>categories=set(categories)&set(get_categories())<block_end>print("%d categories: %s"%(len(categories) ", ".join(categories)))<line_sep># Now collect the actual objects. 
objects=[]<line_sep>objects_by_category={}<for_stmt>cat categories<block_start>cd=get_category_directory(cat)<line_sep>objects_by_category[cat]=[]<for_stmt>objdir os.listdir(cd)<block_start>objdirfull=os.path.join(cd objdir)<line_sep>objects.append(objdirfull)<line_sep>objects_by_category[cat].append(objdirfull)<block_end><block_end>print("%d objects.\n"%len(objects))<for_stmt>cat categories<block_start>cd=get_category_directory(cat)<for_stmt>objdir os.listdir(cd)<block_start>objdirfull=os.path.join(cd objdir)<line_sep>mfn=get_metadata_filename(objdirfull)<with_stmt>open(mfn "r")<as>mf<block_start>meta=json.load(mf)<block_end>offset=np.array([0.0 0.0 0.0])<line_sep>size=np.array([0.0 0.0 0.0])<line_sep>rotation=np.array([0.0 0.0 0.0])<line_sep>existing=<false><if_stmt>"links"<in>meta<and>LINK_NAME<in>meta["links"]<block_start>print("%s/%s already has the requested link."%(cat objdir))<if_stmt>SKIP_EXISTING<block_start><continue><block_end>existing=<true><line_sep>offset=np.array(meta["links"][LINK_NAME]["xyz"])<if_stmt>IS_CUBOID<block_start>size=np.array(meta["links"][LINK_NAME]["size"])<line_sep>rotation=np.array(meta["links"][LINK_NAME]["rpy"])<block_end><block_end>s=Simulator(mode="gui")<line_sep>scene=EmptyScene()<line_sep>s.import_scene(scene)<line_sep>obj=get_obj(objdirfull)<line_sep>s.import_object(obj)<line_sep>obj_pos=np.array([0.0 0.0 1.0])<line_sep>obj.set_position(obj_pos)<line_sep>dim=max(obj.bounding_box)<line_sep>marker_size=dim/100.0<line_sep>steps=[dim<times>0.1 dim<times>0.01 dim<times>0.001]<line_sep>rot_steps=[np.deg2rad(1) np.deg2rad(5) np.deg2rad(10)]<line_sep>m=VisualMarker(radius=marker_size rgba_color=[0 0 1 0.5])<line_sep>s.import_object(m)<if_stmt>IS_CUBOID<block_start>initial_poses=get_corner_positions(obj_pos+offset rotation size)<line_sep>markers=[VisualMarker(radius=marker_size rgba_color=[0 1 0 0.5])<for>_ initial_poses]<line_sep>[s.import_object(m)<for>m markers]<for_stmt>marker,(pos orn) zip(markers initial_poses)<block_start>marker.set_position_orientation(pos orn)<block_end><block_end># if existing: # e = VisualMarker(radius=0.02, rgba_color=[1, 0, 0, 0.5]) # s.import_object(e) # e.set_position(obj_pos + offset) step_size=steps[1]<line_sep>rot_step_size=rot_steps[1]<line_sep>done=<false><while_stmt><not>done<block_start><with_stmt>keyboard.Events()<as>events<block_start><for_stmt>event events<block_start><if_stmt>(event<is><none><or><not>isinstance(event keyboard.Events.Press)<or><not>hasattr(event.key "char"))<block_start><continue><block_end><if_stmt>event.key.char<eq>"w"<block_start>print("Moving forward one")<line_sep>offset<augadd>np.array([0 1 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"a"<block_start>print("Moving left one")<line_sep>offset<augadd>np.array([-1 0 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"s"<block_start>print("Moving back one")<line_sep>offset<augadd>np.array([0 -1 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"d"<block_start>print("Moving right one")<line_sep>offset<augadd>np.array([1 0 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"q"<block_start>print("Moving up one")<line_sep>offset<augadd>np.array([0 0 1])<times>step_size<block_end><elif_stmt>event.key.char<eq>"z"<block_start>print("Moving down one")<line_sep>offset<augadd>np.array([0 0 -1])<times>step_size<block_end><elif_stmt>event.key.char<eq>"1"<block_start>print("Sizing forward one")<line_sep>size<augadd>np.array([0 1 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"2"<block_start>print("Sizing back 
one")<line_sep>size<augadd>np.array([0 -1 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"4"<block_start>print("Sizing left one")<line_sep>size<augadd>np.array([-1 0 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"5"<block_start>print("Sizing right one")<line_sep>size<augadd>np.array([1 0 0])<times>step_size<block_end><elif_stmt>event.key.char<eq>"7"<block_start>print("Sizing up one")<line_sep>size<augadd>np.array([0 0 1])<times>step_size<block_end><elif_stmt>event.key.char<eq>"8"<block_start>print("Sizing down one")<line_sep>size<augadd>np.array([0 0 -1])<times>step_size<block_end><elif_stmt>event.key.char<eq>"t"<block_start>print("Rotation +X one")<line_sep>rotation<augadd>np.array([1 0 0])<times>rot_step_size<block_end><elif_stmt>event.key.char<eq>"y"<block_start>print("Rotation -X one")<line_sep>rotation<augadd>np.array([-1 0 0])<times>rot_step_size<block_end><elif_stmt>event.key.char<eq>"u"<block_start>print("Rotation +Y one")<line_sep>rotation<augadd>np.array([0 1 0])<times>rot_step_size<block_end><elif_stmt>event.key.char<eq>"i"<block_start>print("Rotation -Y one")<line_sep>rotation<augadd>np.array([0 -1 0])<times>rot_step_size<block_end><elif_stmt>event.key.char<eq>"o"<block_start>print("Rotation +Z one")<line_sep>rotation<augadd>np.array([0 0 1])<times>rot_step_size<block_end><elif_stmt>event.key.char<eq>"p"<block_start>print("Rotation -Z one")<line_sep>rotation<augadd>np.array([0 0 -1])<times>rot_step_size<block_end><elif_stmt>event.key.char<eq>"h"<block_start>print("Step to 0.1")<line_sep>step_size=steps[0]<line_sep>rot_step_size=rot_steps[0]<block_end><elif_stmt>event.key.char<eq>"j"<block_start>print("Step to 0.01")<line_sep>step_size=steps[1]<line_sep>rot_step_size=rot_steps[1]<block_end><elif_stmt>event.key.char<eq>"k"<block_start>print("Step to 0.001")<line_sep>step_size=steps[2]<line_sep>rot_step_size=rot_steps[2]<block_end><elif_stmt>event.key.char<eq>"b"<block_start>print("Updating box to match bounding box.")<line_sep>offset=np.array([0.0 0.0 0.0])<line_sep>rotation=np.array([0.0 0.0 0.0])<line_sep>size=np.array(obj.bounding_box dtype=float)<block_end><elif_stmt>event.key.char<eq>"c"<block_start>done=<true><line_sep><break><block_end>print("New position:" offset)<line_sep>m.set_position(obj_pos+offset)<if_stmt>IS_CUBOID<block_start>print("New rotation:" rotation)<line_sep>print("New size:" size)<line_sep>print("")<line_sep>poses=get_corner_positions(obj_pos+offset rotation size)<for_stmt>marker,(pos orn) zip(markers poses)<block_start>marker.set_position_orientation(pos orn)<block_end><block_end><block_end><block_end><block_end># Record it into the meta file. <if_stmt>"links"<not><in>meta<block_start>meta["links"]=dict()<block_end>dynamics_info=p.getDynamicsInfo(obj.get_body_id() -1)<line_sep>inertial_pos,inertial_orn=dynamics_info[3] dynamics_info[4]<line_sep>rel_position,rel_orn=p.multiplyTransforms(offset p.getQuaternionFromEuler(rotation) inertial_pos inertial_orn)<if_stmt>IS_CUBOID<block_start>meta["links"][LINK_NAME]={"geometry":"box" "size":list(size) "xyz":list(rel_position) "rpy":list(p.getEulerFromQuaternion(rel_orn)) }<block_end><else_stmt><block_start>meta["links"][LINK_NAME]={"geometry":<none> "size":<none> "xyz":list(rel_position) "rpy":<none>}<block_end><with_stmt>open(mfn "w")<as>mf<block_start>json.dump(meta mf)<line_sep>print("Updated %s"%mfn)<block_end>input("Hit enter to continue.")<line_sep>s.disconnect()<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Author: <NAME> <<EMAIL>> """ Calculate Segmentation metrics: - GlobalAccuracy - MeanAccuracy - Mean MeanIoU """<import_stmt>numpy<as>np<import_from_stmt>data_io imread<def_stmt>cal_semantic_metrics pred_list gt_list thresh_step=0.01 num_cls=2<block_start>final_accuracy_all=[]<for_stmt>thresh np.arange(0.0 1.0 thresh_step)<block_start>print(thresh)<line_sep>global_accuracy_cur=[]<line_sep>statistics=[]<for_stmt>pred,gt zip(pred_list gt_list)<block_start>gt_img=(gt/255).astype('uint8')<line_sep>pred_img=(pred/255<g>thresh).astype('uint8')<line_sep># calculate each image global_accuracy_cur.append(cal_global_acc(pred_img gt_img))<line_sep>statistics.append(get_statistics(pred_img gt_img num_cls))<block_end># get global accuracy with corresponding threshold: (TP+TN)/all_pixels global_acc=np.sum([v[0]<for>v global_accuracy_cur])/np.sum([v[1]<for>v global_accuracy_cur])<line_sep># get tp, fp, fn counts=[]<for_stmt>i range(num_cls)<block_start>tp=np.sum([v[i][0]<for>v statistics])<line_sep>fp=np.sum([v[i][1]<for>v statistics])<line_sep>fn=np.sum([v[i][2]<for>v statistics])<line_sep>counts.append([tp fp fn])<block_end># calculate mean accuracy mean_acc=np.sum([v[0]/(v[0]+v[2])<for>v counts])/num_cls<line_sep># calculate mean iou mean_iou_acc=np.sum([v[0]/(np.sum(v))<for>v counts])/num_cls<line_sep>final_accuracy_all.append([thresh global_acc mean_acc mean_iou_acc])<block_end><return>final_accuracy_all<block_end><def_stmt>cal_global_acc pred gt<block_start>""" acc = (TP+TN)/all_pixels """<line_sep>h,w=gt.shape<line_sep><return>[np.sum(pred<eq>gt) float(h<times>w)]<block_end><def_stmt>get_statistics pred gt num_cls=2<block_start>""" return tp, fp, fn """<line_sep>h,w=gt.shape<line_sep>statistics=[]<for_stmt>i range(num_cls)<block_start>tp=np.sum((pred<eq>i)&(gt<eq>i))<line_sep>fp=np.sum((pred<eq>i)&(gt<ne>i))<line_sep>fn=np.sum((pred<ne>i)&(gt<eq>i))<line_sep>statistics.append([tp fp fn])<block_end><return>statistics<block_end>
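A hedged sketch of what the helpers above return on a tiny made-up 2x2 prediction/ground-truth pair; it assumes the functions defined in this file are in scope.

import numpy as np

pred = np.array([[1, 0],
                 [1, 1]], dtype='uint8')
gt = np.array([[1, 0],
               [0, 1]], dtype='uint8')

print(cal_global_acc(pred, gt))  # [3, 4.0] -> 3 of the 4 pixels are labelled correctly
print(get_statistics(pred, gt))  # [[1, 0, 1], [2, 1, 0]] -> [tp, fp, fn] for class 0 and class 1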
# -*- coding: utf-8 -*- ''' FanFilm Add-on Copyright (C) 2015 lambda This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. '''<import_stmt>re urllib urlparse<import_from_stmt>resources.lib.libraries cleantitle<import_from_stmt>resources.lib.libraries client<import_from_stmt>resources.lib.libraries cache<import_from_stmt>resources.lib.libraries control<import_from_stmt>resources.lib.libraries cloudflare<class_stmt>source<block_start><def_stmt>__init__ self<block_start>self.base_link='http://dizimag.co'<line_sep>self.headers={'X-Requested-With':'XMLHttpRequest'}<block_end><def_stmt>dizimag_shows self<block_start><try_stmt><block_start>result=cloudflare.source(self.base_link)<line_sep>result=client.parseDOM(result 'div' attrs={'id':'fil'})[0]<line_sep>result=zip(client.parseDOM(result 'a' ret='href') client.parseDOM(result 'a'))<line_sep>result=[(re.sub('http.+?//.+?/' '/' i[0]) cleantitle.tv(i[1]))<for>i result]<line_sep><return>result<block_end><except_stmt><block_start><return><block_end><block_end><def_stmt>get_show self imdb tvdb tvshowtitle year<block_start><try_stmt><block_start>result=cache.get(self.dizimag_shows 72)<line_sep>tvshowtitle=cleantitle.tv(tvshowtitle)<line_sep>result=[i[0]<for>i result<if>tvshowtitle<eq>i[1]][0]<try_stmt><block_start>url=re.compile('//.+?(/.+)').findall(result)[0]<block_end><except_stmt><block_start>url=result<block_end>url=client.replaceHTMLCodes(url)<line_sep>url=url.encode('utf-8')<line_sep><return>url<block_end><except_stmt><block_start><return><block_end><block_end><def_stmt>get_episode self url imdb tvdb title date season episode<block_start><try_stmt><block_start><if_stmt>url<eq><none><block_start><return><block_end>url=urlparse.urljoin(self.base_link url)<line_sep>result=client.source(url)<line_sep>result=client.parseDOM(result 'a' ret='href')<line_sep>result=[i<for>i result<if>'/%01d-sezon-%01d-bolum-'%(int(season) int(episode))<in>i][0]<try_stmt><block_start>url=re.compile('//.+?(/.+)').findall(result)[0]<block_end><except_stmt><block_start>url=result<block_end>url=client.replaceHTMLCodes(url)<line_sep>url=url.encode('utf-8')<line_sep><return>url<block_end><except_stmt><block_start><return><block_end><block_end><def_stmt>get_sources self url hosthdDict hostDict locDict<block_start><try_stmt><block_start>sources=[]<if_stmt>url<eq><none><block_start><return>sources<block_end>sources_url=urlparse.urljoin(self.base_link url)<line_sep>result=client.request(sources_url close=<false>)<line_sep>result=re.compile('<script[^>]*>(.*?)</script>' re.DOTALL).findall(result)<line_sep>result=[re.compile("var\s+kaynaklar.*?url\s*:\s*\"([^\"]+)\"\s*,\s*data\s*:\s*'([^']+)").findall(i.replace('\n' ''))<for>i result]<line_sep>result=[i[0]<for>i result<if>len(i)<g>0][0]<line_sep>url=urlparse.urljoin(self.base_link result[0])<line_sep>post=result[1]<line_sep>result=client.request(url post=post 
headers=self.headers)<line_sep>result=re.compile('"videolink\d*"\s*:\s*"([^"]+)","videokalite\d*"\s*:\s*"?(\d+)p?').findall(result)<line_sep>result=[(i[0].replace('\\/' '/') i[1])<for>i result]<try_stmt><block_start>url=[i<for>i result<if><not>'google'<in>i[0]]<line_sep>url=[('%s|User-Agent=%s&Referer=%s'%(i[0].decode('unicode_escape') urllib.quote_plus(client.agent()) urllib.quote_plus(sources_url)) i[1])<for>i url]<try_stmt><block_start>sources.append({'source':'Dizimag' 'quality':'1080p' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'1080'][0]})<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>sources.append({'source':'Dizimag' 'quality':'HD' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'720'][0]})<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>sources.append({'source':'Dizimag' 'quality':'SD' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'480'][0]})<block_end><except_stmt><block_start>sources.append({'source':'Dizimag' 'quality':'SD' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'360'][0]})<block_end><block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>url=[i<for>i result<if>'google'<in>i[0]]<try_stmt><block_start>sources.append({'source':'GVideo' 'quality':'1080p' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'1080'][0]})<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>sources.append({'source':'GVideo' 'quality':'HD' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'720'][0]})<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>sources.append({'source':'GVideo' 'quality':'SD' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'480'][0]})<block_end><except_stmt><block_start>sources.append({'source':'GVideo' 'quality':'SD' 'provider':'Dizimag' 'url':[i[0]<for>i url<if>i[1]<eq>'360'][0]})<block_end><block_end><except_stmt><block_start><pass><block_end><return>sources<block_end><except_stmt><block_start><return>sources<block_end><block_end><def_stmt>resolve self url<block_start><try_stmt><block_start><if_stmt><not>'google'<in>url<block_start><return>url<block_end><if_stmt>url.startswith('stack://')<block_start><return>url<block_end>url=client.request(url output='geturl')<if_stmt>'requiressl=yes'<in>url<block_start>url=url.replace('http://' 'https://')<block_end><else_stmt><block_start>url=url.replace('https://' 'http://')<block_end><return>url<block_end><except_stmt><block_start><return><block_end><block_end><block_end>
<import_stmt>tensorflow<as>tf<line_sep># import tensorflow_graphics as tfg <import_stmt>numpy<as>np<import_stmt>skimage.data<import_from_stmt>PIL Image ImageDraw ImageFont<import_stmt>math<import_from_stmt>tensorflow.python.platform gfile<import_stmt>scipy.misc<line_sep>IMAGE_HEIGHT=256<line_sep>IMAGE_WIDTH=256<line_sep># ***************************************************************************************************** <def_stmt>calc_loss output y z_r# y refine <block_start>y_masked=tf.where(z_r y 0<times>tf.ones_like(y))<line_sep>y_masked_flat_refined=tf.reshape(y_masked [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep># output refine o_masked=tf.where(z_r output 0<times>tf.ones_like(y))<line_sep>o_masked_flat_refined=tf.reshape(o_masked [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep># mask refine mask_one_refined=tf.where(z_r tf.ones_like(y) 0<times>tf.ones_like(y))<line_sep>mask_one_flat_refined=tf.reshape(mask_one_refined [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep># num of pixels numOfPix=tf.reduce_sum(mask_one_flat_refined 1)<line_sep>d=tf.subtract(o_masked_flat_refined y_masked_flat_refined)<line_sep>d_sum=tf.reduce_sum(tf.square(d) 1)<line_sep>cost=tf.reduce_mean(tf.truediv(d_sum numOfPix))<line_sep><return>cost<block_end># ***************************************************************************************************** <def_stmt>calc_loss_normal output y_normal z_refined# gives mean angle error for given output tensor and its ref y <block_start>output_mask=tf.abs(output)<l>1e-5<line_sep>output_no0=tf.where(output_mask 1e-5<times>tf.ones_like(output) output)<line_sep>output_mag=tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(output_no0) 3)) -1)<line_sep>output_unit=tf.divide(output_no0 output_mag)<line_sep>z_mask=z_refined[<ellipsis> 0]<line_sep>a11=tf.boolean_mask(tf.reduce_sum(tf.square(output_unit) 3) z_mask)<line_sep>a22=tf.boolean_mask(tf.reduce_sum(tf.square(y_normal) 3) z_mask)<line_sep>a12=tf.boolean_mask(tf.reduce_sum(tf.multiply(output_unit y_normal) 3) z_mask)<line_sep>cos_angle=a12/tf.sqrt(tf.multiply(a11 a22))<line_sep>cos_angle_clipped=tf.clip_by_value(tf.where(tf.is_nan(cos_angle) -1<times>tf.ones_like(cos_angle) cos_angle) -1 1)<line_sep># MAE, using tf.acos() is numerically unstable, here use Taylor expansion of "acos" instead loss=tf.reduce_mean(3.1415926/2-cos_angle_clipped-tf.pow(cos_angle_clipped 3)/6-tf.pow(cos_angle_clipped 5)<times>3/40-tf.pow(cos_angle_clipped 7)<times>5/112-tf.pow(cos_angle_clipped 9)<times>35/1152)<line_sep><return>loss<block_end><def_stmt>calc_loss_normal2 output y_normal z_refined# gives mean angle error for given output tensor and its ref y <block_start>output_mask=tf.abs(output)<l>1e-5<line_sep>output_no0=tf.where(output_mask 1e-5<times>tf.ones_like(output) output)<line_sep>output_mag=tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(output_no0) 3)) -1)<line_sep>output_unit=tf.divide(output_no0 output_mag)<line_sep>z_mask=z_refined[<ellipsis> 0]<line_sep>a11=tf.boolean_mask(tf.reduce_sum(tf.square(output_unit) 3) z_mask)<line_sep>a22=tf.boolean_mask(tf.reduce_sum(tf.square(y_normal) 3) z_mask)<line_sep>a12=tf.boolean_mask(tf.reduce_sum(tf.multiply(output_unit y_normal) 3) z_mask)<line_sep>cos_angle=a12/(a11+0.00001)<line_sep>loss=tf.reduce_mean(tf.acos(cos_angle))<line_sep><return>loss<block_end># ***************************************************************************************************** <def_stmt>calc_loss_d_refined_mask output y z_refined<block_start>multiply=tf.constant([IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep># mask 
nonrefine mask_one=tf.where(z_refined tf.ones_like(y) 0<times>tf.ones_like(y))<line_sep>mask_one_flat=tf.reshape(mask_one [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep># y refine y_masked=tf.where(z_refined y 0<times>tf.ones_like(y))<line_sep>y_masked_flat_refined=tf.reshape(y_masked [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep>max_y=tf.reduce_max(y_masked_flat_refined 1)<line_sep>matrix_max_y=tf.transpose(tf.reshape(tf.tile(max_y multiply) [multiply[0] tf.shape(max_y)[0]]))<line_sep># normalize depth output_flat=tf.reshape(output [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep>output_flat_masked=tf.multiply(output_flat mask_one_flat)<line_sep>output_max=tf.reduce_max(output_flat_masked 1)<line_sep>matrix_max=tf.transpose(tf.reshape(tf.tile(output_max multiply) [multiply[0] tf.shape(output_max)[0]]))<line_sep>output_min=tf.reduce_min(output_flat_masked 1)<line_sep>matrix_min=tf.transpose(tf.reshape(tf.tile(output_min multiply) [multiply[0] tf.shape(output_min)[0]]))<line_sep>output_unit_flat=tf.truediv(tf.subtract(output_flat_masked matrix_min) tf.subtract(matrix_max matrix_min))<line_sep>output_unit_flat=tf.multiply(output_unit_flat matrix_max_y)<line_sep># mask refine mask_one_refined=tf.where(z_refined tf.ones_like(y) 0<times>tf.ones_like(y))<line_sep>mask_one_flat_refined=tf.reshape(mask_one_refined [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep># output refine output_unit_masked_flat_refined=tf.multiply(output_unit_flat mask_one_flat_refined)<line_sep># y refine y_masked=tf.where(z_refined y 0<times>tf.ones_like(y))<line_sep>y_masked_flat_refined=tf.reshape(y_masked [-1 IMAGE_HEIGHT<times>IMAGE_WIDTH])<line_sep>numOfPix=tf.reduce_sum(mask_one_flat_refined 1)<line_sep>d=tf.subtract(output_unit_masked_flat_refined y_masked_flat_refined)<line_sep>a1=tf.reduce_sum(tf.square(d) 1)<line_sep>a2=tf.square(tf.reduce_sum(d 1))<line_sep>cost=tf.reduce_mean(tf.truediv(a1 numOfPix)-(0.5<times>tf.truediv(a2 tf.square(numOfPix))))<line_sep><return>cost<block_end>
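# --- Illustrative sketch (added note, not part of the original file) ---
# calc_loss() above averages the squared depth error over the valid-mask pixels only.
# The same idea in plain NumPy, with made-up 2x2 depth maps and a z_r-style validity mask:
import numpy as np

pred = np.array([[1.0, 2.0], [3.0, 4.0]])
gt = np.array([[1.5, 2.0], [0.0, 3.0]])
valid = np.array([[True, True], [False, True]])

d = (pred - gt) * valid                        # zero out invalid pixels, like tf.where(z_r, ..., 0)
masked_mse = np.sum(d ** 2) / np.sum(valid)    # (0.25 + 0.0 + 1.0) / 3 ~= 0.4167
print(masked_mse)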
# -*- coding: utf-8 -*- <import_from_future_stmt> print_function division absolute_import<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>time timeit<import_stmt>six<import_stmt>pandas<import_stmt>CoolProp<as>CP<import_from_stmt>math ceil<line_sep>CP.CoolProp.set_debug_level(00)<import_from_stmt>matplotlib.backends.backend_pdf PdfPages<line_sep># all_solvers = ['PT', 'DmolarT', 'HmolarP', 'PSmolar', 'SmolarT', 'DmolarP', 'DmolarHmolar', 'DmolarSmolar', 'HmolarSmolar', 'HmolarT'] # not_implemented_solvers = ['HmolarP', 'PSmolar', 'SmolarT', 'DmolarP', 'DmolarHmolar', 'DmolarSmolar', 'HmolarSmolar', 'HmolarT'] all_solvers=['PT' 'DmolarT']<line_sep>not_implemented_solvers=[]<line_sep>no_two_phase_solvers=['PT']<line_sep>implemented_solvers=[pair<for>pair all_solvers<if>pair<not><in>not_implemented_solvers]<line_sep>param_labels=dict(Hmolar='Enthalpy [J/mol]/1000' Smolar='Entropy [J/mol/K]/1000' Umolar='Int. Ener. [J/mol]/1000' T='Temperature [K]' Dmolar='Density [mol/m3]/1000' P='Pressure [Pa]/1000')<def_stmt>split_pair pair<block_start><for_stmt>key ['Dmolar' 'Hmolar' 'Smolar' 'P' 'T' 'Umolar']<block_start><if_stmt>pair.startswith(key)<block_start><return>key pair.replace(key '')<block_end><block_end><block_end><def_stmt>split_pair_xy pair<block_start><if_stmt>pair<eq>'HmolarP'<block_start><return>'Hmolar' 'P'<block_end><elif_stmt>pair<eq>'PSmolar'<block_start><return>'Smolar' 'P'<block_end><elif_stmt>pair<eq>'PUmolar'<block_start><return>'Umolar' 'P'<block_end><elif_stmt>pair<eq>'PT'<block_start><return>'T' 'P'<block_end><elif_stmt>pair<eq>'DmolarT'<block_start><return>'Dmolar' 'T'<block_end><elif_stmt>pair<eq>'SmolarT'<block_start><return>'Smolar' 'T'<block_end><elif_stmt>pair<eq>'TUmolar'<block_start><return>'Umolar' 'T'<block_end><elif_stmt>pair<eq>'HmolarT'<block_start><return>'Hmolar' 'T'<block_end><elif_stmt>pair<eq>'DmolarP'<block_start><return>'Dmolar' 'P'<block_end><elif_stmt>pair<eq>'DmolarHmolar'<block_start><return>'Dmolar' 'Hmolar'<block_end><elif_stmt>pair<eq>'DmolarSmolar'<block_start><return>'Dmolar' 'Smolar'<block_end><elif_stmt>pair<eq>'DmolarUmolar'<block_start><return>'Dmolar' 'Umolar'<block_end><elif_stmt>pair<eq>'HmolarSmolar'<block_start><return>'Smolar' 'Hmolar'<block_end><elif_stmt>pair<eq>'SmolarUmolar'<block_start><return>'Smolar' 'Umolar'<block_end><elif_stmt>pair<eq>'HmolarUmolar'<block_start><return>'Hmolar' 'Umolar'<block_end><else_stmt><block_start><raise>ValueError(pair)<block_end><block_end>DEBUG_LEVEL=1<def_stmt>myprint level *args **kwargs<block_start><if_stmt>level<g>DEBUG_LEVEL<block_start>print(*args **kwargs)<block_end><block_end><class_stmt>ConsistencyFigure(object)<block_start><def_stmt>__init__ self fluid figsize=(15 23) backend='PCSAFT' additional_skips=[] mole_fractions=<none> p_limits_1phase=<none> T_limits_1phase=<none> NT_1phase=40 Np_1phase=40 NT_2phase=20 NQ_2phase=20<block_start>self.fluid=fluid<line_sep>self.backend=backend<line_sep>self.additional_backend='HEOS'# the PCSAFT backend does not currently have all the constants and functions for calculating the boundaries of the phase diagram print('***********************************************************************************')<line_sep>print('*************** '+backend+'::'+fluid+' ************************')<line_sep>print('***********************************************************************************')<line_sep>self.fig,self.axes=plt.subplots(nrows=ceil(len(all_solvers)/2) ncols=2 
figsize=figsize)<line_sep>self.pairs=all_solvers<line_sep>pairs_generator=iter(self.pairs)<line_sep>states=[CP.AbstractState(self.additional_backend fluid)<for>_ range(3)]<line_sep>states_pcsaft=[CP.AbstractState(backend fluid)<for>_ range(3)]<if_stmt>mole_fractions<is><not><none><block_start><for_stmt>state states<block_start>state.set_mole_fractions(mole_fractions)<block_end><for_stmt>state states_pcsaft<block_start>state.set_mole_fractions(mole_fractions)<block_end><block_end>self.axes_list=[]<if_stmt>len(self.axes.shape)<g>1<block_start><for_stmt>row self.axes<block_start><for_stmt>ax row<block_start>pair=six.next(pairs_generator)<line_sep>kwargs=dict(p_limits_1phase=p_limits_1phase T_limits_1phase=T_limits_1phase NT_1phase=NT_1phase Np_1phase=Np_1phase NT_2phase=NT_2phase NQ_2phase=NQ_2phase)<line_sep>self.axes_list.append(ConsistencyAxis(ax self pair self.fluid self.backend self.additional_backend *states *states_pcsaft **kwargs))<line_sep>ax.set_title(pair)<block_end><block_end><block_end><else_stmt><block_start><for_stmt>ax self.axes<block_start>pair=six.next(pairs_generator)<line_sep>kwargs=dict(p_limits_1phase=p_limits_1phase T_limits_1phase=T_limits_1phase NT_1phase=NT_1phase Np_1phase=Np_1phase NT_2phase=NT_2phase NQ_2phase=NQ_2phase)<line_sep>self.axes_list.append(ConsistencyAxis(ax self pair self.fluid self.backend self.additional_backend *states *states_pcsaft **kwargs))<line_sep>ax.set_title(pair)<block_end><block_end>self.calc_saturation_curves()<line_sep>self.plot_saturation_curves()<line_sep># self.calc_Tmax_curve() # self.plot_Tmax_curve() # # self.calc_melting_curve() # self.plot_melting_curve() self.tight_layout()<line_sep>self.fig.subplots_adjust(top=0.95)<line_sep>self.fig.suptitle('Consistency plots for '+self.fluid size=14)<line_sep>errors=[]<for_stmt>i,(ax pair) enumerate(zip(self.axes_list self.pairs))<block_start><if_stmt>pair<not><in>not_implemented_solvers<and>pair<not><in>additional_skips<block_start>errors.append(ax.consistency_check_singlephase())<if_stmt>pair<not><in>no_two_phase_solvers<block_start>ax.consistency_check_twophase()<block_end><block_end><else_stmt><block_start>ax.cross_out_axis()<block_end><block_end>self.errors=pandas.concat(errors sort=<true>)<block_end><def_stmt>calc_saturation_curves self<block_start>""" Calculate all the saturation curves in one shot using the state class to save computational time """<line_sep>HEOS=CP.AbstractState(self.additional_backend self.fluid)<line_sep>PCSAFT=CP.AbstractState(self.backend self.fluid)<line_sep>self.dictL,self.dictV={} {}<for_stmt>Q,dic zip([0 1] [self.dictL self.dictV])# rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], [] <block_start>rhomolar,T,p=[] [] []<for_stmt>_T np.logspace(np.log10(HEOS.keyed_output(CP.iT_triple)) np.log10(HEOS.keyed_output(CP.iT_critical)) 500)<block_start><try_stmt><block_start>PCSAFT.update(CP.QT_INPUTS Q _T)<line_sep># print('T', PCSAFT.T()) # print('p', PCSAFT.p()) # print('rhomolar', PCSAFT.rhomolar()) <if_stmt>(PCSAFT.p()<l>0)<block_start><raise>ValueError('P is negative:'+str(PCSAFT.p()))<block_end>PCSAFT.T() PCSAFT.p() PCSAFT.rhomolar()<line_sep># PCSAFT.hmolar(), PCSAFT.smolar(), PCSAFT.umolar() T.append(PCSAFT.T())<line_sep>p.append(PCSAFT.p())<line_sep>rhomolar.append(PCSAFT.rhomolar())<line_sep># hmolar.append(PCSAFT.hmolar()) # smolar.append(PCSAFT.smolar()) # umolar.append(PCSAFT.umolar()) <block_end><except_stmt>ValueError<as>VE<block_start>myprint(1 'satT error:' VE '; T:' '{T:0.16g}'.format(T=_T) 'T/Tc:' 
_T/HEOS.keyed_output(CP.iT_critical))<block_end><block_end>dic.update(dict(T=np.array(T) P=np.array(p) Dmolar=np.array(rhomolar)))<line_sep># Hmolar=np.array(hmolar), # Smolar=np.array(smolar))) # Umolar=np.array(umolar))) <block_end><block_end><def_stmt>plot_saturation_curves self<block_start><for_stmt>ax self.axes_list<block_start>ax.label_axes()<line_sep>ax.plot_saturation_curves()<block_end><block_end><def_stmt>calc_Tmax_curve self<block_start>HEOS=CP.AbstractState(self.additional_backend self.fluid)<line_sep>PCSAFT=CP.AbstractState(self.backend self.fluid)<line_sep># rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], [] rhomolar,T,p=[] [] []<for_stmt>_p np.logspace(np.log10(HEOS.keyed_output(CP.iP_min)<times>1.01) np.log10(HEOS.keyed_output(CP.iP_max)) 300)<block_start><try_stmt><block_start>PCSAFT.update(CP.PT_INPUTS _p HEOS.keyed_output(CP.iT_max))<block_end><except_stmt>ValueError<as>VE<block_start>print(1 'Tmax' _p VE)<line_sep>print('T' PCSAFT.T())<line_sep>print('p' PCSAFT.p())<line_sep>print('rhomolar' PCSAFT.rhomolar())<line_sep>myprint(1 'Tmax' _p VE)<line_sep><continue><block_end><try_stmt><block_start>T.append(PCSAFT.T())<line_sep>p.append(PCSAFT.p())<line_sep>rhomolar.append(PCSAFT.rhomolar())<line_sep># hmolar.append(PCSAFT.hmolar()) # smolar.append(PCSAFT.smolar()) # umolar.append(PCSAFT.umolar()) <block_end><except_stmt>ValueError<as>VE<block_start>myprint(1 'Tmax access' VE)<block_end><block_end>self.Tmax=dict(T=np.array(T) P=np.array(p) Dmolar=np.array(rhomolar))<line_sep># Hmolar=np.array(hmolar), # Smolar=np.array(smolar)) # Umolar=np.array(umolar)) <block_end><def_stmt>plot_Tmax_curve self<block_start><for_stmt>ax self.axes_list<block_start>ax.plot_Tmax_curve()<block_end><block_end><def_stmt>calc_melting_curve self<block_start>state=CP.AbstractState('HEOS' self.fluid)<line_sep># rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], [] rhomolar,T,p=[] [] []<line_sep># Melting line if it has it <if_stmt>state.has_melting_line()<block_start>pmelt_min=max(state.melting_line(CP.iP_min -1 -1) state.keyed_output(CP.iP_triple))<times>1.01<line_sep>pmelt_max=min(state.melting_line(CP.iP_max -1 -1) state.keyed_output(CP.iP_max))<times>0.99<for_stmt>_p np.logspace(np.log10(pmelt_min) np.log10(pmelt_max) 100)<block_start><try_stmt><block_start>Tm=state.melting_line(CP.iT CP.iP _p)<line_sep>state.update(CP.PT_INPUTS _p Tm)<line_sep>T.append(state.T())<line_sep>p.append(state.p())<line_sep>rhomolar.append(state.rhomolar())<line_sep># hmolar.append(state.hmolar()) # smolar.append(state.smolar()) # umolar.append(state.umolar()) <block_end><except_stmt>ValueError<as>VE<block_start>myprint(1 'melting' VE)<block_end><block_end><block_end>self.melt=dict(T=np.array(T) P=np.array(p) Dmolar=np.array(rhomolar))<line_sep># Hmolar=np.array(hmolar), # Smolar=np.array(smolar)) # Umolar=np.array(umolar)) <block_end><def_stmt>plot_melting_curve self<block_start><for_stmt>ax self.axes_list<block_start>ax.plot_melting_curve()<block_end><block_end><def_stmt>tight_layout self<block_start>self.fig.tight_layout()<block_end><def_stmt>add_to_pdf self pdf<block_start>""" Add this figure to the pdf instance """<line_sep>pdf.savefig(self.fig)<block_end><def_stmt>savefig self fname **kwargs<block_start>self.fig.savefig(fname **kwargs)<block_end><block_end><class_stmt>ConsistencyAxis(object)<block_start><def_stmt>__init__ self axis fig pair fluid backend additional_backend state1 state2 state3 state4 state5 state6 p_limits_1phase=<none> T_limits_1phase=<none> NT_1phase=40 Np_1phase=40 
NT_2phase=20 NQ_2phase=20<block_start>self.ax=axis<line_sep>self.fig=fig<line_sep>self.pair=pair<line_sep>self.fluid=fluid<line_sep>self.backend=backend<line_sep>self.additional_backend=additional_backend<line_sep>self.state=state1<line_sep>self.state_PT=state2<line_sep>self.state_QT=state3<line_sep>self.state_pcsaft=state4<line_sep>self.state_pcsaft_PT=state5<line_sep>self.state_pcsaft_QT=state6<line_sep>self.p_limits_1phase=p_limits_1phase<line_sep>self.T_limits_1phase=T_limits_1phase<line_sep>self.NT_1phase=NT_1phase<line_sep>self.Np_1phase=Np_1phase<line_sep>self.NQ_2phase=NQ_2phase<line_sep>self.NT_2phase=NT_2phase<line_sep># self.saturation_curves() <block_end><def_stmt>label_axes self<block_start>""" Label the axes for the given pair """<line_sep>xparam,yparam=split_pair_xy(self.pair)<line_sep>self.ax.set_xlabel(param_labels[xparam])<line_sep>self.ax.set_ylabel(param_labels[yparam])<if_stmt>xparam<in>['P' 'Dmolar']<block_start>self.ax.set_xscale('log')<block_end><if_stmt>yparam<in>['P' 'Dmolar']<block_start>self.ax.set_yscale('log')<block_end><block_end><def_stmt>plot_saturation_curves self<block_start>xparam,yparam=split_pair_xy(self.pair)<line_sep>xL=self.to_axis_units(xparam self.fig.dictL[xparam])<line_sep>yL=self.to_axis_units(yparam self.fig.dictL[yparam])<line_sep>xV=self.to_axis_units(xparam self.fig.dictV[xparam])<line_sep>yV=self.to_axis_units(yparam self.fig.dictV[yparam])<line_sep>self.ax.plot(xL yL 'k' lw=1)<line_sep>self.ax.plot(xV yV 'k' lw=1)<block_end><def_stmt>plot_Tmax_curve self<block_start>xparam,yparam=split_pair_xy(self.pair)<line_sep>x=self.to_axis_units(xparam self.fig.Tmax[xparam])<line_sep>y=self.to_axis_units(yparam self.fig.Tmax[yparam])<line_sep>self.ax.plot(x y 'r' lw=1)<block_end># !!! start here: ValueError: x and y must have same first dimension, but have shapes (0,) and (65,) <def_stmt>plot_melting_curve self<block_start>xparam,yparam=split_pair_xy(self.pair)<line_sep>x=self.to_axis_units(xparam self.fig.melt[xparam])<line_sep>y=self.to_axis_units(yparam self.fig.melt[yparam])<line_sep>self.ax.plot(x y 'b' lw=1)<block_end><def_stmt>to_axis_units self label vals<block_start>""" Convert to the units used in the plot """<if_stmt>label<in>['Hmolar' 'Smolar' 'Umolar' 'Dmolar' 'P']<block_start><return>vals/1000<block_end><elif_stmt>label<in>['T']<block_start><return>vals<block_end><else_stmt><block_start><raise>ValueError(label)<block_end><block_end><def_stmt>consistency_check_singlephase self<block_start>tic=time.time()<line_sep># Update the state given the desired set of inputs param1,param2=split_pair(self.pair)<line_sep>key1=getattr(CP 'i'+param1)<line_sep>key2=getattr(CP 'i'+param2)<line_sep>pairkey=getattr(CP self.pair+'_INPUTS')<line_sep># Get the keys and indices and values for the inputs needed xparam,yparam=split_pair_xy(self.pair)<line_sep>xkey=getattr(CP 'i'+xparam)<line_sep>ykey=getattr(CP 'i'+yparam)<line_sep>data=[]<if_stmt>self.p_limits_1phase<is><not><none># User-specified limits were provided, use them <block_start>p_min,p_max=self.p_limits_1phase<block_end><else_stmt># No user-specified limits were provided, use the defaults <block_start>p_min=self.state.keyed_output(CP.iP_min)<times>1.01<line_sep>p_max=self.state.keyed_output(CP.iP_max)<block_end><for_stmt>p np.logspace(np.log10(p_min) np.log10(p_max) self.Np_1phase)<block_start><if_stmt>self.T_limits_1phase<is><none># No user-specified limits were provided, using the defaults 
<block_start>Tmin=self.state.keyed_output(CP.iT_triple)<if_stmt>self.state.has_melting_line()<block_start><try_stmt><block_start>pmelt_min=self.state.melting_line(CP.iP_min -1 -1)<if_stmt>p<l>pmelt_min<block_start>T0=Tmin<block_end><else_stmt><block_start>T0=self.state.melting_line(CP.iT CP.iP p)<block_end><block_end><except_stmt>Exception<as>E<block_start>T0=Tmin+1.1<line_sep>data.append(dict(err=str(E) type="melting" input=p))<line_sep>myprint(1 'MeltingLine:' E)<block_end><block_end><else_stmt><block_start>T0=Tmin+1.1<block_end>Tvec=np.linspace(T0 self.state.keyed_output(CP.iT_max) self.NT_1phase)<block_end><else_stmt># Use the provided limits for T <block_start>Tvec=np.linspace(self.T_limits_1phase[0] self.T_limits_1phase[1] self.NT_1phase)<block_end><for_stmt>T Tvec<block_start><try_stmt># Update the state using PT inputs in order to calculate all the remaining inputs <block_start>self.state_pcsaft_PT.update(CP.PT_INPUTS p T)<block_end><except_stmt>ValueError<as>VE<block_start>print(self.state_pcsaft_PT.get_mole_fractions())<line_sep>print(self.state_PT.get_mole_fractions())<line_sep>data.append(dict(err=str(VE) cls="EXCEPTION" type="update" in1="P" val1=p in2="T" val2=T))<line_sep>myprint(1 'consistency' VE)<line_sep><continue><block_end>_exception=<false><line_sep>tic2=timeit.default_timer()<try_stmt><block_start>val1,val2=self.state_pcsaft_PT.keyed_output(key1) self.state_pcsaft_PT.keyed_output(key2)<line_sep>self.state_pcsaft.update(pairkey val1 val2)<line_sep>toc2=timeit.default_timer()<block_end><except_stmt>ValueError<as>VE<block_start>data.append(dict(err=str(VE) cls="EXCEPTION" type="update" in1=param1 val1=val1 in2=param2 val2=val2))<line_sep>myprint(1 'update(1p)' self.pair 'P' p 'T' T 'D' self.state_pcsaft_PT.keyed_output(CP.iDmolar) '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_PT.keyed_output(key1) self.state_pcsaft_PT.keyed_output(key2)) VE)<line_sep>_exception=<true><block_end>x=self.to_axis_units(xparam self.state_pcsaft_PT.keyed_output(xkey))<line_sep>y=self.to_axis_units(yparam self.state_pcsaft_PT.keyed_output(ykey))<if_stmt><not>_exception# Check the error on the density <block_start><if_stmt>abs(self.state_pcsaft_PT.rhomolar()/self.state_pcsaft.rhomolar()-1)<l>1e-3<and>abs(self.state_pcsaft_PT.p()/self.state_pcsaft.p()-1)<l>1e-3<and>abs(self.state_pcsaft_PT.T()-self.state_pcsaft.T())<l>1e-3<block_start>data.append(dict(cls="GOOD" x=x y=y elapsed=toc2-tic2))<if_stmt>'REFPROP'<not><in>self.backend<block_start><if_stmt>self.state_pcsaft_PT.phase()<ne>self.state_pcsaft.phase()<block_start>myprint(1 'bad phase' self.pair '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_PT.keyed_output(key1) self.state_pcsaft_PT.keyed_output(key2)) self.state_pcsaft.phase() 'instead of' self.state_pcsaft_PT.phase())<block_end><block_end><block_end><else_stmt><block_start>data.append(dict(cls="INCONSISTENT" type="update" in1=param1 val1=val1 in2=param2 val2=val2 x=x y=y))<line_sep>myprint(1 'bad' self.pair '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_PT.keyed_output(key1) self.state_pcsaft_PT.keyed_output(key2)) 'T:' self.state_pcsaft_PT.T() 'Drho:' abs(self.state_pcsaft_PT.rhomolar()/self.state_pcsaft.rhomolar()-1) abs(self.state_pcsaft_PT.p()/self.state_pcsaft.p()-1) 'DT:' 
abs(self.state_pcsaft_PT.T()-self.state_pcsaft.T()))<block_end><block_end><block_end><block_end>toc=time.time()<line_sep>df=pandas.DataFrame(data)<line_sep>bad=df[df.cls<eq>'INCONSISTENT']<line_sep>good=df[df.cls<eq>'GOOD']<line_sep>slowgood=good[good.elapsed<g>0.01]<line_sep>excep=df[df.cls<eq>'EXCEPTION']<line_sep>badphase=df[df.cls<eq>'BAD_PHASE']<line_sep>self.ax.plot(bad.x bad.y 'r+' ms=3)<line_sep>self.ax.plot(good.x good.y 'k.' ms=1)<line_sep>self.ax.plot(excep.x excep.y 'rx' ms=3)<line_sep>self.ax.plot(slowgood.x slowgood.y 'b*' ms=6)<line_sep>self.ax.plot(badphase.x badphase.y 'o' ms=3 mfc='none')<line_sep>print('1-phase took '+str(toc-tic)+' s for '+self.pair)<if_stmt>self.pair<eq>'HmolarSmolar'# plt.plot(good.elapsed) # plt.title(self.pair) # plt.show() <block_start>good.to_csv('times_water' sep=';')<line_sep># good.to_excel('times_water.xlsx') # !!! uncomment <block_end><return>df[df.cls<ne>'GOOD']<block_end><def_stmt>consistency_check_twophase self<block_start>tic=time.time()<line_sep>state=self.state<try_stmt><block_start><if_stmt>state_pcsaft.fluid_param_string('pure')<eq>'false'<block_start>print("Not a pure-fluid, skipping two-phase evaluation")<line_sep><return><block_end><block_end><except_stmt><block_start><pass><block_end># Update the state given the desired set of inputs param1,param2=split_pair(self.pair)<line_sep>key1=getattr(CP 'i'+param1)<line_sep>key2=getattr(CP 'i'+param2)<line_sep>pairkey=getattr(CP self.pair+'_INPUTS')<line_sep># Get the keys and indices and values for the inputs needed xparam,yparam=split_pair_xy(self.pair)<line_sep>xkey=getattr(CP 'i'+xparam)<line_sep>ykey=getattr(CP 'i'+yparam)<line_sep>data=[]<for_stmt>q np.linspace(0 1 self.NQ_2phase)<block_start>Tmin=state.keyed_output(CP.iT_triple)+1<for_stmt>T np.linspace(Tmin state.keyed_output(CP.iT_critical)-1 self.NT_2phase)<block_start><try_stmt># Update the state using QT inputs in order to calculate all the remaining inputs <block_start>self.state_pcsaft_QT.update(CP.QT_INPUTS q T)<block_end><except_stmt>ValueError<as>VE<block_start>data.append(dict(err=str(VE) cls="EXCEPTION" type="update" in1="Q" val1=q in2="T" val2=T))<line_sep>myprint(1 'consistency' VE)<line_sep><continue><block_end>_exception=<false><try_stmt><block_start>val1,val2=self.state_pcsaft_QT.keyed_output(key1) self.state_pcsaft_QT.keyed_output(key2)<line_sep>self.state_pcsaft.update(pairkey val1 val2)<block_end><except_stmt>ValueError<as>VE<block_start>data.append(dict(err=str(VE) cls="EXCEPTION" type="update" in1=param1 val1=val1 in2=param2 val2=val2))<line_sep>myprint(1 'update_QT' T q)<line_sep>myprint(1 'update' param1 self.state_pcsaft_QT.keyed_output(key1) param2 self.state_pcsaft_QT.keyed_output(key2) VE)<line_sep>_exception=<true><block_end>x=self.to_axis_units(xparam self.state_pcsaft_QT.keyed_output(xkey))<line_sep>y=self.to_axis_units(yparam self.state_pcsaft_QT.keyed_output(ykey))<if_stmt><not>_exception# Check the error on the density <block_start><if_stmt>abs(self.state_pcsaft_QT.rhomolar()/self.state_pcsaft.rhomolar()-1)<l>1e-3<and>abs(self.state_pcsaft_QT.p()/self.state_pcsaft.p()-1)<l>1e-3<and>abs(self.state_pcsaft_QT.T()-self.state_pcsaft.T())<l>1e-3<block_start>data.append(dict(cls="GOOD" x=x y=y))<if_stmt>'REFPROP'<not><in>self.backend<block_start><if_stmt>self.state_pcsaft_QT.phase()<ne>self.state_pcsaft.phase()<block_start>myprint(1 'bad phase (2phase)' self.pair '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_QT.keyed_output(key1) self.state_pcsaft_QT.keyed_output(key2)) self.state_pcsaft.phase() 
'instead of' self.state_pcsaft_QT.phase())<block_end><block_end><block_end><else_stmt><block_start>myprint(1 'Q' q)<line_sep>myprint(1 'bad(2phase)' self.pair '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_QT.keyed_output(key1) self.state_pcsaft_QT.keyed_output(key2)) 'pnew:' self.state_pcsaft.p() 'pold:' self.state_pcsaft_QT.p() 'Tnew:' self.state_pcsaft.T() 'T:' self.state_pcsaft_QT.T() 'Drho:' abs(self.state_pcsaft_QT.rhomolar()/self.state_pcsaft.rhomolar()-1) 'DP' abs(self.state_pcsaft_QT.p()/self.state_pcsaft.p()-1) 'DT:' abs(self.state_pcsaft_QT.T()-self.state_pcsaft.T()))<line_sep>data.append(dict(cls="INCONSISTENT" type="update" in1=param1 val1=val1 in2=param2 val2=val2 x=x y=y))<block_end><block_end><block_end><block_end>toc=time.time()<line_sep>df=pandas.DataFrame(data)<line_sep>bad=df[df.cls<eq>'INCONSISTENT']<line_sep>good=df[df.cls<eq>'GOOD']<line_sep>excep=df[df.cls<eq>'EXCEPTION']<line_sep>badphase=df[df.cls<eq>'BAD_PHASE']<line_sep>self.ax.plot(bad.x bad.y 'r+' ms=3)<line_sep>self.ax.plot(good.x good.y 'k.' ms=1)<line_sep>self.ax.plot(excep.x excep.y 'rx' ms=3)<line_sep>self.ax.plot(badphase.x badphase.y 'o' ms=3 mfc='none')<line_sep>print('2-phase took '+str(toc-tic)+' s for '+self.pair)<block_end><def_stmt>cross_out_axis self<block_start>xlims=self.ax.get_xlim()<line_sep>ylims=self.ax.get_ylim()<line_sep>self.ax.plot([xlims[0] xlims[1]] [ylims[0] ylims[1]] lw=3 c='r')<line_sep>self.ax.plot([xlims[0] xlims[1]] [ylims[1] ylims[0]] lw=3 c='r')<line_sep>xparam,yparam=split_pair_xy(self.pair)<line_sep>x=0.5<times>xlims[0]+0.5<times>xlims[1]<line_sep>y=0.5<times>ylims[0]+0.5<times>ylims[1]<if_stmt>xparam<in>['P' 'Dmolar']<block_start>x=(xlims[0]<times>xlims[1])<power>0.5<block_end><if_stmt>yparam<in>['P' 'Dmolar']<block_start>y=(ylims[0]<times>ylims[1])<power>0.5<block_end>self.ax.text(x y 'Not\nImplemented' ha='center' va='center' bbox=dict(fc='white'))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>PVT=PdfPages('Consistency.pdf')<line_sep>CP.CoolProp.set_debug_level(0)<line_sep>open('timelog.txt' 'w')<with_stmt>open('timelog.txt' 'a+' buffering=1)<as>fp<block_start><for_stmt>fluid ['METHANOL']# CP.__fluids__: <block_start>tic=timeit.default_timer()<line_sep>skips=['DmolarHmolar' 'DmolarSmolar' 'DmolarUmolar' 'HmolarSmolar']<line_sep>skips=[]<line_sep>ff=ConsistencyFigure(fluid backend='PCSAFT' additional_skips=skips)# , NT_1phase = 10, Np_1phase = 10, NT_2phase = 100, NQ_2phase = 0) ff.to_csv('Errors'+fluid sep=';')<line_sep># ff.errors.to_excel('Errors' + fluid + '.xlsx') # !!! uncomment toc=timeit.default_timer()<line_sep>print('Time to build:' toc-tic 'seconds')<line_sep>ff.add_to_pdf(PVT)<line_sep>ff.savefig(fluid+'.png')<line_sep>ff.savefig(fluid+'.pdf')<line_sep>plt.close()<line_sep>fp.write('Time to build: {0} seconds for {1}\n'.format(toc-tic fluid))<del_stmt>ff<block_end><block_end>PVT.close()<block_end>
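# --- Illustrative sketch (added note, not part of the original script) ---
# The consistency check above is a round trip: solve the EOS with PT inputs, feed two of the
# computed outputs back in through another input pair, then compare the resulting states.
# A minimal standalone version of that idea using the HEOS backend and water, which is assumed
# to be available in the installed CoolProp build:
import CoolProp as CP

st_pt = CP.AbstractState('HEOS', 'Water')
st_pt.update(CP.PT_INPUTS, 101325.0, 300.0)                     # reference state from P, T

st_rt = CP.AbstractState('HEOS', 'Water')
st_rt.update(CP.DmolarT_INPUTS, st_pt.rhomolar(), st_pt.T())    # round trip via Dmolar, T

rel_err = abs(st_rt.p() / st_pt.p() - 1)
print('consistent' if rel_err < 1e-3 else 'INCONSISTENT', rel_err)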
<import_from_stmt>.deepfashion_dataset DeepFashionDataset<line_sep>__all__=['DeepFashionDataset']<line_sep>
# Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>armi context<import_from_stmt>armi.cli.entryPoint EntryPoint<class_stmt>CleanTemps(EntryPoint)<block_start>""" Delete all temp directories created by any ARMI run. Useful for occasionally cleaning temporary dirs from crashed runs. .. warning:: This will break any ongoing runs. """<line_sep>name="clean-temps"<def_stmt>invoke self<block_start>context.cleanTempDirs(olderThanDays=0)<block_end><block_end>
<import_from_stmt>model_zoo.logger get_logger<import_from_stmt>model_zoo.utils load_config load_model find_model_class<import_from_stmt>absl flags<line_sep># ========== Checkpoint ================ flags.DEFINE_string('checkpoint_dir' 'checkpoints' help='Data source dir' allow_override=<true>)<line_sep>flags.DEFINE_string('checkpoint_name' 'model.ckpt' help='Model name' allow_override=<true>)<line_sep># ========== Log System ================ flags.DEFINE_bool('log_enable' <true> help='Whether to enable Log System' allow_override=<true>)<line_sep>flags.DEFINE_string('log_level' 'DEBUG' help='Log Level' allow_override=<true>)<line_sep>flags.DEFINE_string('log_rotation' '100MB' help='Log file rotation' allow_override=<true>)<line_sep>flags.DEFINE_string('log_retention' <none> help='Log file retention' allow_override=<true>)<line_sep>flags.DEFINE_string('log_format' '{time} - {level} - {module} - {file} - {message}' help='Log record format' allow_override=<true>)<line_sep>flags.DEFINE_string('log_folder' './logs/' help='Folder of log file' allow_override=<true>)<line_sep>flags.DEFINE_string('log_file' 'evaluate.log' help='Name of log file' allow_override=<true>)<line_sep>flags.DEFINE_string('log_path' '' help='File path of log file' allow_override=<true>)<class_stmt>BaseEvaluater(object)<block_start>""" Base Evaluater, you need to specify """<def_stmt>__init__ self<block_start>""" you need to define model_class in your Inferer """<line_sep>self.config=flags.FLAGS.flag_values_dict()<line_sep># get logger logger=get_logger(self.config)<line_sep>self.logger=logger<block_end><def_stmt>data self<block_start>""" you need to implement this method :return: """<line_sep><raise>NotImplementedError<block_end><def_stmt>run self **kwargs<block_start>""" start inferring :return: """<line_sep># prepare data self.eval_data=self.data()<line_sep># split data x_eval,y_eval=self.eval_data<line_sep># init configs from checkpoints json file and flags config=load_config(self.config)<line_sep># init model class model_class_name,model_file_name=config.get('model_class_name') config.get('model_file_name')<line_sep>self.model_class=find_model_class(model_class_name model_file_name)<line_sep># init model model=self.model_class(config=config)<line_sep>model.logger=self.logger<line_sep>self.logger.info(f'initialize model logger {model.logger} of {model}')<line_sep># restore model load_model(model self.config.get('checkpoint_dir') self.config.get('checkpoint_name'))<line_sep># evaluate <return>model.evaluate(x_eval y_eval **kwargs)<block_end><block_end>
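# --- Illustrative sketch (added note, not part of the original module) ---
# BaseEvaluater.data() is abstract; a subclass only needs to return the (x_eval, y_eval) pair.
# The class name and data below are made-up stand-ins, not part of model_zoo.
import numpy as np

class MyEvaluater(BaseEvaluater):
    def data(self):
        # load or build the held-out set here; random arrays are used as placeholders
        x_eval = np.random.rand(100, 10)
        y_eval = np.random.randint(0, 2, size=(100,))
        return x_eval, y_eval

# MyEvaluater().run() would then restore the checkpoint named by the flags defined above
# and return model.evaluate(x_eval, y_eval).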
# pass in a sentence, pass out it's features <import_stmt>nltk<import_stmt>pandas<as>pd<import_stmt>sys<import_stmt>hashlib<import_stmt>re<import_stmt>string<import_stmt>itertools<import_from_stmt>nltk word_tokenize<import_from_stmt>nltk.corpus stopwords<import_stmt>logging<import_stmt>logger_config<line_sep>log=logging.getLogger(__name__)<line_sep>log.info("Entered module: %s"%__name__)<line_sep>lemma=nltk.wordnet.WordNetLemmatizer()<line_sep>sno=nltk.stem.SnowballStemmer("english")<line_sep>line=["xxx" "Oracle 12.2 will be released for on-premises users on 15 March 2017" 0 "S" ]<line_sep>pos=[]# list of PartsOfSpeech output=""# comma separated string header=""# string for describing features header VerbCombos=["VB" "VBD" "VBG" "VBN" "VBP" "VBZ" "WDT" "WP" "WP$" "WRB" "MD"]<line_sep>questionTriples=["CD-VB-VBN" "MD-PRP-VB" "MD-VB-CD" "NN-IN-DT" "PRP-VB-PRP" "PRP-WP-NNP" "VB-CD-VB" "VB-PRP-WP" "VBZ-DT-NN" "WP-VBZ-DT" "WP-VBZ-NNP" "WRB-MD-VB" ]<line_sep>statementTriples=["DT-JJ-NN" "DT-NN-VBZ" "DT-NNP-NNP" "IN-DT-NN" "IN-NN-NNS" "MD-VB-VBN" "NNP-IN-NNP" "NNP-NNP-NNP" "NNP-VBZ-DT" "NNP-VBZ-NNP" "NNS-IN-DT" "VB-VBN-IN" "VBZ-DT-JJ" ]<line_sep>startTuples=["NNS-DT" "WP-VBZ" "WRB-MD"]<line_sep>endTuples=["IN-NN" "VB-VBN" "VBZ-NNP"]<line_sep>"""Because python dict's return key-vals in random order, provide ordered list to pass to ML models"""<line_sep>feature_keys=["id" "wordCount" "stemmedCount" "stemmedEndNN" "CD" "NN" "NNP" "NNPS" "NNS" "PRP" "VBG" "VBZ" "startTuple0" "endTuple0" "endTuple1" "endTuple2" "verbBeforeNoun" "qMark" "qVerbCombo" "qTripleScore" "sTripleScore" "class" ]<line_sep>@logger_config.logger<def_stmt>strip_sentence sentence<block_start>sentence=sentence.strip(",")<line_sep>sentence="".join(filter(<lambda>x:x<in>string.printable sentence))<line_sep># strip out non-alpha-numerix sentence=sentence.translate(str.maketrans("" "" string.punctuation))<line_sep># strip punctuation <return>sentence<block_end>@logger_config.logger<def_stmt>exists_pair_combos comboCheckList sentence<block_start>pos=get_pos(sentence)<line_sep>tag_string="-".join([i[1]<for>i pos])<line_sep>combo_list=[]<for_stmt>pair itertools.permutations(comboCheckList 2)<block_start><if_stmt>pair[0]<eq>"MD"# Kludge - strip off leading MD <block_start>pair=["" ""]<block_end>combo_list.append("-".join(pair))<block_end><if_stmt>any(code<in>tag_string<for>code combo_list)<block_start><return>1<block_end><else_stmt><block_start><return>0<block_end><block_end>@logger_config.logger# Parts Of Speech <def_stmt>get_pos sentence<block_start>sentenceParsed=word_tokenize(sentence)<line_sep><return>nltk.pos_tag(sentenceParsed)<block_end>@logger_config.logger# Count Q-Marks <def_stmt>count_qmark sentence<block_start><return>sentence.count("?")<block_end>@logger_config.logger# Count a specific POS-Type # VBG = count_POSType(pos,'VBG') <def_stmt>count_POSType pos ptype<block_start>tags=[i[1]<for>i pos]<line_sep><return>tags.count(ptype)<line_sep># if ptype in tags: # VBG = 1 # return(VBG) <block_end>@logger_config.logger# Does Verb occur before first Noun <def_stmt>exists_vb_before_nn pos<block_start>pos_tags=[i[1]<for>i pos]<line_sep># Strip the Verbs to all just "V" pos_tags=[re.sub(r"V.*" "V" str)<for>str pos_tags]<line_sep># Strip the Nouns to all just "NN" pos_tags=[re.sub(r"NN.*" "NN" str)<for>str pos_tags]<line_sep>vi=99<line_sep>ni=99<line_sep>mi=99<line_sep># Get first NN index <if_stmt>"NN"<in>pos_tags<block_start>ni=pos_tags.index("NN")<block_end># Get first V index 
<if_stmt>"V"<in>pos_tags<block_start>vi=pos_tags.index("V")<block_end># get Modal Index <if_stmt>"MD"<in>pos_tags<block_start>mi=pos_tags.index("MD")<block_end><if_stmt>vi<l>ni<or>mi<l>ni<block_start><return>1<block_end><else_stmt><block_start><return>0<block_end><block_end>@logger_config.logger# Stemmed sentence ends in "NN-NN"? <def_stmt>exists_stemmed_end_NN stemmed<block_start>stemmedEndNN=0<line_sep>stemmed_end=get_first_last_tuples(" ".join(stemmed))[1]<if_stmt>stemmed_end<eq>"NN-NN"<block_start>stemmedEndNN=1<block_end><return>stemmedEndNN<block_end>@logger_config.logger# Go through the predefined list of start-tuples, 1 / 0 if given startTuple occurs in the list <def_stmt>exists_startTuple startTuple<block_start>exists_startTuples=[]<for_stmt>tstring startTuples# startTuples defined as global var <block_start><if_stmt>startTuple<in>tstring<block_start>exists_startTuples.append(1)<block_end><else_stmt><block_start>exists_startTuples.append(0)<block_end><return>exists_startTuples<block_end><block_end>@logger_config.logger# Go through the predefined list of end-tuples, 1 / 0 if given Tuple occurs in the list <def_stmt>exists_endTuple endTuple<block_start>exists_endTuples=[]<for_stmt>tstring endTuples# endTuples defined as global var <block_start><if_stmt>endTuple<in>tstring<block_start>exists_endTuples.append(1)<block_end><else_stmt><block_start>exists_endTuples.append(0)<block_end><block_end><return>exists_endTuples<block_end>@logger_config.logger# loop round list of triples and construct a list of binary 1/0 vals if triples occur in list <def_stmt>exists_triples triples tripleSet<block_start>exists=[]<for_stmt>tstring tripleSet<block_start><if_stmt>tstring<in>triples<block_start>exists.append(1)<block_end><else_stmt><block_start>exists.append(0)<block_end><block_end><return>exists<block_end>@logger_config.logger# Get a sentence and spit out the POS triples <def_stmt>get_triples pos<block_start>list_of_triple_strings=[]<line_sep>pos=[i[1]<for>i pos]# extract the 2nd element of the POS tuples in list n=len(pos)<if_stmt>n<g>2# need to have three items <block_start><for_stmt>i range(0 n-2)<block_start>t="-".join(pos[i:i+3]# noqa: E203 )<line_sep># pull out 3 list item from counter, convert to string list_of_triple_strings.append(t)<block_end><block_end><return>list_of_triple_strings<block_end>@logger_config.logger<def_stmt>get_first_last_tuples sentence<block_start>first_last_tuples=[]<line_sep>sentenceParsed=word_tokenize(sentence)<line_sep>pos=nltk.pos_tag(sentenceParsed)# Parts Of Speech pos=[i[1]<for>i pos]# extract the 2nd element of the POS tuples in list n=len(pos)<line_sep>first=""<line_sep>last=""<if_stmt>n<g>1# need to have three items <block_start>first="-".join(pos[0:2])# pull out first 2 list items last="-".join(pos[-2:])<block_end># pull out last 2 list items first_last_tuples=[first last]<line_sep><return>first_last_tuples<block_end>@logger_config.logger<def_stmt>lemmatize sentence<block_start>""" pass in a sentence as a string, return just core text that has been "lematised" stop words are removed - could effect ability to detect if this is a question or answer - depends on import lemma = nltk.wordnet.WordNetLemmatizer() and from nltk.corpus import stopwords """<line_sep>stop_words=set(stopwords.words("english"))<line_sep>word_tokens=word_tokenize(sentence)<line_sep>filtered_sentence=[]<for_stmt>w word_tokens<block_start><if_stmt>w<not><in>stop_words<block_start>filtered_sentence.append(w.lower())# also set lowercase <block_end><block_end>lem=[]<for_stmt>w 
filtered_sentence<block_start>lem.append(lemma.lemmatize(w))<block_end><return>lem<block_end>@logger_config.logger<def_stmt>stematize sentence<block_start>""" pass in a sentence as a string, return just core text stemmed stop words are removed - could effect ability to detect if this is a question or answer - depends on import sno = nltk.stem.SnowballStemmer('english') and from nltk.corpus import stopwords """<line_sep>stop_words=set(stopwords.words("english"))<line_sep>word_tokens=word_tokenize(sentence)<line_sep>filtered_sentence=[]<for_stmt>w word_tokens<block_start><if_stmt>w<not><in>stop_words<block_start>filtered_sentence.append(w)<block_end><block_end>stemmed=[]<for_stmt>w filtered_sentence<block_start>stemmed.append(sno.stem(w))<block_end><return>stemmed<block_end>######################################################################### # A wrapper function to put it all together - build a csv line to return # A header string is also returned for optional use <def_stmt>get_string id sentence c="X"<block_start>header,output="" ""<line_sep>pos=get_pos(sentence)<line_sep>qMark=count_qmark(sentence)# count Qmarks before stripping punctuation sentence=strip_sentence(sentence)<line_sep># lemmed = lemmatize(sentence) stemmed=stematize(sentence)<line_sep>wordCount=len(sentence.split())<line_sep>stemmedCount=len(stemmed)<line_sep>qVerbCombo=exists_pair_combos(VerbCombos sentence)<line_sep>verbBeforeNoun=exists_vb_before_nn(pos)<line_sep>output=(id+","+str(wordCount)+","+str(stemmedCount)+","+str(qVerbCombo)+","+str(qMark)+","+str(verbBeforeNoun))<line_sep>header=header+"id,wordCount,stemmedCount,qVerbCombo,qMark,verbBeforeNoun"<line_sep># list of POS-TYPES to count , generate a list of counts in the CSV line <for_stmt>ptype ["VBG" "VBZ" "NNP" "NN" "NNS" "NNPS" "PRP" "CD"]<block_start>output=output+","+str(count_POSType(pos ptype))<line_sep>header=header+","+ptype<block_end>output=output+","+str(exists_stemmed_end_NN(stemmed))<line_sep>header=header+",StemmedEndNN,"<line_sep># get Start Tuples and End Tuples Features ## startTuple,endTuple=get_first_last_tuples(sentence)<line_sep>list1=exists_startTuple(startTuple)# list [1/0] for exists / not exists output=output+","+",".join(str(i)<for>i list1)<for_stmt>i range(0 len(list1))<block_start>header=header+"startTuple"+str(i+1)+","<block_end>list1=exists_endTuple(endTuple)# list [1/0] for exists / not exists output=output+","+",".join(str(i)<for>i list1)<for_stmt>i range(0 len(list1))<block_start>header=header+"endTuple"+str(i+1)+","<block_end># look for special Triple Combinations ## triples=get_triples(pos)# all the triple sequences in the sentence POS list list1=exists_triples(triples questionTriples)<line_sep>total=sum(list1)<line_sep>output=output+","+str(total)<line_sep>header=header+"qTripleScore"+","<line_sep>list1=exists_triples(triples statementTriples)<line_sep>total=sum(list1)<line_sep>output=output+","+str(total)<line_sep>header=header+"sTripleScore"+","<line_sep>output=output+","+c# Class Type on end header=header+"class"<line_sep><return>output header<block_end># End of Get String wrapper @logger_config.logger# Build a dictionary of features <def_stmt>features_dict id sentence c="X"<block_start>features={}<line_sep>pos=get_pos(sentence)<line_sep>features["id"]=id<line_sep>features["qMark"]=count_qmark(sentence)<line_sep># count Qmarks before stripping punctuation 
sentence=strip_sentence(sentence)<line_sep>stemmed=stematize(sentence)<line_sep>startTuple,endTuple=get_first_last_tuples(sentence)<line_sep>features["wordCount"]=len(sentence.split())<line_sep>features["stemmedCount"]=len(stemmed)<line_sep>features["qVerbCombo"]=exists_pair_combos(VerbCombos sentence)<line_sep>features["verbBeforeNoun"]=exists_vb_before_nn(pos)<for_stmt>ptype ["VBG" "VBZ" "NNP" "NN" "NNS" "NNPS" "PRP" "CD"]<block_start>features[ptype]=count_POSType(pos ptype)<block_end>features["stemmedEndNN"]=exists_stemmed_end_NN(stemmed)<line_sep>list1=exists_startTuple(startTuple)# list [1/0] for exists / not exists <for_stmt>i range(0 len(list1))<block_start>features["startTuple"+str(i)]=list1[i]<block_end>list1=exists_endTuple(endTuple)# list [1/0] for exists / not exists <for_stmt>i range(0 len(list1))<block_start>features["endTuple"+str(i)]=list1[i]<block_end># look for special Triple Combinations ## triples=get_triples(pos)# all the triple sequences in the sentence POS list list1=exists_triples(triples questionTriples)<line_sep># a list of 1/0 for hits on this triple-set features["qTripleScore"]=sum(list1)<line_sep># add all the triple matches up to get a score list1=exists_triples(triples statementTriples)<line_sep># Do same check for the Statement t-set features["sTripleScore"]=sum(list1)<line_sep># add all the triple matches up to get a score features["class"]=c# Class Type on end <return>features<block_end>@logger_config.logger# pass in dict, get back series <def_stmt>features_series features_dict<block_start>values=[]<for_stmt>key feature_keys<block_start>values.append(features_dict[key])<block_end>features_series=pd.Series(values)<line_sep><return>features_series<block_end># MAIN ## <if_stmt>__name__<eq>"__main__"<block_start>"""ID, WordCount, StemmedCount, Qmark, VBG, StemmedEnd, StartTuples, EndTuples, QuestionTriples, StatementTriples, Class [1/0] [NN-NN?] [3 x binary] [3 x binary] [10 x binary] [10 x binary]"""<line_sep>logging.debug("Starting...")<line_sep>c="X"# Dummy class header=""<line_sep>output=""<if_stmt>len(sys.argv)<g>1<block_start>sentence=sys.argv[1]<block_end><else_stmt><block_start>sentence=line[1]<block_end>id=hashlib.md5(str(sentence).encode("utf-8")).hexdigest()[:16]<line_sep>features=features_dict(id sentence c)<line_sep>pos=get_pos(sentence)# NLTK Parts Of Speech, duplicated just for the printout logging.debug(pos)<line_sep>logging.debug(features)<for_stmt>key,value features.items()<block_start>logging.debug(key value)<block_end># header string <for_stmt>key,value features.items()<block_start>header=header+", "+key# keys come out in a random order output=output+", "+str(value)<block_end>header=header[1:]# strip the first ","" off output=output[1:]# strip the first ","" off logging.debug("HEADER:" header)<line_sep>logging.debug("VALUES:" output)<block_end>
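# --- Illustrative usage sketch (added note, not part of the original module) ---
# Typical use of the helpers above: turn one sentence into an ordered feature row.
# feature_keys fixes the column order, since features_dict() returns an unordered dict.
# Assumes the module above (and its NLTK data) is in scope; the sentence is a made-up example.
import hashlib
import pandas as pd

sentence = "Will Oracle 12.2 be released in March?"
sid = hashlib.md5(sentence.encode("utf-8")).hexdigest()[:16]

row = features_series(features_dict(sid, sentence, c="Q"))
frame = pd.DataFrame([row.values], columns=feature_keys)
print(frame[["wordCount", "qMark", "verbBeforeNoun", "class"]])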
<import_stmt>os<import_stmt>os.path<import_stmt>cv2<import_from_stmt>Katna.video Video<import_stmt>multiprocessing<import_stmt>Katna.config<as>app_config<line_sep># change these paths # usually autoflip build is located here : /mediapipe/repo/bazel-build/mediapipe/examples/desktop/autoflip # usually mediapipe model is located here : /mediapipe/repo/mediapipe/models autoflip_build_path="/path/to/autoflip/build"<line_sep>autoflip_model_path="/path/to/mediapipe/models"<line_sep># output aspect ratio aspect_ratio="9:16"<line_sep># get the current configuration conf=app_config.MediaPipe.AutoFlip.get_conf()<line_sep># set True for features which are required in output conf["ENFORCE_FEATURES"]={"FACE_CORE_LANDMARKS":<false> "FACE_ALL_LANDMARKS":<false> "FACE_FULL":<false> "HUMAN":<false> "PET":<false> "CAR":<false> "OBJECT":<false>}<line_sep># % stabalization threshold conf["STABALIZATION_THRESHOLD"]=0.5<line_sep># opacity of blur area conf["BLUR_AREA_OPACITY"]=0.6<def_stmt>main_folder <block_start>dir_path=file_path=os.path.join("." "tests" "data")<line_sep># will create a resize_result dir inside data folder and dump videos there abs_dir_path_output=os.path.join("." "tests" "data" "resize_results")<line_sep>vd=Video(autoflip_build_path autoflip_model_path)<line_sep># update configuration app_config.MediaPipe.AutoFlip.set_conf(conf)<try_stmt><block_start>vd.resize_video_from_dir(dir_path=dir_path abs_dir_path_output=abs_dir_path_output aspect_ratio=aspect_ratio)<block_end><except_stmt>Exception<as>e<block_start><raise>e<block_end>print(f"output resized video dir path = {abs_dir_path_output}")<block_end><def_stmt>main_single_video # resize the pos_video.mp4 in same directory with na,e pos_video_resize.mp4 <block_start>abs_file_path_output=os.path.join("." "tests" "data" "pos_video_resize.mp4")<line_sep>file_path=os.path.join("." "tests" "data" "pos_video.mp4")<line_sep>vd=Video(autoflip_build_path autoflip_model_path)<line_sep># update configuration app_config.MediaPipe.AutoFlip.set_conf(conf)<try_stmt><block_start>vd.resize_video(file_path=file_path abs_file_path_output=abs_file_path_output aspect_ratio=aspect_ratio)<block_end><except_stmt>Exception<as>e<block_start><raise>e<block_end>print(f"output resized video file path = {abs_file_path_output}")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main_single_video()<line_sep># uncomment this to run on a folder # main_folder() <block_end>
<import_stmt>json time<import_from_stmt>threading Thread<import_from_stmt>websocket create_connection WebSocketConnectionClosedException<def_stmt>main <block_start>ws=<none><line_sep>thread=<none><line_sep>thread_running=<false><line_sep>thread_keepalive=<none><def_stmt>websocket_thread <block_start><global>ws<line_sep>ws=create_connection("wss://ws-feed.pro.coinbase.com")<line_sep>ws.send(json.dumps({"type":"subscribe" "product_ids":['BTC-USD'] "channels":["matches"] }))<line_sep>thread_keepalive.start()<while_stmt><not>thread_running<block_start><try_stmt><block_start>data=ws.recv()<if_stmt>data<ne>""<block_start>msg=json.loads(data)<block_end><else_stmt><block_start>msg={}<block_end><block_end><except_stmt>ValueError<as>e<block_start>print(e)<line_sep>print("{} - data: {}".format(e data))<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>print("{} - data: {}".format(e data))<block_end><else_stmt><block_start><if_stmt>"result"<not><in>msg<block_start>print(msg)<block_end><block_end><block_end><try_stmt><block_start><if_stmt>ws<block_start>ws.close()<block_end><block_end><except_stmt>WebSocketConnectionClosedException<block_start><pass><block_end><finally_stmt><block_start>thread_keepalive.join()<block_end><block_end><def_stmt>websocket_keepalive interval=30<block_start><global>ws<while_stmt>ws.connected<block_start>ws.ping("keepalive")<line_sep>time.sleep(interval)<block_end><block_end>thread=Thread(target=websocket_thread)<line_sep>thread_keepalive=Thread(target=websocket_keepalive)<line_sep>thread.start()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>os<import_stmt>sys<import_from_stmt>transformers ModelCard AutoTokenizer BertForTokenClassification BertForSequenceClassification TokenClassificationPipeline TextClassificationPipeline<if_stmt>len(sys.argv)<l>4<block_start>print('usage: "python save-model.py basename name type task" where "basename" is the original model name ("bert-base-dutch-cased"), "name" is the dir name in "output" and type is "token" or "seq"')<line_sep>exit(1)<block_end>base_name=sys.argv[1]<line_sep>name=sys.argv[2]<line_sep>typ=sys.argv[3]<if_stmt>typ<not><in>['token' 'seq']<block_start>print('type must be token or seq')<line_sep>exit(1)<block_end>src_path=os.path.join('output' name 'model')<if_stmt><not>os.path.exists(src_path)<block_start>print(src_path+' does not exist')<line_sep>exit(1)<block_end>name=base_name+'-finetuned-'+'-'.join(name.split('-')[:-1])<line_sep>print(name)<line_sep>dst_path=f'models/{name}'<line_sep>os.makedirs(dst_path exist_ok=<true>)<line_sep># Load model model=BertForTokenClassification.from_pretrained(src_path)<if>typ<eq>'token'<else>BertForSequenceClassification.from_pretrained(src_path)<line_sep>tokenizer=AutoTokenizer.from_pretrained(base_name)<line_sep>modelcard=ModelCard(model_details="""This model does not have a specific model card yet. You can possibly find more information about model comparison and labels at [the Github page](https://github.com/wietsedv/bertje).""")<line_sep># Save pipeline pipeline=TokenClassificationPipeline<if>typ<eq>'token'<else>TextClassificationPipeline<line_sep>pipe=pipeline(model tokenizer modelcard=modelcard)<line_sep>pipe.save_pretrained(dst_path)<line_sep>
'''OpenGL extension ARB.conditional_render_inverted This module customises the behaviour of the OpenGL.raw.GL.ARB.conditional_render_inverted to provide a more Python-friendly API Overview (from the spec) This extension adds new modes to BeginConditionalRender which invert the condition used to determine whether to draw or not. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ARB/conditional_render_inverted.txt '''<import_from_stmt>OpenGL platform constant arrays<import_from_stmt>OpenGL extensions wrapper<import_stmt>ctypes<import_from_stmt>OpenGL.raw.GL _types _glgets<import_from_stmt>OpenGL.raw.GL.ARB.conditional_render_inverted *<import_from_stmt>OpenGL.raw.GL.ARB.conditional_render_inverted _EXTENSION_NAME<def_stmt>glInitConditionalRenderInvertedARB <block_start>'''Return boolean indicating whether this extension is available'''<import_from_stmt>OpenGL extensions<line_sep><return>extensions.hasGLExtension(_EXTENSION_NAME)<block_end>### END AUTOGENERATED SECTION
# pylint: skip-file # pylint: disable=too-many-instance-attributes <class_stmt>GcloudComputeZones(GcloudCLI)<block_start>''' Class to wrap the gcloud compute zones command'''<line_sep># pylint allows 5 # pylint: disable=too-many-arguments <def_stmt>__init__ self region=<none> verbose=<false><block_start>''' Constructor for gcloud resource '''<line_sep>super(GcloudComputeZones self).__init__()<line_sep>self._region=region<line_sep>self.verbose=verbose<block_end>@property<def_stmt>region self<block_start>'''property for region'''<line_sep><return>self._region<block_end><def_stmt>list_zones self<block_start>'''return a list of zones'''<line_sep>results=self._list_zones()<if_stmt>results['returncode']<eq>0<and>self.region<block_start>zones=[]<for_stmt>zone results['results']<block_start><if_stmt>self.region<eq>zone['region']<block_start>zones.append(zone)<block_end><block_end>results['results']=zones<block_end><return>results<block_end><block_end>
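# --- Illustrative usage sketch (added note, not part of the original class) ---
# list_zones() returns the dict produced by the inherited _list_zones() call and, when a region
# was given, keeps only zones whose 'region' field matches. Hypothetical usage, assuming the
# GcloudCLI base class from the surrounding generated library is importable:
zones = GcloudComputeZones(region='us-central1')
result = zones.list_zones()
if result['returncode'] == 0:
    for zone in result['results']:
        print(zone)   # each entry is one zone record; only its 'region' key is used for filtering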
""" Top-N evaluation metrics. """<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<line_sep>_log=logging.getLogger(__name__)<def_stmt>bulk_impl metric<block_start><def_stmt>wrap impl<block_start>metric.bulk_score=impl<line_sep><return>impl<block_end><return>wrap<block_end><def_stmt>precision recs truth k=<none><block_start>""" Compute recommendation precision. This is computed as: .. math:: \\frac{|L \\cap I_u^{\\mathrm{test}}|}{|L|} In the uncommon case that ``k`` is specified and ``len(recs) < k``, this metric uses ``len(recs)`` as the denominator. """<if_stmt>k<is><not><none><block_start>recs=recs.iloc[:k]<block_end>nrecs=len(recs)<if_stmt>nrecs<eq>0<block_start><return><none><block_end>ngood=recs['item'].isin(truth.index).sum()<line_sep><return>ngood/nrecs<block_end>@bulk_impl(precision)<def_stmt>_bulk_precision recs truth k=<none><block_start><if_stmt>k<is><not><none><block_start>recs=recs[recs['rank']<le>k]<line_sep>lcounts=pd.Series(k index=recs['LKRecID'].unique())<line_sep>lcounts.index.name='LKRecID'<block_end><else_stmt><block_start>lcounts=recs.groupby(['LKRecID'])['item'].count()<block_end>good=recs.join(truth on=['LKTruthID' 'item'] how='inner')<line_sep>gcounts=good.groupby(['LKRecID'])['item'].count()<line_sep>lcounts,gcounts=lcounts.align(gcounts join='left' fill_value=0)<line_sep><return>gcounts/lcounts<block_end><def_stmt>recall recs truth k=<none><block_start>""" Compute recommendation recall. """<line_sep>nrel=len(truth)<if_stmt>nrel<eq>0<block_start><return><none><block_end><if_stmt>k<is><not><none><block_start>nrel=min(nrel k)<line_sep>recs=recs.iloc[:k]<block_end>ngood=recs['item'].isin(truth.index).sum()<line_sep><return>ngood/nrel<block_end>@bulk_impl(recall)<def_stmt>_bulk_recall recs truth k=<none><block_start>tcounts=truth.reset_index().groupby('LKTruthID')['item'].count()<if_stmt>k<is><not><none><block_start>_log.debug('truncating to k for recall')<line_sep>tcounts=np.minimum(tcounts k)<line_sep>recs=recs[recs['rank']<le>k]<block_end>good=recs.join(truth on=['LKTruthID' 'item'] how='inner')<line_sep>gcounts=good.groupby('LKRecID')['item'].count()<line_sep># we need all lists, because some might have no truth (oops), some no recs (also oops) lists=recs[['LKRecID' 'LKTruthID']].drop_duplicates()<line_sep>scores=lists.join(gcounts.to_frame('ngood') on='LKRecID' how='left')<line_sep>scores['ngood'].fillna(0 inplace=<true>)<line_sep>scores=scores.join(tcounts.to_frame('nrel') on='LKTruthID' how='left')<line_sep>scores=scores.set_index('LKRecID')<line_sep><return>scores['ngood']/scores['nrel']<block_end><def_stmt>recip_rank recs truth k=<none><block_start>""" Compute the reciprocal rank of the first relevant item in a list of recommendations. If no elements are relevant, the reciprocal rank is 0. This metric has a bulk equivalent. 
"""<if_stmt>k<is><not><none><block_start>recs=recs.iloc[:k]<block_end>good=recs['item'].isin(truth.index)<line_sep>npz,=np.nonzero(good.to_numpy())<if_stmt>len(npz)<block_start><return>1.0/(npz[0]+1.0)<block_end><else_stmt><block_start><return>0.0<block_end><block_end>@bulk_impl(recip_rank)<def_stmt>_bulk_rr recs truth k=<none># find everything with truth <block_start><if_stmt>k<is><not><none><block_start>recs=recs[recs['rank']<le>k]<block_end>joined=recs.join(truth on=['LKTruthID' 'item'] how='inner')<line_sep># compute min ranks ranks=joined.groupby('LKRecID')['rank'].agg('min')<line_sep># reciprocal ranks scores=1.0/ranks<line_sep>_log.debug('have %d scores with MRR %.3f' len(scores) scores.mean())<line_sep># fill with zeros rec_ids=recs['LKRecID'].unique()<line_sep>scores=scores.reindex(rec_ids fill_value=0.0)<line_sep>_log.debug('filled to get %s scores w/ MRR %.3f' len(scores) scores.mean())<line_sep># and we're done <return>scores<block_end><def_stmt>_dcg scores discount=np.log2<block_start>""" Compute the Discounted Cumulative Gain of a series of recommended items with rating scores. These should be relevance scores; they can be :math:`{0,1}` for binary relevance data. This is not a true top-N metric, but is a utility function for other metrics. Args: scores(array-like): The utility scores of a list of recommendations, in recommendation order. discount(ufunc): the rank discount function. Each item's score will be divided the discount of its rank, if the discount is greater than 1. Returns: double: the DCG of the scored items. """<line_sep>scores=np.nan_to_num(scores)<line_sep>ranks=np.arange(1 len(scores)+1)<line_sep>disc=discount(ranks)<line_sep>np.maximum(disc 1 out=disc)<line_sep>np.reciprocal(disc out=disc)<line_sep><return>np.dot(scores disc)<block_end><def_stmt>_fixed_dcg n discount=np.log2<block_start>ranks=np.arange(1 n+1)<line_sep>disc=discount(ranks)<line_sep>disc=np.maximum(disc 1)<line_sep>disc=np.reciprocal(disc)<line_sep><return>np.sum(disc)<block_end><def_stmt>dcg recs truth discount=np.log2<block_start>""" Compute the **unnormalized** discounted cumulative gain. Discounted cumultative gain is computed as: .. math:: \\begin{align*} \\mathrm{DCG}(L,u) & = \\sum_{i=1}^{|L|} \\frac{r_{ui}}{d(i)} \\end{align*} Args: recs: The recommendation list. truth: The user's test data. discount(ufunc): The rank discount function. Each item's score will be divided the discount of its rank, if the discount is greater than 1. """<line_sep>tpos=truth.index.get_indexer(recs['item'])<line_sep>tgood=tpos<ge>0<if_stmt>'rating'<in>truth.columns# make an array of ratings for this rec list <block_start>r_rates=truth['rating'].values[tpos]<line_sep>r_rates[tpos<l>0]=0<line_sep>achieved=_dcg(r_rates discount)<block_end><else_stmt><block_start>achieved=_dcg(tgood discount)<block_end><return>achieved<block_end><def_stmt>ndcg recs truth discount=np.log2 k=<none><block_start>""" Compute the normalized discounted cumulative gain. Discounted cumultative gain is computed as: .. math:: \\begin{align*} \\mathrm{DCG}(L,u) & = \\sum_{i=1}^{|L|} \\frac{r_{ui}}{d(i)} \\end{align*} This is then normalized as follows: .. math:: \\begin{align*} \\mathrm{nDCG}(L, u) & = \\frac{\\mathrm{DCG}(L,u)}{\\mathrm{DCG}(L_{\\mathrm{ideal}}, u)} \\end{align*} Args: recs: The recommendation list. truth: The user's test data. discount(ufunc): The rank discount function. Each item's score will be divided the discount of its rank, if the discount is greater than 1. 
"""<line_sep>tpos=truth.index.get_indexer(recs['item'])<if_stmt>k<is><not><none><block_start>recs=recs.iloc[:k]<block_end><if_stmt>'rating'<in>truth.columns<block_start>i_rates=np.sort(truth.rating.values)[::-1]<if_stmt>k<is><not><none><block_start>i_rates=i_rates[:k]<block_end>ideal=_dcg(i_rates discount)<line_sep># make an array of ratings for this rec list r_rates=truth['rating'].values[tpos]<line_sep>r_rates[tpos<l>0]=0<line_sep>achieved=_dcg(r_rates discount)<block_end><else_stmt><block_start>ntrue=len(truth)<if_stmt>k<is><not><none><and>ntrue<g>k<block_start>ntrue=k<block_end>ideal=_fixed_dcg(ntrue discount)<line_sep>tgood=tpos<ge>0<line_sep>achieved=_dcg(tgood discount)<block_end><return>achieved/ideal<block_end>@bulk_impl(ndcg)<def_stmt>_bulk_ndcg recs truth discount=np.log2 k=<none><block_start><if_stmt>'rating'<not><in>truth.columns<block_start>truth=truth.assign(rating=np.ones(len(truth) dtype=np.float32))<block_end>ideal=truth.groupby(level='LKTruthID')['rating'].rank(method='first' ascending=<false>)<if_stmt>k<is><not><none><block_start>ideal=ideal[ideal<le>k]<block_end>ideal=discount(ideal)<line_sep>ideal=np.maximum(ideal 1)<line_sep>ideal=truth['rating']/ideal<line_sep>ideal=ideal.groupby(level='LKTruthID').sum()<line_sep>ideal.name='ideal'<line_sep>list_ideal=recs[['LKRecID' 'LKTruthID']].drop_duplicates()<line_sep>list_ideal=list_ideal.join(ideal on='LKTruthID' how='left')<line_sep>list_ideal=list_ideal.set_index('LKRecID')<if_stmt>k<is><not><none><block_start>recs=recs[recs['rank']<le>k]<block_end>rated=recs.join(truth on=['LKTruthID' 'item'] how='inner')<line_sep>rd=discount(rated['rank'])<line_sep>rd=np.maximum(rd 1)<line_sep>rd=rated['rating']/rd<line_sep>rd=rated[['LKRecID']].assign(util=rd)<line_sep>dcg=rd.groupby(['LKRecID'])['util'].sum().reset_index(name='dcg')<line_sep>dcg=dcg.set_index('LKRecID')<line_sep>dcg=dcg.join(list_ideal how='outer')<line_sep>dcg['ndcg']=dcg['dcg'].fillna(0)/dcg['ideal']<line_sep><return>dcg['ndcg']<block_end>
<import_from_stmt>django.contrib admin<import_from_stmt>.models User<line_sep>admin.site.register(User)<line_sep>
<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>scipy.integrate._ivp rk<import_stmt>probnum.problems.zoo.diffeq<as>diffeq_zoo<import_from_stmt>probnum _randomvariablelist diffeq<line_sep>@pytest.fixture<def_stmt>steprule <block_start><return>diffeq.stepsize.AdaptiveSteps(0.1 atol=1e-4 rtol=1e-4)<block_end>@pytest.fixture<def_stmt>perturbed_solution steprule<block_start>y0=np.array([0.1 0.1])<line_sep>ode=diffeq_zoo.lotkavolterra(t0=0.0 tmax=1.0 y0=y0)<line_sep>rng=np.random.default_rng(seed=1)<line_sep>testsolver=diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta(rk.RK45 steprule=steprule)<line_sep>sol=diffeq.perturbed.step.PerturbedStepSolver(rng=rng solver=testsolver noise_scale=0.1 perturb_function=diffeq.perturbed.step.perturb_uniform )<line_sep><return>sol.solve(ode)<block_end><def_stmt>test_states perturbed_solution<block_start><assert_stmt>isinstance(perturbed_solution.states _randomvariablelist._RandomVariableList)<block_end><def_stmt>test_call perturbed_solution<block_start>"""Test for continuity of the dense output. Small changes of the locations should come with small changes of the states. """<line_sep>np.testing.assert_allclose(perturbed_solution(perturbed_solution.locations[0:]).mean perturbed_solution.states[0:].mean atol=1e-14 rtol=1e-14 )<line_sep>np.testing.assert_allclose(perturbed_solution(perturbed_solution.locations[0:-1]+1e-14).mean perturbed_solution(perturbed_solution.locations[0:-1]).mean atol=1e-12 rtol=1e-12 )<line_sep>np.testing.assert_allclose(perturbed_solution(perturbed_solution.locations[1:]-1e-14).mean perturbed_solution(perturbed_solution.locations[1:]).mean atol=1e-12 rtol=1e-12 )<block_end><def_stmt>test_len perturbed_solution<block_start>np.testing.assert_allclose(len(perturbed_solution) len(perturbed_solution.locations) atol=1e-14 rtol=1e-14 )<block_end><def_stmt>test_getitem perturbed_solution<block_start>np.testing.assert_allclose(perturbed_solution.interpolants[1](perturbed_solution.locations[1]) perturbed_solution[1].mean atol=1e-14 rtol=1e-14 )<block_end>
<import_from_stmt>docs_snippets_crag.concepts.partitions_schedules_sensors.partitioned_job_test # pylint: disable=unused-import test_do_stuff_partitioned <line_sep>
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>os<import_stmt>pickle<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn.parallel<import_stmt>torch.utils.data<as>data<class_stmt>_OneHotIterator<block_start>""" >>> it_1 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1) >>> it_2 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1) >>> list(it_1)[0][0].allclose(list(it_2)[0][0]) True >>> it = _OneHotIterator(n_features=8, n_batches_per_epoch=1, batch_size=4) >>> data = list(it) >>> len(data) 1 >>> batch = data[0] >>> x, y = batch >>> x.size() torch.Size([4, 8]) >>> x.sum(dim=1) tensor([1., 1., 1., 1.]) """<def_stmt>__init__ self n_features n_batches_per_epoch batch_size seed=<none><block_start>self.n_batches_per_epoch=n_batches_per_epoch<line_sep>self.n_features=n_features<line_sep>self.batch_size=batch_size<line_sep>self.probs=np.ones(n_features)/n_features<line_sep>self.batches_generated=0<line_sep>self.random_state=np.random.RandomState(seed)<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>__next__ self<block_start><if_stmt>self.batches_generated<ge>self.n_batches_per_epoch<block_start><raise>StopIteration()<block_end>batch_data=self.random_state.multinomial(1 self.probs size=self.batch_size)<line_sep>self.batches_generated<augadd>1<line_sep><return>torch.from_numpy(batch_data).float() torch.zeros(1)<block_end><block_end><class_stmt>OneHotLoader(torch.utils.data.DataLoader)<block_start>""" >>> data_loader = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2, seed=1) >>> epoch_1 = [] >>> for batch in data_loader: ... epoch_1.append(batch) >>> [b[0].size() for b in epoch_1] [torch.Size([2, 8]), torch.Size([2, 8]), torch.Size([2, 8])] >>> data_loader_other = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2) >>> all_equal = True >>> for a, b in zip(data_loader, data_loader_other): ... all_equal = all_equal and (a[0] == b[0]).all() >>> all_equal.item() 0 """<def_stmt>__init__ self n_features batches_per_epoch batch_size seed=<none><block_start>self.seed=seed<line_sep>self.batches_per_epoch=batches_per_epoch<line_sep>self.n_features=n_features<line_sep>self.batch_size=batch_size<block_end><def_stmt>__iter__ self<block_start><if_stmt>self.seed<is><none><block_start>seed=np.random.randint(0 2<power>32)<block_end><else_stmt><block_start>seed=self.seed<block_end><return>_OneHotIterator(n_features=self.n_features n_batches_per_epoch=self.batches_per_epoch batch_size=self.batch_size seed=seed )<block_end><block_end>
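A note on the seeding behaviour above: when seed is None, __iter__ draws a fresh random seed for every epoch, so separate loaders (and separate epochs) produce different batches, which is what the `all_equal.item() == 0` doctest checks, while an explicit seed makes every epoch identical. A minimal loop with illustrative sizes:
loader = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2, seed=7)
for _epoch in range(2):
    for x, _ in loader:   # x: (2, 8) float tensor with exactly one 1.0 per row
        pass              # with seed=7 both epochs replay the same batches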
<import_stmt>numpy<as>np<import_stmt>json<import_stmt>codecs<import_stmt>re<import_stmt>csv<line_sep>FIXED_VOCABULARY=<none><line_sep>SENTENCE_PAIR_DATA=<false><line_sep>#source: ( That ( ( 's ( ( a lot ) ( to ( pack ( into ( 18 minutes ) ) ) ) ) ) . ) ) #target: وأخرج ملتحفا فوطتي، كان الجميع يراني . <def_stmt>load_data source_path target_path trg_language="arabic" src_language="english" data_type="gt" is_lowercase=<true><block_start>examples=[]<line_sep>s_file=open(source_path)<line_sep>t_file=open(target_path)<line_sep>i=0<for_stmt>element zip(s_file.readlines() t_file.readlines())<block_start>line=element[0].strip()<line_sep>s_tokens,s_transitions=convert_binary_bracketing(line data_type=data_type lowercase=is_lowercase)<if_stmt>trg_language<eq>"zh"<block_start>t_tokens=list(element[1])<block_end><else_stmt><block_start>t_tokens=element[1].lower().split()<block_end>example={}<line_sep>example["tokens"]=s_tokens<line_sep>example["target_tokens"]=t_tokens+["<s>"]#end token for mt predictions. example["transitions"]=s_transitions<line_sep>example["example_id"]=i<line_sep>i<augadd>1<line_sep>examples.append(example)<block_end><return>examples<block_end><def_stmt>convert_binary_bracketing parse data_type="gt" lowercase=<false><block_start>transitions=[]<line_sep>tokens=[]<for_stmt>word parse.split(' ')<block_start><if_stmt>word[0]<ne>"("<block_start><if_stmt>word.strip()<eq>")"<block_start>transitions.append(1)<block_end><else_stmt># Downcase all words to match GloVe. <block_start><if_stmt>lowercase<block_start>tokens.append(word.lower())<block_end><else_stmt><block_start>tokens.append(word)<block_end>transitions.append(0)<block_end><block_end><block_end><if_stmt>(data_type<eq>"lb")<block_start>transitions=lb_build(len(tokens))<block_end><elif_stmt>(data_type<eq>"bal")<block_start>transitions=balanced_transitions(len(tokens))<block_end><elif_stmt>(data_type<eq>"rb")<block_start>transitions=rb_build(len(tokens))<block_end>#print(transitions) <return>tokens transitions<block_end><def_stmt>lb_build N<block_start><if_stmt>N<eq>2<block_start><return>[0 0 1]<block_end><else_stmt><block_start><return>[0 0 1]+(N-2)<times>[0 1]<block_end><block_end><def_stmt>rb_build N<block_start><return>[0]<times>(N)+[1]<times>(N-1)<block_end><def_stmt>balanced_transitions N<block_start>""" Recursively creates a balanced binary tree with N leaves using shift reduce transitions. """<if_stmt>N<eq>3<block_start><return>[0 0 1 0 1]<block_end><elif_stmt>N<eq>2<block_start><return>[0 0 1]<block_end><elif_stmt>N<eq>1<block_start><return>[0]<block_end><else_stmt><block_start>right_N=N<floordiv>2<line_sep>left_N=N-right_N<line_sep><return>balanced_transitions(left_N)+balanced_transitions(right_N)+[1]<block_end><block_end>
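A worked example of the bracketing-to-transition conversion above (data_type="gt", lowercase=True); the parse string is invented for illustration.
tokens, transitions = convert_binary_bracketing("( That ( 's nice ) )", data_type="gt", lowercase=True)
# tokens      -> ['that', "'s", 'nice']
# transitions -> [0, 0, 0, 1, 1]        (0 = shift a token, 1 = reduce the top two subtrees)
# data_type="bal" ignores the parse and returns balanced_transitions(3) == [0, 0, 1, 0, 1]
# data_type="lb" gives lb_build(3) == [0, 0, 1, 0, 1]; "rb" gives rb_build(3) == [0, 0, 0, 1, 1]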
# -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### """Tests for the :mod:`aiida.orm.utils.serialize` module."""<import_stmt>types<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>aiida orm<import_from_stmt>aiida.common.links LinkType<import_from_stmt>aiida.orm.utils serialize<line_sep>pytestmark=pytest.mark.usefixtures('aiida_profile_clean')<def_stmt>test_serialize_round_trip <block_start>""" Test the serialization of a dictionary with Nodes in various data structure Also make sure that the serialized data is json-serializable """<line_sep>node_a=orm.Data().store()<line_sep>node_b=orm.Data().store()<line_sep>data={'test':1 'list':[1 2 3 node_a] 'dict':{('Si' ):node_b 'foo':'bar'} 'baz':'aar'}<line_sep>serialized_data=serialize.serialize(data)<line_sep>deserialized_data=serialize.deserialize_unsafe(serialized_data)<line_sep># For now manual element-for-element comparison until we come up with general # purpose function that can equate two node instances properly <assert_stmt>data['test']<eq>deserialized_data['test']<assert_stmt>data['baz']<eq>deserialized_data['baz']<assert_stmt>data['list'][:3]<eq>deserialized_data['list'][:3]<assert_stmt>data['list'][3].uuid<eq>deserialized_data['list'][3].uuid<assert_stmt>data['dict'][('Si' )].uuid<eq>deserialized_data['dict'][('Si' )].uuid<block_end><def_stmt>test_serialize_group <block_start>""" Test that serialization and deserialization of Groups works. 
Also make sure that the serialized data is json-serializable """<line_sep>group_name='groupie'<line_sep>group_a=orm.Group(label=group_name).store()<line_sep>data={'group':group_a}<line_sep>serialized_data=serialize.serialize(data)<line_sep>deserialized_data=serialize.deserialize_unsafe(serialized_data)<assert_stmt>data['group'].uuid<eq>deserialized_data['group'].uuid<assert_stmt>data['group'].label<eq>deserialized_data['group'].label<block_end><def_stmt>test_serialize_node_round_trip <block_start>"""Test you can serialize and deserialize a node"""<line_sep>node=orm.Data().store()<line_sep>deserialized=serialize.deserialize_unsafe(serialize.serialize(node))<assert_stmt>node.uuid<eq>deserialized.uuid<block_end><def_stmt>test_serialize_group_round_trip <block_start>"""Test you can serialize and deserialize a group"""<line_sep>group=orm.Group(label='test_serialize_group_round_trip').store()<line_sep>deserialized=serialize.deserialize_unsafe(serialize.serialize(group))<assert_stmt>group.uuid<eq>deserialized.uuid<assert_stmt>group.label<eq>deserialized.label<block_end><def_stmt>test_serialize_computer_round_trip aiida_localhost<block_start>"""Test you can serialize and deserialize a computer"""<line_sep>deserialized=serialize.deserialize_unsafe(serialize.serialize(aiida_localhost))<line_sep># pylint: disable=no-member <assert_stmt>aiida_localhost.uuid<eq>deserialized.uuid<assert_stmt>aiida_localhost.label<eq>deserialized.label<block_end><def_stmt>test_serialize_unstored_node <block_start>"""Test that you can't serialize an unstored node"""<line_sep>node=orm.Data()<with_stmt>pytest.raises(ValueError)<block_start>serialize.serialize(node)<block_end><block_end><def_stmt>test_serialize_unstored_group <block_start>"""Test that you can't serialize an unstored group"""<line_sep>group=orm.Group(label='test_serialize_unstored_group')<with_stmt>pytest.raises(ValueError)<block_start>serialize.serialize(group)<block_end><block_end><def_stmt>test_serialize_unstored_computer <block_start>"""Test that you can't serialize an unstored computer"""<line_sep>computer=orm.Computer('test_computer' 'test_host')<with_stmt>pytest.raises(ValueError)<block_start>serialize.serialize(computer)<block_end><block_end><def_stmt>test_mixed_attribute_normal_dict <block_start>"""Regression test for #3092. The yaml mapping constructor in `aiida.orm.utils.serialize` was not properly "deeply" reconstructing nested mappings, causing a mix of attribute dictionaries and normal dictionaries to lose information in a round-trip. If a nested `AttributeDict` contained a normal dictionary, the content of the latter would be lost during the deserialization, despite the information being present in the serialized yaml dump. """<import_from_stmt>aiida.common.extendeddicts AttributeDict<line_sep># Construct a nested `AttributeDict`, which should make all nested dictionaries `AttributeDicts` recursively dictionary={'nested':AttributeDict({'dict':'string' 'value':1})}<line_sep>attribute_dict=AttributeDict(dictionary)<line_sep># Now add a normal dictionary in the attribute dictionary attribute_dict['nested']['normal']={'a':2}<line_sep>serialized=serialize.serialize(attribute_dict)<line_sep>deserialized=serialize.deserialize_unsafe(serialized)<assert_stmt>attribute_dict<eq>deserialized<block_end><def_stmt>test_serialize_numpy <block_start>"""Regression test for #3709 Check that numpy arrays can be serialized. 
"""<line_sep>data=np.array([1 2 3])<line_sep>serialized=serialize.serialize(data)<line_sep>deserialized=serialize.deserialize_unsafe(serialized)<assert_stmt>np.all(data<eq>deserialized)<block_end><def_stmt>test_serialize_simplenamespace <block_start>"""Regression test for #3709 Check that `types.SimpleNamespace` can be serialized. """<line_sep>data=types.SimpleNamespace(a=1 b=2.1)<line_sep>serialized=serialize.serialize(data)<line_sep>deserialized=serialize.deserialize_unsafe(serialized)<assert_stmt>data<eq>deserialized<block_end><def_stmt>test_enum <block_start>"""Test serialization and deserialization of an ``Enum``."""<line_sep>enum=LinkType.RETURN<line_sep>serialized=serialize.serialize(enum)<assert_stmt>isinstance(serialized str)<line_sep>deserialized=serialize.deserialize_unsafe(serialized)<assert_stmt>deserialized<eq>enum<block_end>
<import_stmt>unittest<import_stmt>json<import_stmt>random<import_stmt>string<import_stmt>os<import_from_stmt>unittest.case SkipTest<import_from_stmt>nose.tools assert_equals assert_in<import_from_stmt>functools partial<import_from_stmt>twython TwythonError<import_stmt>sneakers<line_sep>basePath=os.path.dirname(os.path.abspath(sneakers.__file__))<def_stmt>unit_channel channel data<block_start>""" Test a channel. """<line_sep>t=sneakers.Exfil(channel [])<line_sep># get parameters from config folder configPath=os.path.join(basePath 'config' '{}-config.json'.format(channel))<try_stmt><block_start><with_stmt>open(configPath)<as>f<block_start>params=json.loads(f.read())<block_end><block_end><except_stmt><block_start><raise>SkipTest('could not load configuration file for {}'.format(channel))<block_end>t.set_channel_params({'sending':params[channel] 'receiving':params[channel]})<try_stmt><block_start>t.send(data)<block_end><except_stmt>TwythonError<as>e# something out of our control <block_start><raise>SkipTest("Twython error occurred: {}".format(e))<block_end>got=t.receive()<if_stmt>len(data)<g>300<block_start>assert_in(data got 'Failed in assertion for the \'{}\' channel with a very large payload.'.format(channel))<block_end><else_stmt><block_start>assert_in(data got)<block_end><block_end>###################################################### #################### Actual Tests #################### ###################################################### <def_stmt>test_AllChannelsBasic <block_start>""" Test all channels with basic alphanumeric characters. """<line_sep># need to have some random; a few platforms (looking at you, Twitter) have # issues if you post the same thing multiple times rand=''.join([random.choice(string.letters)<for>i range(5)])<line_sep>data=''.join([string.letters string.digits rand])<for_stmt>channel sneakers.Exfil.list_channels()<block_start>f=partial(unit_channel channel data)<line_sep>f.description="Test the {} channel with basic alphanumeric characters.".format(channel)<line_sep><yield>(f )<block_end><block_end><def_stmt>test_AllChannelsAdvanced <block_start>""" Test all channels with a full range of printable characters. """<line_sep># need to have some random; a few platforms (looking at you, Twitter) have # issues if you post the same thing multiple times rand=''.join([random.choice(string.letters)<for>i range(5)])<line_sep>our_printable="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ "<line_sep># excludes \t\r\n data=''.join([our_printable rand])<for_stmt>channel sneakers.Exfil.list_channels()<block_start>f=partial(unit_channel channel data)<line_sep>f.description="Test the {} channel with the full range of printable characters.".format(channel)<line_sep><yield>(f )<block_end><block_end><def_stmt>test_AllChannelsLong <block_start>""" Test all channels with long messages. """<line_sep>data=''.join([random.choice(string.letters)<for>i range(500000)])<for_stmt>channel sneakers.Exfil.list_channels()<block_start>f=partial(unit_channel channel data)<line_sep>f.description="Test the {} channel with a very long message.".format(channel)<line_sep><yield>(f )<block_end><block_end>
expected_output={"GigabitEthernet3/8/0/38":{"auto_negotiate":<true> "counters":{"normal":{"in_broadcast_pkts":1093 "in_mac_pause_frames":0 "in_multicast_pkts":18864 "in_octets":0 "in_pkts":7446905 "in_unicast_pkts":7426948 "out_broadcast_pkts":373635 "out_mac_pause_frames":0 "out_multicast_pkts":34367737 "out_octets":0 "out_pkts":40981139 "out_unicast_pkts":6239767} "in_abort":0 "in_broadcast_pkts":1093 "in_crc_errors":0 "in_errors":0 "in_frame":0 "in_giants":0 "in_ignored":0 "in_mac_pause_frames":0 "in_multicast_pkts":18864 "in_octets":10280397282 "in_overrun":0 "in_parity_errors":0 "in_pkts":7446905 "in_runts":0 "in_throttles":0 "in_unicast_pkts":7426948 "last_clear":"Never" "out_abort":0 "out_broadcast_pkts":373635 "out_buffer_failure":0 "out_collision":0 "out_deferred":0 "out_errors":0 "out_late_collision":0 "out_lost_carrier":0 "out_mac_pause_frames":0 "out_multicast_pkts":34367737 "out_no_carrier":0 "out_octets":44666966188 "out_pkts":40981139 "out_underruns":0 "out_unicast_pkts":6239767 "rate":{"in_rate_bytes":0 "in_rate_pkts":0 "load_interval":300 "out_rate_bytes":0 "out_rate_pkts":0}} "description":"GigabitEthernet3/8/0/38 Interface" "duplex_mode":"unknown" "enabled":<true> "frame_type":"PKTFMT_ETHNT_2" "mac_address":"cc3e-5f69-5751" "max_frame_length":9216 "media_type":"twisted pair" "oper_status":"DOWN" "port_speed":"unknown" "port_type":"1000_BASE_T" "priority":0 "pvid":17 "switchport":{"mode":"access" "untagged":17} "type":"GigabitEthernet"}}<line_sep>
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. <import_stmt>random<import_stmt>unittest<import_from_stmt>pymatgen.util.num abs_cap min_max_indexes round_to_sigfigs<line_sep>__author__="<NAME>"<line_sep>__copyright__="Copyright 2013, The Materials Project"<line_sep>__version__="0.1"<line_sep>__maintainer__="<NAME>"<line_sep>__email__="<EMAIL>"<line_sep>__date__="9/25/14"<class_stmt>FuncTestCase(unittest.TestCase)<block_start><def_stmt>test_abs_cap self<block_start>self.assertEqual(abs_cap(1.000000001) 1.0)<line_sep>self.assertEqual(abs_cap(-1.000000001) -1.0)<line_sep>v=random.uniform(-1 1)<line_sep>self.assertEqual(abs_cap(v) v)<line_sep>self.assertEqual(abs_cap(1.000000001 2) 1.000000001)<line_sep>self.assertEqual(abs_cap(-2.000000001 2) -2.0)<block_end><def_stmt>test_min_max_indexes self<block_start>val=["b" "a" "m" "z" "y"]<line_sep>min_ind,max_ind=min_max_indexes(val)<line_sep>self.assertEqual(min_ind 1)<line_sep>self.assertEqual(max_ind 3)<block_end><def_stmt>test_round self<block_start>vals=[424.2425 2.3425356 0.000042535636653 0.23 2.468e6 0 -1.392156]<line_sep>sigfigs=range(1 6)<line_sep>rounded_vals=[[400.0 420.0 424.0 424.2 424.24] [2.0 2.3 2.34 2.343 2.3425] [4e-5 4.3e-5 4.25e-5 4.254e-5 4.2536e-5] [0.2 0.23 0.23 0.23 0.23] [2e6 2.5e6 2.47e6 2.468e6 2.468e6] [0 0 0 0 0] [-1 -1.4 -1.39 -1.392 -1.3922] ]<for_stmt>v,val enumerate(vals)<block_start><for_stmt>s,sig enumerate(sigfigs)<block_start>self.assertEqual(round_to_sigfigs(val sig) rounded_vals[v][s])<block_end><block_end><with_stmt>self.assertRaises(ValueError)<block_start>round_to_sigfigs(3.5 -2)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>round_to_sigfigs(3.5 3.5)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
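For quick reference, the behaviour exercised by the tests above, restated outside the unittest scaffolding:
from pymatgen.util.num import abs_cap, min_max_indexes, round_to_sigfigs

abs_cap(1.000000001)                         # -> 1.0, clamped to the default cap of 1
min_max_indexes(["b", "a", "m", "z", "y"])   # -> (1, 3): indexes of the min and max values
round_to_sigfigs(2.3425356, 3)               # -> 2.34, per the rounded_vals table above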
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>nose.tools eq_<import_from_stmt>nose.tools ok_<import_from_stmt>time time<import_from_stmt>ryu.lib.packet bmp<import_from_stmt>ryu.lib.packet bgp<import_from_stmt>ryu.lib.packet afi<import_from_stmt>ryu.lib.packet safi<class_stmt>Test_bmp(unittest.TestCase)<block_start>""" Test case for ryu.lib.packet.bmp """<def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end><def_stmt>test_route_monitoring self<block_start>update=bgp.BGPUpdate()<line_sep>msg=bmp.BMPRouteMonitoring(bgp_update=update peer_type=bmp.BMP_PEER_TYPE_GLOBAL is_post_policy=<true> peer_distinguisher=0 peer_address='192.0.2.1' peer_as=30000 peer_bgp_id='192.0.2.1' timestamp=time())<line_sep>binmsg=msg.serialize()<line_sep>msg2,rest=bmp.BMPMessage.parser(binmsg)<line_sep>eq_(msg.to_jsondict() msg2.to_jsondict())<line_sep>eq_(rest '')<block_end><def_stmt>test_statistics_report self<block_start>stats=[{'type':bmp.BMP_STAT_TYPE_REJECTED 'value':100} {'type':bmp.BMP_STAT_TYPE_DUPLICATE_PREFIX 'value':200} {'type':bmp.BMP_STAT_TYPE_DUPLICATE_WITHDRAW 'value':300} {'type':bmp.BMP_STAT_TYPE_ADJ_RIB_IN 'value':100000} {'type':bmp.BMP_STAT_TYPE_LOC_RIB 'value':500000}]<line_sep>msg=bmp.BMPStatisticsReport(stats=stats peer_type=bmp.BMP_PEER_TYPE_GLOBAL is_post_policy=<true> peer_distinguisher=0 peer_address='192.0.2.1' peer_as=30000 peer_bgp_id='192.0.2.1' timestamp=time())<line_sep>binmsg=msg.serialize()<line_sep>msg2,rest=bmp.BMPMessage.parser(binmsg)<line_sep>eq_(msg.to_jsondict() msg2.to_jsondict())<line_sep>eq_(rest '')<block_end><def_stmt>test_peer_down_notification self<block_start>reason=bmp.BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION<line_sep>data="hoge"<line_sep>data=bgp.BGPNotification(error_code=1 error_subcode=2 data=data)<line_sep>msg=bmp.BMPPeerDownNotification(reason=reason data=data peer_type=bmp.BMP_PEER_TYPE_GLOBAL is_post_policy=<true> peer_distinguisher=0 peer_address='192.0.2.1' peer_as=30000 peer_bgp_id='192.0.2.1' timestamp=time())<line_sep>binmsg=msg.serialize()<line_sep>msg2,rest=bmp.BMPMessage.parser(binmsg)<line_sep>eq_(msg.to_jsondict() msg2.to_jsondict())<line_sep>eq_(rest '')<block_end><def_stmt>test_peer_up_notification self<block_start>opt_param=[bgp.BGPOptParamCapabilityUnknown(cap_code=200 cap_value='hoge') bgp.BGPOptParamCapabilityRouteRefresh() bgp.BGPOptParamCapabilityMultiprotocol(afi=afi.IP safi=safi.MPLS_VPN)]<line_sep>open_message=bgp.BGPOpen(my_as=40000 bgp_identifier='192.0.2.2' opt_param=opt_param)<line_sep>msg=bmp.BMPPeerUpNotification(local_address='192.0.2.2' local_port=179 remote_port=11089 sent_open_message=open_message received_open_message=open_message peer_type=bmp.BMP_PEER_TYPE_GLOBAL is_post_policy=<true> peer_distinguisher=0 peer_address='192.0.2.1' peer_as=30000 peer_bgp_id='192.0.2.1' 
timestamp=time())<line_sep>binmsg=msg.serialize()<line_sep>msg2,rest=bmp.BMPMessage.parser(binmsg)<line_sep>eq_(msg.to_jsondict() msg2.to_jsondict())<line_sep>eq_(rest '')<block_end><def_stmt>test_initiation self<block_start>initiation_info=[{'type':bmp.BMP_INIT_TYPE_STRING 'value':u'This is Ryu BGP BMP message'}]<line_sep>msg=bmp.BMPInitiation(info=initiation_info)<line_sep>binmsg=msg.serialize()<line_sep>msg2,rest=bmp.BMPMessage.parser(binmsg)<line_sep>eq_(msg.to_jsondict(<lambda>v:v) msg2.to_jsondict(<lambda>v:v))<line_sep>eq_(rest '')<block_end><def_stmt>test_termination self<block_start>termination_info=[{'type':bmp.BMP_TERM_TYPE_STRING 'value':u'Session administratively closed'} {'type':bmp.BMP_TERM_TYPE_REASON 'value':bmp.BMP_TERM_REASON_ADMIN}]<line_sep>msg=bmp.BMPTermination(info=termination_info)<line_sep>binmsg=msg.serialize()<line_sep>msg2,rest=bmp.BMPMessage.parser(binmsg)<line_sep>eq_(msg.to_jsondict(<lambda>v:v) msg2.to_jsondict(<lambda>v:v))<line_sep>eq_(rest '')<block_end><block_end>
<import_from_stmt>flask_restful Resource<import_from_stmt>app.constants API_ENVELOPE PLAN_PUBLIC_KEYS<import_from_stmt>app.plans plans<class_stmt>PlansApi(Resource)# Public (for authenticated users) <block_start><def_stmt>get self# Flatten dict to an array to match rest of api style <block_start>output=[]<for_stmt>plan_id,value plans.iteritems()<block_start>clean_plan={"id":plan_id}<for_stmt>key PLAN_PUBLIC_KEYS<block_start>clean_plan[key]=value.get(key)<block_end>output.append(clean_plan)<block_end><return>{API_ENVELOPE:output }<block_end><block_end>
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>argparse<def_stmt>args_parser <block_start>'''Common classifier application command-line arguments. '''<line_sep>parser=argparse.ArgumentParser(description='image classification model command-line')<line_sep>parser.add_argument('--arch' '-a' metavar='ARCH' default='DenseNet121')<line_sep>parser.add_argument('--data' '-d' default='./data')<line_sep>parser.add_argument('--resume-from' dest='resumed_checkpoint_path' default='' type=str metavar='PATH' help='path to latest checkpoint. Use to resume paused training session.')<line_sep>parser.add_argument('--infer-file' dest='infer_file')<line_sep>parser.add_argument('--infer-classdim' dest='infer_classdim' default=5)<line_sep><return>parser<block_end>
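A short sketch of how the parser above might be consumed; the architecture and file name are placeholders.
parser = args_parser()
args = parser.parse_args(['--arch', 'ResNet50', '--data', './data', '--infer-file', 'sample.jpg'])
# args.arch -> 'ResNet50'; args.resumed_checkpoint_path -> '' (default); args.infer_classdim -> 5 (default)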
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # # For the regression-based detector: # python exps/BASE-eval-image.py --image ./cache_data/cache/self.jpeg --face 250 150 900 1100 --model ${check_point_path} # <import_from_future_stmt> division<import_stmt>sys time torch random argparse PIL<import_from_stmt>PIL ImageFile<line_sep>ImageFile.LOAD_TRUNCATED_IMAGES=<true><import_from_stmt>pathlib Path<import_stmt>numpy<as>np<line_sep>lib_dir=(Path(__file__).parent/'..'/'lib').resolve()<if_stmt>str(lib_dir)<not><in>sys.path<block_start>sys.path.insert(0 str(lib_dir))<block_end><assert_stmt>sys.version_info.major<eq>3 'Please upgrade from {:} to Python 3.x'.format(sys.version_info)<import_from_stmt>datasets GeneralDatasetV2<as>Dataset PointMeta2V<as>PointMeta pil_loader<import_from_stmt>xvision transforms2v<as>transforms draw_image_by_points<import_from_stmt>xvision normalize_points denormalize_points<import_from_stmt>models obtain_pro_model remove_module_dict<import_from_stmt>config_utils load_configure<def_stmt>evaluate args<block_start><if_stmt>args.cuda<block_start><assert_stmt>torch.cuda.is_available() 'CUDA is not available.'<line_sep>torch.backends.cudnn.enabled=<true><line_sep>torch.backends.cudnn.benchmark=<true><line_sep>print('Use the GPU mode')<block_end><else_stmt><block_start>print('Use the CPU mode')<block_end>print('The image is {:}'.format(args.image))<line_sep>print('The model is {:}'.format(args.model))<line_sep>last_info_or_snap=Path(args.model)<assert_stmt>last_info_or_snap.exists() 'The model path {:} does not exist'.format(last_info)<line_sep>last_info_or_snap=torch.load(last_info_or_snap map_location=torch.device('cpu'))<if_stmt>'last_checkpoint'<in>last_info_or_snap<block_start>snapshot=last_info_or_snap['last_checkpoint']<assert_stmt>snapshot.exists() 'The model path {:} does not exist'.format(snapshot)<line_sep>print('The face bounding box is {:}'.format(args.face))<assert_stmt>len(args.face)<eq>4 'Invalid face input : {:}'.format(args.face)<line_sep>snapshot=torch.load(snapshot map_location=torch.device('cpu'))<block_end><else_stmt><block_start>snapshot=last_info_or_snap<block_end>param=snapshot['args']<line_sep># General Data Argumentation <if_stmt>param.use_gray<eq><false><block_start>mean_fill=tuple([int(x<times>255)<for>x [0.485 0.456 0.406]])<line_sep>normalize=transforms.Normalize(mean=[0.485 0.456 0.406] std=[0.229 0.224 0.225])<block_end><else_stmt><block_start>mean_fill=(0.5 )<line_sep>normalize=transforms.Normalize(mean=[mean_fill[0]] std=[0.5])<block_end>eval_transform=transforms.Compose2V([transforms.ToTensor() normalize transforms.PreCrop(param.pre_crop_expand) transforms.CenterCrop(param.crop_max)])<line_sep>model_config=load_configure(param.model_config <none>)<line_sep># dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (120, 96), param.use_gray, None, param.data_indicator) dataset=Dataset(eval_transform param.sigma model_config.downsample param.heatmap_type (param.height param.width) param.use_gray <none> param.data_indicator)<line_sep>dataset.reset(param.num_pts)<line_sep>net=obtain_pro_model(model_config param.num_pts param.sigma 
param.use_gray)<line_sep>net.eval()<try_stmt><block_start>net.load_state_dict(snapshot['detector'])<block_end><except_stmt><block_start>net.load_state_dict(remove_module_dict(snapshot['detector']))<block_end><if_stmt>args.cuda<block_start>net=net.cuda()<block_end>print('Processing the input face image.')<line_sep>face_meta=PointMeta(dataset.NUM_PTS <none> args.face args.image 'BASE-EVAL')<line_sep>face_img=pil_loader(args.image dataset.use_gray)<line_sep>affineImage,heatmaps,mask,norm_trans_points,transthetas,_,_,_,shape=dataset._process_(face_img face_meta -1)<line_sep># network forward <with_stmt>torch.no_grad()<block_start><if_stmt>args.cuda<block_start>inputs=affineImage.unsqueeze(0).cuda()<block_end><else_stmt><block_start>inputs=affineImage.unsqueeze(0)<block_end>batch_locs=net(inputs)<line_sep>batch_locs=batch_locs.cpu()<line_sep>(batch_size C H W),num_pts=inputs.size() param.num_pts<line_sep>norm_locs=torch.cat((batch_locs[0].transpose(1 0) torch.ones(1 num_pts)) dim=0)<line_sep>norm_locs=torch.mm(transthetas[:2 :] norm_locs)<line_sep>real_locs=denormalize_points(shape.tolist() norm_locs)<block_end>print('the coordinates for {:} facial landmarks:'.format(param.num_pts))<for_stmt>i range(param.num_pts)<block_start>point=real_locs[: i]<line_sep>print('the {:02d}/{:02d}-th landmark : ({:.1f}, {:.1f})'.format(i param.num_pts float(point[0]) float(point[1])))<block_end><if_stmt>args.save<block_start>resize=512<line_sep>image=draw_image_by_points(args.image real_locs 2 (255 0 0) args.face resize)<line_sep>image.save(args.save)<line_sep>print('save the visualization results into {:}'.format(args.save))<block_end><else_stmt><block_start>print('ignore the visualization procedure')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Evaluate a single image by the trained model' formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument('--image' type=str help='The evaluation image path.')<line_sep>parser.add_argument('--model' type=str help='The snapshot to the saved detector.')<line_sep>parser.add_argument('--face' nargs='+' type=float help='The coordinate [x1,y1,x2,y2] of a face')<line_sep>parser.add_argument('--save' type=str help='The path to save the visualized results.')<line_sep>parser.add_argument('--cuda' action='store_true' help='Use cuda or not.')<line_sep>args=parser.parse_args()<line_sep>evaluate(args)<block_end>