content: string column (lengths 0 to 1.55M)
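The rows below store complete Python source files with the original syntax replaced by sentinel tokens (`<import_stmt>`, `<def_stmt>`, `<block_start>`/`<block_end>`, `<line_sep>`, `<eq>`, `<times>`, and so on). As a rough illustration only, the sketch below shows how such a row could be expanded back into more readable Python text; the token inventory and the `decode_row` helper are inferred from the rows themselves, not taken from any documented spec, and the expansion is lossy (indentation depth, the `import` keyword inside `from ... import ...`, and the `in` of `for` loops are not reconstructed).

```python
import re

# Hypothetical token -> text mapping, inferred by inspecting the rows in this dump.
# It restores keywords and operators only; it does not rebuild exact indentation
# from <block_start>/<block_end>, nor the "import"/"in" keywords that the
# encoding folds into <import_from_stmt> and <for_stmt>.
TOKEN_MAP = {
    "<line_sep>": "\n",
    "<import_stmt>": "\nimport ",
    "<import_from_stmt>": "\nfrom ",
    "<import_from_future_stmt>": "\nfrom __future__ import ",
    "<def_stmt>": "\ndef ",
    "<class_stmt>": "\nclass ",
    "<if_stmt>": "\nif ",
    "<elif_stmt>": "\nelif ",
    "<else_stmt>": "\nelse:",
    "<for_stmt>": "\nfor ",
    "<while_stmt>": "\nwhile ",
    "<with_stmt>": "\nwith ",
    "<try_stmt>": "\ntry:",
    "<except_stmt>": "\nexcept ",
    "<assert_stmt>": "\nassert ",
    "<block_start>": "\n    ",  # indentation depth deliberately not tracked
    "<block_end>": "\n",
    "<return>": "return ",
    "<raise>": "raise ",
    "<continue>": "continue",
    "<pass>": "pass",
    "<true>": "True",
    "<false>": "False",
    "<none>": "None",
    "<eq>": " == ",
    "<ne>": " != ",
    "<l>": " < ",
    "<g>": " > ",
    "<le>": " <= ",
    "<ge>": " >= ",
    "<in>": " in ",
    "<not>": " not ",
    "<and>": " and ",
    "<or>": " or ",
    "<is>": " is ",
    "<times>": " * ",
    "<power>": " ** ",
    "<floordiv>": " // ",
    "<augadd>": " += ",
    "<arrow>": " -> ",
    "<concat>": " ",
}

_TOKEN_RE = re.compile("|".join(re.escape(tok) for tok in TOKEN_MAP))


def decode_row(row: str) -> str:
    """Best-effort expansion of a sentinel-token row into Python-like text."""
    return _TOKEN_RE.sub(lambda m: TOKEN_MAP[m.group(0)], row)
```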
## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##     http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

import os
import pyside2uic
import subprocess

CURRENT_DIR = os.path.dirname(__file__).replace('\\', '/') + '/'
INTERPRETER_PATH = 'python.exe'


def ui_to_py(ui_file):
    if not os.path.isfile(ui_file):
        msg = 'no such file'
        print(msg)
        return msg
    py_file_name = os.path.splitext(ui_file)[0] + '.py'
    with open(py_file_name, 'w') as py_file:
        try:
            pyside2uic.compileUi(ui_file, py_file)
            print('{0} converted to {1}.'.format(ui_file.upper(), py_file_name.upper()))
        except Exception as e:
            print('Error: compilation error.', e)

    bakFileName = py_file_name.replace(".py", "_backup.py")

    # convert to cross compatible code
    subprocess.call([INTERPRETER_PATH, '-m', 'Qt', '--convert', py_file_name])

    if os.path.isfile(bakFileName):
        os.remove(bakFileName)
        print("REMOVING", bakFileName)


def compile():
    for d, dirs, files in os.walk(CURRENT_DIR):
        if "Python" in d or ".git" in d:
            continue
        for f in files:
            if "." in f:
                ext = f.split('.')[1]
                if ext == 'ui':
                    uiFile = os.path.join(d, f)
                    ui_to_py(uiFile)


if __name__ == '__main__':
    compile()
# vi: ft=python <import_stmt>argparse<import_stmt>os<import_stmt>json<import_stmt>re<import_stmt>sys<import_from_stmt>numbers_parser.unpack read_numbers_file<import_from_stmt>numbers_parser _get_version<import_from_stmt>numbers_parser.iwafile IWAFile<import_from_stmt>numbers_parser.exceptions FileFormatError<def_stmt>ensure_directory_exists prefix path<block_start>"""Ensure that a path's directory exists."""<line_sep>parts=os.path.split(path)<try_stmt><block_start>os.makedirs(os.path.join(*([prefix]+list(parts[:-1]))))<block_end><except_stmt>OSError<block_start><pass><block_end><block_end><def_stmt>convert_uuids_to_hex obj<block_start><if_stmt>isinstance(obj dict)<block_start><for_stmt>k,v obj.items()<block_start><if_stmt>isinstance(v dict)<or>isinstance(v list)<block_start>convert_uuids_to_hex(v)<block_end><elif_stmt>k<eq>"lower"<or>k<eq>"upper"<block_start>obj[k]="0x{0:0{1}X}".format(int(v) 16)<block_end><elif_stmt>k<in>["uuidW0" "uuidW1" "uuidW2" "uuidW3"]<block_start>obj[k]="0x{0:0{1}X}".format(v 8)<block_end><block_end><block_end><elif_stmt>isinstance(obj list)<block_start><for_stmt>v obj<block_start><if_stmt>isinstance(v dict)<or>isinstance(v list)<block_start>convert_uuids_to_hex(v)<block_end><block_end><block_end><block_end><def_stmt>process_file contents filename output_dir hex_uuids<block_start>filename=re.sub(r".*\.numbers/" "" filename)<line_sep>ensure_directory_exists(output_dir filename)<line_sep>target_path=os.path.join(output_dir filename)<if_stmt>isinstance(contents IWAFile)<block_start>target_path=target_path.replace(".iwa" "")<line_sep>target_path<augadd>".txt"<with_stmt>open(target_path "w")<as>out<block_start>data=contents.to_dict()<if_stmt>hex_uuids<block_start>convert_uuids_to_hex(data)<block_end>print(json.dumps(data sort_keys=<true> indent=4) file=out)<block_end><block_end><else_stmt><block_start><with_stmt>open(target_path "wb")<as>out<block_start>out.write(contents)<block_end><block_end><block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("document" help="Apple Numbers file(s)" nargs="*")<line_sep>parser.add_argument("-V" "--version" action="store_true")<line_sep>parser.add_argument("--hex-uuids" action="store_true" help="print UUIDs as hex")<line_sep>parser.add_argument("--output" "-o" help="directory name to unpack into")<line_sep>args=parser.parse_args()<if_stmt>args.version<block_start>print(_get_version())<block_end><elif_stmt>args.output<is><not><none><and>len(args.document)<g>1<block_start>print("unpack-numbers: error: output directory only valid with a single document" file=sys.stderr )<line_sep>sys.exit(1)<block_end><elif_stmt>len(args.document)<eq>0<block_start>parser.print_help()<block_end><else_stmt><block_start><for_stmt>document args.document<block_start>output_dir=args.output<or>document.replace(".numbers" "")<try_stmt><block_start>read_numbers_file(document handler=<lambda>contents filename:process_file(contents filename output_dir args.hex_uuids) store_objects=<false> )<block_end><except_stmt>FileFormatError<as>e<block_start>print(f"{document}:" str(e) file=sys.stderr)<line_sep>sys.exit(1)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"# execute only if run as a script <block_start>main()<block_end>
# pylint: disable-all """ Test a final model with distance attacks """<import_stmt>os<import_stmt>copy<import_stmt>argparse<import_stmt>subprocess<import_stmt>yaml<if_stmt>__name__<eq>"__main__"<block_start>TEST_TYPES={"BIM_L2":{"adversary_type":"L2BasicIterativeAttack" "distance_type":"MeanSquaredDistance" } "BIM_LINF":{"adversary_type":"LinfinityBasicIterativeAttack" "distance_type":"Linfinity" } "CW_L2":{"adversary_type":"CarliniWagnerL2Attack" "distance_type":"MeanSquaredDistance" } "DEEPFOOL_L2":{"adversary_type":"DeepFoolL2Attack" "distance_type":"MeanSquaredDistance" } "DEEPFOOL_LINF":{"adversary_type":"DeepFoolLinfinityAttack" "distance_type":"Linfinity" } }<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("cfg_file")<line_sep>parser.add_argument("--load" default=<none> help="load a checkpoint")<line_sep>parser.add_argument("--load_state_dict" default=<none> help="load checkpoint's state dict")<line_sep>parser.add_argument("--type" "-t" required=<true> action="append" default=[] help="distance attack type" choices=list(TEST_TYPES.keys())+[t.lower()<for>t TEST_TYPES.keys()] )<line_sep>parser.add_argument("--gpu" default=0 type=int)<line_sep>args=parser.parse_args()<assert_stmt>(args.load<is><not><none><or>args.load_state_dict<is><not><none>) "Checkpoint Required."<with_stmt>open(args.cfg_file "r")<as>rf<block_start>base_cfg=yaml.load(rf)<block_end>test_cfg_files=[]<line_sep>log_files=[]<line_sep>save_path=(args.load<if>args.load<is><not><none><else>os.path.dirname(args.load_state_dict))<for_stmt>test_type args.type<block_start>cfg=copy.deepcopy(base_cfg)<line_sep>cfg["objective_type"]="adversarial_distance_objective"<line_sep>cfg["objective_cfg"]={}<line_sep>cfg["objective_cfg"]["mean"]=base_cfg["objective_cfg"]["mean"]<line_sep>cfg["objective_cfg"]["std"]=base_cfg["objective_cfg"]["std"]<line_sep>cfg["objective_cfg"]["num_classes"]=base_cfg["objective_cfg"].get("num_classes" base_cfg["final_model_cfg"].get("num_classes" 10))<line_sep>cfg["objective_cfg"].update(TEST_TYPES[test_type.upper()])<line_sep>test_cfg_files.append("{}-test-{}.yaml".format(os.path.splitext(args.cfg_file)[0] test_type.upper()))<line_sep>log_files.append(os.path.join(save_path "test-{}.log".format(test_type.upper())))<with_stmt>open(test_cfg_files[-1] "w")<as>wf<block_start>yaml.dump(cfg wf)<block_end><block_end><for_stmt>test_type,test_cfg_file,log_file zip(args.type test_cfg_files log_files)<block_start>print("****Test {}. Test cfg: {}. Log saved to {}.****".format(test_type test_cfg_file log_file))<if_stmt>args.load_state_dict<is><not><none><block_start>subprocess.check_call("awnas test {} --load-state-dict {} --gpus {} -s test 2>&1 | tee {}".format(test_cfg_file args.load_state_dict args.gpu log_file) shell=<true> )<block_end><elif_stmt>args.load<is><not><none><block_start>subprocess.check_call("awnas test {} --load {} --gpus {} -s test 2>&1 | tee {}".format(test_cfg_file args.load args.gpu log_file) shell=<true> )<block_end><block_end><block_end>
# Copyright (c) 2018 The Regents of the University of California. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' Test file for simple memory test TODO: Add stats checking '''<import_from_stmt>testlib *<line_sep>gem5_verify_config(name='simple_mem_default' verifiers=() # No need for verfiers this will return non-zero on fail config=joinpath(getcwd() 'simple-run.py') config_args=[] valid_isas=(constants.null_tag ) )<line_sep>simple_mem_params=[('inf-bandwidth' {'bandwidth':'0GB/s'}) ('low-latency' {'latency':'1ns'}) ('high-latency' {'latency':'1us'}) ('low-bandwidth' {'bandwidth':'1MB/s'}) ('high-var' {'latency_var':'100ns'})]<for_stmt>name,params simple_mem_params<block_start>args=['--'+key+'='+val<for>key,val params.items()]<line_sep>gem5_verify_config(name='simple_mem_'+name verifiers=() # No need for verfiers this will return non-zero on fail config=joinpath(getcwd() 'simple-run.py') config_args=args valid_isas=(constants.null_tag ) )<block_end># This tests for validity as well as performance gem5_verify_config(name='memtest' verifiers=() # No need for verfiers this will return non-zero on fail config=joinpath(getcwd() 'memtest-run.py') config_args=[] valid_isas=(constants.null_tag ) )<line_sep>null_tests=[('garnet_synth_traffic' ['--sim-cycles' '5000000']) ('memcheck' ['--maxtick' '2000000000' '--prefetchers']) ('ruby_mem_test' ['--abs-max-tick' '20000000' '--functional' '10']) ('ruby_random_test' ['--maxloads' '5000']) ('ruby_direct_test' ['--requests' '50000']) ]<for_stmt>basename_noext,args null_tests<block_start>gem5_verify_config(name=basename_noext fixtures=() verifiers=() config=joinpath(config.base_dir 'configs' 'example' basename_noext+'.py') config_args=args valid_isas=(constants.null_tag ) valid_hosts=constants.supported_hosts )<block_end>
<import_stmt>unittest<import_from_stmt>pycoin.ecdsa.secp256k1 secp256k1_generator<import_from_stmt>pycoin.encoding.hexbytes b2h b2h_rev<import_from_stmt>pycoin.intbytes int2byte<import_from_stmt>pycoin.networks.registry network_for_netcode<import_from_stmt>pycoin.satoshi.der sigdecode_der sigencode_der<line_sep>PRIV_KEYS=(2330949616242593315303241053456316633827293588958882755297900732239663851861 4437411780076344925846479906614060621668407514498402815534040340772719979673 14311886404724799688521454580288220586308410691395501373612453626821267193196 16404731722033649474165521611800542240555275746052963990137782680023514762282 92715304942310420502826004911529506622922082818576946681102234225452853924813 103235678552410630318322729483874198805317322052500844759252733409163632402845 )<def_stmt>sigcheck a_key a_hash_for_sig a_sig<block_start>""" Returns True if a_key was used to generate a_sig from a_hash_for_sig; False otherwise. """<line_sep>r,s=sigdecode_der(a_sig)<line_sep><return>secp256k1_generator.verify(a_key.public_pair() a_hash_for_sig (r s))<block_end><def_stmt>sigmake a_key a_hash_for_sig a_sig_type<block_start>""" Signs a_hash_for_sig with a_key and returns a DER-encoded signature with a_sig_type appended. """<line_sep>order=secp256k1_generator.order()<line_sep>r,s=secp256k1_generator.sign(a_key.secret_exponent() a_hash_for_sig)<if_stmt>s+s<g>order<block_start>s=order-s<block_end><return>sigencode_der(r s)+int2byte(a_sig_type)<block_end><class_stmt>SighashSingleTest(unittest.TestCase)<block_start><def_stmt>test_sighash_single self<block_start><for_stmt>netcode ["BTC" "XTN"]<block_start>self._test_sighash_single(network_for_netcode(netcode))<block_end><block_end><def_stmt>_test_sighash_single self network<block_start>flags=network.validator.flags<line_sep>k0,k1,k2,k3,k4,k5=[network.keys.private(secret_exponent=se is_compressed=<true>)<for>se PRIV_KEYS]<line_sep># Fake a coinbase transaction coinbase_tx=network.tx.coinbase_tx(k0.sec() 500000000)<for_stmt>k [k1 k2]<block_start>coinbase_tx.txs_out.append(network.tx.TxOut(1000000000 network.script.compile('%s OP_CHECKSIG'%b2h(k.sec()))))<block_end>self.assertEqual('2acbe1006f7168bad538b477f7844e53de3a31ffddfcfc4c6625276dd714155a' b2h_rev(coinbase_tx.hash()))<line_sep># Make the test transaction txs_in=[network.tx.TxIn(coinbase_tx.hash() 0) network.tx.TxIn(coinbase_tx.hash() 1) network.tx.TxIn(coinbase_tx.hash() 2) ]<line_sep>txs_out=[network.tx.TxOut(900000000 network.contract.for_address(k3.address())) network.tx.TxOut(800000000 network.contract.for_address(k4.address())) network.tx.TxOut(800000000 network.contract.for_address(k5.address())) ]<line_sep>tx=network.tx(1 txs_in txs_out)<line_sep>tx.set_unspents(coinbase_tx.txs_out)<line_sep>self.assertEqual('791b98ef0a3ac87584fe273bc65abd89821569fd7c83538ac0625a8ca85ba587' b2h_rev(tx.hash()))<line_sep>sig_type=flags.SIGHASH_SINGLE<line_sep>solution_checker=network.tx.SolutionChecker(tx)<line_sep>sig_hash=solution_checker._signature_hash(coinbase_tx.txs_out[0].script 0 sig_type)<line_sep>self.assertEqual(0xcc52d785a3b4133504d1af9e60cd71ca422609cb41df3a08bbb466b2a98a885e sig_hash)<line_sep>sig=sigmake(k0 sig_hash sig_type)<line_sep>self.assertTrue(sigcheck(k0 sig_hash sig[:-1]))<line_sep>tx.txs_in[0].script=network.script.compile(b2h(sig))<line_sep>self.assertTrue(tx.is_solution_ok(0))<line_sep>sig_hash=solution_checker._signature_hash(coinbase_tx.txs_out[1].script 1 sig_type)<line_sep>self.assertEqual(0x93bb883d70fccfba9b8aa2028567aca8357937c65af7f6f5ccc6993fd7735fb7 
sig_hash)<line_sep>sig=sigmake(k1 sig_hash sig_type)<line_sep>self.assertTrue(sigcheck(k1 sig_hash sig[:-1]))<line_sep>tx.txs_in[1].script=network.script.compile(b2h(sig))<line_sep>self.assertTrue(tx.is_solution_ok(1))<line_sep>sig_hash=solution_checker._signature_hash(coinbase_tx.txs_out[2].script 2 sig_type)<line_sep>self.assertEqual(0x53ef7f67c3541bffcf4e0d06c003c6014e2aa1fb38ff33240b3e1c1f3f8e2a35 sig_hash)<line_sep>sig=sigmake(k2 sig_hash sig_type)<line_sep>self.assertTrue(sigcheck(k2 sig_hash sig[:-1]))<line_sep>tx.txs_in[2].script=network.script.compile(b2h(sig))<line_sep>self.assertTrue(tx.is_solution_ok(2))<line_sep>sig_type=flags.SIGHASH_SINGLE|flags.SIGHASH_ANYONECANPAY<line_sep>sig_hash=solution_checker._signature_hash(coinbase_tx.txs_out[0].script 0 sig_type)<line_sep>self.assertEqual(0x2003393d246a7f136692ce7ab819c6eadc54ffea38eb4377ac75d7d461144e75 sig_hash)<line_sep>sig=sigmake(k0 sig_hash sig_type)<line_sep>self.assertTrue(sigcheck(k0 sig_hash sig[:-1]))<line_sep>tx.txs_in[0].script=network.script.compile(b2h(sig))<line_sep>self.assertTrue(tx.is_solution_ok(0))<line_sep>sig_hash=solution_checker._signature_hash(coinbase_tx.txs_out[1].script 1 sig_type)<line_sep>self.assertEqual(0xe3f469ac88e9f35e8eff0bd8ad4ad3bf899c80eb7645947d60860de4a08a35df sig_hash)<line_sep>sig=sigmake(k1 sig_hash sig_type)<line_sep>self.assertTrue(sigcheck(k1 sig_hash sig[:-1]))<line_sep>tx.txs_in[1].script=network.script.compile(b2h(sig))<line_sep>self.assertTrue(tx.is_solution_ok(1))<line_sep>sig_hash=solution_checker._signature_hash(coinbase_tx.txs_out[2].script 2 sig_type)<line_sep>self.assertEqual(0xbacd7c3ab79cad71807312677c1788ad9565bf3c00ab9a153d206494fb8b7e6a sig_hash)<line_sep>sig=sigmake(k2 sig_hash sig_type)<line_sep>self.assertTrue(sigcheck(k2 sig_hash sig[:-1]))<line_sep>tx.txs_in[2].script=network.script.compile(b2h(sig))<line_sep>self.assertTrue(tx.is_solution_ok(2))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# -*- coding: utf-8 -*-
# @Time: 2020/3/30 22:22
# @Author: GraceKoo
# @File: 85_maximal-rectangle.py
# @Desc: https://leetcode-cn.com/problems/maximal-rectangle/
from typing import List


class Solution:
    def maximalRectangle(self, matrix: List[List[str]]) -> int:
        max_area = 0
        dp = [0] * len(matrix[0])
        for row in range(0, len(matrix)):
            for col in range(0, len(matrix[0])):
                dp[col] = dp[col] + 1 if matrix[row][col] == "1" else 0
            max_area = max(max_area, self.largestRectangleArea(dp))
        return max_area

    # from 84_largest-rectangle-in-histogram
    def largestRectangleArea(self, heights: List[int]) -> int:
        stack = []
        res = 0
        heights = [0] + heights + [0]
        for i in range(len(heights)):
            while stack and heights[i] < heights[stack[-1]]:
                tmp = stack.pop()
                res = max(res, (i - stack[-1] - 1) * heights[tmp])
            stack.append(i)
        return res


so = Solution()
print(
    so.maximalRectangle(
        [
            ["1", "0", "1", "0", "0"],
            ["1", "0", "1", "1", "1"],
            ["1", "1", "1", "1", "1"],
            ["1", "0", "0", "1", "0"],
        ]
    )
)
import FWCore.ParameterSet.Config as cms

pfConcretePFCandidateProducer = cms.EDProducer(
    "PFConcretePFCandidateProducer",
    src=cms.InputTag('particleFlow')
)
<import_stmt>os<import_stmt>codecs<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>Has_Header=<true><line_sep>CSV='data/valence_arousal_exp.csv'<def_stmt>calculate_mean_variance data<block_start>theta=np.arctan(data[: 0]/data[: 1])<line_sep>m_x=np.mean(np.cos(theta))<line_sep>m_y=np.mean(np.sin(theta))<line_sep>mu=np.arctan(m_y/m_x)<line_sep>R=np.sqrt(m_x<power>2+m_y<power>2)<line_sep>sigma=np.sqrt(-2<times>np.log(R))<line_sep><return>mu sigma<block_end><def_stmt>filled_arc center radius theta1 theta2 color# Ref: https://stackoverflow.com/a/30642704 <block_start>phi=np.linspace(theta1 theta2 100)<line_sep>x=center[0]+radius<times>np.cos(phi)<line_sep>y=center[1]+radius<times>np.sin(phi)<line_sep># Equation of the chord m=(y[-1]-y[0])/(x[-1]-x[0])<line_sep>c=y[0]-m<times>x[0]<line_sep>y2=m<times>x+c<line_sep># Plot the filled arc plt.fill_between(x y y2 facecolor=color edgecolor='none' alpha=0.5)<block_end><def_stmt>filled_sector center radius theta1 theta2 color<block_start>filled_arc(center radius theta1 theta2 color)<line_sep># Fill triangle x_0,y_0=center<line_sep>x_1=center[0]+radius<times>np.cos(theta1)<line_sep>y_1=center[1]+radius<times>np.sin(theta1)<line_sep>x_2=center[0]+radius<times>np.cos(theta2)<line_sep>y_2=center[1]+radius<times>np.sin(theta2)<line_sep>plt.fill([x_0 x_1 x_2 x_0] [y_0 y_1 y_2 y_0] facecolor=color edgecolor='none' alpha=0.5)<block_end><def_stmt>plot name_lst group_lst mu_lst sigma_lst<block_start>cx,cy=5.0 5.0<line_sep>colors=['red' 'blue']<line_sep>markers=['x' '+']<line_sep>linestyles=['r-' 'b--']<line_sep>bg_img=plt.imread('data/28-affect-words.png')<line_sep># plt.imshow(bg_img, extent=[-0.5, 10.5, -0.5, 10.5]) plt.imshow(bg_img extent=[-0.2 10.2 0.1 9.9])<line_sep>theta=np.linspace(0 2<times>np.pi 100)<line_sep>radius=4.8<line_sep>x=radius<times>np.cos(theta)+cx<line_sep>y=radius<times>np.sin(theta)+cy<line_sep>plt.plot(x y color='black')<for_stmt>name,group,mu,sigma,color,marker,linestyle zip(name_lst group_lst mu_lst sigma_lst colors markers linestyles)<block_start>plt.plot(group[: 0] group[: 1] marker label=name color=color)<line_sep>ex=cx+radius<times>np.cos(mu)<line_sep>ey=cy+radius<times>np.sin(mu)<line_sep>plt.plot([cx ex] [cy ey] linestyle)<for_stmt>d_mu [-sigma sigma]<block_start>ex=cx+radius<times>np.cos(mu+d_mu)<line_sep>ey=cy+radius<times>np.sin(mu+d_mu)<line_sep>plt.plot([cx ex] [cy ey] linestyle='-' color='black')<block_end>filled_sector([cx cy] radius mu-sigma mu+sigma color)<block_end>plt.axis('equal')<line_sep>plt.xlabel('Valence')<line_sep>plt.ylabel('Arousal')<line_sep>plt.xlim(0 10)<line_sep>plt.ylim(0 10)<line_sep>plt.legend(loc='lower left' bbox_to_anchor=(0.65 0.0))<line_sep>plt.savefig('valence_arousal_plain.pdf' bbox_inches='tight')<line_sep>plt.show()<block_end>group_1,group_2=[] []<with_stmt>codecs.open(CSV 'r' 'utf-8')<as>f<block_start><for_stmt>line f.readlines()<block_start><if_stmt>Has_Header<block_start>Has_Header=<false><line_sep><continue><block_end>eps=np.random.random(2)<times>0.1<line_sep>data=line.strip().split(',')<if_stmt>int(data[0])<eq>1<block_start>group_1.append((int(data[2])+eps[0] int(data[3])+eps[1]))<block_end><elif_stmt>int(data[0])<eq>2<block_start>group_2.append((int(data[2])+eps[0] int(data[3])+eps[1]))<block_end><block_end><block_end>group_1=np.array(group_1)<line_sep>group_2=np.array(group_2)<line_sep>mu_1,sigma_1=calculate_mean_variance(group_1)<line_sep>mu_2,sigma_2=calculate_mean_variance(group_2)<line_sep>plot(['Reactive HRI' 'TFVT-HRI'] [group_2 group_1] [mu_2 mu_1] [sigma_2 
sigma_1])<line_sep>
import numpy
import matplotlib.pyplot as plt

FILE_NAME = 'rewards_nonshare.npz'


def smooth(reward_vec, filter_size):
    l = len(reward_vec) - filter_size + 1
    print(len(reward_vec))
    smooth_reward_vec = numpy.zeros(l)
    for i in range(l):
        reward = numpy.mean(reward_vec[i:i + filter_size])
        smooth_reward_vec[i] = reward
    return smooth_reward_vec


if __name__ == '__main__':
    f = numpy.load(FILE_NAME)
    reward = f['arr_0']
    qmax = f['arr_1']
    reward_smooth = smooth(reward, 300)
    l = len(reward_smooth)

    fig = plt.figure(figsize=(8, 6))
    line1, = plt.plot(reward_smooth, color='r', linestyle='-', linewidth=3)
    line2, = plt.plot(numpy.arange(l), -150 * numpy.ones(l), color='k', linestyle=':', linewidth=1)
    plt.xlabel('Episode', fontsize=26)
    plt.ylabel('Reward', fontsize=24)
    plt.xticks(fontsize=22)
    plt.yticks([-800, -700, -600, -500, -400, -300, -200, -150, -100, 0], fontsize=22)
    plt.axis([-20, l + 10, -600, -100])
    plt.tight_layout()
    fig.savefig('reward.pdf', format='pdf', dpi=1200)
    plt.show()
# To find the factorial of a number
num = int(input('N='))
factorial = 1
if num < 0:
    print('Number is not accepted')
elif num == 0:
    print(1)
else:
    for i in range(1, num + 1):
        factorial = factorial * i
    print(factorial)
<import_stmt>argparse<import_stmt>json<import_stmt>os<import_stmt>sys<import_from_stmt>tqdm tqdm<import_from_stmt>PIL Image ImageDraw<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.optim<as>optim<import_from_stmt>torch.utils.data DataLoader<import_stmt>torch.nn.functional<as>F<line_sep>torch.backends.cudnn.benchmark=<true><import_from_stmt>config GlobalConfig<import_from_stmt>architectures AttentionField<import_from_stmt>data CARLA_points<import_from_stmt>utils iou flow_to_color<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--id' type=str help='Unique experiment identifier.')<line_sep>parser.add_argument('--device' type=str default='cuda' help='Device to use')<line_sep>parser.add_argument('--vis' action='store_true' help='Visualize each model while evaluating')<line_sep>parser.add_argument('--vis_freq' type=int default=100 help='Visualization frequency')<line_sep>parser.add_argument('--batch_size' type=int default=16 help='Batch size')<line_sep>parser.add_argument('--out_res' type=int default=256 help='output image resolution')<line_sep>args=parser.parse_args()<line_sep># config conf=GlobalConfig()<line_sep># data val_set=CARLA_points(conf.val_data conf)<line_sep>dataloader_val=DataLoader(val_set batch_size=args.batch_size shuffle=<false> num_workers=8 pin_memory=<true>)<line_sep># model model=AttentionField(conf args.device)<line_sep># load saved weights model.encoder.load_state_dict(torch.load('log/{}/best_encoder.pth'.format(args.id)))<line_sep>model.decoder.load_state_dict(torch.load('log/{}/best_decoder.pth'.format(args.id)))<line_sep># image storage directories <if_stmt>args.vis<block_start><if_stmt><not>os.path.isdir(f"log/{args.id}/img")<block_start>os.makedirs(f"log/{args.id}/img")<block_end><if_stmt><not>os.path.isdir(f"log/{args.id}/sem")<block_start>os.makedirs(f"log/{args.id}/sem")<block_end><if_stmt><not>os.path.isdir(f"log/{args.id}/out")<block_start>os.makedirs(f"log/{args.id}/out")<block_end><if_stmt><not>os.path.isdir(f"log/{args.id}/flow")<block_start>os.makedirs(f"log/{args.id}/flow")<block_end><block_end>intersection_epoch=[0.]<times>conf.num_class<line_sep>union_epoch=[0.]<times>conf.num_class<line_sep>off_epoch=0.<line_sep>wp_epoch=0.<line_sep>match=0<line_sep>miss=0<line_sep>fp=0<line_sep>converter=np.uint8(conf.converter)# used for semantics <with_stmt>torch.no_grad()<block_start>model.eval()<for_stmt>batch_num,data enumerate(tqdm(dataloader_val) 0)# create batch and move to GPU <block_start>fronts_in=data['fronts']<line_sep>lefts_in=data['lefts']<line_sep>rights_in=data['rights']<line_sep>images=[]<for_stmt>i range(conf.seq_len)<block_start>images.append(fronts_in[i].to(args.device dtype=torch.float32))<if_stmt>conf.num_camera<eq>3<block_start>images.append(lefts_in[i].to(args.device dtype=torch.float32))<line_sep>images.append(rights_in[i].to(args.device dtype=torch.float32))<block_end><block_end># semantic points for network input query_points=data['semantic_points'].to(args.device dtype=torch.float32)<line_sep>gt_occ=data['semantic_labels'].to(args.device)<line_sep># target points for network input target_point=torch.stack(data['target_point']).to(args.device dtype=torch.float32)<line_sep># waypoints for visualization waypoints=[]<line_sep># create driving offset label by looping over timesteps # label = -query + waypoint so that at test time query + label = waypoint gt_offsets=-query_points.clone()<for_stmt>i range(conf.tot_len)<block_start>waypoint=torch.stack(data['waypoints'][i]).to(args.device 
dtype=torch.float32)<line_sep>waypoints.append(waypoint)<line_sep># create a delta tensor to add to the query points delta=waypoint.transpose(0 1).unsqueeze(1)# (B, 1, 2) # divide to account for higher resolution delta=(-gt_offsets[: : 2]<eq>i).unsqueeze(-1)<times>delta/conf.resolution# (B, P, 2) gt_offsets[: : :2]<augadd>delta<block_end>gt_offsets=gt_offsets[: : :2].transpose(1 2)# (B, 2, P) gt_offsets[: 1 :]<augadd>conf.offset# reconstruct only front of vehicle velocity=data['velocity'].to(args.device dtype=torch.float32)<line_sep># inference encoding=model.encoder(images velocity)<line_sep>pred_occ,pred_off,_=model.decode(query_points target_point encoding)<line_sep># waypoint prediction pred_waypoint_mean,red_light_occ=model.plan(target_point encoding conf.plan_scale conf.plan_points conf.plan_iters)<line_sep>wp_pred=pred_waypoint_mean[: conf.seq_len:]<line_sep>wp_gt=torch.stack(waypoints[conf.seq_len:] dim=1).transpose(0 2)<line_sep># s,t,b = model.control_pid(wp_pred, velocity, target_point, red_light_occ) # grid used for visualizing occupancy and flow linspace_x=torch.linspace(-conf.axis/2 conf.axis/2 steps=args.out_res)<line_sep>linspace_y=torch.linspace(-conf.axis/2 conf.axis/2 steps=args.out_res)<line_sep>linspace_t=torch.linspace(0 conf.tot_len-1 steps=conf.tot_len)<line_sep># gt semantics semantics=(data['topdowns'][0][0][0].data.cpu().numpy()).astype(np.uint8)<line_sep>semantics=converter[semantics][:conf.axis conf.offset:conf.axis+conf.offset]<line_sep>red_light_gt=(semantics<eq>3).sum()<if_stmt>red_light_gt<and>red_light_occ<block_start>match<augadd>1<block_end><if_stmt>red_light_gt<and>red_light_occ<eq>0<block_start>miss<augadd>1<block_end><if_stmt>red_light_gt<eq>0<and>red_light_occ<block_start>fp<augadd>1<block_end><if_stmt>args.vis<and>(batch_num%args.vis_freq<eq>0)<block_start><for_stmt>i range(conf.seq_len)# save one sample per batch <block_start><if_stmt><not>os.path.isdir(f"log/{args.id}/img/{str(i)}")<block_start>os.makedirs(f"log/{args.id}/img/{str(i)}")<block_end>front_numpy=(fronts_in[i][0].data.cpu().numpy().transpose((1 2 0))).astype(np.uint8)<line_sep>left_numpy=(lefts_in[i][0].data.cpu().numpy().transpose((1 2 0))).astype(np.uint8)<line_sep>right_numpy=(rights_in[i][0].data.cpu().numpy().transpose((1 2 0))).astype(np.uint8)<line_sep>image_numpy=np.concatenate([left_numpy front_numpy right_numpy] axis=1)<line_sep>image_display=Image.fromarray(image_numpy)<line_sep>image_display.save(f"log/{args.id}/img/{str(i)}/{str(batch_num).zfill(4)}.png")<block_end># target point in pixel coordinates target_point_pixel=target_point.squeeze().cpu().numpy()<line_sep>target_point_pixel[1]<augadd>conf.offset<times>conf.resolution<line_sep># hack for when actual target is outside image (axis/2 * resolution) target_point_pixel=np.clip(target_point_pixel -(conf.axis/2<times>conf.resolution-1) (conf.axis/2<times>conf.resolution-1))<line_sep>target_point_pixel=(target_point_pixel<times>args.out_res<floordiv>50+args.out_res<floordiv>2).astype(np.uint8)<for_stmt>i range(conf.tot_len)<block_start><if_stmt><not>os.path.isdir(f"log/{args.id}/sem/{str(i)}")<block_start>os.makedirs(f"log/{args.id}/sem/{str(i)}")<block_end><if_stmt><not>os.path.isdir(f"log/{args.id}/out/{str(i)}")<block_start>os.makedirs(f"log/{args.id}/out/{str(i)}")<block_end><if_stmt><not>os.path.isdir(f"log/{args.id}/flow/{str(i)}")<block_start>os.makedirs(f"log/{args.id}/flow/{str(i)}")<block_end># gt semantics 
semantics=(data['topdowns'][i][0][0].data.cpu().numpy()).astype(np.uint8)<line_sep>semantics=converter[semantics][:conf.axis conf.offset:conf.axis+conf.offset]<line_sep>semantic_display=np.zeros((semantics.shape[0] semantics.shape[1] 3))<for_stmt>key,value conf.classes.items()<block_start>semantic_display[np.where(semantics<eq>key)]=value<block_end>semantic_display=semantic_display.astype(np.uint8)<line_sep>semantic_display=Image.fromarray(semantic_display)<line_sep>semantic_display.save(f"log/{args.id}/sem/{str(i)}/{str(batch_num).zfill(4)}.png")<line_sep># gt waypoint in pixel coordinates img_waypoint=waypoints[i].data.cpu().numpy()<line_sep>img_waypoint[1]<augadd>conf.offset<times>conf.resolution<line_sep>img_waypoint=np.clip(img_waypoint -(conf.axis/2<times>conf.resolution-1) (conf.axis/2<times>conf.resolution-1))<line_sep>img_waypoint=(img_waypoint<times>args.out_res<floordiv>(conf.axis<times>conf.resolution)+args.out_res<floordiv>2).astype(np.uint8)<line_sep># predicted waypoint in pixel coordinates pred_waypoint=pred_waypoint_mean[0 i].data.cpu().numpy()<line_sep>pred_waypoint[1]<augadd>conf.offset<times>conf.resolution<line_sep>pred_waypoint=np.clip(pred_waypoint -(conf.axis/2<times>conf.resolution-1) (conf.axis/2<times>conf.resolution-1))<line_sep>pred_waypoint=(pred_waypoint<times>args.out_res<floordiv>(conf.axis<times>conf.resolution)+args.out_res<floordiv>2).astype(np.uint8)<line_sep># visualization of occupancy and flow img_rows=[]<line_sep>flow_rows=[]<for_stmt>row range(args.out_res)<block_start>grid_x,grid_y,grid_t=torch.meshgrid(linspace_x linspace_y[row] linspace_t[i].unsqueeze(0))<line_sep>grid_points=torch.stack((grid_x grid_y grid_t) dim=3).unsqueeze(0).repeat(args.batch_size 1 1 1 1)<line_sep>grid_points=grid_points.reshape(args.batch_size -1 3).to(args.device dtype=torch.float32)<line_sep>pred_img_pts,pred_img_offsets,_=model.decode(grid_points target_point encoding)<line_sep>pred_img_pts=torch.argmax(pred_img_pts[-1] dim=1)<line_sep>pred_img=pred_img_pts.reshape(args.batch_size args.out_res)<line_sep>pred_flow=pred_img_offsets[-1].reshape(args.batch_size 2 args.out_res)<line_sep>img_rows.append(pred_img)<line_sep>flow_rows.append(pred_flow)<block_end>pred_img=torch.stack(img_rows dim=-1)<line_sep>pred_flow=torch.stack(flow_rows dim=-1)<line_sep>semantics=pred_img[0 : :].transpose(1 0).data.cpu().numpy().astype(np.uint8)<line_sep>semantic_display=np.zeros((semantics.shape[0] semantics.shape[1] 3))<for_stmt>key,value conf.classes.items()<block_start>semantic_display[np.where(semantics<eq>key)]=value<block_end>semantic_display=semantic_display.astype(np.uint8)<line_sep>semantic_display=Image.fromarray(semantic_display)<line_sep>semantic_display.save(f"log/{args.id}/out/{str(i)}/{str(batch_num).zfill(4)}.png")<line_sep># flow image of predicted offsets flow_uv=pred_flow[0 : : :].transpose(2 0).data.cpu().numpy()<times>args.out_res/conf.axis<line_sep>flow_rgb=flow_to_color(flow_uv)<line_sep>flow_display=Image.fromarray(flow_rgb)<line_sep>draw=ImageDraw.Draw(flow_display)<line_sep>draw.ellipse([tuple(target_point_pixel-2) tuple(target_point_pixel+2)] fill='Blue' outline='Blue')<line_sep>draw.ellipse([tuple(img_waypoint-2) tuple(img_waypoint+2)] fill='Green' outline='Green')<line_sep>draw.ellipse([tuple(pred_waypoint-2) tuple(pred_waypoint+2)] fill='Red' outline='Red')<line_sep>flow_display.save(f"log/{args.id}/flow/{str(i)}/{str(batch_num).zfill(4)}.png")<block_end><block_end>pred_occ_class=torch.argmax(pred_occ[-1] dim=1)<line_sep># losses <for_stmt>k 
range(conf.num_class)<block_start>gt_occ_k=gt_occ<eq>k<line_sep>pred_occ_k=pred_occ_class<eq>k<for_stmt>pt1,pt2 zip(gt_occ_k pred_occ_k)<block_start>intersection,union=iou(pt1 pt2)<line_sep>intersection_epoch[k]<augadd>float(intersection.item())<line_sep>union_epoch[k]<augadd>float(union.item())<block_end><block_end>off_epoch<augadd>float(F.l1_loss(pred_off[-1] gt_offsets).mean())<line_sep>wp_epoch<augadd>float(F.l1_loss(wp_gt wp_pred).mean())<block_end><block_end>out_loss=np.array(intersection_epoch)/np.array(union_epoch)<line_sep>off_loss=off_epoch/float(batch_num)<line_sep>wp_loss=wp_epoch/float(batch_num)<line_sep>print(f'Off: {off_loss:3.3f}')<line_sep>print(f'Wp: {wp_loss:3.3f}')<line_sep>print(f'Match: {match}')<line_sep>print(f'Miss: {miss}')<line_sep>print(f'FP: {fp}')<for_stmt>k range(conf.num_class)<block_start>print(f'Class {k:02d}: IoU: {out_loss[k]:3.3f}')<block_end>
from pybamm import exp, constants


def electrolyte_diffusivity_Ramadass2004(c_e, T):
    """
    Diffusivity of LiPF6 in EC:DMC as a function of ion concentration.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
       "Development of First Principles Capacity Fade Model for Li-Ion Cells."
       (2004)

    Parameters
    ----------
    c_e: :class:`pybamm.Symbol`
        Dimensional electrolyte concentration
    T: :class:`pybamm.Symbol`
        Dimensional temperature

    Returns
    -------
    :class:`pybamm.Symbol`
        Electrolyte diffusivity
    """
    D_c_e = 7.5e-10
    E_D_e = 37040
    arrhenius = exp(E_D_e / constants.R * (1 / 298.15 - 1 / T))

    return D_c_e * arrhenius
import sublime
import sublime_plugin

__all__ = ['BuildBabelPackageCommand']

BABEL_CONFIGURATION = {
    'name': 'JavaScript (Babel)',
    'scope': 'source.js',
    'file_extensions': ['js', 'jsx', 'es6', 'babel'],
    'flow_types': True,
    'jsx': True,
    'string_object_keys': True,
    'custom_templates': {
        'styled_components': True,
    },
}


class BuildBabelPackageCommand(sublime_plugin.ApplicationCommand):
    def run(self):
        from sublime_lib import ResourcePath
        from pathlib import Path
        from shutil import rmtree

        package_path = Path(__file__).parent.parent
        syntax_path = ResourcePath.from_file_path(package_path) / 'JavaScript (Babel).sublime-syntax'
        test_directory = package_path / 'tests'

        rmtree(str(test_directory), ignore_errors=True)
        test_directory.mkdir()

        print("Building syntax…")
        sublime.active_window().run_command('build_js_custom_syntax', {
            'name': 'Babel',
            'configuration': BABEL_CONFIGURATION,
            'destination_path': str(syntax_path.file_path()),
        })

        ResourcePath('Packages/JSCustom/styled_components/Styled Components.sublime-syntax').copy(
            (ResourcePath.from_file_path(package_path) / 'Styled Components.sublime-syntax').file_path()
        )

        print("Building tests…")
        sublime.run_command('build_js_custom_tests', {
            'syntax_path': str(syntax_path),
            'suites': ['js', 'flow', 'jsx', 'string_object_keys'],
            'destination_directory': str(test_directory),
        })

        print('Done.')
<import_stmt>json<import_from_stmt>typing Iterable<import_stmt>constants<as>consts<import_stmt>utils<def_stmt>shadowrocket domains:Iterable[str]<block_start>config=("#Shadowrocket\n"<concat>"[General]\n"<concat>"bypass-system = true\n"<concat>"skip-proxy = 192.168.0.0/16, 10.0.0.0/8, 172.16.0.0/12, localhost, *.local, captive.apple.com\n"<concat>"tun-excluded-routes = 10.0.0.0/8, 172.16.31.10/10, 127.0.0.0/8, 169.254.0.0/16, 172.16.0.0/12, 192.0.0.0/24, 192.0.2.0/24, 172.16.31.10/24, 192.168.0.0/16, 198.18.0.0/15, 198.51.100.0/24, 203.0.113.0/24, 172.16.17.32/4, 255.255.255.255/32\n"<concat>"dns-server = system\n"<concat>"ipv6 = true\n"<concat>"[Rule]\n")<line_sep>config<augadd>"".join(f"DOMAIN-SUFFIX,{domain},DIRECT\n"<for>domain domains)<line_sep>config<augadd>("USER-AGENT,Line*,PROXY\n"<concat>"IP-CIDR,192.168.0.0/16,DIRECT\n"<concat>"IP-CIDR,10.0.0.0/8,DIRECT\n"<concat>"IP-CIDR,172.16.0.0/12,DIRECT\n"<concat>"IP-CIDR,127.0.0.0/8,DIRECT\n"<concat>"GEOIP,IR,DIRECT\n"<concat>"FINAL,PROXY\n"<concat>"[Host]\n"<concat>"localhost = 127.0.0.1\n")<line_sep>utils.save_to_file(consts.shadowrocket_path config)<block_end><def_stmt>qv2ray direct_domains:Iterable[str] proxied_domains:Iterable[str] ads_domains:Iterable[str]<block_start>schema={"description":"Iran hosted domains" "domainStrategy":"AsIs" "domains":{"direct":["regexp:^.+\\.ir$"]+list(direct_domains) "proxy":list(proxied_domains) "block":["geosite:category-ads-all"]+list(ads_domains) } "ips":{"direct":["geoip:ir"]} "name":"ir_hosted" }<line_sep>utils.save_to_file(consts.qv2ray_schema_path json.dumps(schema))<block_end><def_stmt>clash domains:Iterable[str]<block_start>config=("# Clash\n"<concat>"# Wiki: https://github.com/Dreamacro/clash/wiki/premium-core-features#rule-providers\n"<concat>"payload:\n")<line_sep>config<augadd>"".join(f" - DOMAIN-SUFFIX,{domain}\n"<for>domain domains)<line_sep>config<augadd>(# " - IP-CIDR,192.168.0.0/16\n" # " - IP-CIDR,10.0.0.0/8\n" # " - IP-CIDR,172.16.0.0/12\n" # " - IP-CIDR,127.0.0.0/8\n" " - GEOIP,IR\n")<line_sep>utils.save_to_file(consts.clash_path config)<block_end><def_stmt>switchy_omega others:Iterable[str]<block_start>config="127.0.0.1\n"<concat>"::1\n"<concat>"localhost\n"<concat>"*.ir\n"<line_sep>config<augadd>"".join(f"*{domain}\n"<for>domain others)<line_sep>utils.save_to_file(consts.switchy_omega_path config)<block_end>
from .odin_init import odin_init
from .compute_days_elapsed import compute_days_elapsed
from .fund_actions import period_dict
""" Pattern matching with mapping—requires Python ≥ 3.10 # tag::DICT_MATCH_TEST[] >>> b1 = dict(api=1, author='<NAME>', ... type='book', title='Gödel, Escher, Bach') >>> get_creators(b1) ['<NAME>'] >>> from collections import OrderedDict >>> b2 = OrderedDict(api=2, type='book', ... title='Python in a Nutshell', ... authors='<NAME>'.split()) >>> get_creators(b2) ['Martelli', 'Ravenscroft', 'Holden'] >>> get_creators({'type': 'book', 'pages': 770}) Traceback (most recent call last): ... ValueError: Invalid 'book' record: {'type': 'book', 'pages': 770} >>> get_creators('Spam, spam, spam') Traceback (most recent call last): ... ValueError: Invalid record: 'Spam, spam, spam' # end::DICT_MATCH_TEST[] """<line_sep># tag::DICT_MATCH[] <def_stmt>get_creators record:dict<arrow>list<block_start><match_stmt>record<block_start><case_stmt>{'type':'book' 'api':2 'authors':[*names]}# <1> <block_start><return>names<block_end><case_stmt>{'type':'book' 'api':1 'author':name}# <2> <block_start><return>[name]<block_end><case_stmt>{'type':'book'}# <3> <block_start><raise>ValueError(f"Invalid 'book' record: {record!r}")<block_end><case_stmt>{'type':'movie' 'director':name}# <4> <block_start><return>[name]<block_end><case_stmt>_# <5> <block_start><raise>ValueError(f'Invalid record: {record!r}')<block_end><block_end><block_end># end::DICT_MATCH[]
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>absl logging<import_from_stmt>tf_transformers.core keras_utils<def_stmt>convert_t5_pt model config model_name<block_start>"""PT converter Args: model_hf: HuggingFace Model (TF) model: tf_transformers model/layer config: dict Returns: a function """<line_sep># When dropout, use_auto_regressive is enabled assertion won't work SKIP_ASSERT=<false><try_stmt># LegacyLayer <block_start>local_config=model._config_dict['decoder']<block_end><except_stmt>Exception<as>e# LegacyModel <block_start>local_config=model.model_config['decoder']<block_end><if_stmt>local_config['use_dropout']<block_start>logging.warn("Note: As `use_dropout` is True we will skip Assertions, please verify the model.")<line_sep>SKIP_ASSERT=<true><block_end><if_stmt>local_config['use_auto_regressive']<block_start><raise>ValueError("Please save model checkpoint without `use_auto_regressive` and then reload it with `use_auto_regressive`.")<line_sep>SKIP_ASSERT=<true><block_end><import_stmt>torch<import_stmt>transformers<line_sep>transformers.logging.set_verbosity_error()<line_sep>from_model_vars=["encoder.block.{}.layer.0.SelfAttention.q.weight" "encoder.block.{}.layer.0.SelfAttention.k.weight" "encoder.block.{}.layer.0.SelfAttention.v.weight" "encoder.block.{}.layer.0.SelfAttention.o.weight" "encoder.block.{}.layer.0.layer_norm.weight" "encoder.block.{}.layer.1.DenseReluDense.wi.weight" "encoder.block.{}.layer.1.DenseReluDense.wo.weight" "encoder.block.{}.layer.1.layer_norm.weight" ]<line_sep>to_model_vars=["tf_transformers/t5_encoder/transformer/layer_{}/self_attention/query/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention/key/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention/value/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention_output/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/pre_attention_norm/weight:0" "tf_transformers/t5_encoder/transformer/layer_{}/intermediate/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/output/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention_layer_norm/weight:0" ]<line_sep># Simple Assertion <assert_stmt>len(from_model_vars)<eq>len(to_model_vars)<line_sep>mapping_dict={}<for_stmt>index range(len(from_model_vars))<block_start><for_stmt>i range(config["num_hidden_layers"])<block_start>mapping_dict[from_model_vars[index].format(i)]=to_model_vars[index].format(i)<block_end><block_end># Only Layer 0 mapping_dict["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"]="tf_transformers/t5_encoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"<line_sep># Word Embedding mapping_dict["shared.weight"]="tf_transformers/t5_encoder/word_embeddings/embeddings:0"<line_sep># Final Layer Norm weight mapping_dict["encoder.final_layer_norm.weight"]="tf_transformers/t5_encoder/last_layer_norm/weight:0"<line_sep># T5Model <import_from_stmt>transformers T5Model<as>PTT5Model<line_sep>model_hf=PTT5Model.from_pretrained(model_name)<line_sep># HF model variable name to variable values, for fast retrieval from_to_variable_dict={name:var.detach().numpy()<for>name,var model_hf.named_parameters()}<line_sep>tf_transformers_model_index_dict={}<for_stmt>index,var enumerate(model.variables)<block_start>tf_transformers_model_index_dict[var.name]=index<block_end># legacy_ai <-- hub assigned_map=[]<line_sep># assigned_map_values = [] <for_stmt>original_var,legacy_var 
mapping_dict.items()<block_start>index=tf_transformers_model_index_dict[legacy_var]<line_sep># If not in mapping_dict, then mostly it is from attention layer <if_stmt>"query/kernel:0"<in>legacy_var<or>"key/kernel:0"<in>legacy_var<or>"value/kernel:0"<in>legacy_var# hub (2D) to tf_transformers (3D) <block_start>model.variables[index].assign(np.reshape(np.transpose(from_to_variable_dict.get(original_var)) (config["embedding_size"] config["num_attention_heads"] config["attention_head_size"] ) ))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><elif_stmt>"kernel:0"<in>legacy_var<block_start><if_stmt>list(model.variables[index].shape)<eq>list(from_to_variable_dict.get(original_var).shape)<block_start>model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><else_stmt><block_start>model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><block_end>model.variables[index].assign(from_to_variable_dict.get(original_var))<line_sep>assigned_map.append((original_var legacy_var))<block_end># Decoder Side # From vars (Transformer variables) from_model_vars=["decoder.block.{}.layer.0.SelfAttention.q.weight" "decoder.block.{}.layer.0.SelfAttention.k.weight" "decoder.block.{}.layer.0.SelfAttention.v.weight" "decoder.block.{}.layer.0.SelfAttention.o.weight" "decoder.block.{}.layer.0.layer_norm.weight" "decoder.block.{}.layer.1.EncDecAttention.q.weight" "decoder.block.{}.layer.1.EncDecAttention.k.weight" "decoder.block.{}.layer.1.EncDecAttention.v.weight" "decoder.block.{}.layer.1.EncDecAttention.o.weight" "decoder.block.{}.layer.1.layer_norm.weight" "decoder.block.{}.layer.2.DenseReluDense.wi.weight" "decoder.block.{}.layer.2.DenseReluDense.wo.weight" "decoder.block.{}.layer.2.layer_norm.weight" ]<line_sep>to_model_vars=["tf_transformers/t5_decoder/transformer/layer_{}/self_attention/query/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention/key/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention/value/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention_output/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/pre_attention_norm/weight:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/query/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/key/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/value/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention_output/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/pre_cross_attention_norm/weight:0" "tf_transformers/t5_decoder/transformer/layer_{}/intermediate/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/output/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention_layer_norm/weight:0" ]<line_sep># Simple Assertion <assert_stmt>len(from_model_vars)<eq>len(to_model_vars)<line_sep>mapping_dict={}<for_stmt>index range(len(from_model_vars))<block_start><for_stmt>i range(config["num_hidden_layers"])<block_start>mapping_dict[from_model_vars[index].format(i)]=to_model_vars[index].format(i)<block_end><block_end># Only Layer 0 
mapping_dict["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"]="tf_transformers/t5_decoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"<line_sep># Final Layer Norm weight mapping_dict["decoder.final_layer_norm.weight"]="tf_transformers/t5_decoder/last_layer_norm/weight:0"<line_sep># HF model variable name to variable values, for fast retrieval from_to_variable_dict={name:var.detach().numpy()<for>name,var model_hf.named_parameters()}<line_sep>tf_transformers_model_index_dict={}<for_stmt>index,var enumerate(model.variables)<block_start>tf_transformers_model_index_dict[var.name]=index<if_stmt>(var.name<eq>"tf_transformers/t5_decoder/transformer/layer_0/cross_attention/relative_attention_bias/embeddings:0")<block_start>model.variables[index].assign(tf.zeros_like(model.variables[index]))<line_sep><continue><block_end><block_end># legacy_ai <-- hub assigned_map=[]<line_sep># assigned_map_values = [] <for_stmt>original_var,legacy_var mapping_dict.items()<block_start>index=tf_transformers_model_index_dict[legacy_var]<line_sep># If not in mapping_dict, then mostly it is from attention layer <if_stmt>"query/kernel:0"<in>legacy_var<or>"key/kernel:0"<in>legacy_var<or>"value/kernel:0"<in>legacy_var# hub (2D) to tf_transformers (3D) <block_start>model.variables[index].assign(np.reshape(np.transpose(from_to_variable_dict.get(original_var)) (config["embedding_size"] config["num_attention_heads"] config["attention_head_size"] ) ))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><elif_stmt>"kernel:0"<in>legacy_var<block_start><if_stmt>list(model.variables[index].shape)<eq>list(from_to_variable_dict.get(original_var).shape)<block_start>model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><else_stmt><block_start>model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><block_end>model.variables[index].assign(from_to_variable_dict.get(original_var))<line_sep>assigned_map.append((original_var legacy_var))<block_end><if_stmt>SKIP_ASSERT<is><false><block_start><import_from_stmt>transformers T5Tokenizer<line_sep>tokenizer=T5Tokenizer.from_pretrained(model_name)<line_sep>text="This is a long sentence to check how close models are."<line_sep>inputs=tokenizer(text return_tensors="pt")<line_sep>outputs_hf=model_hf(inputs["input_ids"] decoder_input_ids=inputs["input_ids"])<line_sep>outputs_hf=torch.sum(outputs_hf["last_hidden_state"] dim=-1).detach().numpy()<line_sep>inputs=tokenizer(text return_tensors="tf")<line_sep>inputs_tf={}<line_sep>inputs_tf["encoder_input_ids"]=inputs["input_ids"]<line_sep>inputs_tf["encoder_input_mask"]=inputs["attention_mask"]<line_sep>inputs_tf["decoder_input_ids"]=inputs["input_ids"]<line_sep>outputs_tf=model(inputs_tf)<line_sep>outputs_tf=tf.reduce_sum(outputs_tf["token_embeddings"] axis=-1).numpy()<line_sep>tf.debugging.assert_near(outputs_hf outputs_tf rtol=1.0)<block_end><block_end><def_stmt>convert_t5_tf model config model_name<block_start>"""TF converter Args: model_hf: HuggingFace Model (TF) model: tf_transformers model/layer config: dict Returns: a function """<line_sep># When dropout, use_auto_regressive is enabled assertion won't work SKIP_ASSERT=<false><try_stmt># LegacyLayer 
<block_start>local_config=model._config_dict['decoder']<block_end><except_stmt>Exception<as>e# LegacyModel <block_start>local_config=model.model_config['decoder']<block_end><if_stmt>local_config['use_dropout']<block_start>logging.warn("Note: As `use_dropout` is True we will skip Assertions, please verify the model.")<line_sep>SKIP_ASSERT=<true><block_end><if_stmt>local_config['use_auto_regressive']<block_start><raise>ValueError("Please save model checkpoint without `use_auto_regressive` and then reload it with `use_auto_regressive`.")<line_sep>SKIP_ASSERT=<true><block_end><import_stmt>transformers<line_sep>transformers.logging.set_verbosity_error()<line_sep>from_model_vars=["tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/q/kernel:0" "tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/k/kernel:0" "tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/v/kernel:0" "tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/o/kernel:0" "tf_t5model/encoder/block_._{}/layer_._0/layer_norm/weight:0" "tf_t5model/encoder/block_._{}/layer_._1/DenseReluDense/wi/kernel:0" "tf_t5model/encoder/block_._{}/layer_._1/DenseReluDense/wo/kernel:0" "tf_t5model/encoder/block_._{}/layer_._1/layer_norm/weight:0" ]<line_sep>to_model_vars=["tf_transformers/t5_encoder/transformer/layer_{}/self_attention/query/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention/key/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention/value/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention_output/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/pre_attention_norm/weight:0" "tf_transformers/t5_encoder/transformer/layer_{}/intermediate/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/output/kernel:0" "tf_transformers/t5_encoder/transformer/layer_{}/self_attention_layer_norm/weight:0" ]<line_sep># Simple Assertion <assert_stmt>len(from_model_vars)<eq>len(to_model_vars)<line_sep>mapping_dict={}<for_stmt>index range(len(from_model_vars))<block_start><for_stmt>i range(config["num_hidden_layers"])<block_start>mapping_dict[from_model_vars[index].format(i)]=to_model_vars[index].format(i)<block_end><block_end># Only Layer 0 mapping_dict["tf_t5model/encoder/block_._0/layer_._0/SelfAttention/relative_attention_bias/embeddings:0"]="tf_transformers/t5_encoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"<line_sep># Word Embedding mapping_dict["shared/shared/weight:0"]="tf_transformers/t5_encoder/word_embeddings/embeddings:0"<line_sep># Final Layer Norm weight mapping_dict["tf_t5model/encoder/final_layer_norm/weight:0"]="tf_transformers/t5_encoder/last_layer_norm/weight:0"<line_sep># T5Model <import_from_stmt>transformers TFT5Model<line_sep>model_hf=TFT5Model.from_pretrained(model_name)<line_sep>from_to_variable_dict={var.name:var<for>var model_hf.variables}<line_sep>tf_transformers_model_index_dict={}<for_stmt>index,var enumerate(model.variables)<block_start>tf_transformers_model_index_dict[var.name]=index<block_end># legacy_ai <-- hub assigned_map=[]<line_sep># assigned_map_values = [] <for_stmt>original_var,legacy_var mapping_dict.items()<block_start>index=tf_transformers_model_index_dict[legacy_var]<line_sep># If not in mapping_dict, then mostly it is from attention layer <if_stmt>"query/kernel:0"<in>legacy_var<or>"key/kernel:0"<in>legacy_var<or>"value/kernel:0"<in>legacy_var# hub (2D) to tf_transformers (3D) <block_start>model.variables[index].assign(tf.reshape(from_to_variable_dict.get(original_var) (config["embedding_size"] 
config["num_attention_heads"] config["attention_head_size"] ) ))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end>model.variables[index].assign(from_to_variable_dict.get(original_var))<line_sep>assigned_map.append((original_var legacy_var))<block_end># Decoder Side # From vars (Transformer variables) from_model_vars=["tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/q/kernel:0" "tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/k/kernel:0" "tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/v/kernel:0" "tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/o/kernel:0" "tf_t5model/decoder/block_._{}/layer_._0/layer_norm/weight:0" "tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/q/kernel:0" "tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/k/kernel:0" "tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/v/kernel:0" "tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/o/kernel:0" "tf_t5model/decoder/block_._{}/layer_._1/layer_norm/weight:0" "tf_t5model/decoder/block_._{}/layer_._2/DenseReluDense/wi/kernel:0" "tf_t5model/decoder/block_._{}/layer_._2/DenseReluDense/wo/kernel:0" "tf_t5model/decoder/block_._{}/layer_._2/layer_norm/weight:0" ]<line_sep>to_model_vars=["tf_transformers/t5_decoder/transformer/layer_{}/self_attention/query/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention/key/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention/value/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention_output/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/pre_attention_norm/weight:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/query/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/key/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/value/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/cross_attention_output/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/pre_cross_attention_norm/weight:0" "tf_transformers/t5_decoder/transformer/layer_{}/intermediate/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/output/kernel:0" "tf_transformers/t5_decoder/transformer/layer_{}/self_attention_layer_norm/weight:0" ]<line_sep># Simple Assertion <assert_stmt>len(from_model_vars)<eq>len(to_model_vars)<line_sep>mapping_dict={}<for_stmt>index range(len(from_model_vars))<block_start><for_stmt>i range(config["num_hidden_layers"])<block_start>mapping_dict[from_model_vars[index].format(i)]=to_model_vars[index].format(i)<block_end><block_end># Only Layer 0 mapping_dict["tf_t5model/decoder/block_._0/layer_._0/SelfAttention/relative_attention_bias/embeddings:0"]="tf_transformers/t5_decoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"<line_sep>mapping_dict["tf_t5model/decoder/block_._0/layer_._1/EncDecAttention/relative_attention_bias/embeddings:0"]="tf_transformers/t5_decoder/transformer/layer_0/cross_attention/relative_attention_bias/embeddings:0"<line_sep># Final Layer Norm weight mapping_dict["tf_t5model/decoder/final_layer_norm/weight:0"]="tf_transformers/t5_decoder/last_layer_norm/weight:0"<line_sep>from_to_variable_dict={var.name:var<for>var model_hf.variables}<line_sep>tf_transformers_model_index_dict={}<for_stmt>index,var enumerate(model.variables)<block_start>tf_transformers_model_index_dict[var.name]=index<block_end># legacy_ai <-- hub assigned_map=[]<line_sep># assigned_map_values = [] <for_stmt>original_var,legacy_var 
mapping_dict.items()<block_start>index=tf_transformers_model_index_dict[legacy_var]<line_sep># If not in mapping_dict, then mostly it is from attention layer <if_stmt>"query/kernel:0"<in>legacy_var<or>"key/kernel:0"<in>legacy_var<or>"value/kernel:0"<in>legacy_var# hub (2D) to tf_transformers (3D) <block_start>model.variables[index].assign(tf.reshape(from_to_variable_dict.get(original_var) (config["embedding_size"] config["num_attention_heads"] config["attention_head_size"] ) ))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><if_stmt>(original_var<eq>"tf_t5model/decoder/block_._0/layer_._1/EncDecAttention/relative_attention_bias/embeddings:0")<block_start><if_stmt>original_var<not><in>from_to_variable_dict<block_start>model.variables[index].assign(tf.zeros_like(model.variables[index]))<line_sep>assigned_map.append((original_var legacy_var))<line_sep><continue><block_end><block_end>model.variables[index].assign(from_to_variable_dict.get(original_var))<line_sep>assigned_map.append((original_var legacy_var))<block_end><if_stmt>SKIP_ASSERT<is><false><block_start><import_from_stmt>transformers T5Tokenizer<line_sep>tokenizer=T5Tokenizer.from_pretrained(model_name)<line_sep>text="This is a long sentence to check how close models are."<line_sep>inputs=tokenizer(text return_tensors="tf")<line_sep>outputs_hf=model_hf(inputs["input_ids"] decoder_input_ids=inputs["input_ids"])<line_sep>outputs_hf=tf.reduce_sum(outputs_hf["last_hidden_state"] axis=-1).numpy()<line_sep>inputs_tf={}<line_sep>inputs_tf["encoder_input_ids"]=inputs["input_ids"]<line_sep>inputs_tf["encoder_input_mask"]=inputs["attention_mask"]<line_sep>inputs_tf["decoder_input_ids"]=inputs["input_ids"]<line_sep>outputs_tf=model(inputs_tf)<line_sep>outputs_tf=tf.reduce_sum(outputs_tf["token_embeddings"] axis=-1).numpy()<if_stmt>keras_utils.get_policy_name()<eq>'float32'<block_start>tf.debugging.assert_near(outputs_hf outputs_tf rtol=1.0)<block_end><block_end><block_end>
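The only non-trivial step in the mapping above is the attention-kernel reshape: HuggingFace stores each query/key/value projection as a single 2D matrix of shape (embedding_size, num_attention_heads * attention_head_size), while tf-transformers keeps the heads on a separate axis. A minimal sketch of that step, assuming t5-small dimensions (512 hidden size, 8 heads of size 64) in place of the values the script reads from `config`:

import tensorflow as tf

# Assumed t5-small dimensions; the conversion script reads these from `config`.
embedding_size, num_heads, head_size = 512, 8, 64

hf_kernel = tf.random.normal((embedding_size, num_heads * head_size))       # HuggingFace layout (2D)
tft_kernel = tf.reshape(hf_kernel, (embedding_size, num_heads, head_size))  # tf-transformers layout (3D)
print(tft_kernel.shape)  # (512, 8, 64)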
''' Unit Test Cases for JSON2HTML Description - python wrapper for converting JSON to HTML Table format (c) 2013 <NAME>. MIT License '''<line_sep>__author__='<NAME>'<line_sep>__version__='1.1.1'<line_sep>__license__='MIT'<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>CalibMuon.CSCCalibration.CSCFakeDBGains_cfi *<import_from_stmt>CalibMuon.CSCCalibration.CSCFakeDBPedestals_cfi *<import_from_stmt>CalibMuon.CSCCalibration.CSCFakeDBNoiseMatrix_cfi *<import_from_stmt>CalibMuon.CSCCalibration.CSCFakeDBCrosstalk_cfi *<import_from_stmt>CalibMuon.CSCCalibration.CSC_BadChambers_cfi *<line_sep>
<import_from_future_stmt> print_function division<import_from_stmt>.utils save grid<def_stmt>main <block_start>"""Generate all example images for the chapter `Examples: Bounding Boxes` in the documentation."""<line_sep>chapter_examples_bounding_boxes_simple()<line_sep>chapter_examples_bounding_boxes_rotation()<line_sep>chapter_examples_bounding_boxes_ooi()<line_sep>chapter_examples_bounding_boxes_shift()<line_sep>chapter_examples_bounding_boxes_projection()<line_sep>chapter_examples_bounding_boxes_iou()<block_end><def_stmt>chapter_examples_bounding_boxes_simple <block_start><import_stmt>imgaug<as>ia<import_stmt>imgaug.augmenters<as>iaa<import_from_stmt>imgaug.augmentables.bbs BoundingBox BoundingBoxesOnImage<line_sep>ia.seed(1)<line_sep>image=ia.quokka(size=(256 256))<line_sep>bbs=BoundingBoxesOnImage([BoundingBox(x1=65 y1=100 x2=200 y2=150) BoundingBox(x1=150 y1=80 x2=200 y2=130)] shape=image.shape)<line_sep>seq=iaa.Sequential([iaa.Multiply((1.2 1.5)) # change brightness, doesn't affect BBs iaa.Affine(translate_px={"x":40 "y":60} scale=(0.5 0.7))# translate by 40/60px on x/y axis, and scale to 50-70%, affects BBs ])<line_sep># Augment BBs and images. image_aug,bbs_aug=seq(image=image bounding_boxes=bbs)<line_sep># print coordinates before/after augmentation (see below) # use .x1_int, .y_int, ... to get integer coordinates <for_stmt>i range(len(bbs.bounding_boxes))<block_start>before=bbs.bounding_boxes[i]<line_sep>after=bbs_aug.bounding_boxes[i]<line_sep>print("BB %d: (%.4f, %.4f, %.4f, %.4f) -> (%.4f, %.4f, %.4f, %.4f)"%(i before.x1 before.y1 before.x2 before.y2 after.x1 after.y1 after.x2 after.y2))<block_end># image with BBs before/after augmentation (shown below) image_before=bbs.draw_on_image(image size=2)<line_sep>image_after=bbs_aug.draw_on_image(image_aug size=2 color=[0 0 255])<line_sep># ------------ save("examples_bounding_boxes" "simple.jpg" grid([image_before image_after] cols=2 rows=1) quality=90)<block_end><def_stmt>chapter_examples_bounding_boxes_rotation <block_start><import_stmt>imgaug<as>ia<import_from_stmt>imgaug augmenters<as>iaa<line_sep>ia.seed(1)<line_sep>image=ia.quokka(size=(256 256))<line_sep>bbs=ia.BoundingBoxesOnImage([ia.BoundingBox(x1=65 y1=100 x2=200 y2=150) ia.BoundingBox(x1=150 y1=80 x2=200 y2=130)] shape=image.shape)<line_sep>seq=iaa.Sequential([iaa.Multiply((1.2 1.5)) # change brightness, doesn't affect BBs iaa.Affine(rotate=45 )])<line_sep># Make our sequence deterministic. # We can now apply it to the image and then to the BBs and it will # lead to the same augmentations. # IMPORTANT: Call this once PER BATCH, otherwise you will always get the # exactly same augmentations for every batch! seq_det=seq.to_deterministic()<line_sep># Augment BBs and images. # As we only have one image and list of BBs, we use # [image] and [bbs] to turn both into lists (batches) for the # functions and then [0] to reverse that. In a real experiment, your # variables would likely already be lists. 
image_aug=seq_det.augment_images([image])[0]<line_sep>bbs_aug=seq_det.augment_bounding_boxes([bbs])[0]<line_sep># print coordinates before/after augmentation (see below) <for_stmt>i range(len(bbs.bounding_boxes))<block_start>before=bbs.bounding_boxes[i]<line_sep>after=bbs_aug.bounding_boxes[i]<line_sep>print("BB %d: (%d, %d, %d, %d) -> (%d, %d, %d, %d)"%(i before.x1 before.y1 before.x2 before.y2 after.x1 after.y1 after.x2 after.y2))<block_end># image with BBs before/after augmentation (shown below) image_before=bbs.draw_on_image(image size=2)<line_sep>image_after=bbs_aug.draw_on_image(image_aug size=2 color=[0 0 255])<line_sep># ------------ save("examples_bounding_boxes" "rotation.jpg" grid([image_before image_after] cols=2 rows=1) quality=90)<block_end><def_stmt>chapter_examples_bounding_boxes_ooi <block_start><import_stmt>numpy<as>np<import_stmt>imgaug<as>ia<import_stmt>imgaug.augmenters<as>iaa<import_from_stmt>imgaug.augmentables.bbs BoundingBox BoundingBoxesOnImage<line_sep>ia.seed(1)<line_sep>GREEN=[0 255 0]<line_sep>ORANGE=[255 140 0]<line_sep>RED=[255 0 0]<line_sep># Pad image with a 1px white and (BY-1)px black border <def_stmt>pad image by<block_start>image_border1=ia.pad(image top=1 right=1 bottom=1 left=1 mode="constant" cval=255)<line_sep>image_border2=ia.pad(image_border1 top=by-1 right=by-1 bottom=by-1 left=by-1 mode="constant" cval=0)<line_sep><return>image_border2<block_end># Draw BBs on an image # and before doing that, extend the image plane by BORDER pixels. # Mark BBs inside the image plane with green color, those partially inside # with orange and those fully outside with red. <def_stmt>draw_bbs image bbs border<block_start>image_border=pad(image border)<for_stmt>bb bbs.bounding_boxes<block_start><if_stmt>bb.is_fully_within_image(image.shape)<block_start>color=GREEN<block_end><elif_stmt>bb.is_partly_within_image(image.shape)<block_start>color=ORANGE<block_end><else_stmt><block_start>color=RED<block_end>image_border=bb.shift(left=border top=border).draw_on_image(image_border size=2 color=color)<block_end><return>image_border<block_end># Define example image with three small square BBs next to each other. # Augment these BBs by shifting them to the right. image=ia.quokka(size=(256 256))<line_sep>bbs=BoundingBoxesOnImage([BoundingBox(x1=25 x2=75 y1=25 y2=75) BoundingBox(x1=100 x2=150 y1=25 y2=75) BoundingBox(x1=175 x2=225 y1=25 y2=75)] shape=image.shape)<line_sep>seq=iaa.Affine(translate_px={"x":120})<line_sep>image_aug,bbs_aug=seq(image=image bounding_boxes=bbs)<line_sep># Draw the BBs (a) in their original form, (b) after augmentation, # (c) after augmentation and removing those fully outside the image, # (d) after augmentation and removing those fully outside the image and # clipping those partially inside the image so that they are fully inside. 
image_before=draw_bbs(image bbs 100)<line_sep>image_after1=draw_bbs(image_aug bbs_aug 100)<line_sep>image_after2=draw_bbs(image_aug bbs_aug.remove_out_of_image() 100)<line_sep>image_after3=draw_bbs(image_aug bbs_aug.remove_out_of_image().clip_out_of_image() 100)<line_sep># ------------ save("examples_bounding_boxes" "ooi.jpg" grid([image_before image_after1 np.zeros_like(image_before) image_after2 np.zeros_like(image_before) image_after3] cols=2 rows=3) #grid([image_before, image_after1], cols=2, rows=1), quality=90)<block_end><def_stmt>chapter_examples_bounding_boxes_shift <block_start><import_stmt>imgaug<as>ia<import_from_stmt>imgaug.augmentables.bbs BoundingBox BoundingBoxesOnImage<line_sep>ia.seed(1)<line_sep># Define image and two bounding boxes image=ia.quokka(size=(256 256))<line_sep>bbs=BoundingBoxesOnImage([BoundingBox(x1=25 x2=75 y1=25 y2=75) BoundingBox(x1=100 x2=150 y1=25 y2=75)] shape=image.shape)<line_sep># Move both BBs 25px to the right and the second BB 25px down bbs_shifted=bbs.shift(left=25)<line_sep>bbs_shifted.bounding_boxes[1]=bbs_shifted.bounding_boxes[1].shift(top=25)<line_sep># Draw images before/after moving BBs image=bbs.draw_on_image(image color=[0 255 0] size=2 alpha=0.75)<line_sep>image=bbs_shifted.draw_on_image(image color=[0 0 255] size=2 alpha=0.75)<line_sep># ------------ save("examples_bounding_boxes" "shift.jpg" grid([image] cols=1 rows=1) quality=90)<block_end><def_stmt>chapter_examples_bounding_boxes_projection <block_start><import_stmt>imgaug<as>ia<import_from_stmt>imgaug.augmentables.bbs BoundingBox BoundingBoxesOnImage<line_sep>ia.seed(1)<line_sep># Define image with two bounding boxes image=ia.quokka(size=(256 256))<line_sep>bbs=BoundingBoxesOnImage([BoundingBox(x1=25 x2=75 y1=25 y2=75) BoundingBox(x1=100 x2=150 y1=25 y2=75)] shape=image.shape)<line_sep># Rescale image and bounding boxes image_rescaled=ia.imresize_single_image(image (512 512))<line_sep>bbs_rescaled=bbs.on(image_rescaled)<line_sep># Draw image before/after rescaling and with rescaled bounding boxes image_bbs=bbs.draw_on_image(image size=2)<line_sep>image_rescaled_bbs=bbs_rescaled.draw_on_image(image_rescaled size=2)<line_sep># ------------ save("examples_bounding_boxes" "projection.jpg" grid([image_bbs image_rescaled_bbs] cols=2 rows=1) quality=90)<block_end><def_stmt>chapter_examples_bounding_boxes_iou <block_start><import_stmt>numpy<as>np<import_stmt>imgaug<as>ia<import_from_stmt>imgaug.augmentables.bbs BoundingBox<line_sep>ia.seed(1)<line_sep># Define image with two bounding boxes. image=ia.quokka(size=(256 256))<line_sep>bb1=BoundingBox(x1=50 x2=100 y1=25 y2=75)<line_sep>bb2=BoundingBox(x1=75 x2=125 y1=50 y2=100)<line_sep># Compute intersection, union and IoU value # Intersection and union are both bounding boxes. They are here # decreased/increased in size purely for better visualization. bb_inters=bb1.intersection(bb2).extend(all_sides=-1)<line_sep>bb_union=bb1.union(bb2).extend(all_sides=2)<line_sep>iou=bb1.iou(bb2)<line_sep># Draw bounding boxes, intersection, union and IoU value on image. 
image_bbs=np.copy(image)<line_sep>image_bbs=bb1.draw_on_image(image_bbs size=2 color=[0 255 0])<line_sep>image_bbs=bb2.draw_on_image(image_bbs size=2 color=[0 255 0])<line_sep>image_bbs=bb_inters.draw_on_image(image_bbs size=2 color=[255 0 0])<line_sep>image_bbs=bb_union.draw_on_image(image_bbs size=2 color=[0 0 255])<line_sep>image_bbs=ia.draw_text(image_bbs text="IoU=%.2f"%(iou ) x=bb_union.x2+10 y=bb_union.y1+bb_union.height<floordiv>2 color=[255 255 255] size=13)<line_sep># ------------ save("examples_bounding_boxes" "iou.jpg" grid([image_bbs] cols=1 rows=1) quality=90)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_from_stmt>django.contrib admin<line_sep># Register your models here. <import_from_stmt>.models AdvThreatSrcCategory AdvThreatSrcType<import_from_stmt>.models AdvThreatSource<import_from_stmt>.models NonAdvThreatSrcClass NonAdvThreatSrcCategory<import_from_stmt>.models NonAdvThreatSrcType NonAdvThreatSource<import_from_stmt>.models AdvThreatEventCategory AdvThreatEventType<import_from_stmt>.models NonAdvThreatEventType<import_from_stmt>.models AdvThreatEvent NonAdvThreatEvent<import_from_stmt>.models VulnerabilityClass VulnerabilityCategory VulnerabilityType<import_from_stmt>.models ConditionClass ConditionCategory ConditionType<import_from_stmt>.models Vulnerability RiskCondition<import_from_stmt>.models ImpactType Impact<import_from_stmt>.models RiskResponseType RiskResponse<line_sep>admin.site.register(AdvThreatEventCategory)<line_sep>admin.site.register(AdvThreatEventType)<line_sep>admin.site.register(AdvThreatEvent)<line_sep>admin.site.register(NonAdvThreatEventType)<line_sep>admin.site.register(NonAdvThreatEvent)<line_sep>admin.site.register(AdvThreatSrcCategory)<line_sep>admin.site.register(AdvThreatSrcType)<line_sep>admin.site.register(AdvThreatSource)<line_sep>admin.site.register(NonAdvThreatSrcClass)<line_sep>admin.site.register(NonAdvThreatSrcCategory)<line_sep>admin.site.register(NonAdvThreatSrcType)<line_sep>admin.site.register(NonAdvThreatSource)<line_sep>admin.site.register(VulnerabilityClass)<line_sep>admin.site.register(VulnerabilityCategory)<line_sep>admin.site.register(VulnerabilityType)<line_sep>admin.site.register(ConditionClass)<line_sep>admin.site.register(ConditionCategory)<line_sep>admin.site.register(ConditionType)<line_sep>admin.site.register(Vulnerability)<line_sep>admin.site.register(RiskCondition)<line_sep>admin.site.register(ImpactType)<line_sep>admin.site.register(Impact)<line_sep>admin.site.register(RiskResponseType)<line_sep>admin.site.register(RiskResponse)<line_sep>
# Python Standard Library Imports <import_stmt>base64<import_stmt>datetime<import_stmt>json<import_stmt>re<line_sep># Third Party (PyPI) Imports <import_stmt>six.moves.urllib<as>urllib<line_sep># Django Imports <import_from_stmt>django template<import_from_stmt>django.template.defaultfilters stringfilter<import_from_stmt>django.urls reverse<import_from_stmt>django.utils.safestring SafeText<import_from_stmt>django.utils.safestring mark_safe<line_sep>register=template.Library()<line_sep>################################################## # Filters # Form Utilities @register.filter()<def_stmt>field_clsname field<block_start>clsname=field.field.widget.__class__.__name__<line_sep><return>clsname<block_end>@register.filter(is_safe=<true>)<def_stmt>label_with_classes value arg<block_start>attrs={'class':arg 'className':arg }<line_sep>html=value.label_tag(attrs=attrs)<line_sep><return>html<block_end>@register.filter(is_safe=<true>)<def_stmt>react_field field<block_start>html=field.__str__()<line_sep>html=re.sub(r' value="(.*?)"' r' defaultValue="\g<1>"' html)<line_sep>html=re.sub(r' class="(.*?)"' r' className="\g<1>"' html)<if_stmt>field.field.widget.__class__.__name__<eq>'RadioSelect'<block_start>html=re.sub(r'checked="checked"' r'defaultChecked' html)<block_end>html=mark_safe(html)<line_sep><return>html<block_end># Dictionary Utilities @register.filter()<def_stmt>get_item dictionary key<block_start>value=dictionary.get(key)<line_sep><return>value<block_end># String Utilities @register.filter(is_safe=<true>)<def_stmt>concat value arg<block_start>result=str(value)+str(arg)<line_sep><return>result<block_end>@register.filter()<def_stmt>zeropad value num_digits<block_start>""" """<line_sep>padded=str(value).zfill(num_digits)<line_sep><return>padded<block_end>@register.filter(is_safe=<true>)<def_stmt>markdownify value<block_start>"""Converts Markdown string to HTML """<import_stmt>markdown<line_sep>html=markdown.markdown(value)<line_sep><return>html<block_end>@register.filter()<def_stmt>atob value<block_start>"""Base64 decode ASCII to Binary """<line_sep>value=base64.b64decode(value)<line_sep><return>value<block_end>@register.filter()<def_stmt>btoa value<block_start>"""Base64 encode Binary to ASCII """<if_stmt>type(value)<in>(str SafeText)<block_start>value=value.encode('utf-8')<block_end># Convert bytes to str for for use in template value=base64.b64encode(value).decode('utf-8')<line_sep><return>value<block_end># Maths @register.filter()<def_stmt>int_divide value arg<block_start><return>int(value)/int(arg)<block_end>@register.filter()<def_stmt>float_divide value arg<block_start><return>1.0<times>int(value)/int(arg)<block_end>@register.filter()<def_stmt>make_range value<block_start><return>range(value)<block_end># Formatters @register.filter()<def_stmt>currency value<block_start><import_from_stmt>decimal Decimal<line_sep>value=Decimal(value).quantize(Decimal('0.01'))<line_sep><return>value<block_end>@register.filter()<def_stmt>currency_symbol value symbol<block_start><if_stmt>len(value)<g>0<and>value[0]<eq>'-'<block_start>sign='-'<line_sep>abs_value=value[1:]<block_end><else_stmt><block_start>sign=''<line_sep>abs_value=value<block_end>result='%s%s%s'%(sign symbol abs_value )<line_sep><return>result<block_end>@register.filter()<def_stmt>timestamp value<block_start><try_stmt><block_start>formatted=datetime.datetime.fromtimestamp(value)<block_end><except_stmt>AttributeError<block_start>formatted=''<block_end><return>formatted<block_end>@register.filter()<def_stmt>phonenumber value 
country='US'<block_start>"""Formats a phone number for a country """<import_stmt>phonenumbers<try_stmt><block_start>formatted=phonenumbers.format_number(phonenumbers.parse(value country) phonenumbers.PhoneNumberFormat.NATIONAL)<block_end><except_stmt><block_start>formatted=value<block_end><return>formatted<block_end>@register.filter(is_safe=<true>)<def_stmt>obfuscate value<block_start>"""Obfuscates a string """<import_from_stmt>htk.utils.obfuscate html_obfuscate_string<line_sep>result=html_obfuscate_string(value)<line_sep><return>result<block_end>@register.filter(is_safe=<true>)<def_stmt>obfuscate_mailto value text=<false><block_start>"""Obfuscates a mailto link """<import_from_stmt>htk.utils.obfuscate html_obfuscate_string<line_sep>email=html_obfuscate_string(value)<if_stmt>text<block_start>link_text=text<block_end><else_stmt><block_start>link_text=email<block_end>result='<a href="%s%s">%s</a>'%(html_obfuscate_string('mailto:') email link_text )<line_sep><return>result<block_end># Oembed @register.filter(is_safe=<true>)<def_stmt>oembed value autoplay=<false><block_start><import_from_stmt>htk.lib.oembed.utils get_oembed_html<line_sep>html=get_oembed_html(value autoplay=autoplay)<line_sep>html=mark_safe(html)<line_sep><return>html<block_end># Javascript-related @register.filter()<def_stmt>jsbool value<block_start>js_value='true'<if>bool(value)<else>'false'<line_sep><return>js_value<block_end>@register.filter()<def_stmt>jsondumps value<block_start>js_value=mark_safe(json.dumps(value))<line_sep><return>js_value<block_end># Requests @register.filter()<def_stmt>http_header value<block_start>"""Converts Django HTTP headers to standard format e.g. HTTP_ACCEPT -> Accept HTTP_CACHE_CONTROL -> Cache-Control """<line_sep>parts=value.split('_')<line_sep>header_parts=[part.title()<for>part parts[1:]]<line_sep>formatted='-'.join(header_parts)<line_sep><return>formatted<block_end>################################################## # Tags @register.simple_tag(takes_context=<true>)<def_stmt>get_django_setting context key<block_start>"""Retrieves a Django setting and sets it on the context dictionary """<import_from_stmt>django.conf settings<if_stmt>hasattr(settings key)<block_start>value=getattr(settings key)<line_sep>context[key]=value<block_end><return>''<block_end>@register.simple_tag()<def_stmt>htk_setting key<block_start><import_from_stmt>htk.utils htk_setting<as>_htk_setting<line_sep>value=_htk_setting(key)<line_sep><return>value<block_end>@register.simple_tag()<def_stmt>get_request_duration <block_start><import_from_stmt>htk.middleware.classes RequestTimerMiddleware<line_sep>timer=RequestTimerMiddleware.get_current_timer()<if_stmt>timer<block_start>duration=timer.duration()<block_end><else_stmt># TODO: fix get_current_timer() <block_start>duration=0<block_end><return>duration<block_end>## # Load Assets @register.simple_tag(takes_context=<true>)<def_stmt>lesscss context css_file_path_base media=<none><block_start>"""Determine whether to use LESS compilation on-the-fly or CSS files, and includes the appropriate one """<line_sep>media='media="%s" '%media<if>media<else>''<line_sep>values={'css_rel':context.get('css_rel' 'stylesheet') 'css_ext':context.get('css_ext' 'css') 'css_file_path_base':css_file_path_base 'media':media }<line_sep>html='<link type="text/css" rel="%(css_rel)s" href="%(css_file_path_base)s.%(css_ext)s" %(media)s/>'%values<line_sep>html=mark_safe(html)<line_sep><return>html<block_end>@register.simple_tag(takes_context=<true>)<def_stmt>loadjs context js_file_path 
jsx=<false><block_start>"""Include a JS file and append a static asset version string """<line_sep>asset_version=context.get('asset_version')<if_stmt>asset_version<block_start>asset_version_str='?v=%s'%asset_version<block_end><else_stmt><block_start>asset_version_str=''<block_end>values={'script_type':'text/babel'<if>jsx<else>'text/javascript' 'js_file_path':js_file_path 'asset_version_str':asset_version_str }<line_sep>html='<script type="%(script_type)s" src="%(js_file_path)s%(asset_version_str)s"></script>'%values<line_sep>html=mark_safe(html)<line_sep><return>html<block_end>@register.simple_tag(takes_context=<true>)<def_stmt>loadjsx context js_file_path<block_start>html=loadjs(context js_file_path jsx=<true>)<line_sep><return>html<block_end>## # Feature Flags @register.simple_tag()<def_stmt>is_feature_enabled feature_name<block_start><import_from_stmt>htk.apps.features.utils is_feature_enabled<as>_is_feature_enabled<line_sep>is_enabled=_is_feature_enabled(feature_name)<line_sep><return>is_enabled<block_end>## # ACL Tags @register.simple_tag(takes_context=<true>)<def_stmt>is_editable_by_context_user context obj<block_start>user=context.get('user' <none>)<if_stmt>user<block_start>is_editable=obj.is_editable_by(user)<block_end><else_stmt><block_start>is_editable=<false><block_end><return>is_editable<block_end>@register.simple_tag(takes_context=<true>)<def_stmt>has_permission context permission_key<block_start>request=context.get('request' {}).get('request' <none>)<line_sep>user=request.user<if_stmt>request<and>user.is_authenticated<block_start>has_permission=user.has_perm(permission_key)<block_end><else_stmt><block_start>has_permission=<false><block_end><return>has_permission<block_end>## # Organizations @register.simple_tag(takes_context=<true>)<def_stmt>is_user_organization_owner context organization<block_start>user=context.get('user' <none>)<if_stmt>user<block_start>is_owner=organization.has_owner(user)<block_end><else_stmt><block_start>is_owner=<false><block_end><return>is_owner<block_end>@register.simple_tag(takes_context=<true>)<def_stmt>is_user_organization_admin context organization<block_start>user=context.get('user' <none>)<if_stmt>user<block_start>is_admin=organization.has_admin(user)<block_end><else_stmt><block_start>is_admin=<false><block_end><return>is_admin<block_end>@register.simple_tag(takes_context=<true>)<def_stmt>is_user_organization_member context organization<block_start>user=context.get('user' <none>)<if_stmt>user<block_start>is_member=organization.has_member(user)<block_end><else_stmt><block_start>is_member=<false><block_end><return>is_member<block_end>## # Geolocations @register.simple_tag()<def_stmt>distance_from obj lat lng unit='mile'<block_start><import_from_stmt>htk.apps.geolocations.enums DistanceUnit<import_from_stmt>htk.apps.geolocations.models AbstractGeolocation<if_stmt><not>isinstance(obj AbstractGeolocation)<and><not>hasattr(obj 'distance_from')<block_start><raise>Exception('Not a Geolocation object or does not have a distance_from method')<block_end>distance_unit_map={'meter':DistanceUnit.METER 'kilometer':DistanceUnit.KILOMETER 'feet':DistanceUnit.FEET 'mile':DistanceUnit.MILE }<line_sep>distance_unit=distance_unit_map.get(unit)<if_stmt>distance_unit<is><none><block_start><raise>Exception('Unknown distance unit: %s'%unit)<block_end>distance=obj.distance_from(lat lng distance_unit=distance_unit)<line_sep><return>distance<block_end>## # Util Tags @register.simple_tag()<def_stmt>qrcode_image_url qr_data<block_start>"""Returns the URL to the QR Code 
image of `qr_data` """<if_stmt>qr_data<block_start><import_from_stmt>htk.lib.qrcode.utils generate_qr_key<import_from_stmt>htk.utils htk_setting<line_sep>url_name=htk_setting('HTK_QR_IMAGE_URL_NAME')<if_stmt>url_name<block_start>qr_params=urllib.parse.urlencode({'key':generate_qr_key(qr_data) 'data':qr_data })<line_sep>image_url='%s?%s'%(reverse(url_name) qr_params )<block_end><else_stmt><block_start>image_url=<none><block_end><block_end><else_stmt><block_start>image_url=<none><block_end><return>image_url<block_end>@register.simple_tag()<def_stmt>credit_card_icon credit_card_brand<block_start><import_from_stmt>htk.constants.icons CREDIT_CARD_ICONS<import_from_stmt>htk.constants.icons DEFAULT_CREDIT_CARD_ICON<if_stmt>credit_card_brand<in>CREDIT_CARD_ICONS<block_start>credit_card_icon=CREDIT_CARD_ICONS[credit_card_brand]<block_end><else_stmt><block_start>credit_card_icon=DEFAULT_CREDIT_CARD_ICON<block_end><return>credit_card_icon<block_end>
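Several of the filters above are plain Python functions and can be exercised outside of a Django template. A small illustrative sketch, assuming the functions defined in this module are importable (expected output in comments):

print(http_header("HTTP_CACHE_CONTROL"))  # Cache-Control
print(zeropad(42, 5))                     # 00042
print(jsbool(0))                          # false
print(concat("foo", "bar"))               # foobar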
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<line_sep># the next line can be removed after installation sys.path.insert(0 os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))<import_from_stmt>veriloggen *<import_stmt>veriloggen.resolver.resolver<as>resolver<def_stmt>mkSubLed <block_start>m=Module('sub_blinkled')<line_sep>width=m.Parameter('WIDTH' 8)<line_sep>inc=m.Parameter('INC' 1)<line_sep>clk=m.Input('CLK')<line_sep>rst=m.Input('RST')<line_sep>led=m.OutputReg('LED' width)<line_sep>count=m.Reg('count' width+10)<line_sep>m.Always(Posedge(clk))(If(rst)(count(0)).Else(If(count<eq>1023)(count(0)).Else(count(count+inc))))<line_sep>m.Always(Posedge(clk))(If(rst)(led(0)).Else(If(count<eq>1023)(led(led+inc))))<line_sep><return>m<block_end><def_stmt>mkOrigLed <block_start>m=Module('blinkled')<line_sep>sub=mkSubLed()<line_sep>width=m.Parameter('TOP_WIDTH' 16)<line_sep>inc=m.Parameter('TOP_INC' 1)<line_sep>clk=m.Input('CLK')<line_sep>rst=m.Input('RST')<line_sep>led0=m.Output('LED0' width)<line_sep>led1=m.Output('LED1' width)<line_sep>m.Instance(sub 'inst_sub_blinkled_0' params=[('WIDTH' width) ('INC' inc+1)] ports=[('CLK' clk) ('RST' rst) ('LED' led0)])<line_sep>m.Instance(sub 'inst_sub_blinkled_1' params=[('WIDTH' width) ('INC' inc+2)] ports=[('CLK' clk) ('RST' rst) ('LED' led1)])<line_sep><return>m<block_end><def_stmt>mkLed <block_start>led=mkOrigLed()<line_sep><return>resolver.resolve(led)<block_end><if_stmt>__name__<eq>'__main__'<block_start>led=mkLed()<line_sep>verilog=led.to_verilog()<line_sep>print(verilog)<block_end>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Train and Eval multi-agent PPO for multi-agent gridworld. Each agent learns an independent policy. Note: this code always assumes the network has an RNN to track other agents' state. To run: ```bash tensorboard.sh --port=2222 --logdir /tmp/multigrid/ppo/ python -m multiagent_train_eval.py --root_dir=/tmp/multigrid/ppo/ ``` """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>absl logging<import_stmt>gin<import_from_stmt>tf_agents.system system_multiprocessing<line_sep># Import needed to trigger env registration, so pylint: disable=unused-import <import_from_stmt>social_rl gym_multigrid<import_from_stmt>social_rl.multiagent_tfagents football_gym_env<import_from_stmt>social_rl.multiagent_tfagents multiagent_gym_suite<import_from_stmt>social_rl.multiagent_tfagents multiagent_metrics<import_from_stmt>social_rl.multiagent_tfagents multiagent_ppo<import_from_stmt>social_rl.multiagent_tfagents multiagent_train_eval<import_from_stmt>social_rl.multiagent_tfagents utils<import_from_stmt>social_rl.multiagent_tfagents.joint_attention attention_ppo_agent<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_string('attention_bonus_type' 'kld' 'Method for computing attention bonuses.')<line_sep>flags.DEFINE_float('bonus_ratio' 0.00 'Final multiplier for bonus rewards.')<line_sep>flags.DEFINE_integer('bonus_timescale' int(1e6) 'Attention bonuses scale linearly until this point.')<def_stmt>main _<block_start>logging.set_verbosity(logging.INFO)<line_sep>agent_class=functools.partial(attention_ppo_agent.MultiagentAttentionPPO attention_bonus_type=FLAGS.attention_bonus_type bonus_ratio=FLAGS.bonus_ratio bonus_timescale=FLAGS.bonus_timescale)<if_stmt>'academy'<in>FLAGS.env_name<block_start>env_load_fn=football_gym_env.load<line_sep>gin.bind_parameter('construct_attention_networks.use_stacks' <true>)<line_sep>gin.bind_parameter('AttentionMultiagentPPOPolicy.use_stacks' <true>)<block_end><else_stmt><block_start>env_load_fn=multiagent_gym_suite.load<block_end>multiagent_train_eval.train_eval(FLAGS.root_dir env_load_fn=env_load_fn agent_class=agent_class env_name=FLAGS.env_name num_environment_steps=FLAGS.num_environment_steps collect_episodes_per_iteration=FLAGS.collect_episodes_per_iteration num_parallel_environments=FLAGS.num_parallel_environments replay_buffer_capacity=FLAGS.replay_buffer_capacity num_epochs=FLAGS.num_epochs num_eval_episodes=FLAGS.num_eval_episodes train_checkpoint_interval=FLAGS.train_checkpoint_interval policy_checkpoint_interval=FLAGS.policy_checkpoint_interval log_interval=FLAGS.log_interval summary_interval=FLAGS.summary_interval actor_fc_layers=(FLAGS.actor_fc_layers_size FLAGS.actor_fc_layers_size) value_fc_layers=(FLAGS.value_fc_layers_size FLAGS.value_fc_layers_size) lstm_size=(FLAGS.lstm_size ) conv_filters=FLAGS.conv_filters 
conv_kernel=FLAGS.conv_kernel direction_fc=FLAGS.direction_fc debug=FLAGS.debug inactive_agent_ids=tuple() random_seed=FLAGS.random_seed reinit_checkpoint_dir=FLAGS.reinit_checkpoint_dir use_attention_networks=<true>)<block_end><if_stmt>__name__<eq>'__main__'<block_start>flags.mark_flag_as_required('root_dir')<line_sep>system_multiprocessing.handle_main(<lambda>_:app.run(main))<block_end>
<import_stmt>sentry_sdk<import_from_stmt>sentry_sdk.integrations.modules ModulesIntegration<def_stmt>test_basic sentry_init capture_events<block_start>sentry_init(integrations=[ModulesIntegration()])<line_sep>events=capture_events()<line_sep>sentry_sdk.capture_exception(ValueError())<line_sep>(event )=events<assert_stmt>"sentry-sdk"<in>event["modules"]<assert_stmt>"pytest"<in>event["modules"]<block_end>
<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>shutil<import_from_stmt>pathlib Path<import_from_stmt>tempfile TemporaryDirectory<import_from_stmt>conda_package_handling.api _convert<import_from_stmt>quetz.condainfo calculate_file_hashes_and_size<import_from_stmt>quetz.dao Dao<import_from_stmt>quetz.pkgstores PackageStore<line_sep>logger=logging.getLogger("quetz.plugins")<def_stmt>transmutation package_version:dict config pkgstore:PackageStore dao:Dao<block_start>filename:str=package_version["filename"]<line_sep>channel:str=package_version["channel_name"]<line_sep>package_format:str=package_version["package_format"]<line_sep>package_name:str=package_version["package_name"]<line_sep>platform=package_version["platform"]<line_sep>version=package_version["version"]<line_sep>build_number=package_version["build_number"]<line_sep>build_string=package_version["build_string"]<line_sep>uploader_id=package_version["uploader_id"]<line_sep>info=json.loads(package_version["info"])<if_stmt>package_format<eq>"tarbz2"<or><not>filename.endswith(".tar.bz2")<block_start><return><block_end>fh=pkgstore.serve_path(channel Path(platform)/filename)<with_stmt>TemporaryDirectory()<as>tmpdirname<block_start>local_file_name=os.path.join(tmpdirname filename)<with_stmt>open(local_file_name "wb")<as>local_file# chunk size 10MB <block_start>shutil.copyfileobj(fh local_file 10<times>1024<times>1024)<block_end>fn,out_fn,errors=_convert(local_file_name ".conda" tmpdirname force=<true>)<if_stmt>errors<block_start>logger.error(f"transmutation errors --> {errors}")<line_sep><return><block_end>filename_conda=os.path.basename(filename).replace('.tar.bz2' '.conda')<line_sep>logger.info(f"Adding file to package store: {Path(platform)/filename_conda}")<with_stmt>open(out_fn 'rb')<as>f<block_start>calculate_file_hashes_and_size(info f)<line_sep>f.seek(0)<line_sep>pkgstore.add_package(f channel str(Path(platform)/filename_conda))<block_end>version=dao.create_version(channel package_name "conda" platform version build_number build_string filename_conda json.dumps(info) uploader_id info["size"] upsert=<true> )<if_stmt>os.path.exists(out_fn)<block_start>os.remove(out_fn)<block_end><block_end><block_end>
""" Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right to left for the next level and alternate between). For example: Given binary tree {3,9,20,#,#,15,7}, 3 / \ 9 20 / \ 15 7 return its zigzag level order traversal as: [ [3], [20,9], [15,7] ] """<line_sep># Definition for a binary tree node # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None <class_stmt>Solution# @param root, a tree node # @return a list of lists of integers <block_start><def_stmt>zigzagLevelOrder self root<block_start><if_stmt>root<is><none><block_start><return>[]<block_end>res=[]<line_sep>queue=[]<line_sep>rev=<false># Reverse direction level=[]<line_sep>queue.append(root)<line_sep>queue.append(<none>)<while_stmt>queue<block_start>root=queue.pop(0)<if_stmt>root<is><none><block_start><if_stmt>queue<block_start>queue.append(<none>)<block_end>res.append(level)<line_sep>level=[]<line_sep>rev=<not>rev# Toggle direction <block_end><else_stmt><block_start><if_stmt>rev<block_start>level.insert(0 root.val)<block_end><else_stmt><block_start>level.append(root.val)<block_end><if_stmt>root.left<is><not><none><block_start>queue.append(root.left)<block_end><if_stmt>root.right<is><not><none><block_start>queue.append(root.right)<block_end><block_end><block_end><return>res<block_end><block_end>
<import_stmt>time<line_sep>start=time.perf_counter()<for_stmt>_ range(10000)<block_start>x="{} {}".format("test" "test")<block_end>print(time.perf_counter()-start)<line_sep>
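For comparison only (a sketch, not part of the original snippet), the same micro-benchmark written with an f-string, which is usually faster than str.format on CPython:

import time

start = time.perf_counter()
for _ in range(10000):
    x = f"{'test'} {'test'}"
print(time.perf_counter() - start)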
""" @Time : 2021-01-21 11:50:55 @File : logger.py @Author : Abtion @Email : <EMAIL> """<import_stmt>logging<import_stmt>os<import_stmt>sys<def_stmt>setup_logger name save_dir distributed_rank<block_start>logger=logging.getLogger(name)<line_sep>logger.setLevel(logging.DEBUG)<line_sep># don't log results for the non-master process <if_stmt>distributed_rank<g>0<block_start><return>logger<block_end>ch=logging.StreamHandler(stream=sys.stdout)<line_sep>ch.setLevel(logging.DEBUG)<line_sep>formatter=logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")<line_sep>ch.setFormatter(formatter)<line_sep>logger.addHandler(ch)<if_stmt>save_dir<block_start><if_stmt><not>os.path.exists(save_dir)<block_start>os.makedirs(save_dir)<block_end>fh=logging.FileHandler(os.path.join(save_dir "log.txt") encoding='utf8')<line_sep>fh.setLevel(logging.DEBUG)<line_sep>fh.setFormatter(formatter)<line_sep>logger.addHandler(fh)<block_end><return>logger<block_end>
<def_stmt>contains_magic_number list1 magic_number<block_start><for_stmt>i list1<block_start><if_stmt>i<eq>magic_number<block_start>print("This list contains the magic number")<line_sep># break here, otherwise the loop keeps running needlessly <break><block_end><else_stmt><block_start>print("This list does NOT contain the magic number")<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>contains_magic_number(range(10) 3)<block_end>
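Because the else above is attached to the if, the "does NOT contain" message is printed once for every non-matching element seen before the match. A tidier variant (a sketch, not the original exercise) uses the loop's own else clause so the negative message is printed at most once, only after the whole list has been scanned:

def contains_magic_number_v2(values, magic_number):
    for value in values:
        if value == magic_number:
            print("This list contains the magic number")
            break
    else:
        # Runs only if the loop finished without hitting `break`.
        print("This list does NOT contain the magic number")

contains_magic_number_v2(range(10), 3)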
<import_stmt>responses<import_stmt>json<import_from_stmt>.helpers mock_file ClientTestCase<class_stmt>TestClientSubscription(ClientTestCase)<block_start><def_stmt>setUp self<block_start>super(TestClientSubscription self).setUp()<line_sep>self.base_url='{}/subscriptions'.format(self.base_url)<line_sep>self.subscription_id='sub_8RlLljfA4AnDVx'<block_end>@responses.activate<def_stmt>test_subscription_fetch_all self<block_start>result=mock_file('subscription_collection')<line_sep>url=self.base_url<line_sep>responses.add(responses.GET url status=200 body=json.dumps(result) match_querystring=<true>)<line_sep>self.assertEqual(self.client.subscription.all() result)<block_end>@responses.activate<def_stmt>test_subscription_fetch self<block_start>result=mock_file('fake_subscription')<line_sep>url='{}/{}'.format(self.base_url 'fake_subscription_id')<line_sep>responses.add(responses.GET url status=200 body=json.dumps(result) match_querystring=<true>)<line_sep>self.assertEqual(self.client.subscription.fetch('fake_subscription_id') result)<block_end>@responses.activate<def_stmt>test_subscription_create self<block_start>init=mock_file('init_subscription')<line_sep>result=mock_file('fake_subscription')<line_sep>url=self.base_url<line_sep>responses.add(responses.POST url status=200 body=json.dumps(result) match_querystring=<true>)<line_sep>self.assertEqual(self.client.subscription.create(init) result)<block_end>@responses.activate<def_stmt>test_subscription_cancel self<block_start>result=mock_file('fake_subscription_cancelled')<line_sep>url='{}/{}/cancel'.format(self.base_url self.subscription_id)<line_sep>responses.add(responses.POST url status=200 body=json.dumps(result) match_querystring=<true>)<line_sep>response=json.loads(self.client.subscription.cancel(self.subscription_id))<line_sep>self.assertEqual(response['id'] self.subscription_id)<line_sep>self.assertEqual(response['entity'] 'subscription')<line_sep>self.assertEqual(response['status'] 'cancelled')<block_end>@responses.activate<def_stmt>test_subscription_create_addon self<block_start>result=mock_file('fake_subscription_addon')<line_sep>url='{}/{}/addons'.format(self.base_url self.subscription_id)<line_sep>responses.add(responses.POST url status=200 body=json.dumps(result) match_querystring=<true>)<line_sep>response=json.loads(self.client.subscription.createAddon(self.subscription_id {'item':{'name':'Extra Chair' 'amount':30000 'currency':'INR'} 'quantity':2}))<line_sep>self.assertEqual(response['subscription_id'] self.subscription_id)<line_sep>self.assertEqual(response['entity'] 'addon')<line_sep>self.assertEqual(response['item']['name'] 'Extra Chair')<line_sep>self.assertEqual(response['item']['amount'] 30000)<block_end><block_end>
<import_stmt>pickle<import_from_stmt>flask jsonify<import_from_stmt>plenario.database postgres_base postgres_engine<as>engine<import_from_stmt>plenario.utils.helpers reflect<def_stmt>get_job ticket:str<block_start>celery_taskmeta=reflect('celery_taskmeta' postgres_base.metadata engine)<line_sep>query=celery_taskmeta.select().where(celery_taskmeta.c.task_id<eq>ticket)<line_sep>job_meta=dict(query.execute().first().items())<line_sep>job_meta['result']=pickle.loads(job_meta['result'])<line_sep><return>job_meta<block_end><def_stmt>make_job_response endpoint validated_query<block_start>msg='This feature, enabled by the jobs=true flag, is currently '<concat>'undergoing maintenance, we apologize for any inconvenience.'<line_sep><return>jsonify({'unavailable':msg})<block_end>
<import_from_stmt>collections OrderedDict<import_from_stmt>maggot get_current_separator<import_from_stmt>maggot.containers NestedContainer<class_stmt>Config(NestedContainer)<block_start>@property<def_stmt>identifier self<block_start>""" Maps config parameters into a single string that briefly summarizes the content of config's fields. Fields are sorted to provide deterministic output. Example: >>> config = dict(a=10, b=dict(c=20)) >>> config = Config.from_dict(config) >>> config.identifier '10-20' """<line_sep>parameters=self.as_flat_dict()<def_stmt>sort_key item<block_start>name,attr=item<line_sep>*prefix,base=name.split(".")<line_sep><return>base<block_end><def_stmt>is_descriptive key<block_start>*prefix,base=key.split(".")<line_sep><return><not>base.startswith("_")<block_end># convert values to strings parameters=OrderedDict((k value_to_string(v k))<for>k,v parameters.items())<line_sep># discard parameters that start with underscore # by convention, they are considered as `non-descriptive` # i.e. not used in the identifier parameters=OrderedDict((k v)<for>k,v parameters.items()<if>is_descriptive(k))<line_sep><return>get_current_separator().join(parameters.values())<block_end><block_end><def_stmt>value_to_string value name<block_start>"""Translates values (e.g. lists, ints, booleans) to strings"""<def_stmt>last name<block_start>*prefix,base=name.split(".")<line_sep><return>base<block_end><if_stmt>isinstance(value list)<block_start><return>"x".join(map(str value))<block_end><if_stmt>isinstance(value bool)<block_start><return>last(name)<if>value<else>"no_"+last(name)<block_end><else_stmt><block_start><return>str(value)<block_end><block_end>
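A small illustrative sketch of how value_to_string maps different parameter types into identifier fragments (the parameter names are made up, expected output in comments):

print(value_to_string([64, 128], "model.sizes"))  # 64x128
print(value_to_string(True, "model.use_bn"))      # use_bn
print(value_to_string(False, "model.use_bn"))     # no_use_bn
print(value_to_string(0.01, "optim.lr"))          # 0.01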
"""Effects"""<class_stmt>FxName<block_start>"""FX name"""<def_stmt>__init__ self name<block_start>self.name=name<block_end><block_end>BITCRUSHER=FxName('bitcrusher')<line_sep>COMPRESSOR=FxName('compressor')<line_sep>ECHO=FxName('echo')<line_sep>FLANGER=FxName('flanger')<line_sep>KRUSH=FxName('krush')<line_sep>LPF=FxName('lpf')<line_sep>PAN=FxName('pan')<line_sep>PANSLICER=FxName('panslicer')<line_sep>REVERB=FxName('reverb')<line_sep>SLICER=FxName('slicer')<line_sep>WOBBLE=FxName('wobble')<line_sep>
#! /usr/bin/env python # -*- coding: utf-8 -*- #====================================================================== # # translator.py - 命令行翻译(谷歌,必应,百度,有道,词霸) # # Created by skywind on 2019/06/14 # Version: 1.0.2, Last Modified: 2019/06/18 18:40 # #====================================================================== <import_from_future_stmt> print_function unicode_literals<import_stmt>sys<import_stmt>time<import_stmt>os<import_stmt>re<import_stmt>random<import_stmt>copy<import_stmt>json<import_stmt>codecs<import_stmt>pprint<line_sep>#---------------------------------------------------------------------- # 编码兼容 #---------------------------------------------------------------------- <if_stmt>sys.version_info[0]<l>3<block_start>reload(sys)# noqa: F821 sys.setdefaultencoding('utf-8')<line_sep># sys.stdout = codecs.getwriter('utf-8')(sys.stdout, 'ignore') # sys.stderr = codecs.getwriter('utf-8')(sys.stderr, 'ignore') <block_end><else_stmt># sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'ignore') # sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'ignore') <block_start><pass><block_end>#---------------------------------------------------------------------- # 语言的别名 #---------------------------------------------------------------------- langmap={"arabic":"ar" "bulgarian":"bg" "catalan":"ca" "chinese":"zh-CN" "chinese simplified":"zh-CHS" "chinese traditional":"zh-CHT" "czech":"cs" "danish":"da" "dutch":"nl" "english":"en" "estonian":"et" "finnish":"fi" "french":"fr" "german":"de" "greek":"el" "haitian creole":"ht" "hebrew":"he" "hindi":"hi" "hmong daw":"mww" "hungarian":"hu" "indonesian":"id" "italian":"it" "japanese":"ja" "klingon":"tlh" "klingon (piqad)":"tlh-Qaak" "korean":"ko" "latvian":"lv" "lithuanian":"lt" "malay":"ms" "maltese":"mt" "norwegian":"no" "persian":"fa" "polish":"pl" "portuguese":"pt" "romanian":"ro" "russian":"ru" "slovak":"sk" "slovenian":"sl" "spanish":"es" "swedish":"sv" "thai":"th" "turkish":"tr" "ukrainian":"uk" "urdu":"ur" "vietnamese":"vi" "welsh":"cy"}<line_sep>#---------------------------------------------------------------------- # BasicTranslator #---------------------------------------------------------------------- <class_stmt>BasicTranslator(object)<block_start><def_stmt>__init__ self name **argv<block_start>self._name=name<line_sep>self._config={}<line_sep>self._options=argv<line_sep>self._session=<none><line_sep>self._agent=<none><line_sep>self._load_config(name)<line_sep>self._check_proxy()<block_end><def_stmt>__load_ini self ininame codec=<none><block_start>config={}<if_stmt><not>ininame<block_start><return><none><block_end><elif_stmt><not>os.path.exists(ininame)<block_start><return><none><block_end><try_stmt><block_start>content=open(ininame 'rb').read()<block_end><except_stmt>IOError<block_start>content=b''<block_end><if_stmt>content[:3]<eq>b'\xef\xbb\xbf'<block_start>text=content[3:].decode('utf-8')<block_end><elif_stmt>codec<is><not><none><block_start>text=content.decode(codec 'ignore')<block_end><else_stmt><block_start>codec=sys.getdefaultencoding()<line_sep>text=<none><for_stmt>name [codec 'gbk' 'utf-8']<block_start><try_stmt><block_start>text=content.decode(name)<line_sep><break><block_end><except_stmt><block_start><pass><block_end><block_end><if_stmt>text<is><none><block_start>text=content.decode('utf-8' 
'ignore')<block_end><block_end><if_stmt>sys.version_info[0]<l>3<block_start><import_stmt>StringIO<import_stmt>ConfigParser<line_sep>sio=StringIO.StringIO(text)<line_sep>cp=ConfigParser.ConfigParser()<line_sep>cp.readfp(sio)<block_end><else_stmt><block_start><import_stmt>configparser<line_sep>cp=configparser.ConfigParser(interpolation=<none>)<line_sep>cp.read_string(text)<block_end><for_stmt>sect cp.sections()<block_start><for_stmt>key,val cp.items(sect)<block_start>lowsect,lowkey=sect.lower() key.lower()<line_sep>config.setdefault(lowsect {})[lowkey]=val<block_end><block_end><if_stmt>'default'<not><in>config<block_start>config['default']={}<block_end><return>config<block_end><def_stmt>_load_config self name<block_start>self._config={}<line_sep>ininame=os.path.expanduser('~/.config/translator/config.ini')<line_sep>config=self.__load_ini(ininame)<if_stmt><not>config<block_start><return><false><block_end><for_stmt>section ('default' name)<block_start>items=config.get(section {})<for_stmt>key items<block_start>self._config[key]=items[key]<block_end><block_end><return><true><block_end><def_stmt>_check_proxy self<block_start>proxy=os.environ.get('all_proxy' <none>)<if_stmt><not>proxy<block_start><return><false><block_end><if_stmt><not>isinstance(proxy str)<block_start><return><false><block_end><if_stmt>'proxy'<not><in>self._config<block_start>self._config['proxy']=proxy.strip()<block_end><return><true><block_end><def_stmt>request self url data=<none> post=<false> header=<none><block_start><import_stmt>requests<if_stmt><not>self._session<block_start>self._session=requests.Session()<block_end>argv={}<if_stmt>header<is><not><none><block_start>header=copy.deepcopy(header)<block_end><else_stmt><block_start>header={}<block_end><if_stmt>self._agent<block_start>header['User-Agent']=self._agent<block_end>argv['headers']=header<line_sep>timeout=self._config.get('timeout' 7)<line_sep>proxy=self._config.get('proxy' <none>)<if_stmt>timeout<block_start>argv['timeout']=float(timeout)<block_end><if_stmt>proxy<block_start>proxies={'http':proxy 'https':proxy}<line_sep>argv['proxies']=proxies<block_end><if_stmt><not>post<block_start><if_stmt>data<is><not><none><block_start>argv['params']=data<block_end><block_end><else_stmt><block_start><if_stmt>data<is><not><none><block_start>argv['data']=data<block_end><block_end><if_stmt><not>post<block_start>r=self._session.get(url **argv)<block_end><else_stmt><block_start>r=self._session.post(url **argv)<block_end><return>r<block_end><def_stmt>http_get self url data=<none> header=<none><block_start><return>self.request(url data <false> header)<block_end><def_stmt>http_post self url data=<none> header=<none><block_start><return>self.request(url data <true> header)<block_end><def_stmt>url_unquote self text plus=<true><block_start><if_stmt>sys.version_info[0]<l>3<block_start><import_stmt>urllib<if_stmt>plus<block_start><return>urllib.unquote_plus(text)<block_end><return>urllib.unquote(text)<block_end><import_stmt>urllib.parse<if_stmt>plus<block_start><return>urllib.parse.unquote_plus(text)<block_end><return>urllib.parse.unquote(text)<block_end><def_stmt>url_quote self text plus=<true><block_start><if_stmt>sys.version_info[0]<l>3<block_start><import_stmt>urllib<if_stmt>isinstance(text unicode)# noqa: F821 <block_start>text=text.encode('utf-8' 'ignore')<block_end><if_stmt>plus<block_start><return>urllib.quote_plus(text)<block_end><return>urlparse.quote(text)# noqa: F821 
<block_end><import_stmt>urllib.parse<if_stmt>plus<block_start><return>urllib.parse.quote_plus(text)<block_end><return>urllib.parse.quote(text)<block_end><def_stmt>create_translation self sl=<none> tl=<none> text=<none><block_start>res={}<line_sep>res['engine']=self._name<line_sep>res['sl']=sl# 来源语言 res['tl']=tl# 目标语言 res['text']=text# 需要翻译的文本 res['phonetic']=<none># 音标 res['definition']=<none># 简单释义 res['explain']=<none># 分行解释 <return>res<block_end># 翻译结果:需要填充如下字段 <def_stmt>translate self sl tl text<block_start><return>self.create_translation(sl tl text)<block_end># 是否是英文 <def_stmt>check_english self text<block_start><for_stmt>ch text<block_start><if_stmt>ord(ch)<ge>128<block_start><return><false><block_end><block_end><return><true><block_end># 猜测语言 <def_stmt>guess_language self sl tl text<block_start><if_stmt>((<not>sl)<or>sl<eq>'auto')<and>((<not>tl)<or>tl<eq>'auto')<block_start><if_stmt>self.check_english(text)<block_start>sl,tl=('en-US' 'zh-CN')<block_end><else_stmt><block_start>sl,tl=('zh-CN' 'en-US')<block_end><block_end><if_stmt>sl.lower()<in>langmap<block_start>sl=langmap[sl.lower()]<block_end><if_stmt>tl.lower()<in>langmap<block_start>tl=langmap[tl.lower()]<block_end><return>sl tl<block_end><def_stmt>md5sum self text<block_start><import_stmt>hashlib<line_sep>m=hashlib.md5()<if_stmt>sys.version_info[0]<l>3<block_start><if_stmt>isinstance(text unicode)# noqa: F821 <block_start>text=text.encode('utf-8')<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(text str)<block_start>text=text.encode('utf-8')<block_end><block_end>m.update(text)<line_sep><return>m.hexdigest()<block_end><block_end>#---------------------------------------------------------------------- # Azure Translator #---------------------------------------------------------------------- <class_stmt>AzureTranslator(BasicTranslator)<block_start><def_stmt>__init__ self **argv<block_start>super(AzureTranslator self).__init__('azure' **argv)<if_stmt>'apikey'<not><in>self._config<block_start>sys.stderr.write('error: missing apikey in [azure] section\n')<line_sep>sys.exit()<block_end>self.apikey=self._config['apikey']<block_end><def_stmt>translate self sl tl text<block_start><import_stmt>uuid<line_sep>sl,tl=self.guess_language(sl tl text)<line_sep>qs=self.url_quote(sl)<line_sep>qt=self.url_quote(tl)<line_sep>url='https://api.cognitive.microsofttranslator.com/translate'<line_sep>url<augadd>'?api-version=3.0&from={}&to={}'.format(qs qt)<line_sep>headers={'Ocp-Apim-Subscription-Key':self.apikey 'Content-type':'application/json' 'X-ClientTraceId':str(uuid.uuid4())}<line_sep>body=[{'text':text}]<import_stmt>json<line_sep>resp=self.http_post(url json.dumps(body) headers).json()<line_sep># print(resp) res={}<line_sep>res['text']=text<line_sep>res['sl']=sl<line_sep>res['tl']=tl<line_sep>res['translation']=self.render(resp)<line_sep>res['html']=<none><line_sep>res['xterm']=<none><line_sep><return>res<block_end><def_stmt>render self resp<block_start><if_stmt><not>resp<block_start><return>''<block_end>x=resp[0]<if_stmt><not>x<block_start><return>''<block_end>y=x['translations']<if_stmt><not>y<block_start><return>''<block_end>output=''<for_stmt>item y<block_start>output<augadd>item['text']+'\n'<block_end><return>output<block_end><block_end>#---------------------------------------------------------------------- # Google Translator #---------------------------------------------------------------------- <class_stmt>GoogleTranslator(BasicTranslator)<block_start><def_stmt>__init__ self **argv<block_start>super(GoogleTranslator 
self).__init__('google' **argv)<line_sep>self._agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0)'<line_sep>self._agent<augadd>' Gecko/20100101 Firefox/59.0'<block_end><def_stmt>get_url self sl tl qry<block_start>http_host=self._config.get('host' 'translate.googleapis.com')<line_sep>qry=self.url_quote(qry)<line_sep>url='https://{}/translate_a/single?client=gtx&sl={}&tl={}&dt=at&dt=bd&dt=ex&'<concat>'dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&q={}'.format(http_host sl tl qry)<line_sep># noqa: E216 <return>url<block_end><def_stmt>translate self sl tl text<block_start>sl,tl=self.guess_language(sl tl text)<line_sep>self.text=text<line_sep>url=self.get_url(sl tl text)<line_sep>r=self.http_get(url)<if_stmt><not>r<block_start><return><none><block_end><try_stmt><block_start>obj=r.json()<block_end><except_stmt><block_start><return><none><block_end># pprint.pprint(obj) res=self.create_translation(sl tl text)<line_sep>res['phonetic']=self.get_phonetic(obj)<line_sep>res['definition']=self.get_definition(obj)<line_sep>res['explain']=self.get_explain(obj)<line_sep>res['detail']=self.get_detail(obj)<line_sep>res['alternative']=self.get_alternative(obj)<line_sep><return>res<block_end><def_stmt>get_phonetic self obj<block_start><for_stmt>x obj[0]<block_start><if_stmt>len(x)<eq>4<block_start><return>x[3]<block_end><block_end><return><none><block_end><def_stmt>get_definition self obj<block_start>paraphrase=''<for_stmt>x obj[0]<block_start><if_stmt>x[0]<block_start>paraphrase<augadd>x[0]<block_end><block_end><return>paraphrase<block_end><def_stmt>get_explain self obj<block_start>explain=[]<if_stmt>obj[1]<block_start><for_stmt>x obj[1]<block_start>expl='[{}] '.format(x[0][0])<for_stmt>i x[2]<block_start>expl<augadd>i[0]+';'<block_end>explain.append(expl)<block_end><block_end><return>explain<block_end><def_stmt>get_detail self resp<block_start>result=[]<if_stmt>len(resp)<l>13<block_start><return><none><block_end><for_stmt>x resp[12]<block_start>result.append('[{}]'.format(x[0]))<for_stmt>y x[1]<block_start>result.append('- {}'.format(y[0]))<if_stmt>len(y)<ge>3<block_start>result.append(' * {}'.format(y[2]))<block_end><block_end><block_end><return>result<block_end><def_stmt>get_alternative self resp<block_start>definition=self.get_definition(resp)<line_sep>result=[]<if_stmt>len(resp)<l>6<block_start><return><none><block_end><for_stmt>x resp[5]# result.append('- {}'.format(x[0])) <block_start><for_stmt>i x[2]<block_start><if_stmt>i[0]<ne>definition<block_start>result.append(' * {}'.format(i[0]))<block_end><block_end><block_end><return>result<block_end><block_end>#---------------------------------------------------------------------- # Youdao Translator #---------------------------------------------------------------------- <class_stmt>YoudaoTranslator(BasicTranslator)<block_start><def_stmt>__init__ self **argv<block_start>super(YoudaoTranslator self).__init__('youdao' **argv)<line_sep>self.url='https://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'<line_sep>self.D="ebSeFb%=XZ%T[KZ)c(sy!"<line_sep>self.D="97_3(jkMYg@T[KZQmqjTK"<block_end><def_stmt>get_md5 self value<block_start><import_stmt>hashlib<line_sep>m=hashlib.md5()<line_sep># m.update(value) m.update(value.encode('utf-8'))<line_sep><return>m.hexdigest()<block_end><def_stmt>sign self text salt<block_start>s="fanyideskweb"+text+salt+self.D<line_sep><return>self.get_md5(s)<block_end><def_stmt>translate self sl tl text<block_start>sl,tl=self.guess_language(sl tl 
text)<line_sep>self.text=text<line_sep>salt=str(int(time.time()<times>1000)+random.randint(0 10))<line_sep>sign=self.sign(text salt)<line_sep>header={'Cookie':'[email protected];' 'Referer':'http://fanyi.youdao.com/' 'User-Agent':'Mozilla/5.0 (Windows NT 6.2; rv:51.0) Gecko/20100101 Firefox/51.0' }<line_sep>data={'i':text 'from':sl 'to':tl 'smartresult':'dict' 'client':'fanyideskweb' 'salt':salt 'sign':sign 'doctype':'json' 'version':'2.1' 'keyfrom':'fanyi.web' 'action':'FY_BY_CL1CKBUTTON' 'typoResult':'true'}<line_sep>r=self.http_post(self.url data header)<if_stmt><not>r<block_start><return><none><block_end><try_stmt><block_start>obj=r.json()<block_end><except_stmt><block_start><return><none><block_end># pprint.pprint(obj) res=self.create_translation(sl tl text)<line_sep>res['definition']=self.get_definition(obj)<line_sep>res['explain']=self.get_explain(obj)<line_sep><return>res<block_end><def_stmt>get_definition self obj<block_start>translation=''<line_sep>t=obj.get('translateResult')<if_stmt>t<block_start><for_stmt>n t<block_start>part=[]<for_stmt>m n<block_start>x=m.get('tgt')<if_stmt>x<block_start>part.append(x)<block_end><block_end><if_stmt>part<block_start>translation<augadd>', '.join(part)<block_end><block_end><block_end><return>translation<block_end><def_stmt>get_explain self obj<block_start>explain=[]<if_stmt>'smartResult'<in>obj<block_start>smarts=obj['smartResult']['entries']<for_stmt>entry smarts<block_start><if_stmt>entry<block_start>entry=entry.replace('\r' '')<line_sep>entry=entry.replace('\n' '')<line_sep>explain.append(entry)<block_end><block_end><block_end><return>explain<block_end><block_end>#---------------------------------------------------------------------- # Bing2: 免费 web 接口,只能查单词 #---------------------------------------------------------------------- <class_stmt>BingDict(BasicTranslator)<block_start><def_stmt>__init__ self **argv<block_start>super(BingDict self).__init__('bingdict' **argv)<line_sep>self._agent='Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101'<line_sep>self._agent<augadd>' Firefox/50.0'<line_sep>self._url='http://bing.com/dict/SerpHoverTrans'<line_sep>self._cnurl='http://cn.bing.com/dict/SerpHoverTrans'<block_end><def_stmt>translate self sl tl text<block_start>url=('zh'<in>tl)<and>self._cnurl<or>self._url<line_sep>url=self._cnurl<line_sep>url=url+'?q='+self.url_quote(text)<line_sep>headers={# 'Host': 'cn.bing.com', 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' 'Accept-Language':'en-US,en;q=0.5' }<line_sep>resp=self.http_get(url <none> headers)<if_stmt><not>resp<block_start><return><none><block_end>resp=resp.text<line_sep>res=self.create_translation(sl tl text)<line_sep>res['sl']='auto'<line_sep>res['tl']='auto'<line_sep>res['text']=text<line_sep>res['phonetic']=self.get_phonetic(resp)<line_sep>res['explain']=self.get_explain(resp)<line_sep><return>res<block_end><def_stmt>get_phonetic self html<block_start><if_stmt><not>html<block_start><return>''<block_end>m=re.findall(r'<span class="ht_attr" lang=".*?">\[(.*?)\] </span>' html)<if_stmt><not>m<block_start><return><none><block_end><return>m[0].strip()<block_end><def_stmt>get_explain self html<block_start><if_stmt><not>html<block_start><return>[]<block_end>m=re.findall(r'<span class="ht_pos">(.*?)</span><span class="ht_trs">(.*?)</span>' html)<line_sep>expls=[]<for_stmt>item m<block_start>expls.append('%s %s'%item)<block_end><return>expls<block_end><block_end>#---------------------------------------------------------------------- # Baidu Translator 
#---------------------------------------------------------------------- <class_stmt>BaiduTranslator(BasicTranslator)<block_start><def_stmt>__init__ self **argv<block_start>super(BaiduTranslator self).__init__('baidu' **argv)<if_stmt>'apikey'<not><in>self._config<block_start>sys.stderr.write('error: missing apikey in [baidu] section\n')<line_sep>sys.exit()<block_end><if_stmt>'secret'<not><in>self._config<block_start>sys.stderr.write('error: missing secret in [baidu] section\n')<line_sep>sys.exit()<block_end>self.apikey=self._config['apikey']<line_sep>self.secret=self._config['secret']<line_sep>langmap={'zh-cn':'zh' 'zh-chs':'zh' 'zh-cht':'cht' 'en-us':'en' 'en-gb':'en' 'ja':'jp' }<line_sep>self.langmap=langmap<block_end><def_stmt>convert_lang self lang<block_start>t=lang.lower()<if_stmt>t<in>self.langmap<block_start><return>self.langmap[t]<block_end><return>lang<block_end><def_stmt>translate self sl tl text<block_start>sl,tl=self.guess_language(sl tl text)<line_sep>req={}<line_sep>req['q']=text<line_sep>req['from']=self.convert_lang(sl)<line_sep>req['to']=self.convert_lang(tl)<line_sep>req['appid']=self.apikey<line_sep>req['salt']=str(int(time.time()<times>1000)+random.randint(0 10))<line_sep>req['sign']=self.sign(text req['salt'])<line_sep>url="https://fanyi-api.baidu.com/api/trans/vip/translate"<line_sep>r=self.http_post(url req)<line_sep>resp=r.json()<line_sep>res={}<line_sep>res['text']=text<line_sep>res['sl']=sl<line_sep>res['tl']=tl<line_sep>res['info']=resp<line_sep>res['translation']=self.render(resp)<line_sep>res['html']=<none><line_sep>res['xterm']=<none><line_sep><return>res<block_end><def_stmt>sign self text salt<block_start>t=self.apikey+text+salt+self.secret<line_sep><return>self.md5sum(t)<block_end><def_stmt>render self resp<block_start>output=''<line_sep>result=resp['trans_result']<for_stmt>item result<block_start>output<augadd>''+item['src']+'\n'<line_sep>output<augadd>' * '+item['dst']+'\n'<block_end><return>output<block_end><block_end>#---------------------------------------------------------------------- # 词霸 #---------------------------------------------------------------------- <class_stmt>CibaTranslator(BasicTranslator)<block_start><def_stmt>__init__ self **argv<block_start>super(CibaTranslator self).__init__('ciba' **argv)<block_end><def_stmt>translate self sl tl text<block_start>sl,tl=self.guess_language(sl tl text)<line_sep>url='https://fy.iciba.com/ajax.php'<line_sep>req={}<line_sep>req['a']='fy'<line_sep>req['f']=sl<line_sep>req['t']=tl<line_sep>req['w']=text<line_sep>r=self.http_get(url req <none>)<if_stmt><not>r<block_start><return><none><block_end><try_stmt><block_start>resp=r.json()<block_end><except_stmt><block_start><return><none><block_end>resp=r.json()<if_stmt><not>resp<block_start><return><none><block_end>res=self.create_translation(sl tl text)<line_sep>res['definition']=''<if_stmt>'content'<in>resp<block_start><if_stmt>'out'<in>resp['content']<block_start>res['definition']=resp['content']['out']<or>''<block_end><if_stmt>'ph_en'<in>resp['content']<block_start>res['phonetic']=resp['content']['ph_en']<or>''<block_end><if_stmt>'word_mean'<in>resp['content']<block_start>res['explain']=resp['content']['word_mean']<or>''<block_end><block_end><return>res<block_end><block_end>#---------------------------------------------------------------------- # 分析命令行参数 #---------------------------------------------------------------------- <def_stmt>getopt 
argv<block_start>args=[]<line_sep>options={}<if_stmt>argv<is><none><block_start>argv=sys.argv[1:]<block_end>index=0<line_sep>count=len(argv)<while_stmt>index<l>count<block_start>arg=argv[index]<if_stmt>arg<ne>''<block_start>head=arg[:1]<if_stmt>head<ne>'-'<block_start><break><block_end><if_stmt>arg<eq>'-'<block_start><break><block_end>name=arg.lstrip('-')<line_sep>key,_,val=name.partition('=')<line_sep>options[key.strip()]=val.strip()<block_end>index<augadd>1<block_end><while_stmt>index<l>count<block_start>args.append(argv[index])<line_sep>index<augadd>1<block_end><return>options args<block_end>#---------------------------------------------------------------------- # 引擎注册 #---------------------------------------------------------------------- ENGINES={'google':GoogleTranslator 'azure':AzureTranslator 'baidu':BaiduTranslator 'youdao':YoudaoTranslator 'bing':BingDict 'ciba':CibaTranslator }<line_sep>#---------------------------------------------------------------------- # 主程序 #---------------------------------------------------------------------- <def_stmt>main argv=<none><block_start><if_stmt>argv<is><none><block_start>argv=sys.argv<block_end>argv=[n<for>n argv]<line_sep>options,args=getopt(argv[1:])<line_sep>engine=options.get('engine')<if_stmt><not>engine<block_start>engine='google'<block_end>sl=options.get('from')<if_stmt><not>sl<block_start>sl='auto'<block_end>tl=options.get('to')<if_stmt><not>tl<block_start>tl='auto'<block_end><if_stmt><not>args<block_start>msg='usage: translator.py {--engine=xx} {--from=xx} {--to=xx}'<line_sep>print(msg+' {-json} text')<line_sep>print('engines:' list(ENGINES.keys()))<line_sep><return>0<block_end>text=' '.join(args)<line_sep>cls=ENGINES.get(engine)<if_stmt><not>cls<block_start>print('bad engine name: '+engine)<line_sep><return>-1<block_end>translator=cls()<line_sep>res=translator.translate(sl tl text)<if_stmt>'json'<in>options<block_start>text=json.dumps(res)<line_sep>sys.stdout.write(str(text))<line_sep><return>0<block_end><if_stmt><not>res<block_start><return>-2<block_end><if_stmt>'text'<in>res<block_start><if_stmt>res['text']<block_start>print(res['text'])<block_end><block_end><if_stmt>'phonetic'<in>res<block_start><if_stmt>res['phonetic']<and>('phonetic'<in>options)<block_start>print('['+res['phonetic']+']')<block_end><block_end><if_stmt>'definition'<in>res<block_start><if_stmt>res['definition']<block_start>print(res['definition'])<block_end><block_end><if_stmt>'explain'<in>res<block_start><if_stmt>res['explain']<block_start>print('\n'.join(res['explain']))<block_end><block_end><elif_stmt>'translation'<in>res<block_start><if_stmt>res['translation']<block_start>print(res['translation'])<block_end><block_end><if_stmt>'alternative'<in>res<block_start><if_stmt>res['alternative']<block_start>print('\n'.join(res['alternative']))<block_end><block_end><return>0<block_end>#---------------------------------------------------------------------- # 有待尝试的新接口 #---------------------------------------------------------------------- ''' http://dict.youdao.com/fsearch?client=deskdict&keyfrom=chrome.extension&q=a%20day&pos=-1&doctype=xml&xmlVersion=3.2&dogVersion=1.0&vendor=unknown&appVer=3.1.17.4208 '''<line_sep>#---------------------------------------------------------------------- # testing suit #---------------------------------------------------------------------- <if_stmt>__name__<eq>'__main__'<block_start><def_stmt>test1 
<block_start>bt=BasicTranslator('test')<line_sep>r=bt.request("http://www.baidu.com")<line_sep>print(r.text)<line_sep><return>0<block_end><def_stmt>test2 <block_start>gt=GoogleTranslator()<line_sep># r = gt.translate('auto', 'auto', 'Hello, World !!') # r = gt.translate('auto', 'auto', '你吃饭了没有?') # r = gt.translate('auto', 'auto', '长') r=gt.translate('auto' 'auto' 'long')<line_sep># r = gt.translate('auto', 'auto', 'kiss') # r = gt.translate('auto', 'auto', '亲吻') <import_stmt>pprint<line_sep>print(r['translation'])<line_sep># pprint.pprint(r['info']) <return>0<block_end><def_stmt>test3 <block_start>t=YoudaoTranslator()<line_sep>r=t.translate('auto' 'auto' 'kiss')<import_stmt>pprint<line_sep>pprint.pprint(r)<line_sep>print(r['translation'])<line_sep><return>0<block_end><def_stmt>test4 <block_start>t=AzureTranslator()<line_sep>r=t.translate('' 'japanese' '吃饭没有?')<line_sep># print(r['info']) # print() print(r['translation'])<block_end><def_stmt>test5 <block_start>t=BaiduTranslator()<line_sep>r=t.translate('' '' '吃饭了没有?')<import_stmt>pprint<line_sep>pprint.pprint(r)<line_sep>print(r['translation'])<line_sep><return>0<block_end><def_stmt>test6 <block_start>t=CibaTranslator()<line_sep>r=t.translate('' '' '吃饭没有?')<line_sep># print(r['info']) # print() print(r['translation'])<block_end><def_stmt>test7 # t = CibaTranslator() <block_start>t=GoogleTranslator()<line_sep># t = YoudaoTranslator() # t = BingDict() # r = t.translate('zh', 'en', '吃饭了没有?') # r = t.translate('', '', 'apple') r=t.translate('' '' '正在测试翻译一段话')<line_sep>pprint.pprint(r)<block_end><def_stmt>test9 <block_start>argv=['' '正在测试翻译一段话']<line_sep>main(argv)<line_sep>print('=====')<line_sep>argv=['' '--engine=bing' '--sl=zh' '--tl=en' '正在测试翻译一段话']<line_sep>main(argv)<line_sep>print('=====')<line_sep>argv=['' '--engine=bing' '--sl=zh' '--tl=en' '-json' '苹果']<line_sep>main(argv)<line_sep><return>0<block_end># test9() main()<block_end>
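The translator script above already routes everything through main(); a minimal usage sketch, assuming the module is saved as translator.py (the name its own usage string suggests) and that network access to the chosen engine is available:

```python
from translator import main  # assumes the module above is saved as translator.py

# Default engine (google), source/target languages auto-detected.
main(['translator.py', 'hello world'])

# Explicit engine and language pair, JSON output on stdout.
main(['translator.py', '--engine=bing', '--from=en', '--to=zh', '-json', 'apple'])
```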
<def_stmt>bubble_sort arry<block_start>n=len(arry)# get the length of the array <for_stmt>i range(n)<block_start><for_stmt>j range(1 n-i)<block_start><if_stmt>arry[j-1]<g>arry[j]# if the previous element is greater than the next one <block_start>arry[j-1],arry[j]=arry[j] arry[j-1]# swap the two elements <block_end><block_end><block_end><return>arry<block_end>
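A quick usage sketch for the bubble sort above; it swaps elements in place and also returns the list, running in O(n²) time:

```python
data = [5, 2, 9, 1, 5, 6]
print(bubble_sort(data))  # [1, 2, 5, 5, 6, 9]
# data itself is now sorted too, since the swaps happen in place.
```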
# # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or # its licensors. # # For complete copyright and license terms please see the LICENSE at the root of this # distribution (the "License"). All use of this software is governed by the License, # or, if provided, by the license below or the license accompanying this file. Do not # remove or modify any license notices. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # <import_stmt>unittest<import_from_stmt>unittest mock<import_from_stmt>unittest.mock MagicMock<import_from_stmt>cgf_utils.aws_sts AWSSTSUtils<class_stmt>UnitTest_AWSSTSUtils(unittest.TestCase)<block_start>TEST_REGION="test-region"<line_sep>TOKEN_FROM_REGIONAL="<PASSWORD>"# random string TOKEN_FROM_GLOBAL="F"+TOKEN_FROM_REGIONAL<line_sep>MOCK_SESSION=MagicMock()<line_sep>MOCK_SESSION.client.return_value=MagicMock()<def_stmt>test_endpoint_construction self<block_start>aws_sts=AWSSTSUtils(self.TEST_REGION)<line_sep>self.assertTrue(self.TEST_REGION<in>aws_sts.endpoint_url)<block_end><def_stmt>test_client_construction_with_session self<block_start>aws_sts=AWSSTSUtils(self.TEST_REGION)<line_sep>client=aws_sts.client(self.MOCK_SESSION)<line_sep>self.assertIsNotNone(client)<line_sep>self.MOCK_SESSION.client.assert_called_once_with('sts' endpoint_url=aws_sts.endpoint_url)<block_end>@mock.patch("boto3.client")<def_stmt>test_client_construction self mock_boto_sts_client<block_start>aws_sts=AWSSTSUtils(self.TEST_REGION)<line_sep>client=aws_sts.client()<line_sep>self.assertIsNotNone(client)<line_sep>mock_boto_sts_client.assert_called_once_with('sts' endpoint_url=aws_sts.endpoint_url region_name=self.TEST_REGION)<block_end>@mock.patch("boto3.Session")<def_stmt>test_client_construction_with_credentials self mock_get_session<block_start>mock_session=mock.Mock()<line_sep>mock_session.client.return_value=MagicMock()<line_sep>mock_get_session.return_value=mock_session<line_sep>aws_sts=AWSSTSUtils(self.TEST_REGION)<line_sep>client=aws_sts.client_with_credentials(aws_access_key_id="ACCESS_KEY_ID" aws_secret_access_key="SECRET_ACCESS_KEY" aws_session_token="<PASSWORD>")<line_sep>self.assertIsNotNone(client)<line_sep>mock_get_session.assert_called_once_with(aws_access_key_id="ACCESS_KEY_ID" aws_secret_access_key="SECRET_ACCESS_KEY" aws_session_token="<PASSWORD>" region_name=self.TEST_REGION)<line_sep>mock_session.client.assert_called_once_with('sts' endpoint_url=aws_sts.endpoint_url)<block_end><def_stmt>test_session_token_validation self# No exception when calling <block_start>AWSSTSUtils.validate_session_token(self.TOKEN_FROM_REGIONAL)<line_sep># Expect exception when calling <with_stmt>self.assertRaises(RuntimeError)<block_start>AWSSTSUtils.validate_session_token(self.TOKEN_FROM_GLOBAL)<block_end><block_end><block_end>
<import_stmt>json<import_from_stmt>collections defaultdict<import_from_stmt>urllib.parse urlparse<import_from_stmt>shutil rmtree<import_stmt>os<import_stmt>tensorflow.compat.v1<as>tf<import_stmt>tensorflow.compat.v2<as>tf2<import_stmt>mesh_tensorflow<as>mtf<import_stmt>logging<import_stmt>sys<import_from_stmt>mesh_tensorflow.ops Operation Tensor<def_stmt>fetch_model_params model<block_start>model_path=model<if>model.endswith(".json")<else>f"./configs/{model}.json"<with_stmt>open(model_path)<as>f<block_start>params=json.load(f)<block_end><return>defaultdict(<lambda>:<none> params)<block_end><def_stmt>yes_or_no question<block_start><while_stmt><true><block_start>reply=str(input(question+' (y/n): ')).lower().strip()<if_stmt>reply[:1]<eq>'y'<block_start><return><true><block_end><if_stmt>reply[:1]<eq>'n'<block_start><return><false><block_end><block_end><block_end><def_stmt>mode_to_str mode<block_start><if_stmt>mode<eq>tf.estimator.ModeKeys.PREDICT<block_start><return>"predict"<block_end><elif_stmt>mode<eq>tf.estimator.ModeKeys.EVAL<block_start><return>"eval"<block_end><elif_stmt>mode<eq>tf.estimator.ModeKeys.TRAIN<block_start><return>"train"<block_end><else_stmt><block_start><raise>ValueError(f"Invalid mode {mode}")<block_end><block_end><def_stmt>remove_gs_or_filepath path<block_start>parsed_url=urlparse(path)<if_stmt>parsed_url.scheme<eq>"gs"<block_start>os.system(f"gsutil rm -rf {path}")<line_sep><return><block_end>rmtree(path)<block_end><def_stmt>maybe_remove_gs_or_filepath path<block_start><if_stmt>yes_or_no(f"Are you sure you want to remove '{path}' to start afresh?")<block_start>remove_gs_or_filepath(path)<block_end><else_stmt><block_start>exit()<block_end><block_end><def_stmt>get_n_trainable_vars graph<block_start>""" Gets number of trainable vars in a MTF model. :param graph: Mesh-Tensorflow graph :return: None """<line_sep>total_parameters=0<for_stmt>variable graph.trainable_variables<block_start>shape=variable.shape.dims<line_sep>variable_parameters=1<for_stmt>dim shape<block_start>variable_parameters<augmul>dim.size<block_end>total_parameters<augadd>variable_parameters<block_end>print(f"\n\nN PARAMS:\n{total_parameters:,}\n\n")<block_end><def_stmt>print_dim_names graph<block_start>""" Print names of all Dimensions :param graph: Mesh-Tensorflow graph :return: None """<line_sep>all_dim_names=[]<for_stmt>variable graph.all_variables<block_start>names=variable.shape.dimension_names<line_sep>all_dim_names.append(names)<block_end># Print all dim names in graph & write to file all_dim_names=[item<for>sublist all_dim_names<for>item sublist]# Flatten all dims unique_dims=list(set(all_dim_names))<line_sep>print("ALL DIM NAMES:")<for_stmt>dim_name unique_dims<block_start>print(dim_name)<block_end>print('\n')<block_end><def_stmt>get_graph_info graph<block_start>""" Wrapper fn that calculates number of trainable vars in an MTF graph & prints all dim_names to file :param graph: Mesh-Tensorflow graph :return: None """<line_sep>get_n_trainable_vars(graph)<line_sep>print_dim_names(graph)<block_end><def_stmt>create_host_call model_dir<block_start>"""Construct a host_call writing scalar summaries. Borrowed from t2t. Args: model_dir: String containing path to train Returns: (fn, args) Pair to be called by TPUEstimator as the host_call. 
"""<line_sep>graph=tf.get_default_graph()<line_sep># A list of (name, lowered tensor) tuples summaries=graph.get_collection(mtf.utils.SCALAR_SUMMARIES_COLLECTION_KEY)<def_stmt>maybe_cast tensor# assert tensor.shape.is_compatible_with([]), tensor.name <block_start><if_stmt>tensor.dtype<eq>tf.int64<block_start><return>tf.to_int32(tensor)<block_end><if_stmt>tensor.dtype<eq>tf.bfloat16<block_start><return>tf.cast(tensor tf.float32)<block_end><return>tensor<block_end>reshaped_tensors=[]<for_stmt>_,t summaries<block_start><try_stmt><block_start>t=tf.reshape(maybe_cast(t) [1])<block_end><except_stmt><block_start><pass><block_end>reshaped_tensors.append(t)<block_end># When no supported summaries are found, don't create host_call. Otherwise, # TPU outfeed queue would enqueue global_step while host_call doesn't dequeue # it, eventually causing hang. <if_stmt><not>reshaped_tensors<block_start><return><none><block_end><def_stmt>host_call_fn global_step *args<block_start>"""Training host call. Creates scalar summaries for training metrics."""<line_sep># This function is executed on the CPU and should not directly reference # any Tensors in the rest of the `model_fn`. To pass Tensors from the # model to the `model_fn`, provide as part of the `host_call`. global_step=tf.cast(global_step[0] tf.int64)<with_stmt>tf2.summary.create_file_writer(model_dir).as_default()# We cannot directly use any tensor from summaries, because each # tensor here must be a concat of multiple tensors from all shards. # Therefore, we rely on the assumption that args wil have the same # length as summaries, and all tensors in args will have the same # order of self._tup_summaries. <block_start><assert_stmt>len(args)<eq>len(summaries)<for_stmt>i,tensor enumerate(args)<block_start>name=summaries[i][0]<if_stmt><not>"image"<in>name<block_start>tf2.summary.scalar(name tf.reduce_mean(tensor) step=global_step)<block_end><else_stmt><block_start>tf2.summary.image(name tensor step=global_step)<block_end><block_end><block_end><return>tf.summary.all_v2_summary_ops()<block_end>global_step_t=tf.reshape(tf.to_int32(tf.train.get_global_step()) [1])<line_sep><return>host_call_fn [global_step_t]+reshaped_tensors<block_end><def_stmt>simd_mesh_setup params mesh_shape layout_rules<block_start>"""Constructs SimdMesh function - instructions on how to evenly split tensors across all TPU cores"""<line_sep>num_hosts=params["context"].num_hosts<line_sep>host_placement_fn=params["context"].tpu_host_placement_function<line_sep>device_list=[host_placement_fn(host_id=i)<for>i range(num_hosts)]<line_sep>tf.logging.info(f"device_list = {device_list}")<line_sep># TODO: Better estimation of replica cache size? 
replica_cache_size=300<times>1000000# 300M per replica # Worker 0 caches all the TPU binaries worker0_mem=replica_cache_size<times>params["context"].num_replicas<line_sep>devices_memory_usage=[worker0_mem]+[0]<times>(num_hosts-1)<line_sep>var_placer=mtf.utils.BalancedVariablePlacer(device_list devices_memory_usage)<line_sep>mesh_devices=[""]<times>mesh_shape.size<line_sep>mesh_impl=mtf.simd_mesh_impl.SimdMeshImpl(mesh_shape layout_rules mesh_devices params["context"].device_assignment)<line_sep><return>var_placer mesh_impl<block_end><def_stmt>setup_logging args logdir="logs"<block_start>os.makedirs(logdir exist_ok=<true>)<line_sep>tf.logging.set_verbosity(logging.INFO)<line_sep>tf.get_logger().propagate=<false># Remove double log on console name=os.path.splitext(os.path.basename(args.model))[0]<line_sep>handlers=[logging.FileHandler(f"logs/{name}.log") logging.StreamHandler(sys.stdout)]<line_sep>logger=logging.getLogger("tensorflow")<line_sep>logger.handlers=handlers<line_sep><return>logger<block_end><class_stmt>ScalarSummaryOperation(Operation)<block_start>"""Similar to tf.Print."""<def_stmt>__init__ self name x<block_start>super(ScalarSummaryOperation self).__init__([x] x.mesh name=name)<line_sep>self._outputs=[Tensor(self x.shape x.dtype)]<block_end><def_stmt>lower self lowering<block_start>lowered_input=lowering.tensors[self.inputs[0]].to_laid_out_tensor()<line_sep>tf.add_to_collection(mtf.utils.SCALAR_SUMMARIES_COLLECTION_KEY (self.name lowered_input.tensor_list[0]))<line_sep>lowering.set_tensor_lowering(self.outputs[0] lowered_input)<block_end><def_stmt>gradient self grad_ys<block_start><return>grad_ys<block_end><block_end><def_stmt>scalar_summary name x<block_start>"""Call tf.summary.scalar. Caveat - summaries do not generally work on TPU - they need to be rewritten into a host call. TODO(noam): provide a pointer to code for this. Args: name: a string x: a 0-dimensional Tensor Returns: a Tensor which is identical in value to x """<line_sep><return>ScalarSummaryOperation(name x)<block_end>
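fetch_model_params above accepts either a bare config name or an explicit .json path and wraps the parsed JSON in a defaultdict, so absent keys read as None instead of raising. A small sketch, with the config name purely illustrative:

```python
# Assumes a config file exists at ./configs/gpt3_small.json (illustrative name only).
params = fetch_model_params("gpt3_small")
print(params["n_layer"])      # whatever the JSON defines for that key
print(params["not_a_key"])    # None, courtesy of the defaultdict default
```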
# This sample tests type inference and TypeVar matching. <import_from_stmt>typing Union<line_sep>m=int(1)<line_sep>n=float(1.1)<line_sep>p="hello"<line_sep>a=dict(x=m y=m)<line_sep>a1:int=a["x"]<line_sep>b=dict(x=n y=n)<line_sep># This should generate an error because b should be # typed as dict[Any, float], and b["x"] is a float. b1:int=b["x"]<line_sep>b2:float=b["x"]<line_sep>c=dict(x=m y=n)<line_sep># This should generate an error because c should be # typed as dict[Any, float]. c1:int=c["x"]<line_sep>c2:float=c["x"]<line_sep>d=dict(x=p y=p)<line_sep># This should generate an error because d should be # typed as dict[Any, str]. d1:float=d["x"]<line_sep>d2:str=d["x"]<line_sep>e=dict(x=n y=p)<line_sep># This should generate an error because e should be # typed as dict[Any, Union[float, str]]. e1:str=e["x"]<line_sep># This should generate an error because e should be # typed as dict[Any, Union[float, str]]. e2:float=e["x"]<line_sep>e3:Union[float str]=e["x"]<line_sep>
<import_from_stmt>.dior_model *<import_from_stmt>utils.util StoreList StoreDictKeyPair<import_from_stmt>models.networks.block_extractor.block_extractor BlockExtractor<class_stmt>FlowModel(DIORModel)<block_start><def_stmt>__init__ self opt<block_start>opt.frozen_flownet=<false><line_sep>DIORModel.__init__(self opt)<line_sep>self.netE_opt=opt.netE<line_sep>self.visual_names=['from_img' 'to_img' 'fake_B']<block_end><def_stmt>_init_models self opt<block_start>self.model_names<augadd>["Flow"]<line_sep>self.netFlow=networks.define_tool_networks(tool='flownet' load_ckpt_path=opt.flownet_path gpu_ids=opt.gpu_ids)<line_sep>self.extractor=BlockExtractor(kernel_size=1)<block_end><def_stmt>forward self<block_start>"""Run forward pass; called by both functions <optimize_parameters> and <test>."""<line_sep>self.flow_fields,_=self.netFlow(self.from_img self.from_kpt self.to_kpt)<line_sep>_,_,H,W=self.flow_fields[-1].size()<line_sep>from_img=F.interpolate(self.from_img (H W))<line_sep>self.fake_B=self.extractor(from_img self.flow_fields[-1])<line_sep>_,_,H,W=self.to_img.size()<line_sep>self.fake_B=F.interpolate(self.fake_B (H W))<block_end><def_stmt>backward_G self<block_start>self.loss_G=0<line_sep>flow_feilds=self.flow_fields<line_sep>self.loss_flow_cor=0.0<if_stmt>self.loss_coe['flow_cor']<g>0<block_start>self.loss_flow_cor=self.Correctness(self.to_img self.from_img flow_feilds [2 3])<times>self.loss_coe['flow_cor']<line_sep>self.loss_G=self.loss_G+self.loss_flow_cor<block_end>self.loss_flow_reg=0.0<if_stmt>self.loss_coe['flow_reg']<g>0# import pdb; pdb.set_trace() <block_start>self.loss_flow_reg=self.Regularization(flow_feilds)<times>self.loss_coe['flow_reg']<line_sep>self.loss_G=self.loss_G+self.loss_flow_reg<block_end><block_end><def_stmt>optimize_parameters self<block_start>self.forward()# compute fake images: G(A) # update G self.optimizer_G.zero_grad()# set G's gradients to zero self.backward_G()# calculate graidents for G self.loss_G.backward()<line_sep>self.optimizer_G.step()# udpate G's weights self.log_loss_update()<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>os<import_from_stmt>autogoal.datasets datapath download<import_from_stmt>sklearn.feature_extraction DictVectorizer<def_stmt>load <block_start>""" Loads corpora from [Yeast uci dataset](https://archive.ics.uci.edu/ml/datasets/Yeast). ##### Examples ```python >>> X, y = load() >>> X.shape (1484, 8) >>> len(y) 1484 ``` """<try_stmt><block_start>download("yeast")<block_end><except_stmt><block_start>print("Error loading data. This may be caused due to bad connection. Please delete badly downloaded data and retry")<line_sep><raise><block_end>f=open(datapath("yeast")/"yeast.data" "r")<line_sep>X=[]<line_sep>y=[]<for_stmt>i f<block_start>clean_line=i.strip().split()<line_sep>temp={}<line_sep>temp["1"]=float(clean_line[1])<line_sep>temp["2"]=float(clean_line[2])<line_sep>temp["3"]=float(clean_line[3])<line_sep>temp["4"]=float(clean_line[4])<line_sep>temp["5"]=float(clean_line[5])<line_sep>temp["6"]=float(clean_line[6])<line_sep>temp["7"]=float(clean_line[7])<line_sep>temp["8"]=float(clean_line[8])<line_sep>X.append(temp)<line_sep>y.append(clean_line[9])<block_end><return>_load_onehot(X y)<block_end><def_stmt>_load_onehot X y<block_start>vec=DictVectorizer(sparse=<false>)<line_sep><return>vec.fit_transform(X) np.asarray(y)<block_end>
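The loader above leans on scikit-learn's DictVectorizer to turn the per-row feature dicts into a dense matrix; a minimal sketch of that step in isolation:

```python
from sklearn.feature_extraction import DictVectorizer

rows = [{"1": 0.58, "2": 0.61}, {"1": 0.43, "2": 0.67}]  # two toy yeast-style rows
X = DictVectorizer(sparse=False).fit_transform(rows)
print(X.shape)  # (2, 2) -- one column per distinct dictionary key
```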
<import_stmt>logging<import_from_stmt>browser.ajax ajax<class_stmt>XMLHTTPHandler(logging.Handler)<block_start>""" A class which sends records to a Web server, using either GET or POST semantics. """<def_stmt>__init__ self url method="GET"<block_start>""" Initialize the instance with the host, the request URL, and the method ("GET" or "POST") """<line_sep>logging.Handler.__init__(self)<line_sep>method=method.upper()<if_stmt>method<not><in>["GET" "POST"]<block_start><raise>ValueError("method must be GET or POST")<block_end>self.url=url<line_sep>self.method=method<block_end><def_stmt>mapLogRecord self record<block_start>""" Default implementation of mapping the log record into a dict that is sent as the CGI data. Overwrite in your class. Contributed by <NAME>. """<line_sep><return>record.__dict__<block_end><def_stmt>emit self record<block_start>""" Emit a record. Send the record to the Web server as a percent-encoded dictionary """<try_stmt><block_start>req=ajax.open(self.method self.url sync=<false>)<line_sep>req.send(self.mapLogRecord(record))<block_end><except_stmt><block_start>self.handleError(record)<block_end><block_end><block_end>
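A sketch of plugging the handler above into the standard logging setup; it relies on browser.ajax, so it only runs under Brython, and the endpoint URL here is a placeholder:

```python
import logging

handler = XMLHTTPHandler("/log", method="POST")  # "/log" is a hypothetical endpoint
logger = logging.getLogger("app")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("page loaded")  # record fields are sent to the server via ajax
```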
# # Class for Bruggeman tortuosity # <import_stmt>pybamm<import_from_stmt>.base_tortuosity BaseModel<class_stmt>Bruggeman(BaseModel)<block_start>"""Submodel for Bruggeman tortuosity Parameters ---------- param : parameter class The parameters to use for this submodel phase : str The material for the model ('electrolyte' or 'electrode'). options : dict, optional A dictionary of options to be passed to the model. **Extends:** :class:`pybamm.tortuosity.BaseModel` """<def_stmt>__init__ self param phase options=<none> set_leading_order=<false><block_start>super().__init__(param phase options=options)<line_sep>self.set_leading_order=set_leading_order<block_end><def_stmt>get_coupled_variables self variables<block_start>param=self.param<if_stmt>self.phase<eq>"Electrolyte"<block_start><if_stmt>self.half_cell<block_start>tor_n=<none><block_end><else_stmt><block_start>eps_n=variables["Negative electrode porosity"]<line_sep>tor_n=eps_n<power>param.b_e_n<block_end>eps_s=variables["Separator porosity"]<line_sep>tor_s=eps_s<power>param.b_e_s<line_sep>eps_p=variables["Positive electrode porosity"]<line_sep>tor_p=eps_p<power>param.b_e_p<block_end><elif_stmt>self.phase<eq>"Electrode"<block_start><if_stmt>self.half_cell<block_start>tor_n=<none><block_end><else_stmt><block_start>eps_n=variables["Negative electrode active material volume fraction"]<line_sep>tor_n=eps_n<power>param.b_s_n<block_end>eps_p=variables["Positive electrode active material volume fraction"]<line_sep>tor_s=pybamm.FullBroadcast(0 "separator" "current collector")<line_sep>tor_p=eps_p<power>param.b_s_p<block_end>variables.update(self._get_standard_tortuosity_variables(tor_n tor_s tor_p self.set_leading_order))<line_sep><return>variables<block_end><block_end>
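The submodel above encodes the Bruggeman relation, tortuosity = (volume fraction) ** b, separately for each domain; a stripped-down numerical sketch of that relation, with an illustrative exponent (in PyBaMM the exponent comes from the parameter set):

```python
import numpy as np

eps = np.array([0.3, 0.4, 0.5])  # porosity or active material volume fraction
b = 1.5                          # illustrative Bruggeman exponent
tor = eps ** b                   # tortuosity factor fed into the transport equations
print(tor)
```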
# -*- coding: utf-8 -*- # # Tencent is pleased to support the open source community by making QTA available. # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. # Licensed under the BSD 3-Clause License (the "License"); you may not use this # file except in compliance with the License. You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # """test cases for retry mechanism """<import_stmt>time<import_stmt>unittest<import_from_stmt>testbase.retry Retry RetryLimitExcceeded<class_stmt>TestRetry(unittest.TestCase)<block_start>"""test retry with invalid calllee """<def_stmt>test_retry_with_timeout self<block_start><def_stmt>dummy toggle_time start_ts<block_start><if_stmt>time.time()-start_ts<g>toggle_time<block_start><return><true><block_end><block_end>interval=1<line_sep>timeout=5<line_sep>retry=Retry(interval=interval timeout=timeout)<line_sep>self.assertRaises(ValueError retry.call <none>)<line_sep>start_time=time.time()<try_stmt><block_start>retry.call(dummy timeout+1 start_time)<block_end><except_stmt>RetryLimitExcceeded<block_start>time_cost=time.time()-start_time<line_sep>self.assertGreaterEqual(time_cost 5 "actual timeout=%s is less than specified timeout=%s"%(time_cost timeout))<block_end><else_stmt><block_start>self.fail("no RetryLimitExcceeded raised")<block_end>start_time=time.time()<line_sep>count=0<line_sep>retry=Retry(interval=interval timeout=timeout)<for_stmt>retry_item retry<block_start>count<augadd>1<line_sep>self.assertEqual(count retry_item.iteration "iteration does not match")<if_stmt>dummy(2 start_time)<block_start>time_cost=time.time()-start_time<line_sep>self.assertGreaterEqual(time_cost 2 "actual interval=%s is less than specified interval=%s"%(time_cost/float(count) interval))<line_sep><break><block_end><block_end><else_stmt><block_start>self.fail("unexpected timeout")<block_end><block_end><def_stmt>test_retry_with_count self<block_start><def_stmt>dummy param<block_start>param[0]<augadd>1<if_stmt>param[0]<g>2<block_start><return><true><block_end><block_end>retry=Retry(limit=1)<line_sep>self.assertRaises(ValueError retry.call <none>)<line_sep>x=[0]<try_stmt><block_start>retry.call(dummy x)<block_end><except_stmt>RetryLimitExcceeded<block_start><pass><block_end><else_stmt><block_start>self.fail("no RetryLimitExcceeded was raised")<block_end>x=[0]<line_sep>retry=Retry(limit=3)<try_stmt><block_start>retry.call(dummy x)<block_end><except_stmt>RetryLimitExcceeded<block_start>self.fail("RetryLimitExcceeded was raised")<block_end>x=[0]<line_sep>retry=Retry(limit=3 interval=<none>)<line_sep>retry_count=0<line_sep>start_time=time.time()<for_stmt>retry_item retry<block_start>retry_count<augadd>1<line_sep>self.assertEqual(retry_count retry_item.iteration "iteration does not match")<if_stmt>dummy(x)<block_start>self.assertEqual(retry_count 3 "iteration does not match")<line_sep><break><block_end><block_end>time_cost=time.time()-start_time<line_sep>self.assertLess(time_cost 0.05 "interval is unexpected")<line_sep>x=[-5]<line_sep>limit=3<line_sep>retry=Retry(limit=limit interval=0.5 raise_error=<false>)<line_sep>start_time=time.time()<line_sep>retry.call(dummy 
x)<line_sep>time_cost=time.time()-start_time<line_sep>self.assertGreaterEqual(time_cost+0.1 (limit-1)<times>0.5 "interval has no effect.")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>defaultTest="TestRetry.test_retry_with_count"<line_sep>defaultTest=<none><line_sep>unittest.main(defaultTest=defaultTest)<block_end>
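The tests above cover the two ways of using Retry: the blocking call form and the iterator form. A compact sketch of both, with a toy flaky function standing in for real work:

```python
import random
from testbase.retry import Retry, RetryLimitExcceeded

def flaky(threshold):
    return random.random() > threshold  # a truthy result ends the retrying

# Call form: up to 3 attempts, 0.5 s apart; raises RetryLimitExcceeded if all fail.
try:
    Retry(limit=3, interval=0.5).call(flaky, 0.7)
except RetryLimitExcceeded:
    print("gave up after 3 attempts")

# Iterator form: each retry_item carries the current .iteration count.
for retry_item in Retry(timeout=5, interval=1):
    if flaky(0.7):
        break
```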
<import_stmt>numpy<as>np<def_stmt>mrisensesim size ncoils=8 array_cent=<none> coil_width=2 n_rings=<none> phi=0<block_start>"""Apply simulated sensitivity maps. Based on a script by <NAME>. Args: size (tuple): Size of the image array for the sensitivity coils. nc_range (int, default: 8): Number of coils to simulate. array_cent (tuple, default: 0): Location of the center of the coil array. coil_width (double, default: 2): Parameter governing the width of the coil, multiplied by actual image dimension. n_rings (int, default: ncoils // 4): Number of rings for a cylindrical hardware set-up. phi (double, default: 0): Parameter for rotating coil geometry. Returns: list: A list of dimensions (ncoils, (N)), specifying spatially-varying sensitivity maps for each coil. """<if_stmt>array_cent<is><none><block_start>c_shift=[0 0 0]<block_end><elif_stmt>len(array_cent)<l>3<block_start>c_shift=array_cent+(0 )<block_end><else_stmt><block_start>c_shift=array_cent<block_end>c_width=coil_width<times>min(size)<if_stmt>len(size)<g>2<block_start><if_stmt>n_rings<is><none><block_start>n_rings=ncoils<floordiv>4<block_end><block_end>c_rad=min(size[0:1])/2<line_sep>smap=[]<if_stmt>len(size)<g>2<block_start>zz,yy,xx=np.meshgrid(range(size[2]) range(size[1]) range(size[0]) indexing="ij")<block_end><else_stmt><block_start>yy,xx=np.meshgrid(range(size[1]) range(size[0]) indexing="ij")<block_end><if_stmt>ncoils<g>1<block_start>x0=np.zeros((ncoils ))<line_sep>y0=np.zeros((ncoils ))<line_sep>z0=np.zeros((ncoils ))<for_stmt>i range(ncoils)<block_start><if_stmt>len(size)<g>2<block_start>theta=np.radians((i-1)<times>360/(ncoils+n_rings)+phi)<block_end><else_stmt><block_start>theta=np.radians((i-1)<times>360/ncoils+phi)<block_end>x0[i]=c_rad<times>np.cos(theta)+size[0]/2<line_sep>y0[i]=c_rad<times>np.sin(theta)+size[1]/2<if_stmt>len(size)<g>2<block_start>z0[i]=(size[2]/(n_rings+1))<times>(i<floordiv>n_rings)<line_sep>smap.append(np.exp(-1<times>((xx-x0[i])<power>2+(yy-y0[i])<power>2+(zz-z0[i])<power>2)/(2<times>c_width)))<block_end><else_stmt><block_start>smap.append(np.exp(-1<times>((xx-x0[i])<power>2+(yy-y0[i])<power>2)/(2<times>c_width)))<block_end><block_end><block_end><else_stmt><block_start>x0=c_shift[0]<line_sep>y0=c_shift[1]<line_sep>z0=c_shift[2]<if_stmt>len(size)<g>2<block_start>smap=np.exp(-1<times>((xx-x0)<power>2+(yy-y0)<power>2+(zz-z0)<power>2)/(2<times>c_width))<block_end><else_stmt><block_start>smap=np.exp(-1<times>((xx-x0)<power>2+(yy-y0)<power>2)/(2<times>c_width))<block_end><block_end>side_mat=np.arange(int(size[0]<floordiv>2)-20 1 -1)<line_sep>side_mat=np.reshape(side_mat (1 )+side_mat.shape)<times>np.ones(shape=(size[1] 1))<line_sep>cent_zeros=np.zeros(shape=(size[1] size[0]-side_mat.shape[1]<times>2))<line_sep>ph=np.concatenate((side_mat cent_zeros side_mat) axis=1)/10<if_stmt>len(size)<g>2<block_start>ph=np.reshape(ph (1 )+ph.shape)<block_end><for_stmt>i,s enumerate(smap)<block_start>smap[i]=s<times>np.exp(i<times>1j<times>ph<times>np.pi/180)<block_end><return>smap<block_end>
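A usage sketch for the coil-sensitivity simulator above; it returns a Python list with one complex-valued map per coil, which is typically stacked into a single array:

```python
import numpy as np

smaps = mrisensesim((64, 64), ncoils=8, coil_width=2)
smaps = np.stack(smaps)              # shape (8, 64, 64), complex-valued
print(smaps.shape, smaps.dtype)
```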
<import_stmt>caffe<import_stmt>argparse<import_stmt>os<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>time<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.mplot3d Axes3D<import_stmt>pygame<import_from_stmt>pygame.locals *<import_from_stmt>OpenGL.GL *<import_from_stmt>OpenGL.GLU *<import_stmt>utils<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--device' default='gpu')<line_sep>parser.add_argument('--model_dir' default='/media/tim_ho/HDD1/Projects/VNect-tensorflow/models')<line_sep>parser.add_argument('--input_size' default=368)<line_sep>parser.add_argument('--num_of_joints' default=21)<line_sep>parser.add_argument('--pool_scale' default=8)<line_sep>parser.add_argument('--plot_2d' default=<false>)<line_sep>parser.add_argument('--plot_3d' default=<false>)<line_sep>args=parser.parse_args()<line_sep>joint_color_code=[[139 53 255] [0 56 255] [43 140 237] [37 168 36] [147 147 0] [70 17 145]]<line_sep># Limb parents of each joint limb_parents=[1 15 1 2 3 1 5 6 14 8 9 14 11 12 14 14 1 4 7 10 13]<line_sep># input scales scales=[1.0 0.7]<def_stmt>demo <block_start>joints_2d=np.zeros(shape=(args.num_of_joints 2) dtype=np.int32)<line_sep>joints_3d=np.zeros(shape=(args.num_of_joints 3) dtype=np.float32)<if_stmt>args.plot_3d<block_start>plt.ion()<line_sep>fig=plt.figure()<line_sep>ax=fig.add_subplot(121 projection='3d')<line_sep>ax2=fig.add_subplot(122)<line_sep>plt.show()<block_end><if_stmt>args.device<eq>'cpu'<block_start>caffe.set_mode_cpu()<block_end><elif_stmt>args.device<eq>'gpu'<block_start>caffe.set_mode_gpu()<line_sep>caffe.set_device(0)<block_end><else_stmt><block_start><raise>ValueError('No such device')<block_end>model_prototxt_path=os.path.join(args.model_dir 'vnect_net.prototxt')<line_sep>model_weight_path=os.path.join(args.model_dir 'vnect_model.caffemodel')<line_sep># Load model model=caffe.Net(model_prototxt_path model_weight_path caffe.TEST)<line_sep># Show network structure and shape <for_stmt>layer_name model.params.keys()<block_start>print(layer_name model.params[layer_name][0].data.shape)<block_end>print('')<for_stmt>i model.blobs.keys()<block_start>print(i model.blobs[i].data.shape)<block_end>cam=cv2.VideoCapture(0)<line_sep>is_tracking=<false><line_sep># for img_name in os.listdir('test_imgs'): <while_stmt><true># if not is_tracking: <block_start>img_path='test_imgs/{}'.format('dance.jpg')<line_sep>t1=time.time()<line_sep>input_batch=[]<line_sep>cam_img=utils.read_square_image('' cam args.input_size 'WEBCAM')<line_sep># cam_img = utils.read_square_image(img_path, '', args.input_size, 'IMAGE') # cv2.imshow('', cam_img) # cv2.waitKey(0) orig_size_input=cam_img.astype(np.float32)<for_stmt>scale scales<block_start>resized_img=utils.resize_pad_img(orig_size_input scale args.input_size)<line_sep>input_batch.append(resized_img)<block_end>input_batch=np.asarray(input_batch dtype=np.float32)<line_sep>input_batch=np.transpose(input_batch (0 3 1 2))<line_sep>input_batch<augdiv>255.0<line_sep>input_batch<augsub>0.4<line_sep>model.blobs['data'].data[<ellipsis>]=input_batch<line_sep># Forward model.forward()<line_sep># Get output data x_hm=model.blobs['x_heatmap'].data<line_sep>y_hm=model.blobs['y_heatmap'].data<line_sep>z_hm=model.blobs['z_heatmap'].data<line_sep>hm=model.blobs['heatmap'].data<line_sep># Trans coordinates x_hm=x_hm.transpose([0 2 3 1])<line_sep>y_hm=y_hm.transpose([0 2 3 1])<line_sep>z_hm=z_hm.transpose([0 2 3 1])<line_sep>hm=hm.transpose([0 2 3 1])<line_sep># Average scale outputs 
hm_size=args.input_size<floordiv>args.pool_scale<line_sep>hm_avg=np.zeros(shape=(hm_size hm_size args.num_of_joints))<line_sep>x_hm_avg=np.zeros(shape=(hm_size hm_size args.num_of_joints))<line_sep>y_hm_avg=np.zeros(shape=(hm_size hm_size args.num_of_joints))<line_sep>z_hm_avg=np.zeros(shape=(hm_size hm_size args.num_of_joints))<for_stmt>i range(len(scales))<block_start>rescale=1.0/scales[i]<line_sep>scaled_hm=cv2.resize(hm[i : : :] (0 0) fx=rescale fy=rescale interpolation=cv2.INTER_LINEAR)<line_sep>scaled_x_hm=cv2.resize(x_hm[i : : :] (0 0) fx=rescale fy=rescale interpolation=cv2.INTER_LINEAR)<line_sep>scaled_y_hm=cv2.resize(y_hm[i : : :] (0 0) fx=rescale fy=rescale interpolation=cv2.INTER_LINEAR)<line_sep>scaled_z_hm=cv2.resize(z_hm[i : : :] (0 0) fx=rescale fy=rescale interpolation=cv2.INTER_LINEAR)<line_sep>mid=[scaled_hm.shape[0]<floordiv>2 scaled_hm.shape[1]<floordiv>2]<line_sep>hm_avg<augadd>scaled_hm[mid[0]-hm_size<floordiv>2:mid[0]+hm_size<floordiv>2 mid[1]-hm_size<floordiv>2:mid[1]+hm_size<floordiv>2 :]<line_sep>x_hm_avg<augadd>scaled_x_hm[mid[0]-hm_size<floordiv>2:mid[0]+hm_size<floordiv>2 mid[1]-hm_size<floordiv>2:mid[1]+hm_size<floordiv>2 :]<line_sep>y_hm_avg<augadd>scaled_y_hm[mid[0]-hm_size<floordiv>2:mid[0]+hm_size<floordiv>2 mid[1]-hm_size<floordiv>2:mid[1]+hm_size<floordiv>2 :]<line_sep>z_hm_avg<augadd>scaled_z_hm[mid[0]-hm_size<floordiv>2:mid[0]+hm_size<floordiv>2 mid[1]-hm_size<floordiv>2:mid[1]+hm_size<floordiv>2 :]<block_end>hm_avg<augdiv>len(scales)<line_sep>x_hm_avg<augdiv>len(scales)<line_sep>y_hm_avg<augdiv>len(scales)<line_sep>z_hm_avg<augdiv>len(scales)<line_sep>t2=time.time()<line_sep># Get 2d joints joints_2d=utils.extract_2d_joint_from_heatmap(hm_avg args.input_size joints_2d)<line_sep># Get 3d joints joints_3d=utils.extract_3d_joints_from_heatmap(joints_2d x_hm_avg y_hm_avg z_hm_avg args.input_size joints_3d)<line_sep>print('Post FPS' 1/(time.time()-t2))<line_sep># Plot 2d location heatmap joint_map=np.zeros(shape=(args.input_size args.input_size 3))<for_stmt>joint_num range(joints_2d.shape[0])<block_start>cv2.circle(joint_map center=(joints_2d[joint_num][1] joints_2d[joint_num][0]) radius=3 color=(255 0 0) thickness=-1)<block_end># Plot 2d limbs limb_img=utils.draw_limbs_2d(cam_img joints_2d limb_parents)<line_sep># Plot 3d limbs <if_stmt>args.plot_3d<block_start>ax.clear()<line_sep>ax.view_init(azim=0 elev=90)<line_sep>ax.set_xlim(-700 700)<line_sep>ax.set_ylim(-800 800)<line_sep>ax.set_zlim(-700 700)<line_sep>ax.set_xlabel('x')<line_sep>ax.set_ylabel('y')<line_sep>ax.set_zlabel('z')<line_sep>utils.draw_limbs_3d(joints_3d limb_parents ax)<block_end># draw heatmap # hm_img = utils.draw_predicted_heatmap(hm_avg*200, args.input_size) # cv2.imshow('hm', hm_img.astype(np.uint8)) # cv2.waitKey(0) glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)<line_sep>utils.draw_limb_3d_gl(joints_3d limb_parents)<line_sep>pygame.display.flip()<line_sep>pygame.time.wait(1)<line_sep>concat_img=np.concatenate((limb_img joint_map) axis=1)<line_sep># ax2.imshow(concat_img[..., ::-1].astype(np.uint8)) cv2.imshow('2d' concat_img.astype(np.uint8))<line_sep>cv2.waitKey(1)<line_sep># ax2.imshow(concat_img.astype(np.uint8)) # plt.pause(0.0001) # plt.show(block=False) print('Forward FPS' 1/(time.time()-t1))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>pygame.init()<line_sep>display=(800 600)<line_sep>pygame.display.set_mode(display DOUBLEBUF|OPENGL)<line_sep>gluPerspective(70 (display[0]/display[1]) 0.1 200.0)<line_sep>view_range=800<line_sep># glOrtho(-view_range, 
view_range, # -view_range, view_range, # -view_range, view_range) glTranslatef(0.0 0.0 100)<line_sep>demo()<block_end>
<import_stmt>sys<import_stmt>re<import_from_stmt>downloader Downloader<if_stmt>len(sys.argv)<ne>3<block_start><raise>Exception('Invalid arguments. Usage: {program} <cookie> <url_or_class_id>'.format(program=sys.argv[0]))<block_end>cookie=sys.argv[1]<line_sep>dl=Downloader(cookie=cookie)<if_stmt>re.match(r'^[0-9]+$' sys.argv[2])<block_start>dl.download_course_by_class_id(sys.argv[2])<block_end><else_stmt><block_start>dl.download_course_by_url(sys.argv[2])<block_end>
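The CLI wrapper above simply forwards to the Downloader class from the downloader module; a sketch of calling it directly from Python, with placeholder credentials and URL:

```python
from downloader import Downloader

dl = Downloader(cookie="sessionid=PLACEHOLDER")          # a real login cookie is required
dl.download_course_by_class_id("12345")                  # numeric class id...
dl.download_course_by_url("https://example.com/course")  # ...or a full course URL
```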
""" Grid map library in python author: <NAME> """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<class_stmt>GridMap<block_start>""" GridMap class """<def_stmt>__init__ self width height resolution center_x center_y init_val=0.0<block_start>"""__init__ :param width: number of grid for width :param height: number of grid for heigt :param resolution: grid resolution [m] :param center_x: center x position [m] :param center_y: center y position [m] :param init_val: initial value for all grid """<line_sep>self.width=width<line_sep>self.height=height<line_sep>self.resolution=resolution<line_sep>self.center_x=center_x<line_sep>self.center_y=center_y<line_sep>self.left_lower_x=self.center_x-(self.width/2.0)<times>self.resolution<line_sep>self.left_lower_y=self.center_y-(self.height/2.0)<times>self.resolution<line_sep>self.ndata=self.width<times>self.height<line_sep>self.data=[init_val]<times>int(self.ndata)<block_end><def_stmt>get_value_from_xy_index self x_ind y_ind<block_start>"""get_value_from_xy_index when the index is out of grid map area, return None :param x_ind: x index :param y_ind: y index """<line_sep>grid_ind=self.calc_grid_index_from_xy_index(x_ind y_ind)<if_stmt>0<le>grid_ind<le>self.ndata<block_start><return>self.data[grid_ind]<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>get_xy_index_from_xy_pos self x_pos y_pos<block_start>"""get_xy_index_from_xy_pos :param x_pos: x position [m] :param y_pos: y position [m] """<line_sep>x_ind=self.calc_xy_index_from_position(x_pos self.left_lower_x self.width)<line_sep>y_ind=self.calc_xy_index_from_position(y_pos self.left_lower_y self.height)<line_sep><return>x_ind y_ind<block_end><def_stmt>set_value_from_xy_pos self x_pos y_pos val<block_start>"""set_value_from_xy_pos return bool flag, which means setting value is succeeded or not :param x_pos: x position [m] :param y_pos: y position [m] :param val: grid value """<line_sep>x_ind,y_ind=self.get_xy_index_from_xy_pos(x_pos y_pos)<if_stmt>(<not>x_ind)<or>(<not>y_ind)<block_start><return><false><block_end># NG flag=self.set_value_from_xy_index(x_ind y_ind val)<line_sep><return>flag<block_end><def_stmt>set_value_from_xy_index self x_ind y_ind val<block_start>"""set_value_from_xy_index return bool flag, which means setting value is succeeded or not :param x_ind: x index :param y_ind: y index :param val: grid value """<if_stmt>(x_ind<is><none>)<or>(y_ind<is><none>)<block_start>print(x_ind y_ind)<line_sep><return><false> <false><block_end>grid_ind=int(y_ind<times>self.width+x_ind)<if_stmt>0<le>grid_ind<l>self.ndata<block_start>self.data[grid_ind]=val<line_sep><return><true># OK <block_end><else_stmt><block_start><return><false><block_end><block_end># NG <def_stmt>set_value_from_polygon self pol_x pol_y val inside=<true><block_start>"""set_value_from_polygon Setting value inside or outside polygon :param pol_x: x position list for a polygon :param pol_y: y position list for a polygon :param val: grid value :param inside: setting data inside or outside """<line_sep># making ring polygon <if_stmt>(pol_x[0]<ne>pol_x[-1])<or>(pol_y[0]<ne>pol_y[-1])<block_start>pol_x.append(pol_x[0])<line_sep>pol_y.append(pol_y[0])<block_end># setting value for all grid <for_stmt>x_ind range(int(self.width))<block_start><for_stmt>y_ind range(int(self.height))<block_start>x_pos,y_pos=self.calc_grid_central_xy_position_from_xy_index(x_ind y_ind)<line_sep>flag=self.check_inside_polygon(x_pos y_pos pol_x pol_y)<if_stmt>flag<is>inside<block_start>self.set_value_from_xy_index(x_ind 
y_ind val)<block_end><block_end><block_end><block_end><def_stmt>calc_grid_index_from_xy_index self x_ind y_ind<block_start>grid_ind=int(y_ind<times>self.width+x_ind)<line_sep><return>grid_ind<block_end><def_stmt>calc_grid_central_xy_position_from_xy_index self x_ind y_ind<block_start>x_pos=self.calc_grid_central_xy_position_from_index(x_ind self.left_lower_x)<line_sep>y_pos=self.calc_grid_central_xy_position_from_index(y_ind self.left_lower_y)<line_sep><return>x_pos y_pos<block_end><def_stmt>calc_grid_central_xy_position_from_index self index lower_pos<block_start><return>lower_pos+index<times>self.resolution+self.resolution/2.0<block_end><def_stmt>calc_xy_index_from_position self pos lower_pos max_index<block_start>ind=int(np.floor((pos-lower_pos)/self.resolution))<if_stmt>0<le>ind<le>max_index<block_start><return>ind<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>check_occupied_from_xy_index self xind yind occupied_val=1.0<block_start>val=self.get_value_from_xy_index(xind yind)<if_stmt>val<ge>occupied_val<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>expand_grid self<block_start>xinds,yinds=[] []<for_stmt>ix range(int(self.width))<block_start><for_stmt>iy range(int(self.height))<block_start><if_stmt>self.check_occupied_from_xy_index(ix iy)<block_start>xinds.append(ix)<line_sep>yinds.append(iy)<block_end><block_end><block_end><for_stmt>(ix iy) zip(xinds yinds)<block_start>self.set_value_from_xy_index(ix+1 iy val=1.0)<line_sep>self.set_value_from_xy_index(ix iy+1 val=1.0)<line_sep>self.set_value_from_xy_index(ix+1 iy+1 val=1.0)<line_sep>self.set_value_from_xy_index(ix-1 iy val=1.0)<line_sep>self.set_value_from_xy_index(ix iy-1 val=1.0)<line_sep>self.set_value_from_xy_index(ix-1 iy-1 val=1.0)<block_end><block_end>@staticmethod<def_stmt>check_inside_polygon iox ioy x y<block_start>npoint=len(x)-1<line_sep>inside=<false><for_stmt>i1 range(npoint)<block_start>i2=(i1+1)%(npoint+1)<if_stmt>x[i1]<ge>x[i2]<block_start>min_x,max_x=x[i2] x[i1]<block_end><else_stmt><block_start>min_x,max_x=x[i1] x[i2]<block_end><if_stmt><not>min_x<l>iox<l>max_x<block_start><continue><block_end><if_stmt>(y[i1]+(y[i2]-y[i1])/(x[i2]-x[i1])<times>(iox-x[i1])-ioy)<g>0.0<block_start>inside=<not>inside<block_end><block_end><return>inside<block_end><def_stmt>plot_grid_map self ax=<none><block_start>grid_data=np.reshape(np.array(self.data) (int(self.height) int(self.width)))<if_stmt><not>ax<block_start>fig,ax=plt.subplots()<block_end>heat_map=ax.pcolor(grid_data cmap="Blues" vmin=0.0 vmax=1.0)<line_sep>plt.axis("equal")<line_sep><return>heat_map<block_end><block_end><def_stmt>test_polygon_set <block_start>ox=[0.0 20.0 50.0 100.0 130.0 40.0]<line_sep>oy=[0.0 -20.0 0.0 30.0 60.0 80.0]<line_sep>grid_map=GridMap(600 290 0.7 60.0 30.5)<line_sep>grid_map.set_value_from_polygon(ox oy 1.0 inside=<false>)<line_sep>grid_map.plot_grid_map()<line_sep>plt.axis("equal")<line_sep>plt.grid(<true>)<block_end><def_stmt>test_position_set <block_start>grid_map=GridMap(100 120 0.5 10.0 -0.5)<line_sep>grid_map.set_value_from_xy_pos(10.1 -1.1 1.0)<line_sep>grid_map.set_value_from_xy_pos(10.1 -0.1 1.0)<line_sep>grid_map.set_value_from_xy_pos(10.1 1.1 1.0)<line_sep>grid_map.set_value_from_xy_pos(11.1 0.1 1.0)<line_sep>grid_map.set_value_from_xy_pos(10.1 0.1 1.0)<line_sep>grid_map.set_value_from_xy_pos(9.1 0.1 1.0)<line_sep>grid_map.plot_grid_map()<block_end>
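A short sketch of driving the GridMap class above directly, mirroring what the test helpers do:

```python
import matplotlib.pyplot as plt

grid_map = GridMap(width=100, height=120, resolution=0.5,
                   center_x=10.0, center_y=-0.5)
grid_map.set_value_from_xy_pos(10.1, 0.1, 1.0)  # mark one cell as occupied
grid_map.expand_grid()                          # grow the occupied region by one cell
grid_map.plot_grid_map()
plt.show()
```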
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> print_function<import_stmt>sys<import_stmt>numpy<as>np<def_stmt>eprint *args **kwargs<block_start>print(*args file=sys.stderr **kwargs)<block_end>selected_feature=['loads_q' 'loads_v' 'prods_q' 'prods_v' 'rho' 'line_status' 'hour_of_day' 'month']<line_sep>inference_info=np.load('./saved_files/inference_info.npz')<line_sep>col=inference_info['col']<line_sep>mean=inference_info['mean']<line_sep>std=inference_info['std']<def_stmt>process raw_obs<block_start>obs=raw_obs.to_dict()<line_sep>x=dict()<line_sep>x['loads_p']=obs['loads']['p']<line_sep>x['loads_q']=obs['loads']['q']<line_sep>x['loads_v']=obs['loads']['v']<line_sep>x['prods_p']=obs['prods']['p']<line_sep>x['prods_q']=obs['prods']['q']<line_sep>x['prods_v']=obs['prods']['v']<line_sep>x['lines_or_p']=obs['lines_or']['p']<line_sep>x['lines_or_q']=obs['lines_or']['q']<line_sep>x['lines_or_v']=obs['lines_or']['v']<line_sep>x['lines_or_a']=obs['lines_or']['a']<line_sep>x['lines_ex_p']=obs['lines_ex']['p']<line_sep>x['lines_ex_q']=obs['lines_ex']['q']<line_sep>x['lines_ex_v']=obs['lines_ex']['v']<line_sep>x['lines_ex_a']=obs['lines_ex']['a']<line_sep>x['day_of_week']=raw_obs.day_of_week<line_sep>x['month']=raw_obs.month<line_sep>x['hour_of_day']=raw_obs.hour_of_day<line_sep>to_maintain_lines=np.where((raw_obs.time_next_maintenance<g>0)&(raw_obs.time_next_maintenance<l>2))[0]<line_sep>x['rho']=np.copy(obs['rho'])<line_sep>x['line_status']=np.copy(obs['line_status'].astype(float))<line_sep>line_num=x['line_status'].shape[0]<if_stmt>len(to_maintain_lines)<block_start>x['rho'][to_maintain_lines]=0.0<line_sep>x['line_status'][to_maintain_lines]=0.0<block_end>x['line_status']<augadd>np.array([x<times>2<for>x range(line_num)])<line_sep>x['rho']=x['rho']-1.0<line_sep>data=[]<for_stmt>feature selected_feature<block_start>col_data=x[feature]<if_stmt>isinstance(col_data np.int32)<block_start>col_data=np.array([col_data])<block_end>data.append(col_data)<block_end>data=np.concatenate(data)<line_sep>data=data[col]<assert_stmt>data.shape[0]<eq>mean.shape[0]<assert_stmt>data.shape[0]<eq>std.shape[0]<line_sep>data=(data-mean)/std<line_sep><return>data<block_end>
# SPDX-License-Identifier: Apache-2.0 """ tf2onnx.tflite_utils - utilities for parsing tflite files into onnx graph """<import_stmt>collections<import_stmt>importlib<import_stmt>logging<import_stmt>struct<import_from_stmt>onnx helper onnx_pb numpy_helper<import_from_stmt>tensorflow.core.framework types_pb2 tensor_pb2 node_def_pb2<import_from_stmt>tensorflow.python.framework tensor_util<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>tf2onnx.tflite.TensorType TensorType<as>TFLiteTensorType<import_from_stmt>tf2onnx.tflite.Model Model<import_from_stmt>tf2onnx.flexbuffers read_flexbuffer<import_from_stmt>tf2onnx.tf_utils read_tf_node_def_attrs<import_from_stmt>tf2onnx.graph Graph<import_from_stmt>tf2onnx utils<line_sep>logger=logging.getLogger(__name__)<line_sep>TFLITE_TO_ONNX_DTYPE={TFLiteTensorType.FLOAT32:onnx_pb.TensorProto.FLOAT TFLiteTensorType.FLOAT16:onnx_pb.TensorProto.FLOAT16 TFLiteTensorType.INT32:onnx_pb.TensorProto.INT32 TFLiteTensorType.UINT8:onnx_pb.TensorProto.UINT8 TFLiteTensorType.INT64:onnx_pb.TensorProto.INT64 TFLiteTensorType.STRING:onnx_pb.TensorProto.STRING TFLiteTensorType.BOOL:onnx_pb.TensorProto.BOOL TFLiteTensorType.INT16:onnx_pb.TensorProto.INT16 TFLiteTensorType.COMPLEX64:onnx_pb.TensorProto.COMPLEX64 TFLiteTensorType.INT8:onnx_pb.TensorProto.INT8 TFLiteTensorType.FLOAT64:onnx_pb.TensorProto.DOUBLE TFLiteTensorType.COMPLEX128:onnx_pb.TensorProto.COMPLEX128 TFLiteTensorType.UINT64:onnx_pb.TensorProto.UINT64 TFLiteTensorType.UINT32:onnx_pb.TensorProto.UINT32 TFLiteTensorType.RESOURCE:onnx_pb.TensorProto.UNDEFINED TFLiteTensorType.VARIANT:onnx_pb.TensorProto.UNDEFINED }<line_sep>TFLITE_TO_TF_DTYPE={TFLiteTensorType.FLOAT32:types_pb2.DT_FLOAT TFLiteTensorType.FLOAT16:types_pb2.DT_HALF TFLiteTensorType.INT32:types_pb2.DT_INT32 TFLiteTensorType.UINT8:types_pb2.DT_UINT8 TFLiteTensorType.INT64:types_pb2.DT_INT64 TFLiteTensorType.STRING:types_pb2.DT_STRING TFLiteTensorType.BOOL:types_pb2.DT_BOOL TFLiteTensorType.INT16:types_pb2.DT_INT16 TFLiteTensorType.COMPLEX64:types_pb2.DT_COMPLEX64 TFLiteTensorType.INT8:types_pb2.DT_INT8 TFLiteTensorType.FLOAT64:types_pb2.DT_DOUBLE TFLiteTensorType.COMPLEX128:types_pb2.DT_COMPLEX128 TFLiteTensorType.UINT64:types_pb2.DT_UINT64 TFLiteTensorType.UINT32:types_pb2.DT_UINT32 TFLiteTensorType.RESOURCE:types_pb2.DT_RESOURCE TFLiteTensorType.VARIANT:types_pb2.DT_VARIANT }<def_stmt>map_tflite_dtype_to_onnx dtype<block_start><return>TFLITE_TO_ONNX_DTYPE[dtype]<block_end><def_stmt>map_tflite_dtype_to_tf dtype<block_start><return>TFLITE_TO_TF_DTYPE[dtype]<block_end># The tflite schema uses snake case, but the python bindings use proper case <def_stmt>snake_to_proper_case name<block_start><return>''.join(n.capitalize()<for>n name.split('_'))<block_end><def_stmt>proper_to_snake_case name<block_start>res=''<for_stmt>c name<block_start><if_stmt>c.isupper()<and>res<block_start>res<augadd>'_'<block_end>res<augadd>c.lower()<block_end><return>res<block_end># Pulled from the tflite schema.fbs file. Needed to decode enum numbers into strings. 
NODE_ATTR_NAME_TO_ENUM_TYPE={'fused_activation_function':'ActivationFunctionType' 'padding':'Padding' 'type':'LSHProjectionType' 'weights_format':'FullyConnectedOptionsWeightsFormat' 'kernel_type':'LSTMKernelType' 'combiner':'CombinerType' 'in_data_type':'TensorType' 'out_data_type':'TensorType' 'output_type':'TensorType' 'out_type':'TensorType' 'mode':'MirrorPadMode' 'idx_out_type':'TensorType' }<line_sep>NODE_ATTR_NAME_TO_ENUM_TYPE={snake_to_proper_case(key):value<for>key,value NODE_ATTR_NAME_TO_ENUM_TYPE.items()}<line_sep># Pulled from the tflite schema.fbs file. FUNCTION_ATTRS=['then_subgraph_index' 'else_subgraph_index' 'cond_subgraph_index' 'body_subgraph_index' 'subgraph']<line_sep>FUNCTION_ATTRS=[snake_to_proper_case(attr)<for>attr FUNCTION_ATTRS]<line_sep>enum_cache={}<def_stmt>lookup_enum idx enum_name<block_start>"""Given the name of a tflite enum class and an index, return a string with the name of the enum value"""<if_stmt>enum_name<eq>'TensorType'<block_start><return>map_tflite_dtype_to_onnx(idx)<block_end><if_stmt>enum_name<in>enum_cache<block_start>idx_to_name=enum_cache[enum_name]<block_end><else_stmt><block_start>module=importlib.import_module('tf2onnx.tflite.'+enum_name)<line_sep>enum_class=getattr(module enum_name)<line_sep>idx_to_name={value:key<for>key,value enum_class.__dict__.items()<if><not>key.startswith('_')}<line_sep>enum_cache[enum_name]=idx_to_name<block_end>utils.make_sure(idx<in>idx_to_name "Can't lookup value %s for tflite enum %s. Please update tf2onnx or "<concat>"submit an issue on GitHub." idx enum_name)<line_sep><return>idx_to_name[idx]<block_end><def_stmt>get_options_class name<block_start>"""Each tflite optype has a flatbuffer Options class (ex: AddOptions). Returns the options class given its name."""<if_stmt>name<eq>"NONE"<block_start><return><none><block_end>module=importlib.import_module('tf2onnx.tflite.'+name)<line_sep><return>getattr(module name)<block_end><def_stmt>graphs_from_tflite tflite_path input_names=<none> output_names=<none><block_start>""" Given the path to a tflite model, returns a tuple (main_graph, subgraphs) of graph.py Graph objects inputs/outputs will be taken from main graph in model if not overridden """<line_sep>tflite_graphs,opcodes,model,tensor_shapes=read_tflite_model(tflite_path)<line_sep>main_g=<none><line_sep>subgraphs=[]<for_stmt>i,tfl_graph enumerate(tflite_graphs)<block_start>is_main_g=i<eq>len(tflite_graphs)-1<line_sep>prefix=''<if>is_main_g<else>tfl_graph.Name().decode()+'_'<line_sep>tensor_shapes_from_interpreter=<none><if_stmt>is_main_g<block_start>tensor_shapes_from_interpreter=tensor_shapes<block_end>onnx_nodes,_,_,output_shapes,dtypes,f_inputs,f_outputs,graph_name=parse_tflite_graph(tfl_graph opcodes model prefix tensor_shapes_from_interpreter)<line_sep>g_inputs=f_inputs<line_sep>g_outputs=f_outputs<if_stmt>is_main_g# Override IO in main graph <block_start>utils.check_io(input_names output_names output_shapes.keys())<if_stmt>input_names<is><not><none><block_start>g_inputs=input_names<block_end><if_stmt>output_names<is><not><none><block_start>g_outputs=output_names<block_end><block_end>g=Graph(onnx_nodes output_shapes dtypes input_names=g_inputs output_names=g_outputs is_subgraph=<not>is_main_g graph_name=graph_name)<if_stmt>is_main_g<block_start>main_g=g<block_end><else_stmt><block_start>subgraphs.append(g)<block_end><block_end><return>main_g subgraphs<block_end><def_stmt>read_tflite_model tflite_path<block_start>""" Given the path to a tflite model, returns tuple (tflite_graphs, opcodes_map, model) Graphs 
are topologically sorted and the main graph is last Pass these to parse_tflite_graph """<with_stmt>open(tflite_path 'rb')<as>f<block_start>buf=f.read()<block_end>buf=bytearray(buf)<line_sep>model=Model.GetRootAsModel(buf 0)<line_sep># To save space, each op in the model indicates its opcode as an index into the model's opcode map. opcodes_map={}<for_stmt>i range(model.OperatorCodesLength())<block_start>op_code=model.OperatorCodes(i)<line_sep># TFlite ran out of opcodes since they only used a byte. Old models store opcodes in DeprecatedBuiltinCode. # New models put PLACEHOLDER_FOR_GREATER_OP_CODES in this field to signify that BuiltinCode should be used. code=lookup_enum(op_code.DeprecatedBuiltinCode() 'BuiltinOperator')<if_stmt>code<eq>'PLACEHOLDER_FOR_GREATER_OP_CODES'<block_start>code=lookup_enum(op_code.BuiltinCode() 'BuiltinOperator')<block_end><if_stmt>code<eq>'CUSTOM'<block_start>code=op_code.CustomCode().decode()<block_end>opcodes_map[i]=code<block_end># Shapes stored in tflite models are not always reliable so we get them from the interpreter if possible. tensor_shapes={}<try_stmt><block_start>interpreter=tf.lite.Interpreter(tflite_path)<line_sep>interpreter.allocate_tensors()<line_sep>tensor_cnt=model.Subgraphs(0).TensorsLength()<for_stmt>i range(tensor_cnt)<block_start>name=model.Subgraphs(0).Tensors(i).Name().decode()<line_sep>details=interpreter._get_tensor_details(i)# pylint: disable=protected-access <if_stmt>"shape_signature"<in>details<block_start>tensor_shapes[name]=details["shape_signature"].tolist()<block_end><elif_stmt>"shape"<in>details<block_start>tensor_shapes[name]=details["shape"].tolist()<block_end><block_end><block_end><except_stmt>Exception<as>e# pylint: disable=broad-except <block_start>logger.warning("Error loading model into tflite interpreter: %s" e)<block_end>tflite_graphs=get_model_subgraphs(model)<line_sep><return>tflite_graphs opcodes_map model tensor_shapes<block_end><def_stmt>get_subgraph_dependencies model graph_idx<block_start>"""Returns a list of subgraph indices referenced by the indicated graph"""<line_sep>dependencies=[]<line_sep>g=model.Subgraphs(graph_idx)<for_stmt>i range(g.OperatorsLength())<block_start>op=g.Operators(i)<line_sep>options_type_name=lookup_enum(op.BuiltinOptionsType() 'BuiltinOptions')<line_sep>option_class=get_options_class(options_type_name)<if_stmt>option_class<is><not><none><block_start>options=option_class()<line_sep>options.Init(op.BuiltinOptions().Bytes op.BuiltinOptions().Pos)<for_stmt>attr FUNCTION_ATTRS<block_start><if_stmt>hasattr(options attr)<block_start>value=getattr(options attr)()<line_sep>dependencies.append(value)<block_end><block_end><block_end><block_end><return>dependencies<block_end><def_stmt>get_model_subgraphs model<block_start>"""Returns topologically sorted subgraphs of a model. 
Guarantees main graph is placed at the end."""<line_sep>main_g=0<line_sep>dependencies={}<line_sep>idx_to_graph={}<for_stmt>i range(model.SubgraphsLength())<block_start>idx_to_graph[i]=model.Subgraphs(i)<line_sep>ds=get_subgraph_dependencies(model i)<line_sep>utils.make_sure(main_g<not><in>ds "Main graph %s is a dependency of subgraph %s" main_g i)<line_sep>dependencies[i]=ds<block_end>ordered=utils.topological_sort(dependencies)<line_sep><return>[idx_to_graph[i]<for>i ordered]<block_end><def_stmt>get_quantization_attr quant_params<block_start>attr={}<line_sep>attr['scale']=quant_params.ScaleAsNumpy().tolist()<line_sep>attr['zero_point']=quant_params.ZeroPointAsNumpy().tolist()<line_sep>attr['quantized_dimension']=quant_params.QuantizedDimension()<if_stmt><not>quant_params.MaxIsNone()<block_start>attr['max']=quant_params.MaxAsNumpy().tolist()<block_end><if_stmt><not>quant_params.MinIsNone()<block_start>attr['min']=quant_params.MinAsNumpy().tolist()<block_end><return>attr<block_end><def_stmt>parse_tflite_string_tensor buffer_bytes shape<block_start>"""Returns an onnx tensor with the string data encoded in the tflite tensor data buffer"""<def_stmt>read_int offset<block_start><return>struct.unpack('<i' buffer_bytes[offset:offset+4])[0]<block_end>offset=0<line_sep>count=read_int(offset)<line_sep>offset<augadd>4<line_sep>offset_list=[]<for_stmt>i range(count)<block_start>offset_list.append(read_int(offset))<line_sep>offset<augadd>4<block_end>offset_list.append(len(buffer_bytes))<line_sep>string_list=[]<for_stmt>i range(count)<block_start>string_list.append(buffer_bytes[offset_list[i]:offset_list[i+1]].decode("utf-8"))<block_end><return>numpy_helper.from_array(np.array(string_list dtype=np.object).reshape(shape))<block_end><def_stmt>op_has_scalar_output input_shapes optype attr<block_start>""" TFLite uses [] to denote both scalars and unknown output shapes. Return True if an op can have scalar outputs despite having non-scalar inputs. Otherwise, we will replace [] with None """<if_stmt>optype<in>["TFL_STRIDED_SLICE" "StridedSlice"]<block_start>inp_rank=len(input_shapes[0])<line_sep><return>attr['shrink_axis_mask']<eq>2<power>inp_rank-1<block_end><if_stmt>(optype.startswith("TFL_REDUCE")<or>optype<in>['All'])<and>len(input_shapes)<eq>2<block_start>inp_rank=len(input_shapes[0])<line_sep>keep_dims=attr.get('keep_dims' <true>)<line_sep># axes input can be a scalar for a single axis num_axes=1<if>input_shapes[1]<eq>[]<else>input_shapes[1][0]<line_sep><return><not>keep_dims<and>inp_rank<eq>num_axes<block_end><if_stmt>optype<eq>"TFL_RESHAPE"<block_start><return>input_shapes[1]<eq>[0]<block_end><if_stmt>optype<eq>"Size"# Op from TF <block_start><return><true><block_end><return><false><block_end><def_stmt>parse_tflite_graph tflite_g opcodes_map model input_prefix='' tensor_shapes_override=<none><block_start>""" Returns a Graph object along with some op count stats. All tflite op types are prefixed with "TFL_". Names of graph inputs are optionally prefixed with a string to prevent name conflicts in subgraphs. 
Quantizatized tensors are surrounded with quantize/dequantize ops """<line_sep>op_cnt=collections.Counter()<line_sep>attr_cnt=collections.Counter()<line_sep>onnx_nodes=[]<line_sep>output_shapes={}<line_sep>dtypes={}<line_sep>tensor_names={}<if_stmt>tensor_shapes_override<is><none><block_start>tensor_shapes_override={}<block_end># Map tensor name to tflite Tensor object so we can fetch quantization info as needed name_to_tensor={}<line_sep># If a node takes a quantized tensor as input, we must add a dequantize op after it. # Store a mapping so we only need to make at most one dequantize op per tensor. tensor_name_to_dequant_output={}<line_sep># tflite uses generic names (arg0, arg1, etc.) for inputs but full names for other tensors, so # prefixing just the inputs should be fine. Other tensors are prefixed when we do inlining. input_indices={tflite_g.Inputs(i)<for>i range(tflite_g.InputsLength())}<for_stmt>i range(tflite_g.TensorsLength())<block_start>tensor=tflite_g.Tensors(i)<line_sep>name=tensor.Name().decode()<if_stmt>i<in>input_indices<block_start>name=input_prefix+name<block_end>tensor_names[i]=name<line_sep>name_to_tensor[name]=tensor<if_stmt>name<in>tensor_shapes_override<block_start>output_shapes[name]=tensor_shapes_override[name]<block_end><elif_stmt>tensor.ShapeIsNone()<block_start>output_shapes[name]=<none><block_end><elif_stmt>tensor.ShapeSignatureIsNone()# The shape signature uses -1 to signify unknown dims. Old models don't have this and use Shape instead. <block_start>output_shapes[name]=tensor.ShapeAsNumpy().tolist()<block_end><else_stmt><block_start>output_shapes[name]=tensor.ShapeSignatureAsNumpy().tolist()<block_end>buf=model.Buffers(tensor.Buffer())<line_sep>dtypes[name]=map_tflite_dtype_to_onnx(tensor.Type())<if_stmt><not>buf.DataIsNone()<and>tensor.Buffer()<g>0# For const values we use TF to decode the binary data from the buffer <block_start>t=tensor_pb2.TensorProto()<line_sep>t.tensor_content=buf.DataAsNumpy().tobytes()<if_stmt>output_shapes[name]<is><none><block_start>output_shapes[name]=[]<block_end><for_stmt>d output_shapes[name]<block_start>t.tensor_shape.dim.add().size=d<block_end>t.dtype=map_tflite_dtype_to_tf(tensor.Type())<if_stmt>t.dtype<eq>tf.string<block_start>onnx_tensor=parse_tflite_string_tensor(t.tensor_content output_shapes[name])<block_end><else_stmt><block_start>np_data=tensor_util.MakeNdarray(t)<line_sep>onnx_tensor=numpy_helper.from_array(np_data name=name)<block_end>onnx_node=helper.make_node("Const" [] outputs=[name] name=name value=onnx_tensor)<line_sep>onnx_nodes.append(onnx_node)<line_sep>op_cnt["Const"]<augadd>1<block_end><block_end><def_stmt>get_dequant tensor_name<block_start>"""Creates a dequantize op for the provided tensor if needed and returns the output of the op, or the original tensor name if no dequantization is needed"""<line_sep>quant=name_to_tensor[tensor_name].Quantization()<if_stmt>quant<is><none><or>quant.ScaleIsNone()<or>quant.ZeroPointIsNone()<block_start><return>tensor_name<block_end><if_stmt>tensor_name<in>tensor_name_to_dequant_output<block_start><return>tensor_name_to_dequant_output[tensor_name]<block_end>dequant_name=tensor_name+"_dequant"<line_sep>attr=get_quantization_attr(quant)<line_sep>onnx_node=helper.make_node("TFL_DEQUANTIZE" [tensor_name] [dequant_name] name=dequant_name 
**attr)<line_sep>onnx_nodes.append(onnx_node)<line_sep>tensor_name_to_dequant_output[tensor_name]=dequant_name<line_sep>output_shapes[dequant_name]=output_shapes[tensor_name].copy()<line_sep>dtypes[dequant_name]=onnx_pb.TensorProto.FLOAT<line_sep><return>dequant_name<block_end><def_stmt>get_prequant tensor_name<block_start>"""Called by nodes with the name of the tensor they must output. If the output is supposed to be quantized, creates a Quantize op outputting the tensor. Returns the name that should be used for the "prequantized" tensor, or the original tensor if no quantization is needed"""<line_sep>quant=name_to_tensor[tensor_name].Quantization()<if_stmt>quant<is><none><or>quant.ScaleIsNone()<or>quant.ZeroPointIsNone()<block_start><return>tensor_name<block_end>prequant_name=tensor_name+"_prequant"<line_sep>quantize_name=tensor_name+"_quantize"<line_sep>attr=get_quantization_attr(quant)<line_sep>onnx_node=helper.make_node("TFL_QUANTIZE" [prequant_name] [tensor_name] name=quantize_name **attr)<line_sep>onnx_nodes.append(onnx_node)<line_sep>output_shapes[prequant_name]=output_shapes[tensor_name].copy()<line_sep>dtypes[prequant_name]=onnx_pb.TensorProto.FLOAT<line_sep><return>prequant_name<block_end><for_stmt>i range(tflite_g.OperatorsLength())<block_start>op=tflite_g.Operators(i)<line_sep>optype='TFL_'+opcodes_map[op.OpcodeIndex()]<line_sep>op_cnt[optype]<augadd>1<line_sep>attr={}<line_sep>options_type_name=lookup_enum(op.BuiltinOptionsType() 'BuiltinOptions')<line_sep>option_class=get_options_class(options_type_name)<line_sep>wants_dequantized_input=<true><line_sep>has_prequantized_output=<true><if_stmt>optype<eq>'TFL_QUANTIZE'<block_start>out_tensor=tflite_g.Tensors(op.Outputs(0))<line_sep>quant=out_tensor.Quantization()<line_sep>has_prequantized_output=<false><if_stmt>quant<is><not><none><and><not>quant.ScaleIsNone()<and><not>quant.ZeroPointIsNone()<block_start>attr.update(get_quantization_attr(quant))<block_end><block_end><elif_stmt>optype<eq>'TFL_DEQUANTIZE'<block_start>in_tensor=tflite_g.Tensors(op.Inputs(0))<line_sep>quant=in_tensor.Quantization()<line_sep>wants_dequantized_input=<false><if_stmt>quant<is><not><none><and><not>quant.ScaleIsNone()<and><not>quant.ZeroPointIsNone()<block_start>attr.update(get_quantization_attr(quant))<block_end><block_end>input_names=[tensor_names[op.Inputs(i)]<for>i range(op.InputsLength())<if>op.Inputs(i)<ne>-1]<line_sep>output_names=[tensor_names[op.Outputs(i)]<for>i range(op.OutputsLength())<if>op.Outputs(i)<ne>-1]<if_stmt>optype.startswith("TFL_Flex")<block_start>data=read_flexbuffer(op.CustomOptionsAsNumpy().tobytes() decode_strings=<false>)<line_sep>utils.make_sure(isinstance(data list) "Flex ops are expected to store data as a flexbuffer list")<line_sep>tf_op=data[0].decode("utf-8")<line_sep>tf_node_def=node_def_pb2.NodeDef()<line_sep>tf_node_def.ParseFromString(data[1])<line_sep>input_tf_dtypes=[map_tflite_dtype_to_tf(name_to_tensor[inp].Type())<for>inp input_names]<def_stmt>shape_to_tf_shape dims<block_start><return>[<none><if>d<l>0<else>d<for>d dims]<if>dims<is><not><none><else><none><block_end>input_shapes=[shape_to_tf_shape(output_shapes[inp])<for>inp input_names]<line_sep>tf_attrs,_=read_tf_node_def_attrs(tf_node_def input_tf_dtypes input_shapes)<line_sep>attr.update(tf_attrs)<line_sep>optype=tf_op<block_end><elif_stmt><not>op.CustomOptionsIsNone()<block_start>custom_ops_format=lookup_enum(op.CustomOptionsFormat() 
'CustomOptionsFormat')<if_stmt>custom_ops_format<eq>'FLEXBUFFERS'<block_start>data=<none><try_stmt><block_start>data=read_flexbuffer(op.CustomOptionsAsNumpy().tobytes())<block_end><except_stmt>Exception<as>e# pylint: disable=broad-except <block_start>logger.warning("Could not parse attributes for custom op '%s': %s" optype e)<block_end><if_stmt>isinstance(data dict)<block_start>attr.update(data)<block_end><block_end><block_end><if_stmt>option_class<is><not><none><block_start>options=option_class()<line_sep>options.Init(op.BuiltinOptions().Bytes op.BuiltinOptions().Pos)<line_sep># All flatbuffer objects have these properties. block_list=[options_type_name+'BufferHasIdentifier' 'Init' 'GetRootAs'+options_type_name 'GetRootAs']<line_sep># The rest of the properties of the options class provide its attribute names attr_names={opt<for>opt dir(options)<if><not>opt.startswith('_')<and>opt<not><in>block_list}<for_stmt>a list(attr_names)# Flatbufffer list properties have 3 functions: *Length, *IsNone, and *AsNumpy <block_start><if_stmt>a+'Length'<in>attr_names<block_start>attr_names.remove(a+'Length')<line_sep>attr_names.remove(a+'IsNone')<line_sep>attr_names.remove(a)<block_end><block_end><for_stmt>a attr_names<block_start><if_stmt>a.endswith('AsNumpy')<block_start>value=getattr(options a)().tolist()<line_sep>a=a[:-len('AsNumpy')]<block_end><else_stmt># For enums we use a string with the value name, not enum index <block_start>value=getattr(options a)()<if_stmt>a<in>NODE_ATTR_NAME_TO_ENUM_TYPE<block_start>value=lookup_enum(value NODE_ATTR_NAME_TO_ENUM_TYPE[a])<block_end><elif_stmt>a<in>FUNCTION_ATTRS<block_start>value=model.Subgraphs(value).Name().decode()<block_end><block_end>attr_cnt[a]<augadd>1<line_sep>attr[proper_to_snake_case(a)]=value<block_end><block_end><if_stmt>wants_dequantized_input<block_start>input_names=[get_dequant(inp)<for>inp input_names]<block_end><if_stmt>optype<eq>"TFL_TFLite_Detection_PostProcess"# There's a bug in tflite for the output shapes of this op <block_start><for_stmt>out,shape zip(output_names [[-1 -1 4] [-1 -1] [-1 -1] [-1]])<block_start><if_stmt>len(output_shapes[out])<ne>len(shape)<block_start>output_shapes[out]=shape<block_end><block_end><block_end><if_stmt>all(output_shapes[out]<eq>[]<for>out output_names)# tflite uses [] to represent both scalars and completely unknown shapes # If an op has non-scalar inputs and all scalar outputs, it is very likely the shapes are actually unknown. 
<block_start>inp_shapes=[output_shapes[inp]<for>inp input_names]<if_stmt><not>all(s<eq>[]<for>s inp_shapes)<block_start><if_stmt>any(s<is><none><for>s inp_shapes)<or><not>op_has_scalar_output(inp_shapes optype attr)<block_start><for_stmt>out output_names<block_start>logger.warning("Replacing scalar output shape of %s with unknown shape" out)<line_sep>output_shapes[out]=<none><block_end><block_end><block_end><block_end><if_stmt>has_prequantized_output<block_start>output_names=[get_prequant(out)<for>out output_names]<block_end>onnx_node=helper.make_node(optype input_names output_names name=output_names[0] **attr)<line_sep>onnx_nodes.append(onnx_node)<block_end>inputs=[tensor_names[tflite_g.Inputs(i)]<for>i range(tflite_g.InputsLength())]<line_sep>outputs=[tensor_names[tflite_g.Outputs(i)]<for>i range(tflite_g.OutputsLength())]<line_sep># TODO: Allow input/outputs to be overridden <for_stmt>inp inputs<block_start>onnx_node=helper.make_node("Placeholder" [] outputs=[inp] name=inp)<line_sep>onnx_nodes.append(onnx_node)<block_end>graph_name=(tflite_g.Name()<or>b'tflite graph').decode()<line_sep><return>onnx_nodes op_cnt attr_cnt output_shapes dtypes inputs outputs graph_name<block_end>
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>enum Enum<class_stmt>DependencyType(str Enum)<block_start>"""Defines the dependency type. """<line_sep>required_for_prepare="RequiredForPrepare"<line_sep>required_for_move="RequiredForMove"<block_end><class_stmt>MoveResourceInputType(str Enum)<block_start>"""Defines the move resource input type. """<line_sep>move_resource_id="MoveResourceId"<line_sep>move_resource_source_id="MoveResourceSourceId"<block_end><class_stmt>MoveState(str Enum)<block_start>"""Defines the MoveResource states. """<line_sep>assignment_pending="AssignmentPending"<line_sep>prepare_pending="PreparePending"<line_sep>prepare_in_progress="PrepareInProgress"<line_sep>prepare_failed="PrepareFailed"<line_sep>move_pending="MovePending"<line_sep>move_in_progress="MoveInProgress"<line_sep>move_failed="MoveFailed"<line_sep>discard_in_progress="DiscardInProgress"<line_sep>discard_failed="DiscardFailed"<line_sep>commit_pending="CommitPending"<line_sep>commit_in_progress="CommitInProgress"<line_sep>commit_failed="CommitFailed"<line_sep>committed="Committed"<block_end><class_stmt>ProvisioningState(str Enum)<block_start>"""Defines the provisioning states. """<line_sep>succeeded="Succeeded"<line_sep>updating="Updating"<line_sep>creating="Creating"<line_sep>failed="Failed"<block_end><class_stmt>ResolutionType(str Enum)<block_start>"""Defines the resolution type. """<line_sep>manual="Manual"<line_sep>automatic="Automatic"<block_end><class_stmt>ResourceIdentityType(str Enum)<block_start>"""The type of identity used for the region move service. """<line_sep>none="None"<line_sep>system_assigned="SystemAssigned"<line_sep>user_assigned="UserAssigned"<block_end><class_stmt>TargetAvailabilityZone(str Enum)<block_start>"""Gets or sets the target availability zone. """<line_sep>one="1"<line_sep>two="2"<line_sep>three="3"<line_sep>na="NA"<block_end><class_stmt>ZoneRedundant(str Enum)<block_start>"""Defines the zone redundant resource setting. """<line_sep>enable="Enable"<line_sep>disable="Disable"<block_end>
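Because each of these enums subclasses str, members compare equal to the raw strings the service exchanges and can be looked up from those strings. A tiny sketch, assuming the enum classes above are in scope:

state = MoveState.prepare_pending
assert state == "PreparePending"                              # plain string comparison works
assert MoveState("CommitFailed") is MoveState.commit_failed   # lookup by wire value
print(ProvisioningState.succeeded.value)                      # "Succeeded"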
<import_from_future_stmt> print_function division<import_stmt>sys<import_stmt>os<line_sep>sys.path.append(os.path.join(os.path.dirname(__file__) '..'))<import_from_stmt>lib replay_memory<import_from_stmt>common GridAnnotationWindow<import_stmt>Tkinter<def_stmt>main <block_start>print("Loading replay memory...")<line_sep>memory=replay_memory.ReplayMemory.create_instance_supervised()<line_sep>win=GridAnnotationWindow.create(memory current_anno_attribute_name="current_lane_grid" save_to_fp="annotations_current_lane.pickle" every_nth_example=20)<line_sep>win.brush_size=2<line_sep>win.autosave_every_nth=100<line_sep>win.master.wm_title("Annotate current lane")<line_sep>Tkinter.mainloop()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
"""test module importing itself"""<line_sep># pylint: disable=no-absolute-import,using-constant-test <import_from_future_stmt> print_function<import_from_stmt>. import_itself# [import-self] __revision__=0<if_stmt>__revision__<block_start>print(import_itself)<block_end>
<import_stmt>open3d<as>o3d<import_from_stmt>.method_selector get_te_method<def_stmt>run_icp src tgt trans_init config<block_start>te=get_te_method(config.method)<if_stmt>config.method<eq>"gicp"<block_start><return>o3d.pipelines.registration.registration_generalized_icp(src tgt config.threshold trans_init te).transformation<block_end><return>o3d.pipelines.registration.registration_icp(src tgt config.threshold trans_init te).transformation<block_end>
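A hypothetical usage sketch for run_icp above. The point-cloud paths and threshold are invented, and config is modelled as a simple namespace exposing the .method and .threshold attributes that run_icp and get_te_method read; "gicp" is the one method name the code above handles explicitly.

from types import SimpleNamespace

import numpy as np
import open3d as o3d

src = o3d.io.read_point_cloud("source.pcd")
tgt = o3d.io.read_point_cloud("target.pcd")

config = SimpleNamespace(method="gicp", threshold=0.05)
transformation = run_icp(src, tgt, np.eye(4), config)   # 4x4 homogeneous matrix
print(transformation)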
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_stmt>os<import_stmt>paddle<import_stmt>socket<import_stmt>threading<class_stmt>TestFleetPrivateFunction(unittest.TestCase)<block_start><def_stmt>test_wait_port self<block_start><def_stmt>init_server port<block_start><import_stmt>time<line_sep>time.sleep(5)<line_sep>sock=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>sock.bind(("127.0.0.1" port))<line_sep>sock.listen(10)<while_stmt><true><block_start>c,addr=sock.accept()<line_sep>c.send("0")<line_sep>c.close()<line_sep><break><block_end><block_end>thr=threading.Thread(target=init_server args=(9292 ))<line_sep>thr.start()<import_stmt>paddle.distributed.fleet<as>fleet<line_sep>ep=["127.0.0.1:9292"]<line_sep>fleet.base.private_helper_function.wait_server_ready(ep)<line_sep>thr.join()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_stmt>json<import_from_stmt>pyecharts options<as>opts<import_from_stmt>pyecharts.charts Sankey<with_stmt>open("product.json" "r" encoding="utf-8")<as>f<block_start>j=json.load(f)<block_end>c=(Sankey().add("sankey" nodes=j["nodes"] links=j["links"] pos_top="10%" focus_node_adjacency=<true> levels=[opts.SankeyLevelsOpts(depth=0 itemstyle_opts=opts.ItemStyleOpts(color="#fbb4ae") linestyle_opts=opts.LineStyleOpts(color="source" opacity=0.6) ) opts.SankeyLevelsOpts(depth=1 itemstyle_opts=opts.ItemStyleOpts(color="#b3cde3") linestyle_opts=opts.LineStyleOpts(color="source" opacity=0.6) ) opts.SankeyLevelsOpts(depth=2 itemstyle_opts=opts.ItemStyleOpts(color="#ccebc5") linestyle_opts=opts.LineStyleOpts(color="source" opacity=0.6) ) opts.SankeyLevelsOpts(depth=3 itemstyle_opts=opts.ItemStyleOpts(color="#decbe4") linestyle_opts=opts.LineStyleOpts(color="source" opacity=0.6) ) ] linestyle_opt=opts.LineStyleOpts(curve=0.5) ).set_global_opts(title_opts=opts.TitleOpts(title="Sankey-Level Settings") tooltip_opts=opts.TooltipOpts(trigger="item" trigger_on="mousemove") ).render("sankey_with_level_setting.html"))<line_sep>
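For reference, a sketch of the shape product.json is expected to have: the ECharts sankey series takes nodes as {"name": ...} records and links as {"source", "target", "value"} records keyed by node name. The concrete entries below are invented for illustration.

import json

data = {
    "nodes": [{"name": "supplier"}, {"name": "factory"}, {"name": "store"}],
    "links": [
        {"source": "supplier", "target": "factory", "value": 10},
        {"source": "factory", "target": "store", "value": 8},
    ],
}
with open("product.json", "w", encoding="utf-8") as f:
    json.dump(data, f, ensure_ascii=False, indent=2)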
"""Convert a word from Latin orthography into its hypothesized pronunciation in the International Phonetic Alphabet (IPA). https://raw.githubusercontent.com/j-duff/cltk/ipa/ cltk/phonology/lat/transcription.py """<import_stmt>re<import_stmt>unicodedata<import_from_stmt>typing List<import_from_stmt>nltk.tokenize wordpunct_tokenize<import_from_stmt>cltk.core.cltk_logger logger<import_from_stmt>cltk.prosody.lat macronizer<as>m<try_stmt># James Tauber's greek_accentuation package <block_start><import_from_stmt>greek_accentuation characters<as>chars<block_end><except_stmt>ImportError<as>import_error<block_start>message=('Missing "greek_accentuation" package. Install with '<concat>"`pip install greek-accentuation`.")<line_sep>logger.error(message)<line_sep>logger.error(import_error)<line_sep><raise><block_end>__author__=["<NAME> <<EMAIL>>"]<line_sep>__license__="MIT License. See LICENSE."<line_sep># Dictionaries of phonological reconstructions for use in transcribing. # <NAME>. 1965. Vox Latina. LATIN={"Classical":{"Allen":{"correspondence":{"p":"p" "t":"t̪" "c":"k" "k":"k" "qu":"kʷ" "b":"b" "d":"d̪" "g":"g" "gu":"gʷ" "ph":"pʰ" "th":"t̪ʰ" "ch":"kʰ" "n":"n̪" "m":"m" "r":"r" "rh":"r" # Voiceless r was spelled but not pronounced. "l":"l" "f":"f" "s":"s" "h":"h" "j":"j" "v":"w" "x":"ks" "z":"z" "ī":"iː" "ū":"uː" "i":"ɪ" "u":"ʊ" "e":"ɛ" "o":"ɔ" "ē":"eː" "ō":"oː" "a":"a" "ā":"aː" "y":"y" "ȳ":"y:" "ae":"aj" "au":"aw" "oe":"oj" "eu":"ew" "ei":"ej" } "diphthongs":[# and digraphs "qu" "gu" "ph" "th" "ch" "rh" "ae" "au" "oe" "eu" "ei" ] "punctuation":["." "," ";" ":" "-" "–" "?" "!" "(" ")" "'" '"' "[" "]" ] "alternations":["j_maker" # word initial and intervocalic i is assumed j "w_maker" # word initial and intervocalic u is assumed w "wj_block" # prevents accidental sequence wj "uj_diph_maker" # after w and j have been created, recognizes # <ui> = [uj] "b_devoice" # b devoices before /t/, /s/ "g_n_nasality_assimilation" # only before n "n_place_assimilation" # should also do labial, and # labio-dental before f. "final_m_drop" # m drops and lengthens + nasalizes preceding # vowel word-finally "ns_nf_lengthening" # vowels lengthen before ns or nf "l_darken" # l darkens to ɫ in coda "j_z_doubling" # intervocalic j and z > jj and zz "long_vowel_catcher" # corrects accidental instances of ɪː # and similar. "e_i_closer_before_vowel" # ɛ to ɛ̣, ɪ to ɪ̣ before another vowel "intervocalic_j" # j glide between vowels ] }}}<line_sep># Unhandled exceptions: preposition "ad" becomes [at̪] not [ad̪] before s and t # subf > suff, subm > summ, subg > sugg, subc > succ, subr > rr # j exceptions like ad*j*ectivum and con*j*unx # All IPA characters used sorted by natural classes. 
# WILL NEED ADDITIONS AS MORE RECONSTRUCTIONS USED IPA={"voiced":[# [+voice] "b" "d̪" "g" "gʷ" "m" "n̪" "ŋ" "ɱ"<concat>"l" "ɫ" "r" "z" ] "labial":["b" "p" "pʰ" "m"] # [+labial, -labiodental] "labiodental":["f" "ɱ"] # [+labial, +labiodental] "coronal":["d̪" "t̪" "t̪ʰ" "n̪" "s" "z" "r" "l" "ɫ"] # [+coronal] "velar":["g" "k" "kʰ" "kʷ" "gʷ" "ŋ"] # [+velar] "nasal":["m" "ɱ" "n" "ŋ"] # [+consonantal, +nasal] "approximant":["l" "ɫ" "r" "j" "w"] # [+approximant] "continuant":["h" "f" "s" "z" "l" "ɫ" "r"] # [+continuant, +consonantal] "vowel":[# [-consonantal -approximant] "a" "aː" "ɛ" "ɛ̣" "eː" "ɪ" "ɪ̣" "iː" "ɔ" "oː" "ʊ" "u" "uː" "y" "yː" "ãː" "ẽː" "ĩː" "õː" "ũː" ] "high":[# [-consonantal, +high] "ɪ" "ɪ̣" "iː" "ʊ" "u" "uː" "y" "yː" "ɪ̃" "ɪ̣̃" "ĩː" "ʊ̃" "ũ" "ũː" "ỹ" "ỹː" ] "mid":[# [-consonantal, -high, -low] "ɛ" "ɛ̣" "eː" "ɔ" "oː" "ɛ̃" "ɛ̣̃" "ẽː" "ɔ̃" "õː" ] "low":["a" "aː" "ã" "ãː"] # [-consonantal, +low] "front":[# [-consonantal, +front] "ɪ" "ɪ̣" "iː" "y" "yː" "ɛ" "ɛ̣" "eː" "ɪ̃" "ɪ̣̃" "ĩː" "ỹ" "ỹː" "ɛ̃" "ɛ̣̃" "ẽː" ] "central":["a" "aː" "ã" "ãː"] # [-consonantal, -front, -back] "back":[# [-consonantal, +back] "ʊ" "u" "uː" "ɔ" "oː" "ʊ̃" "ũ" "ũː" "ɔ̃" "õː" ] "boundary":["#"] }<class_stmt>Phone<block_start>"""A phonological unit to be manipulated and represented as an IPA string."""<line_sep># Has a bundle of feature values that help classify it so that it can # trigger contextual pronunciation changes. <def_stmt>__init__ self ipa_ch:str<block_start>""" Analyzes features of phonetic signs :param ipa_ch: phonetic sign from IPA """<line_sep># eventually exported to output string self.ipa=unicodedata.normalize("NFC" ipa_ch)<line_sep># will be assigned once in Word, as the pre-context of this phone self.left=""<line_sep># .... as the post-context of this phone self.right=""<line_sep># bundle of features, stored as booleans: self.vce=self.ipa<in>IPA["voiced"]<line_sep>self.lab=self.ipa<in>IPA["labial"]<line_sep>self.lbd=self.ipa<in>IPA["labiodental"]<line_sep>self.cor=self.ipa<in>IPA["coronal"]<line_sep>self.vel=self.ipa<in>IPA["velar"]<line_sep>self.nas=self.ipa<in>IPA["nasal"]<line_sep>self.app=self.ipa<in>IPA["approximant"]<line_sep>self.cont=self.ipa<in>IPA["continuant"]<line_sep>self.vow=self.ipa<in>IPA["vowel"]<line_sep>self.hi=self.ipa<in>IPA["high"]<line_sep>self.mid=self.ipa<in>IPA["mid"]<line_sep>self.lo=self.ipa<in>IPA["low"]<line_sep>self.fr=self.ipa<in>IPA["front"]<line_sep>self.ctr=self.ipa<in>IPA["central"]<line_sep>self.bk=self.ipa<in>IPA["back"]<line_sep>self.bound=self.ipa<in>IPA["boundary"]<block_end><def_stmt>__repr__ self<block_start><return>self.ipa<block_end><block_end><class_stmt>Word<block_start>"""Max. phonological unit, contains phones and triggers alternations."""<line_sep># An ordered collection of Phones, which are bundles of # features/IPA strings. <def_stmt>__init__ self ipa_str:str root:dict<block_start>""" :param ipa_str: :param root: """<line_sep>self.string=unicodedata.normalize("NFC" ipa_str)<line_sep># Appropriate directory in the reconstruction dictionary self.root=root<line_sep># list of contextual pronunciation alternations self.alts=self.root["alternations"]<line_sep># Turns string of IPA characters into list of Phones self.phones=[Phone(c)<for>c re.findall(r".[̪̣̃ʷʰ]*ː?" 
self.string)]<line_sep>self.syllables=[]<block_end><def_stmt>_refresh self<block_start>""" Assigns left and right contexts for every phone """<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>n<ne>0<block_start>p.left=self.phones[n-1]<block_end><else_stmt><block_start>p.left=Phone("#")<block_end><if_stmt>n<ne>len(self.phones)-1<block_start>p.right=self.phones[n+1]<block_end><else_stmt><block_start>p.right=Phone("#")<block_end><block_end><block_end><def_stmt>_j_maker self<block_start>""" Assume word-initial or intervocalic i to be j """<line_sep>out_phones=self.phones<line_sep>target=Phone("j")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.ipa<eq>"ɪ"<and>((p.left.bound<and>p.right.vow)<or>(p.left.vow<and>p.right.vow))<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_w_maker self<block_start>""" Assume word-initial or intervocalic u to be w """<line_sep>out_phones=self.phones<line_sep>target=Phone("w")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>((p.ipa<eq>"ʊ")<or>(p.ipa<eq>"u"))<and>((p.left.bound<and>(p.right.vow<or>p.right.ipa<eq>"j"))<or>(p.left.vow<and>p.right.vow))<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_wj_block self<block_start>""" Addendum to correct possible 'wj' sequences """<line_sep>out_phones=self.phones<line_sep>target=Phone("ɪ")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.left.ipa<eq>"w"<and>p.ipa<eq>"j"<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_uj_diph_maker self<block_start>""" Find accidental "ʊɪ" instances and treat as diphthong [uj]. """<line_sep>out_phones=self.phones<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.left.ipa<eq>"ʊ"<and>p.ipa<eq>"ɪ"<block_start>out_phones[n-1]=Phone("u")<line_sep>out_phones[n]=Phone("j")<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_b_devoice self<block_start>""" Pronounce b as p when followed by s or t. """<line_sep>out_phones=self.phones<line_sep>target=Phone("p")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.ipa<eq>"b"<and>(p.right.ipa<eq>"s"<or>p.right.ipa<eq>"t̪")<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_final_m_drop self<block_start>""" Final m nasalizes and lengthens nucleus and drops. """<line_sep>out_phones=self.phones<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.left.vow<and>p.ipa<eq>"m"<and>p.right.bound<block_start>out_phones[n-1]=Phone(p.left.ipa+"̃ː")<del_stmt>out_phones[n]<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_n_place_assimilation self<block_start>""" Pronounce n as ŋ when followed by velar. """<line_sep>out_phones=self.phones<line_sep>target=Phone("ŋ")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.ipa<eq>"n̪"<and>p.right.vel<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_g_n_nasality_assimilation self<block_start>""" Pronounce g as ŋ when followed by n. 
"""<line_sep>out_phones=self.phones<line_sep>target=Phone("ŋ")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.ipa<eq>"g"<and>p.right.ipa<eq>"n̪"<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_ns_nf_lengthening self<block_start>""" Lengthen vowel before ns or nf. """<line_sep>out_phones=self.phones<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>(p.left.vow<and>"ː"<not><in>p.left.ipa<and>p.ipa<eq>"n̪"<and>(p.right.ipa<eq>"s"<or>p.right.ipa<eq>"f"))<block_start>out_phones[n-1]=Phone(p.left.ipa+"ː")<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_l_darken self<block_start>""" Pronounce l as ɫ in coda. """<line_sep>out_phones=self.phones<line_sep>target=Phone("ɫ")<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.ipa<eq>"l"<and>((<not>p.right.vow)<or>p.right.bound)<block_start>out_phones[n]=target<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_j_z_doubling self<block_start>""" Double j and z between vowels. """<line_sep>out_phones=self.phones<line_sep>dupl=[]<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.right.vow<and>(p.ipa<eq>"j"<or>p.ipa<eq>"z")<and>p.left.vow<block_start>dupl.append((<true> n-len(self.phones) p.ipa))<block_end><else_stmt><block_start>dupl.append((<false> n-len(self.phones) <none>))<block_end><block_end><for_stmt>t sorted(dupl key=<lambda>tup:tup[1])<block_start><if_stmt>t[0]<block_start>out_phones.insert(t[1] Phone(t[2]))<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_long_vowel_catcher self<block_start>""" Replace ɪː with iː, ʊː with uː, and ɛː with eː. """<line_sep>out_phones=self.phones<line_sep>target_dict={"ɪː":"iː" "ʊː":"uː" "ɛː":"eː" "ɪ̃ː":"ĩː" "ʊ̃ː":"ũː" "ɛ̃ː":"ẽː" }<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.ipa<in>target_dict.keys()<block_start>out_phones[n]=Phone(target_dict[p.ipa])<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_e_i_closer_before_vowel self<block_start>""" e and i become closer (̣) when followed by a vowel. 
"""<line_sep>out_phones=self.phones<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>(p.ipa<eq>"ɛ"<or>p.ipa<eq>"ɪ")<and>p.right.vow<block_start>out_phones[n]=Phone(p.ipa+"̣")<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end><def_stmt>_intervocalic_j self<block_start>""" epenthesize j between vowels """<line_sep>out_phones=self.phones<line_sep>target=Phone("j")<line_sep>j=[]<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.left.vow<and>p.vow<block_start>j.append((<true> n-len(self.phones)))<block_end><else_stmt><block_start>j.append((<false> n-len(self.phones)))<block_end><block_end><for_stmt>t sorted(j key=<lambda>tup:tup[1])<block_start><if_stmt>t[0]<block_start>out_phones.insert(t[1] target)<block_end><block_end>self.phones=out_phones<line_sep>self._refresh()<block_end># list of all possible alternations ALTERNATIONS=[("j_maker" _j_maker) ("w_maker" _w_maker) ("wj_block" _wj_block) ("uj_diph_maker" _uj_diph_maker) ("b_devoice" _b_devoice) ("final_m_drop" _final_m_drop) ("n_place_assimilation" _n_place_assimilation) ("g_n_nasality_assimilation" _g_n_nasality_assimilation) ("ns_nf_lengthening" _ns_nf_lengthening) ("l_darken" _l_darken) ("j_z_doubling" _j_z_doubling) ("long_vowel_catcher" _long_vowel_catcher) ("e_i_closer_before_vowel" _e_i_closer_before_vowel) ("intervocalic_j" _intervocalic_j) ]<def_stmt>_alternate self<block_start>""" After setting left and right contexts for every phone... """<line_sep>self._refresh()<line_sep># runs all alternations <for_stmt>a Word.ALTERNATIONS<block_start><if_stmt>a[0]<in>self.alts<block_start>a[1](self)<block_end><block_end><block_end><def_stmt>syllabify self<arrow>List[List[Phone]]<block_start>""" Takes Word input and returns a list of syllables as (onset, nucleus, coda) tuples where onset, nucleus, and coda are all lists of Phones. :return: list of syllables """<line_sep>nuclei=[]<for_stmt>n range(len(self.phones))<block_start>p=self.phones[n]<if_stmt>p.vow<block_start>nuclei.append(n)<block_end><block_end># initialize syllables with a tuple for the first syllable # where onset is everything before the first nucleus # and coda remains unknown. syllables=[[self.phones[0:nuclei[0]] [self.phones[nuclei[0]]] []]]<line_sep># continue for every nucleus, assuming that everything between # the previous nucleus and it is the onset. <for_stmt>x range(len(nuclei)-1)<block_start>i=nuclei[x+1]<line_sep>onset=self.phones[nuclei[x]+1:i]<line_sep>nucleus=[self.phones[i]]<line_sep>syllables.append([onset nucleus []])<block_end># assume that everything after the final nucleus is final coda. syllables[-1][2]=self.phones[nuclei[-1]+1:]<line_sep># now go through and check onset viability <for_stmt>x range(len(syllables)-1)<block_start>onset=syllables[x+1][0]<line_sep>nucleus=syllables[x+1][1]<line_sep>coda=syllables[x+1][2]<line_sep># trim all onsets greater than the maximum 2 phones # removing extra phones from the left # and appending them to the previous coda <if_stmt>len(onset)<g>2<block_start>trim=onset[:-2]<del_stmt>onset[:-2]<line_sep>syllables[x][2]=trim<block_end># once onset is 2 phones... 
<if_stmt>len(onset)<eq>2# stop + liquid is the only viable sequence and passes <block_start><if_stmt>((<not>onset[0].cont)<and>(<not>onset[0].app)<and>(onset[1].nas<or>onset[1].app))<block_start><break><block_end># otherwise, onset must be right Phone only # the left phone is appended to the previous coda <else_stmt><block_start>trim=onset[0]<del_stmt>onset[0]<line_sep>syllables[x][2]<augadd>[trim]<block_end><block_end><block_end>self.syllables=syllables<line_sep><return>syllables<block_end><def_stmt>_print_ipa self syllabify accentuate<block_start>""" Depending on the syllabify and accentuate parameters Prints an appropriately marked up version of the transcription :param syllabify: :param accentuate: :return: """<line_sep>out=""<if_stmt>syllabify<block_start>syllables=self.syllabify()<line_sep># the ultima is the final syllable ultima=syllables[-1]<line_sep># identify which syllable has stress and store index as accent <if_stmt>accentuate# one syllable words have ultimate stress <block_start><if_stmt>len(syllables)<eq>1<block_start>accent=-1<block_end># two syllable words have penultimate stress <elif_stmt>len(syllables)<eq>2<block_start>accent=-2<block_end><else_stmt># penult is second to last syllable <block_start>penult=syllables[-2]<line_sep># if penult is diphthong (long), penultimate stress <if_stmt>len(penult[1])<g>1<block_start>accent=-2<block_end># if penult is long vowel, penultimate stress <elif_stmt>"ː"<in>penult[1][0].ipa<block_start>accent=-2<block_end># if penult has coda (closed/long by position), # penultimate stress <elif_stmt>len(penult[2])<g>0<block_start>accent=-2<block_end># otherwise (penult is short) antepenultimate stress <else_stmt><block_start>accent=-3<block_end><block_end># loop over syllables by index <for_stmt>x range(len(syllables))<block_start>s=syllables[x]<line_sep># if index matches accent index set above <if_stmt>x-len(syllables)<eq>accent# precede that syllable with # IPA stress punctuation: ' <block_start>out<augadd>"'"<block_end># then, print IPA by syllable segment as usual <for_stmt>n s<block_start><for_stmt>p n<block_start>out<augadd>p.ipa<block_end><block_end># seperate all syllables with IPA syllable punctuation: . <if_stmt>s<ne>ultima<block_start>out<augadd>"."<block_end><block_end><block_end># if no accentuation flag, proceed with syllabified printing <else_stmt><block_start><for_stmt>s syllables<block_start><for_stmt>n s<block_start><for_stmt>p n<block_start>out<augadd>p.ipa<block_end><block_end># seperate all syllables with IPA syllable punctuation: . 
<if_stmt>s<ne>ultima<block_start>out<augadd>"."<block_end><block_end><block_end><block_end># if no syllabification flag, proceed with # unsyllabified IPA printing <else_stmt><block_start><for_stmt>p self.phones<block_start>out<augadd>p.ipa<block_end><block_end><return>out<block_end><block_end><class_stmt>Transcriber<block_start>"""Uses a reconstruction to transcribe a orthographic string into IPA."""<def_stmt>__init__ self dialect:str reconstruction:str<block_start>""" :param dialect: Latin dialect :param reconstruction: reconstruction method """<line_sep>self.lect=dialect<line_sep>self.recon=reconstruction<line_sep>self.root=LATIN[self.lect][self.recon]<line_sep>self.table=self.root["correspondence"]<line_sep>self.diphs=self.root["diphthongs"]<line_sep>self.punc=self.root["punctuation"]<line_sep>self.macronizer=m.Macronizer("tag_ngram_123_backoff")<block_end><def_stmt>_parse_diacritics self ch:str<arrow>str<block_start>""" EG: input with base a -> a/LENGTH/DIAERESIS/ :param ch: character :return: a string with separated and organized diacritics for easier access later. """<line_sep>out=chars.base(ch).lower()# Initialize out as base of character. length=chars.length(ch)<line_sep>dia=chars.diaeresis(ch)<line_sep>out<augadd>"/"# Create 1st boundary # If any length, place between 1st and 2nd boundary <if_stmt>length<block_start>out<augadd>length<block_end>out<augadd>"/"# Create 2nd boundary <if_stmt>dia# If any diaeresis, <block_start>out<augadd>dia<block_end># place between second and final boundary out<augadd>"/"# Create final boundary <return>out<block_end><def_stmt>_prep_text self text:str<block_start>""" Performs preparatory tasks grouping and reordering characters in order to make transcription formulaic. :param text: :return: """<line_sep>string_in="".join([self._parse_diacritics(ch)<for>ch text])<line_sep># searches for diphthongs and treats them as one phone <for_stmt>d self.diphs<block_start>d1=d[0]<line_sep>d2=d[1]<line_sep>pattern=r"("+d1+r")\/\/\/("+d2+r")(\/\/\/)"<line_sep>string_in=re.sub(pattern r"\1\2\3" string_in)<block_end>tup_out=re.findall(r"(..?)\/([̄̆]*)\/(¨?)\/" string_in)<line_sep><return>tup_out<block_end><def_stmt>transcribe self text macronize=<true> syllabify=<true> accentuate=<true> with_squared_brackets=<true> <block_start>""" >>> allen_transcriber = Transcriber("Classical", "Allen") >>> example = allen_transcriber.transcribe("Quo usque tandem, O Catilina, " + "abutere nostra patientia?") >>> example "['kʷoː 'ʊs.kʷɛ 't̪an̪.d̪ẽː 'oː ka.t̪ɪ.'liː.n̪aː a.buː.'t̪eː.rɛ 'n̪ɔs.t̪raː pa.t̪ɪ̣.'jɛn̪.t̪ɪ̣.ja]" :param text: text to transcribe :param macronize: if True, macronize result :param syllabify: if True, syllabify result :param accentuate: if True, accentuate result :param with_squared_brackets: if True, put squared brackets around transcription :return: transcribed text """<line_sep># if macronize, will first use the tagger to macronize input # otherwise, input will be the raw input string <if_stmt>macronize<block_start>text=self.macronizer.macronize_text(text)<block_end># input is word-tokenized, stripped of non-diacritic punctuation, # and diphthongs and diacritics are handled inp=[self._prep_text(w)<for>w wordpunct_tokenize(text)<if>w<not><in>self.punc]<line_sep>words=[]<for_stmt>w inp<block_start>out=""<for_stmt>c w<block_start><if_stmt>"̄"<in>c[1]<block_start>macron_added=c[0]+"̄"<line_sep>ipa=self.table.get(macron_added macron_added)<block_end><else_stmt><block_start>ipa=self.table.get(c[0] c[0])<block_end>out<augadd>ipa<block_end>transcription=Word(out 
self.root)<line_sep>transcription._alternate()<line_sep>words.append(transcription)<block_end># Encloses output in brackets, proper notation for surface form. result=" ".join([w._print_ipa(syllabify accentuate)<for>w words])<if_stmt>with_squared_brackets<block_start>result="["+result+"]"<block_end><return>result<block_end><block_end>
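A small sketch of the lower-level pipeline that Transcriber.transcribe drives internally (it uses the module's private helpers, so treat it as illustrative only): build a Word from an IPA string and the Classical/Allen reconstruction, run the contextual alternations, then print it syllabified and accented.

root = LATIN["Classical"]["Allen"]
word = Word("d̪ɔmɪn̪ʊs", root)    # IPA for "dominus", read straight off the correspondence table
word._alternate()                 # apply the contextual alternation rules
print(word._print_ipa(syllabify=True, accentuate=True))   # expected: something like 'd̪ɔ.mɪ.n̪ʊs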
# Copyright Pincer 2021-Present # Full MIT License can be found in `LICENSE` at the project root. <import_from_stmt>pincer.core.dispatch GatewayDispatch<class_stmt>TestDispatch<block_start>op=123<line_sep>data={"foo":"bar" "bar":"foo"}<line_sep>seq=456<line_sep>event_name="test_event"<line_sep>dispatch_string=('{"op": 123, "d": {"foo": "bar", "bar": "foo"}, '<concat>'"s": 456, "t": "test_event"}')<line_sep>dispatch=GatewayDispatch(op data seq event_name)<def_stmt>test_string_fmt self<block_start>""" Tests whether or not the dispatch class its string conversion is correct. """<assert_stmt>str(self.dispatch)<eq>self.dispatch_string<block_end><def_stmt>test_from_string self<block_start>""" Tests whether or not the from_string function is properly parsing the string and creating a GatewayDispatch instance. """<assert_stmt>(str(GatewayDispatch.from_string(self.dispatch_string))<eq>self.dispatch_string)<block_end><block_end>
<import_from_stmt>pydantic BaseModel<class_stmt>Configuration(BaseModel)<block_start>pattern:str<line_sep>text:str<line_sep>group_prefix:str="Group"<block_end>
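A short sketch of how a Configuration like this might be consumed, pairing the pattern with the text and labelling unnamed capture groups with group_prefix; the way the fields are combined here is an assumption for illustration.

import re

cfg = Configuration(pattern=r"(\d+)-(\d+)", text="orders 12-34 and 56-78")
for match in re.finditer(cfg.pattern, cfg.text):
    for idx, value in enumerate(match.groups(), start=1):
        print(f"{cfg.group_prefix} {idx}: {value}")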
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) <import_stmt>copy<import_stmt>pytest<import_from_stmt>requests.exceptions ConnectionError<import_from_stmt>datadog_checks.gitlab_runner GitlabRunnerCheck<import_from_stmt>.common BAD_CONFIG CONFIG CUSTOM_TAGS GITLAB_RUNNER_VERSION HOST assert_check<line_sep>pytestmark=[pytest.mark.usefixtures('dd_environment') pytest.mark.integration]<def_stmt>test_check aggregator dd_run_check<block_start>instance=CONFIG['instances'][0]<line_sep>init_config=copy.deepcopy(CONFIG['init_config'])<line_sep>gitlab_runner=GitlabRunnerCheck('gitlab_runner' init_config instances=[instance])<line_sep>dd_run_check(gitlab_runner)<line_sep>dd_run_check(gitlab_runner)<line_sep>assert_check(aggregator)<line_sep>aggregator.assert_all_metrics_covered()<block_end><def_stmt>test_connection_failure aggregator<block_start>""" Make sure we're failing when the URL isn't right """<line_sep>gitlab_runner=GitlabRunnerCheck('gitlab' BAD_CONFIG['init_config'] instances=BAD_CONFIG['instances'])<with_stmt>pytest.raises(ConnectionError)<block_start>gitlab_runner.check(BAD_CONFIG['instances'][0])<block_end># We should get two failed service checks aggregator.assert_service_check(GitlabRunnerCheck.MASTER_SERVICE_CHECK_NAME status=GitlabRunnerCheck.CRITICAL tags=['gitlab_host:{}'.format(HOST) 'gitlab_port:1234']+CUSTOM_TAGS count=1 )<line_sep>aggregator.assert_service_check(GitlabRunnerCheck.PROMETHEUS_SERVICE_CHECK_NAME status=GitlabRunnerCheck.CRITICAL tags=CUSTOM_TAGS count=1)<block_end><def_stmt>test_version_metadata aggregator datadog_agent dd_run_check<block_start>check_instance=GitlabRunnerCheck('gitlab_runner' CONFIG['init_config'] instances=[CONFIG['instances'][0]])<line_sep>check_instance.check_id='test:123'<line_sep>dd_run_check(check_instance)<line_sep>raw_version=GITLAB_RUNNER_VERSION<line_sep>major,minor,patch=raw_version.split('.')<line_sep>version_metadata={'version.scheme':'semver' 'version.major':major 'version.minor':minor 'version.patch':patch 'version.raw':raw_version }<line_sep>datadog_agent.assert_metadata('test:123' version_metadata)<block_end>
"""urllib2 style build opener integrates with HTTPSConnection class from this package. """<line_sep>__author__="<NAME>"<line_sep>__date__="21/12/10"<line_sep>__copyright__="(C) 2011 Science and Technology Facilities Council"<line_sep>__license__="BSD - see LICENSE file in top-level directory"<line_sep>__contact__="<EMAIL>"<line_sep>__revision__='$Id$'<import_stmt>logging<import_stmt>sys<line_sep># Py 2 <=> 3 compatibility for class type checking <if_stmt>sys.version_info[0]<g>2<block_start>class_type_=type<import_from_stmt>urllib.request ProxyHandler UnknownHandler HTTPDefaultErrorHandler FTPHandler FileHandler HTTPErrorProcessor HTTPHandler OpenerDirector HTTPRedirectHandler <block_end><else_stmt><block_start><import_stmt>types<line_sep>class_type_=types.ClassType<import_from_stmt>urllib2 ProxyHandler UnknownHandler HTTPDefaultErrorHandler FTPHandler FileHandler HTTPErrorProcessor HTTPHandler OpenerDirector HTTPRedirectHandler <block_end><import_from_stmt>ndg.httpsclient.https HTTPSContextHandler<line_sep>log=logging.getLogger(__name__)<line_sep># Copied from urllib2 with modifications for ssl <def_stmt>build_opener *handlers **kw<block_start>"""Create an opener object from a list of handlers. The opener will use several default handlers, including support for HTTP and FTP. If any of the handlers passed as arguments are subclasses of the default handlers, the default handlers will not be used. """<def_stmt>isclass obj<block_start><return>isinstance(obj class_type_)<or>hasattr(obj "__bases__")<block_end>opener=OpenerDirector()<line_sep>default_classes=[ProxyHandler UnknownHandler HTTPHandler HTTPDefaultErrorHandler HTTPRedirectHandler FTPHandler FileHandler HTTPErrorProcessor]<line_sep>check_classes=list(default_classes)<line_sep>check_classes.append(HTTPSContextHandler)<line_sep>skip=[]<for_stmt>klass check_classes<block_start><for_stmt>check handlers<block_start><if_stmt>isclass(check)<block_start><if_stmt>issubclass(check klass)<block_start>skip.append(klass)<block_end><block_end><elif_stmt>isinstance(check klass)<block_start>skip.append(klass)<block_end><block_end><block_end><for_stmt>klass default_classes<block_start><if_stmt>klass<not><in>skip<block_start>opener.add_handler(klass())<block_end><block_end># Pick up SSL context from keyword settings ssl_context=kw.get('ssl_context')<line_sep># Add the HTTPS handler with ssl_context <if_stmt>HTTPSContextHandler<not><in>skip<block_start>opener.add_handler(HTTPSContextHandler(ssl_context))<block_end><for_stmt>h handlers<block_start><if_stmt>isclass(h)<block_start>h=h()<block_end>opener.add_handler(h)<block_end><return>opener<block_end>
# # MLDB-927-null-row-output.py # mldb.ai inc, 2015 # This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved. # <import_stmt>json<import_stmt>datetime<import_stmt>difflib<import_from_stmt>mldb mldb<line_sep>dataset_index=1<def_stmt>run_transform when format<block_start><global>dataset_index<line_sep>dataset_index<augadd>1<line_sep>result=mldb.put("/v1/procedures/when_procedure" {"type":"transform" "params":{"inputData":"select * from dataset1 when "+when "outputDataset":{"id":"dataset_out_"+str(dataset_index) "type":"sparse.mutable"}}})<line_sep>mldb.post("/v1/procedures/when_procedure/runs")<line_sep>result=mldb.get('/v1/query' q="SELECT * FROM dataset_out_"+str(dataset_index)+" ORDER BY rowHash()" format=format)<line_sep>rows=result.json()<line_sep><return>rows<block_end><def_stmt>load_test_dataset <block_start>ds1=mldb.create_dataset({'type':'sparse.mutable' 'id':'dataset1'})<line_sep>ds1.record_row('user1' [['x' 1 same_time_tomorrow] ['y' 2 same_time_tomorrow]])<line_sep>ds1.record_row('user2' [['x' 3 now] ['y' 4 now]])<line_sep>ds1.commit()<block_end><def_stmt>compare_json json1 json2 format<block_start><if_stmt>json1<ne>json2<block_start>mldb.log("output format differ:\n")<for_stmt>line difflib.ndiff(json1.splitlines() json2.splitlines())<block_start>mldb.log(line)<block_end><assert_stmt>json1<eq>json2 "difference in the way null values are outputted in format %s"%format<block_end><block_end>now=datetime.datetime.now()<line_sep>later=now+datetime.timedelta(seconds=1)<line_sep>same_time_tomorrow=now+datetime.timedelta(days=1)<line_sep>load_test_dataset()<line_sep>formats=['full' 'sparse' 'soa' 'aos' 'table']<for_stmt>format formats<block_start>result=mldb.get('/v1/query' q="SELECT * FROM dataset1 WHEN value_timestamp() > '%s' ORDER BY rowHash()"%later format=format)<line_sep>rows1=json.dumps(result.json() indent=4 sort_keys=<true>)<line_sep>result=mldb.get('/v1/query' q="SELECT * from dataset1 WHEN value_timestamp() > '%s' ORDER BY rowHash()"%later format=format)<line_sep>rows2=json.dumps(result.json() indent=4 sort_keys=<true>)<line_sep>response=run_transform("value_timestamp() > '%s'"%later format)<line_sep>rows3=json.dumps(response indent=4 sort_keys=<true> default=str)<line_sep>compare_json(rows1 rows2 format)<line_sep>compare_json(rows2 rows3 format)<block_end>request.set_return('success')<line_sep>
# Lint as: python3 # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests of the synthesis service: client and dummy server."""<import_stmt>subprocess<import_stmt>portpicker<import_from_stmt>google.protobuf text_format<import_from_stmt>absl.testing absltest<import_from_stmt>xls.common runfiles<import_from_stmt>xls.synthesis synthesis_pb2<line_sep>CLIENT_PATH=runfiles.get_path('xls/synthesis/synthesis_client_main')<line_sep>SERVER_PATH=runfiles.get_path('xls/synthesis/yosys/yosys_server_main')<line_sep>YOSYS_PATH=runfiles.get_path('xls/synthesis/yosys/bogusys')<line_sep>NEXTPNR_PATH=runfiles.get_path('xls/synthesis/yosys/nextpbr')<line_sep>VERILOG=""" module main( input wire [31:0] x, input wire [31:0] y, output wire [31:0] out ); assign out = x + y; endmodule """<class_stmt>SynthesisServerTest(absltest.TestCase)<block_start><def_stmt>_start_server self<block_start>port=portpicker.pick_unused_port()<line_sep>proc=subprocess.Popen([runfiles.get_path(SERVER_PATH) f'--port={port}' f'--yosys_path={YOSYS_PATH}' f'--nextpnr_path={NEXTPNR_PATH}' '--synthesis_target=ecp5' ])<line_sep><return>port proc<block_end><def_stmt>test_slack self<block_start>port,proc=self._start_server()<line_sep>verilog_file=self.create_tempfile(content=VERILOG)<line_sep>response_text=subprocess.check_output([CLIENT_PATH verilog_file.full_path f'--port={port}' '--ghz=1.0']).decode('utf-8')<line_sep>response=text_format.Parse(response_text synthesis_pb2.CompileResponse())<line_sep># The response is generated by parsing testdata/nextpnr.out. self.assertEqual(response.max_frequency_hz 180280000)<line_sep>proc.terminate()<line_sep>proc.wait()<block_end><def_stmt>test_cell_histogram self<block_start>port,proc=self._start_server()<line_sep>verilog_file=self.create_tempfile(content=VERILOG)<line_sep>response_text=subprocess.check_output([CLIENT_PATH verilog_file.full_path f'--port={port}' '--ghz=1.0']).decode('utf-8')<line_sep>response=text_format.Parse(response_text synthesis_pb2.CompileResponse())<line_sep># The response is generated by parsing bogusys stdout. self.assertLen(response.instance_count.cell_histogram 2)<line_sep>self.assertIn('CCU2C' response.instance_count.cell_histogram)<line_sep>self.assertEqual(response.instance_count.cell_histogram['CCU2C'] 32)<line_sep>self.assertIn('TRELLIS_FF' response.instance_count.cell_histogram)<line_sep>self.assertEqual(response.instance_count.cell_histogram['TRELLIS_FF'] 192)<line_sep>proc.terminate()<line_sep>proc.wait()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
<import_from_stmt>typing Optional Tuple<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch Tensor<import_from_stmt>torch.nn.init constant_ xavier_normal_ xavier_uniform_<import_from_stmt>torch.nn.modules.linear NonDynamicallyQuantizableLinear<import_from_stmt>torch.nn.parameter Parameter<import_from_stmt>oslo.torch.nn.modules.functional multi_head_attention_forward<class_stmt>MultiheadAttention(nn.Module)<block_start>r"""Allows the model to jointly attend to information from different representation subspaces as described in the paper: `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_. Multi-Head Attention is defined as: .. math:: \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`. Args: embed_dim: Total dimension of the model. num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``). dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout). bias: If specified, adds bias to input / output projection layers. Default: ``True``. add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``. add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1. Default: ``False``. kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``). vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``). batch_first: If ``True``, then the input and output tensors are provided as (batch, seq, feature). Default: ``False`` (seq, batch, feature). use_sequence_parallel: If ``True``, then self attention module is changed self attention ring module. 
Default: ``False`` Examples:: >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) >>> attn_output, attn_output_weights = multihead_attn(query, key, value) """<line_sep>__constants__=["batch_first"]<line_sep>bias_k:Optional[torch.Tensor]<line_sep>bias_v:Optional[torch.Tensor]<def_stmt>__init__ self embed_dim num_heads dropout=0.0 bias=<true> add_bias_kv=<false> add_zero_attn=<false> kdim=<none> vdim=<none> batch_first=<false> device=<none> dtype=<none> use_sequence_parallel=<false> parallel_context=<none> <arrow><none><block_start>factory_kwargs={"device":device "dtype":dtype}<line_sep>super(MultiheadAttention self).__init__()<line_sep>self.embed_dim=embed_dim<line_sep>self.kdim=kdim<if>kdim<is><not><none><else>embed_dim<line_sep>self.vdim=vdim<if>vdim<is><not><none><else>embed_dim<line_sep>self._qkv_same_embed_dim=self.kdim<eq>embed_dim<and>self.vdim<eq>embed_dim<line_sep>self.num_heads=num_heads<line_sep>self.dropout=dropout<line_sep>self.batch_first=batch_first<line_sep>self.head_dim=embed_dim<floordiv>num_heads<assert_stmt>(self.head_dim<times>num_heads<eq>self.embed_dim) "embed_dim must be divisible by num_heads"<line_sep># Support sequence parallel self.sequence_parallel_support=<true><line_sep>self.use_sequence_parallel=use_sequence_parallel<line_sep>self.parallel_context=parallel_context<if_stmt>self._qkv_same_embed_dim<is><false><block_start>self.q_proj_weight=Parameter(torch.empty((embed_dim embed_dim) **factory_kwargs))<line_sep>self.k_proj_weight=Parameter(torch.empty((embed_dim self.kdim) **factory_kwargs))<line_sep>self.v_proj_weight=Parameter(torch.empty((embed_dim self.vdim) **factory_kwargs))<line_sep>self.register_parameter("in_proj_weight" <none>)<block_end><else_stmt><block_start>self.in_proj_weight=Parameter(torch.empty((3<times>embed_dim embed_dim) **factory_kwargs))<line_sep>self.register_parameter("q_proj_weight" <none>)<line_sep>self.register_parameter("k_proj_weight" <none>)<line_sep>self.register_parameter("v_proj_weight" <none>)<block_end><if_stmt>bias<block_start>self.in_proj_bias=Parameter(torch.empty(3<times>embed_dim **factory_kwargs))<block_end><else_stmt><block_start>self.register_parameter("in_proj_bias" <none>)<block_end>self.out_proj=NonDynamicallyQuantizableLinear(embed_dim embed_dim bias=bias **factory_kwargs)<if_stmt>add_bias_kv<block_start>self.bias_k=Parameter(torch.empty((1 1 embed_dim) **factory_kwargs))<line_sep>self.bias_v=Parameter(torch.empty((1 1 embed_dim) **factory_kwargs))<block_end><else_stmt><block_start>self.bias_k=self.bias_v=<none><block_end>self.add_zero_attn=add_zero_attn<line_sep>self._reset_parameters()<block_end><def_stmt>_reset_parameters self<block_start><if_stmt>self._qkv_same_embed_dim<block_start>xavier_uniform_(self.in_proj_weight)<block_end><else_stmt><block_start>xavier_uniform_(self.q_proj_weight)<line_sep>xavier_uniform_(self.k_proj_weight)<line_sep>xavier_uniform_(self.v_proj_weight)<block_end><if_stmt>self.in_proj_bias<is><not><none><block_start>constant_(self.in_proj_bias 0.0)<line_sep>constant_(self.out_proj.bias 0.0)<block_end><if_stmt>self.bias_k<is><not><none><block_start>xavier_normal_(self.bias_k)<block_end><if_stmt>self.bias_v<is><not><none><block_start>xavier_normal_(self.bias_v)<block_end><block_end><def_stmt>__setstate__ self state# Support loading old MultiheadAttention checkpoints generated by v1.1.0 <block_start><if_stmt>"_qkv_same_embed_dim"<not><in>state<block_start>state["_qkv_same_embed_dim"]=<true><block_end>super(MultiheadAttention self).__setstate__(state)<block_end><def_stmt>forward 
self query:Tensor key:Tensor value:Tensor key_padding_mask:Optional[Tensor]=<none> need_weights:bool=<true> attn_mask:Optional[Tensor]=<none> average_attn_weights:bool=<true> <arrow>Tuple[Tensor Optional[Tensor]]<block_start>r""" Args: query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False`` or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``. Queries are compared against key-value pairs to produce the output. See "Attention Is All You Need" for more details. key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False`` or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length, :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``. See "Attention Is All You Need" for more details. value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``. See "Attention Is All You Need" for more details. key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key`` to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`. Binary and byte masks are supported. For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key`` value will be ignored. need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``. Default: ``True``. attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch. Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the corresponding position is not allowed to attend. For a float mask, the mask values will be added to the attention weight. average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads) Outputs: - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched, :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``, where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the embedding dimension ``embed_dim``. - **attn_output_weights** - Only returned when ``need_weights=True``. 
If ``average_attn_weights=True``, returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and :math:`S` is the source sequence length. If ``average_weights=False``, returns attention weights per head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`. .. note:: `batch_first` argument is ignored for unbatched inputs. """<line_sep>is_batched=query.dim()<eq>3<if_stmt>self.batch_first<and>is_batched# make sure that the transpose op does not affect the "is" property <block_start><if_stmt>key<is>value<block_start><if_stmt>query<is>key<block_start>query=key=value=query.transpose(1 0)<block_end><else_stmt><block_start>query,key=[x.transpose(1 0)<for>x (query key)]<line_sep>value=key<block_end><block_end><else_stmt><block_start>query,key,value=[x.transpose(1 0)<for>x (query key value)]<block_end># TODO: When batch first is True, RSA <block_end><if_stmt><not>self._qkv_same_embed_dim# TODO: When dimension of qkv is not same, RSA <block_start>attn_output,attn_output_weights=multi_head_attention_forward(query key value self.embed_dim self.num_heads self.in_proj_weight self.in_proj_bias self.bias_k self.bias_v self.add_zero_attn self.dropout self.out_proj.weight self.out_proj.bias training=self.training key_padding_mask=key_padding_mask need_weights=need_weights attn_mask=attn_mask use_separate_proj_weight=<true> q_proj_weight=self.q_proj_weight k_proj_weight=self.k_proj_weight v_proj_weight=self.v_proj_weight average_attn_weights=average_attn_weights use_sequence_parallel=self.use_sequence_parallel parallel_context=self.parallel_context )<block_end><else_stmt><block_start>attn_output,attn_output_weights=multi_head_attention_forward(query key value self.embed_dim self.num_heads self.in_proj_weight self.in_proj_bias self.bias_k self.bias_v self.add_zero_attn self.dropout self.out_proj.weight self.out_proj.bias training=self.training key_padding_mask=key_padding_mask need_weights=need_weights attn_mask=attn_mask average_attn_weights=average_attn_weights use_sequence_parallel=self.use_sequence_parallel parallel_context=self.parallel_context )<block_end><if_stmt>self.batch_first<and>is_batched# TODO: When batch first is True, RSA attention output <block_start><return>attn_output.transpose(1 0) attn_output_weights<block_end><else_stmt><block_start><return>attn_output attn_output_weights<block_end><block_end><block_end>
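A minimal usage sketch of the module above. The import path is an assumption (adjust it to wherever this class is exported in your oslo build); shapes follow the default batch_first=False layout, and sequence parallelism is left off because it requires a parallel_context.

import torch
from oslo.torch.nn import MultiheadAttention  # assumed export path

embed_dim, num_heads = 64, 8
mha = MultiheadAttention(embed_dim, num_heads, dropout=0.1)

L, N = 10, 4                          # target length, batch size
query = torch.randn(L, N, embed_dim)  # (L, N, E) since batch_first=False
key = value = torch.randn(L, N, embed_dim)

attn_output, attn_weights = mha(query, key, value, need_weights=True)
print(attn_output.shape)   # torch.Size([10, 4, 64])
print(attn_weights.shape)  # torch.Size([4, 10, 10]), heads averaged by default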
OEMBED_URL_SCHEME_REGEXPS={'slideshare':r'https?://(?:www\.)?slideshare\.(?:com|net)/.*' 'soundcloud':r'https?://soundcloud.com/.*' 'vimeo':r'https?://(?:www\.)?vimeo\.com/.*' 'youtube':r'https?://(?:(www\.)?youtube\.com|youtu\.be)/.*' }<line_sep>OEMBED_BASE_URLS={'slideshare':'https://www.slideshare.net/api/oembed/2?url=%(url)s' 'soundcloud':'https://soundcloud.com/oembed?url=%(url)s&format=json' 'vimeo':'https://vimeo.com/api/oembed.json?url=%(url)s&maxwidth=400&maxheight=350' 'youtube':'https://www.youtube.com/oembed?url=%(url)s&format=json' }<line_sep>
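The consuming code is not shown here, so the helper below is only an illustration of how these two dicts are typically combined: match the media URL against the scheme regexps, then fill the matching endpoint template and fetch the provider's JSON.

import json
import re
import urllib.request
from urllib.parse import quote

def resolve_oembed(url):
    # Illustrative helper, not part of the original module.
    for provider, scheme in OEMBED_URL_SCHEME_REGEXPS.items():
        if re.match(scheme, url):
            endpoint = OEMBED_BASE_URLS[provider] % {'url': quote(url, safe='')}
            with urllib.request.urlopen(endpoint) as resp:  # network call
                return json.loads(resp.read().decode('utf-8'))
    return None  # no provider matched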
<import_stmt>logging<def_stmt>get_logger output_dir log_name<block_start>"""Creates a log file and returns an object to interface with it. """<line_sep>logger=logging.getLogger(log_name)<line_sep>logger.setLevel(logging.DEBUG)<line_sep># create file handler which logs even debug messages fh=logging.FileHandler(output_dir+log_name+'.log')<line_sep>fh.setLevel(logging.DEBUG)<line_sep># create console handler with a higher log level ch=logging.StreamHandler()<line_sep>ch.setLevel(logging.ERROR)<line_sep># create formatter and add it to the handlers formatter=logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')<line_sep>fh.setFormatter(formatter)<line_sep>ch.setFormatter(formatter)<line_sep># add the handlers to the logger logger.addHandler(fh)<line_sep>logger.addHandler(ch)<line_sep><return>logger<block_end><def_stmt>init _fresh_run _fill_cache _output_dir _cache_dir log_name<block_start>"""Initializes global variables that are readable from importing modules. """<line_sep><global>fresh_run fill_cache output_dir cache_dir<line_sep>fresh_run=_fresh_run<line_sep>fill_cache=_fill_cache<line_sep>output_dir=_output_dir<line_sep>cache_dir=_cache_dir<line_sep><return>get_logger(output_dir log_name)<block_end>
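A usage sketch, assuming the file above is importable as log_utils (a hypothetical module name). Note that get_logger concatenates output_dir and log_name directly, so the directory string should end with a path separator.

import log_utils  # hypothetical name for the module above

logger = log_utils.init(
    _fresh_run=True,
    _fill_cache=False,
    _output_dir='./output/',   # trailing separator matters
    _cache_dir='./cache/',
    log_name='experiment1',
)
logger.debug('goes to ./output/experiment1.log only')
logger.error('goes to the log file and is echoed to the console')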
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>libtbx.test_utils approx_equal<import_from_stmt>cctbx uctbx<import_from_stmt>cctbx.array_family flex<import_from_stmt>six.moves range<try_stmt><block_start><import_from_stmt>six.moves cPickle<as>pickle<block_end><except_stmt>ImportError<block_start><import_stmt>pickle<block_end><def_stmt>exercise_flex_miller_index <block_start><import_from_stmt>scitbx.array_family.flex exercise_triple<line_sep>exercise_triple(flex_triple=flex.miller_index flex_order=flex.order)<line_sep>a=flex.miller_index([(1 2 3) (2 3 4)])<assert_stmt>approx_equal(a.as_vec3_double() [(1 2 3) (2 3 4)])<line_sep>h,k,l=[flex.int((0 1 2 3)) flex.int((1 2 3 4)) flex.int((2 3 4 5))]<line_sep>b=flex.miller_index(h k l)<assert_stmt>approx_equal(b ((0 1 2) (1 2 3) (2 3 4) (3 4 5)))<line_sep># i=flex.miller_index([(1 -2 3) (-2 3 4)])<line_sep>c=flex.complex_double([3-4j -7+8j])<line_sep>x=(0.9284 -1.2845 -0.2293)<assert_stmt>approx_equal(i.fourier_transform_real_part_at_x(fourier_coeffs=c x=x) 15.4357188164)<block_end><def_stmt>exercise_flex_hendrickson_lattman <block_start>a=flex.hendrickson_lattman()<assert_stmt>a.size()<eq>0<line_sep>a=flex.hendrickson_lattman(132)<for_stmt>x a<block_start><assert_stmt>x<eq>(0 0 0 0)<block_end>a=flex.hendrickson_lattman(((1 2 3 4) (2 3 4 5) (3 4 5 6)))<assert_stmt>a.size()<eq>3<assert_stmt>a.count((1 2 3 4))<eq>1<assert_stmt>a.count((0 0 0 0))<eq>0<assert_stmt>tuple(a)<eq>((1 2 3 4) (2 3 4 5) (3 4 5 6))<assert_stmt>tuple(a+a)<eq>((2 4 6 8) (4 6 8 10) (6 8 10 12))<line_sep>a<augadd>a<assert_stmt>tuple(a)<eq>((2 4 6 8) (4 6 8 10) (6 8 10 12))<line_sep>p=pickle.dumps(a)<line_sep>b=pickle.loads(p)<assert_stmt>tuple(a)<eq>tuple(b)<line_sep>centric_flags=flex.bool([<false> <true>])<line_sep>phase_integrals=flex.complex_double([complex(0.5 -0.7) complex(-0.3 0.4)])<line_sep>a=flex.hendrickson_lattman(centric_flags=centric_flags phase_integrals=phase_integrals max_figure_of_merit=1-1.e-6)<assert_stmt>approx_equal(a [(2.2684820912654264 -3.1758749277715967 0 0) (-0.3295836866004328 0.43944491546724396 0 0)])<assert_stmt>approx_equal([a.slice(i)<for>i range(4)] [[2.2684820912654264 -0.3295836866004328] [-3.1758749277715967 0.43944491546724396] [0.0 0.0] [0.0 0.0]])<line_sep>a=flex.hendrickson_lattman(3 (1 2 3 4))<assert_stmt>a.all_eq((1 2 3 4))<assert_stmt><not>a.all_eq((1 2 0 4))<assert_stmt>approx_equal(a.conj() [(1 -2 3 -4) (1 -2 3 -4) (1 -2 3 -4)])<assert_stmt>approx_equal(a.conj().conj() a)<line_sep># a=flex.double()<line_sep>h=flex.hendrickson_lattman(a=a b=a)<assert_stmt>h.size()<eq>0<line_sep>a=flex.double([1 2 3])<line_sep>b=flex.double([-3 4 5])<line_sep>h=flex.hendrickson_lattman(a=a b=b)<assert_stmt>approx_equal(h [(1 -3 0 0) (2 4 0 0) (3 5 0 0)])<assert_stmt>approx_equal(h<eq>(1 -3 0 0) (<true> <false> <false>))<assert_stmt>approx_equal(h<ne>(1 -3 0 0) (<false> <true> <true>))<assert_stmt>approx_equal(h<ne>(0 0 0 0) (<true> <true> <true>))<assert_stmt>approx_equal(h<eq>h.deep_copy() (<true> <true> <true>))<assert_stmt>approx_equal(h<eq>flex.hendrickson_lattman(a=b b=a) (<false> <false> <false>))<assert_stmt>approx_equal(h<ne>flex.hendrickson_lattman(a=b b=a) (<true> <true> <true>))<assert_stmt>approx_equal(h<ne>flex.hendrickson_lattman(a=b b=a) (<true> <true> <true>))<assert_stmt>approx_equal(h<ne>h.deep_copy() (<false> <false> <false>))<line_sep>c=flex.double([4 5 6])<line_sep>d=flex.double([-4 7 8])<line_sep>h=flex.hendrickson_lattman(a=a b=b c=c d=d)<assert_stmt>approx_equal(h [(1 -3 4 -4) (2 4 5 7) (3 5 6 
8)])<assert_stmt>approx_equal(h.as_abcd() [a b c d])<line_sep>h=h<times>2<assert_stmt>approx_equal(h [(2 -6 8 -8) (4 8 10 14) (6 10 12 16)])<block_end><def_stmt>exercise_flex_xray_scatterer <block_start><import_from_stmt>cctbx uctbx sgtbx xray<line_sep>uc=uctbx.unit_cell((10 11 12))<line_sep>sg=sgtbx.space_group_info("P 2")<line_sep>a=flex.xray_scatterer()<assert_stmt>a.size()<eq>0<line_sep>a=flex.xray_scatterer((xray.scatterer("Si1" (0.1 0.2 0.3)) xray.scatterer("O1" (0.2 0.3 0.4) (1 2 3 -0.1 0.2 -0.3) 0.9) xray.scatterer("K1" (0.3 0.4 0.5) (3 1 2 -0.2 0.3 -0.1) 0.8 fp=5 fdp=7)))<assert_stmt>a.size()<eq>3<assert_stmt>a[1].multiplicity()<eq>0<line_sep>a[1].apply_symmetry(uc sg.group())<assert_stmt>a[1].multiplicity()<eq>2<assert_stmt>approx_equal(a[1].weight() 0.9)<line_sep>a.front().occupancy=0.8<assert_stmt>approx_equal(a[0].occupancy 0.8)<line_sep>a.back().occupancy=0.7<assert_stmt>approx_equal(a[-1].occupancy 0.7)<line_sep>a[0].flags.set_grad_site(state=<true>)<line_sep>a[1].flags.set_grad_fp(state=<true>)<line_sep>a[2].flags.param=-234<line_sep>p=pickle.dumps(a)<line_sep>b=pickle.loads(p)<line_sep>a_=a.deep_copy()<assert_stmt>a_.n_grad_u_iso()<eq>a_.n_grad_u_aniso()<eq>0<line_sep>a_[0].flags.set_grad_u_iso(state=<true>)<line_sep>a_[1].flags.set_grad_u_aniso(state=<true>)<line_sep>a_[2].flags.set_grad_u_aniso(state=<true>)<assert_stmt>a_.n_grad_u_iso()<eq>1<assert_stmt>a_.n_grad_u_aniso()<eq>2<for_stmt>i,ai enumerate(a)<block_start>bi=b[i]<assert_stmt>ai.label<eq>bi.label<assert_stmt>ai.scattering_type<eq>bi.scattering_type<assert_stmt>approx_equal(ai.fp bi.fp)<assert_stmt>approx_equal(ai.fdp bi.fdp)<assert_stmt>approx_equal(ai.site bi.site)<assert_stmt>ai.flags.use_u_aniso()<eq>bi.flags.use_u_aniso()<assert_stmt>ai.u_iso<eq>bi.u_iso<assert_stmt>ai.u_star<eq>bi.u_star<assert_stmt>ai.multiplicity()<eq>bi.multiplicity()<assert_stmt>approx_equal(ai.weight() bi.weight())<assert_stmt>ai.flags.bits<eq>bi.flags.bits<assert_stmt>ai.flags.param<eq>bi.flags.param<block_end><assert_stmt>b[0].flags.grad_site()<assert_stmt><not>b[0].flags.grad_fp()<assert_stmt><not>b[1].flags.grad_site()<assert_stmt>b[1].flags.grad_fp()<assert_stmt>b[2].flags.param<eq>-234<assert_stmt>list(a.extract_labels())<eq>["Si1" "O1" "K1"]<assert_stmt>list(a.extract_scattering_types())<eq>["Si" "O" "K"]<assert_stmt>approx_equal(a.extract_sites() ((0.1 0.2 0.3) (0.2 0.3 0.4) (0.3 0.4 0.5)))<line_sep>a.set_sites(sites=flex.vec3_double(((-0.1 -0.2 -0.3) (-0.2 -0.3 -0.4) (-0.3 -0.4 -0.5))))<assert_stmt>approx_equal(a.extract_sites() ((-0.1 -0.2 -0.3) (-0.2 -0.3 -0.4) (-0.3 -0.4 -0.5)))<assert_stmt>approx_equal(a[1].site (-0.2 -0.3 -0.4))<assert_stmt>approx_equal(a.extract_occupancies() (0.8 0.9 0.7))<assert_stmt>approx_equal(a.extract_fps() (0.0 0.0 5.0))<assert_stmt>approx_equal(a.extract_fdps() (0.0 0.0 7.0))<line_sep>a.set_occupancies(occupancies=flex.double((0.1 0.2 0.3)))<line_sep>a.set_fps(fps=flex.double((0.0 0.0 1.0)))<line_sep>a.set_fdps(fdps=flex.double((0.0 0.0 2.0)))<assert_stmt>approx_equal(a.extract_occupancies() (0.1 0.2 0.3))<assert_stmt>approx_equal(a.extract_fps() (0.0 0.0 1.0))<assert_stmt>approx_equal(a.extract_fdps() (0.0 0.0 2.0))<assert_stmt>approx_equal(a[1].occupancy 0.2)<assert_stmt>approx_equal(a[2].fp 1.0)<assert_stmt>approx_equal(a[2].fdp 2.0)<assert_stmt>approx_equal(a.extract_u_iso() (0.0 -1.0 -1.0))<assert_stmt>approx_equal(a.extract_u_iso_or_u_equiv(unit_cell=uc) (0.0 258 236+1/3.))<line_sep>a.set_u_iso(u_iso=flex.double((3 4 5)) selection=flex.bool(a.size() <true>) 
unit_cell=uc)<assert_stmt>approx_equal(a.extract_u_iso() (3 -1 -1))<assert_stmt>approx_equal(a.extract_u_iso_or_u_equiv(unit_cell=uc) (3 4 5))<assert_stmt>approx_equal(a[1].u_iso -1)<line_sep>u_cart_answer=[(-1.0 -1.0 -1.0 -1.0 -1.0 -1.0) (4 4 4 0 0 0) (5 5 5 0 0 0)]<assert_stmt>approx_equal(a.extract_u_cart(uc) u_cart_answer)<line_sep>a.set_u_star(u_star=flex.sym_mat3_double([(-1 -2 -1 -1 -1 -1) (1 2 3 -0.6 0.2 -0.3) (3 1 2 -0.2 0.5 -0.1)]))<assert_stmt>approx_equal(a.extract_u_star() [(-1 -1 -1 -1 -1 -1) (1 2 3 -0.6 0.2 -0.3) (3 1 2 -0.2 0.5 -0.1)])<assert_stmt>approx_equal(a[1].u_star (1 2 3 -0.6 0.2 -0.3))<line_sep>unit_cell=uctbx.unit_cell((1 1 1 90 90 90))<line_sep>a.set_u_cart(unit_cell=unit_cell u_cart=flex.sym_mat3_double([(-1 -2 -1 -1 -1 -1) (1 2 3 -0.6 0.2 -0.3) (3 1 2 -0.2 0.5 -0.1)]))<assert_stmt>approx_equal(a.extract_u_cart(unit_cell=unit_cell) [(-1 -1 -1 -1 -1 -1) (1 2 3 -0.6 0.2 -0.3) (3 1 2 -0.2 0.5 -0.1)])<line_sep># a.set_u_cart(unit_cell=unit_cell u_cart=flex.sym_mat3_double([(1 2 3 4 5 6) (0 0 0 1 2 3) (1 2 3 0 0 0)]) selection=flex.size_t([1 2]))<assert_stmt>approx_equal(a.extract_u_cart(unit_cell=unit_cell) [(-1 -1 -1 -1 -1 -1) (0 0 0 1 2 3) (1 2 3 0 0 0)])<line_sep># unit_cell=uctbx.unit_cell((10 10 10 90 90 90))<line_sep>a.set_u_cart(unit_cell=unit_cell u_cart=flex.sym_mat3_double([(-1 -2 -1 -1 -1 -1) (1 2 3 -0.6 0.2 -0.3) (3 1 2 -0.2 0.5 -0.1)]))<assert_stmt>approx_equal(a.extract_u_star() [(-1 -1 -1 -1 -1 -1) (0.01 0.02 0.03 -0.006 0.002 -0.003) (0.03 0.01 0.02 -0.002 0.005 -0.001)])<assert_stmt>approx_equal(a.extract_u_iso() [3 -1 -1])<line_sep>a.scale_adps(2.0)<assert_stmt>approx_equal(a.extract_u_star() [(-1 -1 -1 -1 -1 -1) (0.02 0.04 0.06 -0.012 0.004 -0.006) (0.06 0.02 0.04 -0.004 0.01 -0.002)])<assert_stmt>approx_equal(a.extract_u_iso() [6 -1 -1])<assert_stmt>a.count_anisotropic()<eq>2<assert_stmt>a.count_anomalous()<eq>1<line_sep>a.convert_to_isotropic(unit_cell=unit_cell)<assert_stmt>a.count_anisotropic()<eq>0<line_sep>a.convert_to_anisotropic(unit_cell=unit_cell)<assert_stmt>a.count_anisotropic()<eq>3<line_sep>m=a.sites_mod_positive()<assert_stmt>approx_equal(m.extract_sites() [(0.9 0.8 0.7) (0.8 0.7 0.6) (0.7 0.6 0.5)])<line_sep>m[2].site=(0.7 0.6 1.4)# to avoid +-0.5 ambiguity m=m.sites_mod_short()<assert_stmt>approx_equal(m.extract_sites() [(-0.1 -0.2 -0.3) (-0.2 -0.3 -0.4) (-0.3 -0.4 0.4)])<line_sep># <assert_stmt>a.extract_grad_u_iso().all_eq(<false>)<line_sep>a[1].flags.set_grad_u_iso(state=<true>)<assert_stmt>list(a.extract_grad_u_iso())<eq>[<false> <true> <false>]<block_end><def_stmt>exercise_extract_u_cart_plus_u_iso <block_start><import_from_stmt>cctbx uctbx sgtbx xray<line_sep>uc=uctbx.unit_cell((1 1 1))<line_sep>sg=sgtbx.space_group_info("P 1")<line_sep>a=flex.xray_scatterer()<assert_stmt>a.size()<eq>0<line_sep>s1=xray.scatterer(label="C" u=0.1)<line_sep>s2=xray.scatterer(label="C" u=0.1)<line_sep>s2.flags.set_use_u_iso(<false>)<line_sep>s3=xray.scatterer(label="C" u=(1 1 1 1 1 1))<line_sep>s4=xray.scatterer(label="C" u=(1 1 1 1 1 1))<line_sep>s4.flags.set_use_u_aniso(<false>)<line_sep>s5=xray.scatterer(label="C" u=0.1)<line_sep>s5.u_star=(1 1 1 1 1 1)<line_sep>s5.flags.set_use_u_aniso(<true>)<line_sep>s6=xray.scatterer(label="C" u=0.1)<line_sep>s6.u_star=(1 1 1 1 1 1)<line_sep>s7=xray.scatterer(label="C" u=(1 1 1 1 1 1))<line_sep>s7.u_iso=0.1<line_sep>s8=xray.scatterer(label="C" u=(1 1 1 1 1 
1))<line_sep>s8.u_iso=0.1<line_sep>s8.flags.set_use_u_iso(<true>)<line_sep>s9=xray.scatterer(label="C")<line_sep>s10=xray.scatterer(label="C")<line_sep>s10.flags.set_use_u_iso(<false>)<line_sep>a=flex.xray_scatterer((s1 s2 s3 s4 s5 s6 s7 s8 s9 s10))<line_sep>u_cart_total=a.extract_u_cart_plus_u_iso(uc)<assert_stmt>approx_equal(u_cart_total [(0.1 0.1 0.1 0 0 0) (0 0 0 0 0 0) (1 1 1 1 1 1) (0 0 0 0 0 0) (1.1 1.1 1.1 1 1 1) (0.1 0.1 0.1 0 0 0) (1 1 1 1 1 1) (1.1 1.1 1.1 1 1 1) (0 0 0 0 0 0) (0 0 0 0 0 0)])<block_end><def_stmt>run <block_start>exercise_flex_miller_index()<line_sep>exercise_flex_hendrickson_lattman()<line_sep>exercise_flex_xray_scatterer()<line_sep>exercise_extract_u_cart_plus_u_iso()<line_sep>print("OK")<block_end><if_stmt>(__name__<eq>"__main__")<block_start>run()<block_end>
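These tests double as usage documentation; the fragment below pulls out the smallest of those round trips as a standalone snippet (it requires a working cctbx installation).

from cctbx.array_family import flex

indices = flex.miller_index([(1, 2, 3), (2, 3, 4)])
print(list(indices.as_vec3_double()))  # [(1.0, 2.0, 3.0), (2.0, 3.0, 4.0)]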
#! /usr/bin/env python # $Id: test___init__.py 5174 2007-05-31 00:01:52Z wiemann $ # Author: <NAME> <<EMAIL>> # Copyright: This module has been placed in the public domain. """ Test module for transforms/__init__.py. """<import_from_stmt>__init__ DocutilsTestSupport# must be imported before docutils <import_from_stmt>docutils transforms utils<import_stmt>unittest<class_stmt>TestTransform(transforms.Transform)<block_start>default_priority=100<line_sep>applied=0<def_stmt>apply self **kwargs<block_start>self.applied<augadd>1<assert_stmt>kwargs<eq>{'foo':42}<block_end><block_end><class_stmt>KwargsTestCase(unittest.TestCase)<block_start><def_stmt>test_kwargs self<block_start>transformer=transforms.Transformer(utils.new_document('test data'))<line_sep>transformer.add_transform(TestTransform foo=42)<line_sep>transformer.apply_transforms()<line_sep>self.assertEqual(len(transformer.applied) 1)<line_sep>self.assertEqual(len(transformer.applied[0]) 4)<line_sep>transform_record=transformer.applied[0]<line_sep>self.assertEqual(transform_record[1] TestTransform)<line_sep>self.assertEqual(transform_record[3] {'foo':42})<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
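The pattern this test verifies, sketched outside the test harness: keyword arguments handed to Transformer.add_transform() are stored with the pending transform and forwarded to its apply() call.

from docutils import transforms, utils

class ExampleTransform(transforms.Transform):
    default_priority = 500

    def apply(self, **kwargs):
        # kwargs is exactly what was passed to add_transform() below.
        assert kwargs == {'level': 2}

document = utils.new_document('example')
transformer = transforms.Transformer(document)
transformer.add_transform(ExampleTransform, level=2)
transformer.apply_transforms()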
# Copyright 2018 The pybadge Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for pybadges."""<import_stmt>base64<import_stmt>doctest<import_stmt>json<import_stmt>os.path<import_stmt>unittest<import_stmt>pathlib<import_stmt>sys<import_stmt>tempfile<import_stmt>xmldiff.main<import_stmt>pybadges<line_sep>TEST_DIR=os.path.dirname(__file__)<line_sep>PNG_IMAGE_B64=('iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91JpzAAAAD0lEQVQI12P4zw'<concat>'AD/xkYAA/+Af8iHnLUAAAAAElFTkSuQmCC')<line_sep>PNG_IMAGE=base64.b64decode(PNG_IMAGE_B64)<class_stmt>TestPybadgesBadge(unittest.TestCase)<block_start>"""Tests for pybadges.badge."""<def_stmt>test_docs self<block_start>doctest.testmod(pybadges optionflags=doctest.ELLIPSIS)<block_end><def_stmt>test_whole_link_and_left_link self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>pybadges.badge(left_text='foo' right_text='bar' left_link='http://example.com/' whole_link='http://example.com/')<block_end><block_end><def_stmt>test_changes self<block_start><with_stmt>open(os.path.join(TEST_DIR 'test-badges.json') 'r')<as>f<block_start>examples=json.load(f)<block_end><for_stmt>example examples<block_start>file_name=example.pop('file_name')<with_stmt>self.subTest(example=file_name)<block_start>filepath=os.path.join(TEST_DIR 'golden-images' file_name)<with_stmt>open(filepath mode="r" encoding="utf-8")<as>f<block_start>golden_image=f.read()<block_end>pybadge_image=pybadges.badge(**example)<line_sep>diff=xmldiff.main.diff_texts(golden_image pybadge_image)<line_sep>self.assertFalse(diff)<block_end><block_end><block_end><block_end><class_stmt>TestEmbedImage(unittest.TestCase)<block_start>"""Tests for pybadges._embed_image."""<def_stmt>test_data_url self<block_start>url='data:image/png;base64,'+PNG_IMAGE_B64<line_sep>self.assertEqual(url pybadges._embed_image(url))<block_end><def_stmt>test_http_url self<block_start>url='https://dev.w3.org/SVG/tools/svgweb/samples/svg-files/python.svg'<line_sep>self.assertRegex(pybadges._embed_image(url) r'^data:image/svg(\+xml)?;base64,')<block_end><def_stmt>test_not_image_url self<block_start><with_stmt>self.assertRaisesRegex(ValueError 'expected an image, got "text"')<block_start>pybadges._embed_image('http://www.google.com/')<block_end><block_end>@unittest.skipIf(sys.platform.startswith("win") "requires Unix filesystem")<def_stmt>test_svg_file_path self<block_start>image_path=os.path.abspath(os.path.join(TEST_DIR 'golden-images' 'build-failure.svg'))<line_sep>self.assertRegex(pybadges._embed_image(image_path) r'^data:image/svg(\+xml)?;base64,')<block_end>@unittest.skipIf(sys.platform.startswith("win") "requires Unix filesystem")<def_stmt>test_png_file_path self<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>png<block_start>png.write(PNG_IMAGE)<line_sep>png.flush()<line_sep>self.assertEqual(pybadges._embed_image(png.name) 'data:image/png;base64,'+PNG_IMAGE_B64)<block_end><block_end>@unittest.skipIf(sys.platform.startswith("win") "requires Unix filesystem")<def_stmt>test_unknown_type_file_path 
self<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>non_image<block_start>non_image.write(b'Hello')<line_sep>non_image.flush()<with_stmt>self.assertRaisesRegex(ValueError 'not able to determine file type')<block_start>pybadges._embed_image(non_image.name)<block_end><block_end><block_end>@unittest.skipIf(sys.platform.startswith("win") "requires Unix filesystem")<def_stmt>test_text_file_path self<block_start><with_stmt>tempfile.NamedTemporaryFile(suffix='.txt')<as>non_image<block_start>non_image.write(b'Hello')<line_sep>non_image.flush()<with_stmt>self.assertRaisesRegex(ValueError 'expected an image, got "text"')<block_start>pybadges._embed_image(non_image.name)<block_end><block_end><block_end><def_stmt>test_file_url self<block_start>image_path=os.path.abspath(os.path.join(TEST_DIR 'golden-images' 'build-failure.svg'))<with_stmt>self.assertRaisesRegex(ValueError 'unsupported scheme "file"')<block_start>pybadges._embed_image(pathlib.Path(image_path).as_uri())<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
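A minimal usage sketch of the API these tests exercise: pybadges.badge() returns the badge as an SVG string, which can be written straight to a file.

import pybadges

svg = pybadges.badge(left_text='coverage', right_text='94%')
with open('coverage-badge.svg', 'w') as f:
    f.write(svg)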
<import_from_stmt>datetime datetime timedelta<import_from_stmt>functools wraps<import_from_stmt>typing Any Callable Dict List Tuple<line_sep># NOTE: return flag = False to avoid caching # considers one optional parameter, no_cache # if true, bypass cache system, otherwise use normally <def_stmt>alru_cache max_size:int=128 ttl:timedelta=timedelta(minutes=1)<block_start><def_stmt>decorator func:Callable[<ellipsis> Any]<arrow>Any<block_start>cache:Dict[Any Tuple[datetime Any]]={}<line_sep>keys:List[Any]=[]<def_stmt>in_cache key:Any<arrow>bool# key not in cache <block_start><if_stmt>key<not><in>cache<block_start><return><false><block_end># key in cache but expired <if_stmt>datetime.now()-cache[key][0]<g>ttl<block_start><return><false><block_end># key in cache and not expired <return><true><block_end><def_stmt>update_cache_and_return key:Any flag:bool value:Any<arrow>Any# if flag = False, do not update cache and return value <block_start><if_stmt><not>flag<block_start><return>value<block_end># if flag = True, update cache now=datetime.now()<line_sep>cache[key]=(now value)<line_sep>keys.append(key)<line_sep># remove oldest key if cache is full <if_stmt>len(keys)<g>max_size<block_start><del_stmt>cache[keys.pop(0)]<block_end># return value from cache <return>cache[key][1]<block_end>@wraps(func)<async_keyword><def_stmt>wrapper *args:List[Any] **kwargs:Dict[str Any]<arrow>Any<block_start>key=tuple(args) frozenset({k:v<for>k,v kwargs.items()<if>k<not><in>["no_cache"]})<if_stmt>"no_cache"<in>kwargs<and>kwargs["no_cache"]<block_start>(flag value)=<await>func(*args **kwargs)<line_sep><return>update_cache_and_return(key flag value)<block_end><if_stmt>in_cache(key)<block_start><return>cache[key][1]<block_end>(flag value)=<await>func(*args **kwargs)<line_sep><return>update_cache_and_return(key flag value)<block_end><return>wrapper<block_end><return>decorator<block_end>
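A usage sketch of the decorator above. The wrapped coroutine must return a (flag, value) pair, where flag=False means the result should not be cached, and callers can pass no_cache=True to skip the cache lookup.

import asyncio
from datetime import timedelta

@alru_cache(max_size=32, ttl=timedelta(seconds=30))
async def fetch_user(user_id: int, no_cache: bool = False):
    data = {'id': user_id, 'name': f'user-{user_id}'}  # stand-in for real I/O
    return (True, data)  # True -> cache this value

async def main():
    print(await fetch_user(1))                 # computed, then cached
    print(await fetch_user(1))                 # served from the cache
    print(await fetch_user(1, no_cache=True))  # skips the cache lookup

asyncio.run(main())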
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_stmt>SimTracker.TrackAssociation.packedCandidatesGenAssociationDefault_cfi<as>_mod<line_sep>packedPFCandidateToGenAssociation=_mod.packedCandidatesGenAssociationDefault.clone(trackToGenAssoc="prunedTrackMCMatch" )<line_sep>lostTracksToGenAssociation=_mod.packedCandidatesGenAssociationDefault.clone(trackToGenAssoc="prunedTrackMCMatch" trackToPackedCandidatesAssoc="lostTracks")<line_sep>packedCandidateToGenAssociationTask=cms.Task(packedPFCandidateToGenAssociation lostTracksToGenAssociation)<line_sep>
# This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """The Numpy Minimum Eigensolver algorithm."""<import_from_stmt>typing List Optional Union Callable<import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>qiskit.opflow OperatorBase<import_from_stmt>..eigen_solvers.numpy_eigen_solver NumPyEigensolver<import_from_stmt>.minimum_eigen_solver MinimumEigensolver MinimumEigensolverResult ListOrDict<line_sep>logger=logging.getLogger(__name__)<class_stmt>NumPyMinimumEigensolver(MinimumEigensolver)<block_start>""" The Numpy Minimum Eigensolver algorithm. """<def_stmt>__init__ self filter_criterion:Callable[[Union[List np.ndarray] float Optional[ListOrDict[float]]] bool]=<none> <arrow><none><block_start>""" Args: filter_criterion: callable that allows to filter eigenvalues/eigenstates. The minimum eigensolver is only searching over feasible states and returns an eigenstate that has the smallest eigenvalue among feasible states. The callable has the signature `filter(eigenstate, eigenvalue, aux_values)` and must return a boolean to indicate whether to consider this value or not. If there is no feasible element, the result can even be empty. """<line_sep>self._ces=NumPyEigensolver(filter_criterion=filter_criterion)<line_sep>self._ret=MinimumEigensolverResult()<block_end>@property<def_stmt>filter_criterion self <arrow>Optional[Callable[[Union[List np.ndarray] float Optional[ListOrDict[float]]] bool]]<block_start>"""returns the filter criterion if set"""<line_sep><return>self._ces.filter_criterion<block_end>@filter_criterion.setter<def_stmt>filter_criterion self filter_criterion:Optional[Callable[[Union[List np.ndarray] float Optional[ListOrDict[float]]] bool]] <arrow><none><block_start>"""set the filter criterion"""<line_sep>self._ces.filter_criterion=filter_criterion<block_end>@classmethod<def_stmt>supports_aux_operators cls<arrow>bool<block_start><return>NumPyEigensolver.supports_aux_operators()<block_end><def_stmt>compute_minimum_eigenvalue self operator:OperatorBase aux_operators:Optional[ListOrDict[OperatorBase]]=<none><arrow>MinimumEigensolverResult<block_start>super().compute_minimum_eigenvalue(operator aux_operators)<line_sep>result_ces=self._ces.compute_eigenvalues(operator aux_operators)<line_sep>self._ret=MinimumEigensolverResult()<if_stmt>result_ces.eigenvalues<is><not><none><and>len(result_ces.eigenvalues)<g>0<block_start>self._ret.eigenvalue=result_ces.eigenvalues[0]<line_sep>self._ret.eigenstate=result_ces.eigenstates[0]<if_stmt>result_ces.aux_operator_eigenvalues<block_start>self._ret.aux_operator_eigenvalues=result_ces.aux_operator_eigenvalues[0]<block_end><block_end>logger.debug("MinimumEigensolver:\n%s" self._ret)<line_sep><return>self._ret<block_end><block_end>
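A usage sketch. The two-qubit Pauli sum below is an arbitrary example Hamiltonian built with qiskit.opflow, which this module already imports; the solver diagonalizes it classically.

from qiskit.opflow import I, X, Z

hamiltonian = (Z ^ Z) + 0.5 * (X ^ I)  # arbitrary example operator

solver = NumPyMinimumEigensolver()
result = solver.compute_minimum_eigenvalue(operator=hamiltonian)
print(result.eigenvalue)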
<import_from_stmt>django.contrib admin<import_from_stmt>.models BlogPost<line_sep>admin.site.register(BlogPost)<line_sep>
<import_from_future_stmt> print_function division<import_stmt>os<import_stmt>torch<import_from_stmt>torch.autograd Variable<import_from_stmt>skimage io<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>torch.utils.data Dataset<import_from_stmt>geotnf.transformation GeometricTnf<import_from_stmt>geotnf.flow read_flo_file<class_stmt>TSSDataset(Dataset)<block_start>""" TSS image pair dataset http://taniai.space/projects/cvpr16_dccs/ Args: csv_file (string): Path to the csv file with image names and annotation files. dataset_path (string): Directory with the images. output_size (2-tuple): Desired output size transform (callable): Transformation for post-processing the training pair (eg. image normalization) """<def_stmt>__init__ self csv_file dataset_path output_size=(240 240) transform=<none><block_start>self.out_h,self.out_w=output_size<line_sep>self.pairs=pd.read_csv(csv_file)<line_sep>self.img_A_names=self.pairs.iloc[: 0]<line_sep>self.img_B_names=self.pairs.iloc[: 1]<line_sep>self.flow_direction=self.pairs.iloc[: 2].values.astype('int')<line_sep>self.flip_img_A=self.pairs.iloc[: 3].values.astype('int')<line_sep>self.pair_category=self.pairs.iloc[: 4].values.astype('int')<line_sep>self.dataset_path=dataset_path<line_sep>self.transform=transform<line_sep># no cuda as dataset is called from CPU threads in dataloader and produces confilct self.affineTnf=GeometricTnf(out_h=self.out_h out_w=self.out_w use_cuda=<false>)<block_end><def_stmt>__len__ self<block_start><return>len(self.pairs)<block_end><def_stmt>__getitem__ self idx# get pre-processed images <block_start>flip_img_A=self.flip_img_A[idx]<line_sep>image_A,im_size_A=self.get_image(self.img_A_names idx flip_img_A)<line_sep>image_B,im_size_B=self.get_image(self.img_B_names idx)<line_sep># get flow output path flow_path=self.get_GT_flow_relative_path(idx)<line_sep>sample={'source_image':image_A 'target_image':image_B 'source_im_size':im_size_A 'target_im_size':im_size_B 'flow_path':flow_path}<line_sep># # get ground-truth flow # flow = self.get_GT_flow(idx) # sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'flow_GT': flow} <if_stmt>self.transform<block_start>sample=self.transform(sample)<block_end><return>sample<block_end><def_stmt>get_image self img_name_list idx flip=<false><block_start>img_name=os.path.join(self.dataset_path img_name_list[idx])<line_sep>image=io.imread(img_name)<line_sep># if grayscale convert to 3-channel image <if_stmt>image.ndim<eq>2<block_start>image=np.repeat(np.expand_dims(image 2) axis=2 repeats=3)<block_end># flip horizontally if needed <if_stmt>flip<block_start>image=np.flip(image 1)<block_end># get image size im_size=np.asarray(image.shape)<line_sep># convert to torch Variable image=np.expand_dims(image.transpose((2 0 1)) 0)<line_sep>image=torch.Tensor(image.astype(np.float32))<line_sep>image_var=Variable(image requires_grad=<false>)<line_sep># Resize image using bilinear sampling with identity affine tnf image=self.affineTnf(image_var).data.squeeze(0)<line_sep>im_size=torch.Tensor(im_size.astype(np.float32))<line_sep><return>(image im_size)<block_end><def_stmt>get_GT_flow self idx<block_start>img_folder=os.path.dirname(self.img_A_names[idx])<line_sep>flow_dir=self.flow_direction[idx]<line_sep>flow_file='flow'+str(flow_dir)+'.flo'<line_sep>flow_file_path=os.path.join(self.dataset_path img_folder 
flow_file)<line_sep>flow=torch.FloatTensor(read_flo_file(flow_file_path))<line_sep><return>flow<block_end><def_stmt>get_GT_flow_relative_path self idx<block_start>img_folder=os.path.dirname(self.img_A_names[idx])<line_sep>flow_dir=self.flow_direction[idx]<line_sep>flow_file='flow'+str(flow_dir)+'.flo'<line_sep>flow_file_path=os.path.join(img_folder flow_file)<line_sep><return>flow_file_path<block_end><block_end>
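A usage sketch with placeholder paths (the csv must have the five columns the constructor reads: image A, image B, flow direction, flip flag, and pair category). Batches come back as the dict built in __getitem__.

from torch.utils.data import DataLoader

dataset = TSSDataset(
    csv_file='datasets/tss/test_pairs.csv',  # placeholder path
    dataset_path='datasets/tss/',            # placeholder path
    output_size=(240, 240),
)
loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)

for batch in loader:
    src = batch['source_image']     # (1, 3, 240, 240) float tensor
    tgt = batch['target_image']
    flow_path = batch['flow_path']  # relative path of the ground-truth flow
    break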
<import_from_stmt>formiko.main main<line_sep>exit(main())<line_sep>
######### # Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. <import_from_stmt>manager_rest.deployment_update utils<import_from_stmt>manager_rest.storage models get_node<import_from_stmt>manager_rest.manager_exceptions UnknownModificationStageError<import_from_stmt>manager_rest.deployment_update.constants ENTITY_TYPES ACTION_TYPES<line_sep>OUTPUT_ENTITY_LEN=2<line_sep>WORKFLOW_ENTITY_LEN=2<line_sep>OPERATION_ENTITY_LEN=2<line_sep>PROPERTY_ENTITY_LEN=2<line_sep>RELATIONSHIP_ENTITY_LEN=4<line_sep>NODE_ENTITY_LEN=2<class_stmt>EntityValidatorBase(object)<block_start><def_stmt>__init__ self sm<block_start>self.sm=sm<line_sep>self._validation_mapper={ACTION_TYPES.ADD:self._validate_add ACTION_TYPES.MODIFY:self._validate_modify ACTION_TYPES.REMOVE:self._validate_remove}<block_end><def_stmt>validate self dep_update step<block_start><try_stmt><block_start>self._validate_entity(dep_update step)<block_end><except_stmt>UnknownModificationStageError<as>e<block_start>entity_identifier_msg="Entity type {0} with entity id {1}".format(step.entity_type step.entity_id)<line_sep>err_msg="{0}: {1}".format(entity_identifier_msg e.message)<line_sep><raise>UnknownModificationStageError(err_msg)<block_end><block_end><def_stmt>_validate_entity self dep_update step<block_start><raise>NotImplementedError<block_end><def_stmt>_in_old self *args **kwargs<block_start><raise>NotImplementedError<block_end><def_stmt>_in_new self *args **kwargs<block_start><raise>NotImplementedError<block_end><def_stmt>_validate_add self entity_id entity_type **kwargs<block_start><if_stmt><not>(self._in_new(**kwargs)<and><not>self._in_old(**kwargs))<block_start><raise>UnknownModificationStageError("The entity either doesn't exist in the deployment update "<concat>"blueprint or exists in the original deployment blueprint")<block_end><block_end><def_stmt>_validate_modify self entity_id entity_type **kwargs<block_start><if_stmt><not>(self._in_new(**kwargs)<and>self._in_old(**kwargs))<block_start><raise>UnknownModificationStageError("The entity either doesn't exist in the deployment update "<concat>"blueprint or it doesn't exists in the original deployment "<concat>"blueprint")<block_end><block_end><def_stmt>_validate_remove self entity_id entity_type **kwargs<block_start><if_stmt><not>(<not>self._in_new(**kwargs)<and>self._in_old(**kwargs))<block_start><raise>UnknownModificationStageError("The entity either exists in the deployment update blueprint "<concat>"or doesn't exists in the original deployment blueprint")<block_end><block_end><def_stmt>_get_storage_node self deployment_id node_id<block_start>node=get_node(deployment_id node_id)<line_sep><return>node.to_dict()<if>node<else>{}<block_end><block_end><class_stmt>NodeValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update 
step<block_start>entity_keys=utils.get_entity_keys(step.entity_id)<if_stmt>len(entity_keys)<ne>NODE_ENTITY_LEN<block_start><return><block_end>_,node_id=entity_keys<line_sep>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update node_id=node_id)<block_end><def_stmt>_in_old self dep_update node_id<block_start>storage_node=self._get_storage_node(dep_update.deployment_id node_id)<line_sep><return>bool(storage_node)<block_end><def_stmt>_in_new self dep_update node_id<block_start>raw_node=utils.get_raw_node(dep_update.deployment_plan node_id)<line_sep><return>bool(raw_node)<block_end><block_end><class_stmt>RelationshipValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update step<block_start>entity_keys=utils.get_entity_keys(step.entity_id)<if_stmt>len(entity_keys)<l>RELATIONSHIP_ENTITY_LEN<block_start><return><block_end>_,source_node_id,relationships,source_relationship_index=entity_keys[:RELATIONSHIP_ENTITY_LEN]<line_sep>target_relationship_index=entity_keys[RELATIONSHIP_ENTITY_LEN]<if>len(entity_keys)<g>RELATIONSHIP_ENTITY_LEN<else><none><line_sep># assert the index is indeed readable source_relationship_index=utils.parse_index(source_relationship_index)<line_sep>target_relationship_index=utils.parse_index(target_relationship_index)<if_stmt><not>(source_relationship_index<or>target_relationship_index)<block_start><return><block_end>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update source_node_id=source_node_id relationships=relationships source_relationship_index=source_relationship_index target_relationship_index=target_relationship_index)<block_end><def_stmt>_in_new self dep_update source_node_id relationships source_relationship_index target_relationship_index<block_start>source_node=utils.get_raw_node(dep_update.deployment_plan source_node_id)<if_stmt><not>(source_node<and>len(source_node[relationships])<g>source_relationship_index)<block_start><return><block_end>target_node_id=source_node[relationships][source_relationship_index]['target_id']<line_sep>raw_target_node=utils.get_raw_node(dep_update.deployment_plan target_node_id)<line_sep><return>raw_target_node<block_end><def_stmt>_in_old self dep_update source_node_id relationships source_relationship_index target_relationship_index<block_start>source_node=self._get_storage_node(dep_update.deployment_id source_node_id)<if_stmt><not>(source_node<and>len(source_node[relationships])<g>target_relationship_index)<block_start><return><block_end>target_node_id=source_node[relationships][target_relationship_index]['target_id']<line_sep>storage_target_node=self._get_storage_node(dep_update.deployment_id target_node_id)<line_sep><return>storage_target_node<block_end><block_end><class_stmt>PropertyValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update step<block_start>property_keys=utils.get_entity_keys(step.entity_id)<if_stmt>len(property_keys)<l>PROPERTY_ENTITY_LEN<block_start><return><block_end>_,node_id=property_keys[:PROPERTY_ENTITY_LEN]<line_sep>property_id=property_keys[PROPERTY_ENTITY_LEN:]<line_sep>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update node_id=node_id property_id=property_id)<block_end>@staticmethod<def_stmt>_in_new dep_update node_id property_id<block_start>raw_node=utils.get_raw_node(dep_update.deployment_plan 
node_id)<line_sep><return>utils.traverse_object(raw_node property_id)<is><not><none><block_end><def_stmt>_in_old self dep_update node_id property_id<block_start>storage_node=self._get_storage_node(dep_update.deployment_id node_id)<line_sep><return>utils.traverse_object(storage_node property_id)<is><not><none><block_end><block_end><class_stmt>OperationValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update step<block_start>operation_keys=utils.get_entity_keys(step.entity_id)<if_stmt>len(operation_keys)<l>OPERATION_ENTITY_LEN<block_start><return><block_end>_,node_id=operation_keys[:OPERATION_ENTITY_LEN]<line_sep>operation_id=operation_keys[OPERATION_ENTITY_LEN:]<line_sep>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update node_id=node_id operation_id=operation_id)<block_end><def_stmt>_in_new self dep_update node_id operation_id<block_start>raw_node=utils.get_raw_node(dep_update.deployment_plan node_id)<line_sep><return>utils.traverse_object(raw_node operation_id)<is><not><none><block_end><def_stmt>_in_old self dep_update node_id operation_id<block_start>storage_node=self._get_storage_node(dep_update.deployment_id node_id)<line_sep><return>utils.traverse_object(storage_node operation_id)<is><not><none><block_end><block_end><class_stmt>WorkflowValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update step<block_start>workflow_keys=utils.get_entity_keys(step.entity_id)<if_stmt>len(workflow_keys)<l>WORKFLOW_ENTITY_LEN<block_start><return><block_end>workflows=workflow_keys[0]<line_sep>workflow_id=workflow_keys[1:]<line_sep>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update workflow_id=workflow_id workflows=workflows)<block_end>@staticmethod<def_stmt>_in_new dep_update workflow_id workflows<block_start>raw_workflows=dep_update.deployment_plan[workflows]<line_sep><return>utils.traverse_object(raw_workflows workflow_id)<is><not><none><block_end><def_stmt>_in_old self dep_update workflow_id workflows<block_start>deployment=self.sm.get(models.Deployment dep_update.deployment_id)<line_sep>storage_workflows=deployment.workflows<or>{}<line_sep><return>utils.traverse_object(storage_workflows workflow_id)<is><not><none><block_end><block_end><class_stmt>OutputValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update step<block_start>output_keys=utils.get_entity_keys(step.entity_id)<if_stmt>len(output_keys)<l>OUTPUT_ENTITY_LEN<block_start><return><block_end>outputs=output_keys[0]<line_sep>output_id=output_keys[1:]<line_sep>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update output_id=output_id outputs=outputs)<block_end>@staticmethod<def_stmt>_in_new dep_update output_id outputs<block_start>raw_outputs=dep_update.deployment_plan[outputs]<line_sep><return>utils.traverse_object(raw_outputs output_id)<is><not><none><block_end><def_stmt>_in_old self dep_update output_id outputs<block_start>deployment=self.sm.get(models.Deployment dep_update.deployment_id)<line_sep>storage_outputs=deployment.outputs<or>{}<line_sep><return>utils.traverse_object(storage_outputs output_id)<is><not><none><block_end><block_end><class_stmt>DescriptionValidator(EntityValidatorBase)<block_start><def_stmt>_validate_entity self dep_update 
step<block_start>description_key=step.entity_id<line_sep>validate=self._validation_mapper[step.action]<line_sep><return>validate(step.entity_id step.entity_type dep_update=dep_update description_key=description_key)<block_end><def_stmt>_in_new self dep_update description_key<block_start>raw_description=dep_update.deployment_plan[description_key]<line_sep><return>bool(raw_description)<block_end><def_stmt>_in_old self dep_update description_key<block_start>deployment=self.sm.get(models.Deployment dep_update.deployment_id)<line_sep>storage_description=deployment.description<or>{}<line_sep><return>bool(storage_description)<block_end><block_end><class_stmt>StepValidator(object)<block_start><def_stmt>__init__ self sm<block_start>self._validation_mapper={ENTITY_TYPES.NODE:NodeValidator(sm) ENTITY_TYPES.RELATIONSHIP:RelationshipValidator(sm) ENTITY_TYPES.PROPERTY:PropertyValidator(sm) ENTITY_TYPES.OPERATION:OperationValidator(sm) ENTITY_TYPES.WORKFLOW:WorkflowValidator(sm) ENTITY_TYPES.OUTPUT:OutputValidator(sm) ENTITY_TYPES.DESCRIPTION:DescriptionValidator(sm)}<block_end><def_stmt>validate self dep_update step<block_start>""" Validate that an entity id of the provided type exists in the provided blueprint. Raises an error if the id doesn't exist. :param dep_update: the deployment update object. :param step: the deployment update step object :return: None """<if_stmt>step.entity_type<in>ENTITY_TYPES<block_start>self._validation_mapper[step.entity_type].validate(dep_update step)<block_end><block_end><block_end>
<import_from_stmt>jsonrpcserver method Result Success dispatch<import_from_stmt>werkzeug.serving run_simple<import_from_stmt>werkzeug.wrappers Request Response<line_sep>@method<def_stmt>ping <arrow>Result<block_start><return>Success("pong")<block_end>@Request.application<def_stmt>application request<block_start><return>Response(dispatch(request.data.decode()) 200 mimetype="application/json")<block_end><if_stmt>__name__<eq>"__main__"<block_start>run_simple("localhost" 5000 application)<block_end>
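With the server above running, any HTTP client can call it with a standard JSON-RPC 2.0 envelope; urllib keeps the example dependency-free.

import json
import urllib.request

payload = {'jsonrpc': '2.0', 'method': 'ping', 'id': 1}
req = urllib.request.Request(
    'http://localhost:5000/',
    data=json.dumps(payload).encode(),
    headers={'Content-Type': 'application/json'},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())  # {"jsonrpc": "2.0", "result": "pong", "id": 1}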
<import_stmt>json<import_stmt>logging<import_stmt>lzma<import_stmt>os<import_stmt>sqlite3<import_stmt>sys<import_stmt>time<import_stmt>urllib.request<import_from_stmt>pathlib Path<import_from_stmt>zipfile ZipFile<import_from_stmt>panoramix.utils.helpers COLOR_BLUE COLOR_BOLD COLOR_GRAY COLOR_GREEN COLOR_HEADER COLOR_OKGREEN COLOR_UNDERLINE COLOR_WARNING ENDC FAIL cache_dir cached opcode <line_sep>""" a module for management of bytes4 signatures from the database db schema: hash - 0x12345678 name - transferFrom folded_name - transferFrom(address,address,uint256) cooccurs - comma-dellimeted list of hashes: `0x12312312,0xabababab...` params - json: `[ { "type": "address", "name": "_from" }, { "type": "address", "name": "_to" }, { "type": "uint256", "name": "_value" } ]` """<line_sep>logger=logging.getLogger(__name__)<line_sep>conn=<none><def_stmt>supplements_path <block_start><return>cache_dir()/"supplement.db"<block_end><def_stmt>check_supplements <block_start>panoramix_supplements=supplements_path()<if_stmt><not>panoramix_supplements.is_file()<block_start>compressed_supplements=(Path(__file__).parent.parent/"data"/"supplement.db.xz")<line_sep>logger.info("Decompressing %s into %s..." compressed_supplements panoramix_supplements)<with_stmt>lzma.open(compressed_supplements)<as>inf panoramix_supplements.open("wb")<as>outf<block_start><while_stmt>(buf:=inf.read(1024<times>1024))<block_start>outf.write(buf)<block_end><block_end><block_end><assert_stmt>panoramix_supplements.is_file()<block_end><def_stmt>_cursor <block_start><global>conn<line_sep>check_supplements()<if_stmt>conn<is><none><block_start>conn=sqlite3.connect(supplements_path())<block_end># try: c=conn.cursor()<line_sep># except Exception: # # fails in multi-threading, this should help # conn = sqlite3.connect("supplement.db") # return conn.cursor() <return>c<block_end>@cached<def_stmt>fetch_sigs hash<block_start>c=_cursor()<line_sep>c.execute("SELECT * from functions where hash=?" (hash ))<line_sep>results=c.fetchall()<line_sep>res=[]<for_stmt>row results<block_start>res.append({"hash":row[0] "name":row[1] "folded_name":row[2] "params":json.loads(row[3]) "cooccurs":row[4].split(",") })<block_end><return>res<block_end>@cached<def_stmt>fetch_sig hash<block_start><if_stmt>type(hash)<eq>str<block_start>hash=int(hash 16)<block_end>hash="{:#010x}".format(hash)<line_sep>c=_cursor()<line_sep>c.execute("SELECT hash, name, folded_name, params, cooccurs from functions where hash=?" (hash ) )<line_sep>results=c.fetchall()<if_stmt>len(results)<eq>0<block_start><return><none><block_end># Take the one that cooccurs with the most things, it's probably the most relevant. row=max(results key=<lambda>row:len(row[4]))<line_sep><return>{"hash":hash "name":row[1] "folded_name":row[2] "params":json.loads(row[3]) }<block_end>""" Abi crawler and parser. used to refill supplement.py with new ABI/func definitions. It's used by scripts that are not a part of panoramix repo. The function is here, so people wanting to parse ABIs on their own can use parse_insert_abi implementation as a reference. It handles some unobvious edge-cases, like arrays of tuples. """<def_stmt>crawl_abis_from_cache # imports here, because this is not used as a part of a regular panoramix run, # and we don't want to import stuff unnecessarily. 
<block_start><import_stmt>json<import_stmt>os<import_stmt>re<import_stmt>sqlite3<import_stmt>sys<import_stmt>time<import_stmt>urllib<import_stmt>urllib.request<try_stmt><block_start><import_from_stmt>web3 Web3<block_end><except_stmt>Exception<block_start>print("install web3:\n\t`pip install web3`")<block_end># the only dependency in the project :D conn=sqlite3.connect("supplement.db")<line_sep>cursor=conn.cursor()<line_sep>conn2=sqlite3.connect("supp2.db")<line_sep>cursor2=conn2.cursor()<def_stmt>parse_insert_abi abi<block_start><def_stmt>parse_inputs func_inputs<block_start>inputs=[]<line_sep>params=[]<line_sep>param_counter=0<for_stmt>r func_inputs<block_start>param_counter<augadd>1<line_sep>type_=r["type"]<line_sep>name_=r["name"]<if_stmt>len(name_)<eq>0<block_start>name_="param"+str(param_counter)<block_end><if_stmt>name_[0]<ne>"_"<block_start>name_="_"+name_<block_end>params.append({"type":r["type"] "name":name_})<if_stmt>"tuple"<not><in>type_<block_start>inputs.append(type_)<block_end><else_stmt><block_start>type_=f"({parse_inputs(r['components'])[0]})"+type_[5:]<line_sep>inputs.append(type_)<block_end><block_end><return>",".join(inputs) params<block_end>output={}<for_stmt>func abi<block_start><if_stmt>func["type"]<in>["constructor" "fallback"]<block_start><continue><block_end>inputs,params=parse_inputs(func["inputs"])<line_sep>fname=f"{func['name']}({inputs})"<line_sep>sha3=Web3.sha3(text=fname).hex()[:10]<if_stmt>sha3<in>output<block_start>print("double declaration for the same hash! {}".format(fname))<line_sep><continue><block_end>output[sha3]={"name":func["name"] "folded_name":fname "params":params }<block_end><for_stmt>sha3,row output.items()<block_start>row["cooccurs"]=list(output.keys())<line_sep>insert_row=(sha3 row["name"] row["folded_name"] json.dumps(row["params"]) ",".join(row["cooccurs"]) )<line_sep>insert_row2=(int(sha3 16) row["name"] row["folded_name"] json.dumps(row["params"]) )<line_sep>test_hash,test_cooccurs=insert_row[0] insert_row[4]<line_sep>cursor.execute("SELECT * from functions where hash=? and cooccurs=?" (test_hash test_cooccurs) )<line_sep>results=cursor.fetchall()<if_stmt>len(results)<eq>0<block_start>print("inserting" sha3 row["folded_name"])<line_sep>cursor.execute("INSERT INTO functions VALUES (?, ?, ?, ?, ?)" insert_row)<line_sep>conn.commit()<block_end>cursor2.execute("SELECT * from functions where hash=?" (insert_row2[0] ))<line_sep>results=cursor2.fetchall()<if_stmt>len(results)<eq>0<block_start>print("inserting2" sha3 row["folded_name"])<line_sep>cursor2.execute("INSERT INTO functions VALUES (?, ?, ?, ?)" insert_row2)<line_sep>conn2.commit()<block_end><block_end><block_end><def_stmt>crawl_cache <block_start>idx=0<line_sep>path="./cache_abis/"<if_stmt><not>os.path.isdir(path)<block_start>print("dir cache_abis doesn't exist. it should be there and it should contain abi files")<line_sep><return><block_end><for_stmt>fname os.listdir(path)<block_start>address=fname[:-4]<line_sep>fname=path+fname<line_sep>idx<augadd>1<line_sep>print(idx address)<with_stmt>open(fname)<as>f<block_start>abi=json.loads(f.read())<line_sep>parse_insert_abi(abi)<block_end><block_end><block_end>crawl_cache()<block_end>
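A lookup sketch for the public half of this module, assuming fetch_sig defined above is in scope. 0xa9059cbb is the canonical selector for transfer(address,uint256); the printed values depend on what the bundled supplement database actually contains, hence the "e.g." comments.

sig = fetch_sig(0xA9059CBB)
if sig is not None:
    print(sig['folded_name'])                  # e.g. transfer(address,uint256)
    print([p['type'] for p in sig['params']])  # e.g. ['address', 'uint256']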
# # Copyright 2014+ Carnegie Mellon University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>gzip bz2<import_stmt>json<import_stmt>re<import_stmt>os<import_stmt>tempfile<import_from_stmt>flexneuart.config DOCID_FIELD<def_stmt>create_temp_file <block_start>""""Create a temporary file :return temporary file name """<line_sep>f,file_name=tempfile.mkstemp()<line_sep>os.close(f)<line_sep><return>file_name<block_end><class_stmt>FileWrapper<block_start><def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__init__ self file_name flags='r'<block_start>"""Constructor, which opens a regular or gzipped-file :param file_name a name of the file, it has a '.gz' or '.bz2' extension, we open a compressed stream. :param flags open flags such as 'r' or 'w' """<line_sep>dir_name=os.path.dirname(file_name)<if_stmt>dir_name<block_start>os.makedirs(dir_name exist_ok=<true>)<block_end><if_stmt>file_name.endswith('.gz')<block_start>self._file=gzip.open(file_name flags)<line_sep>self._is_compr=<true><block_end><elif_stmt>file_name.endswith('.bz2')<block_start>self._file=bz2.open(file_name flags)<line_sep>self._is_compr=<true><block_end><else_stmt><block_start>self._file=open(file_name flags)<line_sep>self._is_compr=<false><block_end><block_end><def_stmt>write self s<block_start><if_stmt>self._is_compr<block_start>self._file.write(s.encode())<block_end><else_stmt><block_start>self._file.write(s)<block_end><block_end><def_stmt>read self qty<block_start><if_stmt>self._is_compr<block_start><return>self._file.read(qty).decode()<block_end><else_stmt><block_start><return>self._file.read(qty)<block_end><block_end><def_stmt>close self<block_start>self._file.close()<block_end><def_stmt>__exit__ self type value tb<block_start>self._file.close()<block_end><def_stmt>__iter__ self<block_start><for_stmt>line self._file<block_start><yield>line.decode()<if>self._is_compr<else>line<block_end><block_end><block_end><def_stmt>jsonl_gen file_name<block_start>"""A generator that produces parsed doc/query entries one by one. :param file_name: an input file name """<with_stmt>FileWrapper(file_name)<as>f<block_start><for_stmt>i,line enumerate(f)<block_start>ln=i+1<line_sep>line=line.strip()<if_stmt><not>line<block_start><continue><block_end><try_stmt><block_start>data=json.loads(line)<block_end><except_stmt><block_start><raise>Exception('Error parsing JSON in line: %d'%ln)<block_end><if_stmt><not>DOCID_FIELD<in>data<block_start><raise>Exception('Missing %s field in JSON in line: %d'%(DOCID_FIELD ln))<block_end><yield>data<block_end><block_end><block_end><def_stmt>multi_file_linegen dir_name pattern<block_start>"""A generator that reads all files from a given directory matching the pattern and yields their contents line by line. 
:param dir_name: a source directory name :param pattern: a pattern that file names must match fully (we use fullmatch) """<for_stmt>fn os.listdir(dir_name)<block_start><if_stmt>re.fullmatch(pattern fn)<block_start>full_path=os.path.join(dir_name fn)<line_sep>print('Processing: '+full_path)<with_stmt>FileWrapper(full_path)<as>inp<block_start><for_stmt>line inp<block_start><yield>line<block_end><block_end><block_end><block_end><block_end>
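A short usage sketch for the helpers above, assuming FileWrapper, jsonl_gen and the DOCID_FIELD constant from this module are in scope; the output file name is arbitrary and chosen only for the example:

import json

# Write a tiny gzipped JSONL file with FileWrapper, then stream it back
# with jsonl_gen. Using DOCID_FIELD as the key means every entry passes
# the generator's sanity check.
with FileWrapper('example_docs.jsonl.gz', 'w') as out:
    for i in range(3):
        out.write(json.dumps({DOCID_FIELD: 'doc%d' % i, 'text': 'body %d' % i}) + '\n')

for entry in jsonl_gen('example_docs.jsonl.gz'):
    print(entry[DOCID_FIELD], entry['text'])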
""" This File implements the data reader for the FBMS59 Dataset. See the file davis2016_data_utils.py for a more detailed documentation of the functions. The main difference with respect to DAVIS2016 is the fact that the data reader returns the number of images per category (used to explain away the large class imbalance of this dataset in score computation). After the first use, you can speed up the code by commenting pre-processing away (See line 109). """<import_stmt>numpy<as>np<import_stmt>os<import_stmt>cv2<import_stmt>re<import_stmt>tensorflow<as>tf<import_from_stmt>data.aug_flips random_flip_images<class_stmt>DirectoryIterator(object)<block_start>""" Class for managing data loading.of images and labels We assume that the folder structure is: """<def_stmt>__init__ self directory part='train' for_testing=<false> test_temporal_t=1<block_start>self.directory=directory<line_sep>self.num_experiments=0<line_sep>self.samples_per_cat={}<line_sep>parsing_dir={'train':['Trainingset'] 'val':['Testset'] 'trainval':['Trainingset' 'Testset']}<line_sep>data_dirs=[os.path.join(directory d)<for>d parsing_dir.get(part)]<for_stmt>d data_dirs<block_start><if_stmt><not>os.path.isdir(d)<block_start><raise>IOError("Directory {} file not found".format(d))<block_end><block_end># First count how many experiments are out there self.samples=0<line_sep># Each filename is a tuple image / components self.image_filenames=[]<line_sep>self.annotation_filenames=[]<for_stmt>d data_dirs<block_start><if_stmt>for_testing<block_start>self._parse_testtime_dir(d test_temporal_t)<block_end><else_stmt><block_start>self._parse_data_dir(d)<block_end><block_end><if_stmt>self.samples<eq>0<block_start><raise>IOError("Did not find any file in the dataset folder")<block_end><if_stmt><not>for_testing<block_start>self.num_experiments=len(self.image_filenames)<block_end>print('Found {} images belonging to {} experiments.'.format(self.samples self.num_experiments))<block_end><def_stmt>_parse_data_dir self data_dir<block_start>""" This function will read all the files in data_dir and return a list of lists containing the different fnames for each category. """<line_sep>categories=os.listdir(data_dir)<for_stmt>folder_name categories<block_start>all_fnames_list_fname=os.path.join(data_dir folder_name folder_name+".bmf")<if_stmt><not>os.path.isfile(all_fnames_list_fname)<block_start><raise>IOError("Not found file {}".format(all_fnames_list_fname))<block_end>all_fnames_list=np.loadtxt(all_fnames_list_fname dtype=np.str skiprows=1)<line_sep># Correct from pgm to jpg all_fnames_list=[f.split('.')[0]+'.jpg'<for>f all_fnames_list]<line_sep>all_fnames_list=[os.path.join(data_dir folder_name f)<for>f all_fnames_list]<line_sep>self.samples<augadd>len(all_fnames_list)<line_sep># Append the last self.image_filenames.append(all_fnames_list)<block_end><block_end><def_stmt>_parse_testtime_dir self data_dir test_temporal_t=1<block_start>""" This function will read all the files in data_dir and return a list of lists containing the different fnames for each category. 
"""<line_sep>self.test_tuples=[]<line_sep>categories=os.listdir(data_dir)<for_stmt>folder_name categories<block_start>all_fnames_list_fname=os.path.join(data_dir folder_name folder_name+".bmf")<if_stmt><not>os.path.isfile(all_fnames_list_fname)<block_start><raise>IOError("Not found file {}".format(all_fnames_list_fname))<block_end>all_fnames_list=np.loadtxt(all_fnames_list_fname dtype=np.str skiprows=1)<line_sep># Correct from pgm to jpg all_fnames_list=[f.split('.')[0]+'.jpg'<for>f all_fnames_list]<line_sep>all_fnames_list=[os.path.join(data_dir folder_name f)<for>f all_fnames_list]<line_sep># Get ground_truth annotation_fnames,numbers,type_weird=self.find_gt(os.path.join(data_dir folder_name 'GroundTruth'))<line_sep>goal_annotation_fnames=[f.split('.')[0]+'.jpg'<for>f annotation_fnames]<line_sep>goal_annotation_fnames=[os.path.join(data_dir folder_name 'GroundTruth' f)<for>f goal_annotation_fnames]<line_sep># NOTE: Run the commented part only once to preprocess GT annotation_fnames=[os.path.join(data_dir folder_name 'GroundTruth' f)<for>f annotation_fnames]<for_stmt>i range(len(goal_annotation_fnames))<block_start>mask=cv2.imread(annotation_fnames[i])<line_sep>mask=cv2.cvtColor(mask cv2.COLOR_BGR2GRAY)<line_sep>mask=mask/255.0<if_stmt>type_weird<block_start>mask[mask<g>0.99]=0.0<block_end><if_stmt>'marple7'<eq>folder_name<block_start>mask=mask<g>0.05<block_end><elif_stmt>'marple2'<eq>folder_name<block_start>mask=mask<g>0.4<block_end><else_stmt><block_start>mask=mask<g>0.1<block_end>mask=np.asarray(mask<times>255 dtype=np.uint8)<line_sep>cv2.imwrite(goal_annotation_fnames[i] mask)<block_end># Create offsets numbers=np.array(numbers)-np.min(numbers)<line_sep>seq_len=np.max(numbers)<line_sep>offsets=numbers+test_temporal_t<if_stmt>offsets[0]<l>numbers[0]# test was negative, needs to increase: <block_start>offsets[0]<augadd>2<times>abs(test_temporal_t)<block_end><if_stmt>offsets[-1]<g>numbers[-1]# test was positive, needs to decrease: <block_start>offsets[-1]<augsub>2<times>abs(test_temporal_t)<block_end><for_stmt>i range(len(offsets))<block_start>offsets[i]=np.maximum(offsets[i] 0)<line_sep>offsets[i]=np.minimum(offsets[i] seq_len)<block_end><for_stmt>i,k enumerate(numbers)<block_start>self.test_tuples.append((all_fnames_list[k] all_fnames_list[offsets[i]] goal_annotation_fnames[i] "{}".format(len(annotation_fnames))))<block_end>self.samples<augadd>len(annotation_fnames)<line_sep>self.samples_per_cat[folder_name]=len(annotation_fnames)<line_sep>self.num_experiments<augadd>1<block_end><block_end><def_stmt>find_gt self directory<block_start>all_files=os.listdir(directory)<line_sep># Check in which kind of folder you are type_weird=<false><for_stmt>file all_files<block_start><if_stmt>file.endswith('ppm')<block_start>type_weird=<true><line_sep><break><block_end><block_end><if_stmt><not>type_weird<block_start>all_files=[file<for>file all_files<if>file.endswith('pgm')]<line_sep># Sort them <try_stmt><block_start>all_files=sorted(all_files key=<lambda>x:int(x.split('.')[0].split('_')[-1]))<line_sep>numbers=[int(file.split('.')[0].split('_')[-1])<for>file all_files]<block_end><except_stmt><block_start>all_files=sorted(all_files key=<lambda>x:int(re.search(r'\d+' x).group()))<line_sep>numbers=[int(re.search(r'\d+' file).group())<for>file all_files]<block_end><return>all_files numbers type_weird<block_end># Solve weird type all_files=[file<for>file all_files<if>file.endswith('ppm')<and><not>'PROB'<in>file]<line_sep>all_files=sorted(all_files 
key=<lambda>x:int(x.split('_')[1]))<line_sep>numbers=[int(file.split('_')[1])<for>file all_files]<line_sep><return>all_files numbers type_weird<block_end><block_end><class_stmt>FBMS59Reader(object)<block_start><def_stmt>__init__ self root_dir max_temporal_len=3 min_temporal_len=2 num_threads=6<block_start>self.root_dir=root_dir<line_sep>self.max_temporal_len=max_temporal_len<line_sep>self.min_temporal_len=min_temporal_len<assert_stmt>min_temporal_len<l>max_temporal_len "Temporal lenghts are not consistent"<assert_stmt>min_temporal_len<g>0 "Min temporal len should be positive"<line_sep>self.num_threads=num_threads<block_end><def_stmt>get_filenames_list self partition<block_start>iterator=DirectoryIterator(self.root_dir partition)<line_sep>filenames,annotation_filenames=iterator.image_filenames iterator.annotation_filenames<line_sep>#Training calls it before, so it will be overwritten self.val_samples=iterator.samples<line_sep><return>filenames annotation_filenames<block_end><def_stmt>get_test_tuples self partition test_temporal_t=1<block_start>iterator=DirectoryIterator(self.root_dir partition for_testing=<true> test_temporal_t=test_temporal_t)<line_sep>test_tuples=iterator.test_tuples<line_sep>#Training calls it before, so it will be overwritten self.val_samples=iterator.samples<line_sep>self.samples_per_cat=iterator.samples_per_cat<line_sep>self.num_categories=len(iterator.samples_per_cat.keys())<line_sep><return>test_tuples<block_end><def_stmt>preprocess_image self img<block_start>orig_width=640<line_sep>orig_height=384<line_sep>img=(tf.cast(img tf.float32)/tf.constant(255.0))-0.5<line_sep>img=tf.image.resize_images(img [orig_height orig_width])<line_sep><return>img<block_end><def_stmt>preprocess_mask self mask<block_start>orig_width=640<line_sep>orig_height=384<line_sep>mask=(tf.cast(mask tf.float32)/tf.constant(255.0))<line_sep>mask=tf.image.resize_images(mask [orig_height orig_width] method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)<line_sep><return>mask<block_end><def_stmt>random_crop_image_pair self image_1 image_2 max_cropping_percent=0.9<block_start>''' Produces an (equal) random crop for image_1 and image_2 that is at minimum max_cropping_percent smaller than the original image. 
The resulting patch is then reshaped to original size '''<line_sep>rand=tf.random_uniform(shape=[] minval=0 maxval=1 dtype=tf.float32)<line_sep>cropping_percent=max_cropping_percent+rand<times>(1-max_cropping_percent)<line_sep>image_width=image_1.get_shape().as_list()[1]<line_sep>image_height=image_1.get_shape().as_list()[0]<line_sep>num_channels=image_1.get_shape().as_list()[2]<line_sep>crop_width=tf.cast(image_width<times>cropping_percent tf.int32)<line_sep>crop_height=tf.cast(image_height<times>cropping_percent tf.int32)<line_sep>image_c=tf.concat((image_1 image_2) axis=-1)<line_sep>image_c=tf.random_crop(image_c size=[crop_height crop_width num_channels<times>2])<line_sep>image_c.set_shape([<none> <none> num_channels<times>2])<line_sep># Resize image_c=tf.image.resize_images(image_c [image_height image_width])<line_sep>image_1=image_c[: : :3]<line_sep>image_2=image_c[: : 3:6]<line_sep><return>image_1 image_2<block_end><def_stmt>central_cropping self img cropping_percent<block_start>orig_height,orig_width=img.get_shape().as_list()[0:2]<line_sep>img=tf.image.central_crop(img cropping_percent)<line_sep>img=tf.image.resize_images(img [orig_height orig_width])<line_sep><return>img<block_end><def_stmt>augment_pair self image_1 image_2# Random flips <block_start>image_1,image_2=random_flip_images(image_1 image_2)<line_sep>image_1,image_2=self.random_crop_image_pair(image_1 image_2 self.train_crop)<line_sep><return>image_1 image_2<block_end><def_stmt>dataset_map self input_queue<block_start>fname_number,direction=input_queue[0] input_queue[1]<line_sep># Take care with the casting when sampling!! t_shift=tf.random_uniform(shape=[] minval=self.min_temporal_len maxval=self.max_temporal_len+1 dtype=tf.int32)<line_sep>t_shift=tf.cast(t_shift dtype=tf.float32)<line_sep>img2_fname_number=t_shift<times>direction+fname_number<line_sep># Conversions fname_number=tf.cast(fname_number dtype=tf.int32)<line_sep>img2_fname_number=tf.cast(img2_fname_number dtype=tf.int32)<line_sep># Reading fname_1=tf.gather(self.filenames fname_number)<line_sep>fname_2=tf.gather(self.filenames img2_fname_number)<line_sep>file_content=tf.read_file(fname_1)<line_sep>image_1=tf.image.decode_jpeg(file_content channels=3)<line_sep>image_1=self.preprocess_image(image_1)<line_sep>file_content=tf.read_file(fname_2)<line_sep>image_2=tf.image.decode_jpeg(file_content channels=3)<line_sep>image_2=self.preprocess_image(image_2)<line_sep># Data augmentation image_1,image_2=self.augment_pair(image_1 image_2)<line_sep><return>image_1 image_2<block_end><def_stmt>image_inputs self batch_size=32 partition='train' train_crop=1.0# Generates input batches for FBMS dataset. <block_start>t_len=self.max_temporal_len<line_sep>file_list,_=self.get_filenames_list(partition)<line_sep>self.train_crop=train_crop<line_sep># Accumulates subsequent filenames, and makes a dataset with # end-points. 
N=0<line_sep>last_fname_numbers=[]# Will be used to calculate flow backward first_fname_numbers=[]# Will be used to calculate flow forward <for_stmt>fnames file_list<block_start>last_fname_numbers.append(np.arange(N+t_len N+len(fnames) dtype=np.int32))<line_sep>first_fname_numbers.append(np.arange(N N+len(fnames)-t_len dtype=np.int32))<line_sep>N<augadd>len(fnames)<block_end>self.filenames=np.concatenate(file_list)<line_sep>last_fname_numbers=np.concatenate(last_fname_numbers)<line_sep>last_fname_numbers=np.vstack((last_fname_numbers -1.0<times>np.ones_like(last_fname_numbers))).T<line_sep>first_fname_numbers=np.concatenate(first_fname_numbers)<line_sep>first_fname_numbers=np.vstack((first_fname_numbers 1.0<times>np.ones_like(first_fname_numbers))).T<line_sep>all_fname_numbers=np.vstack((first_fname_numbers last_fname_numbers))<line_sep>all_fname_numbers=np.asarray(all_fname_numbers dtype=np.float32)<line_sep>np.random.shuffle(all_fname_numbers)<line_sep># Form training batches dataset=tf.data.Dataset.from_tensor_slices(all_fname_numbers)<line_sep>dataset=dataset.shuffle(buffer_size=all_fname_numbers.shape[0] reshuffle_each_iteration=<true>)<line_sep>dataset=dataset.repeat(<none>)<line_sep>dataset=dataset.map(self.dataset_map num_parallel_calls=self.num_threads)<line_sep>dataset=dataset.batch(batch_size drop_remainder=<true>)<line_sep>dataset=dataset.prefetch(buffer_size=3<times>batch_size)<line_sep>iterator=dataset.make_initializable_iterator()<line_sep>img1s,img2s=iterator.get_next()<line_sep># Extra arguments returned for compatibility with test functions. <return>(img1s img2s tf.constant(1.0) 'f' 1.0) iterator<block_end><def_stmt>test_inputs self batch_size=32 partition='val' t_len=2 with_fname=<false> test_crop=1.0# Reads test inputs data # The main difference with Davis2016 consists in retuning # the number of elements per category. 
<block_start>test_tuples=self.get_test_tuples(partition t_len)<line_sep>self.test_crop=test_crop<line_sep>self.num_threads=1<line_sep># Form training batches dataset=tf.data.Dataset.from_tensor_slices(test_tuples)<line_sep>dataset=dataset.repeat(<none>)<line_sep>dataset=dataset.map(self.test_dataset_map num_parallel_calls=1)<line_sep>dataset=dataset.batch(batch_size drop_remainder=<false>)<line_sep>dataset=dataset.prefetch(buffer_size=3<times>batch_size)<line_sep>iterator=dataset.make_initializable_iterator()<line_sep>img1s,img2s,seg1s,fnames,samples_per_cat=iterator.get_next()<if_stmt>with_fname<block_start><return>(img1s img2s seg1s fnames samples_per_cat) iterator<block_end><return>(img1s img2s seg1s samples_per_cat) iterator<block_end><def_stmt>test_dataset_map self input_queue<block_start>fname_1,fname_2,annotation_fname,samples_per_cat=input_queue[0] input_queue[1] input_queue[2] input_queue[3]<line_sep>samples_per_cat=tf.string_to_number(samples_per_cat)<line_sep>file_content=tf.read_file(fname_1)<line_sep>image_1=tf.image.decode_jpeg(file_content channels=3)<line_sep>image_1=self.preprocess_image(image_1)<line_sep>file_content=tf.read_file(fname_2)<line_sep>image_2=tf.image.decode_jpeg(file_content channels=3)<line_sep>image_2=self.preprocess_image(image_2)<line_sep>file_content=tf.read_file(annotation_fname)<line_sep>seg_1=tf.image.decode_jpeg(file_content channels=1)<line_sep>seg_1=self.preprocess_mask(seg_1)<line_sep># Cropping preprocess image_1=self.central_cropping(image_1 self.test_crop)<line_sep>image_2=self.central_cropping(image_2 self.test_crop)<line_sep>seg_1=self.central_cropping(seg_1 self.test_crop)<line_sep><return>image_1 image_2 seg_1 fname_1 samples_per_cat<block_end><def_stmt>augmented_inputs self partition='val' t_len=2 test_crops=[1.0]<block_start>(img_1 img_2 seg_1 fname _),itr=self.test_inputs(batch_size=1 t_len=t_len partition=partition with_fname=<true> test_crop=1.0)<line_sep>img_1=tf.squeeze(img_1 axis=0)<line_sep>img_2=tf.squeeze(img_2 axis=0)<line_sep>seg_1=tf.squeeze(seg_1 axis=0)<line_sep>batch_dict={'img_1s':{} 'img_2s':{} 'seg_1s':{}}<for_stmt>crop test_crops<block_start>cropped_img_1=self.central_cropping(img_1 cropping_percent=crop)<line_sep>cropped_img_2=self.central_cropping(img_2 cropping_percent=crop)<line_sep>cropped_seg_1=self.central_cropping(seg_1 cropping_percent=crop)<line_sep>batch_dict['seg_1s'][crop]=cropped_seg_1<line_sep>batch_dict['img_1s'][crop]=cropped_img_1<line_sep>batch_dict['img_2s'][crop]=cropped_img_2<block_end><return>batch_dict fname itr<block_end><block_end>
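The trickiest part of the test-time parsing above is how each annotated frame k is paired with frame k + test_temporal_t while staying inside the sequence. Below is a small NumPy-only restatement of that clamping logic on toy indices, equivalent to the np.maximum/np.minimum loop in _parse_testtime_dir:

import numpy as np

numbers = np.array([0, 1, 2, 3, 4, 5])   # toy annotated frame indices
test_temporal_t = 1
seq_len = numbers.max()

offsets = numbers + test_temporal_t
if offsets[0] < numbers[0]:    # a negative shift fell before the first frame
    offsets[0] += 2 * abs(test_temporal_t)
if offsets[-1] > numbers[-1]:  # a positive shift fell past the last frame
    offsets[-1] -= 2 * abs(test_temporal_t)
offsets = np.clip(offsets, 0, seq_len)

print([(int(a), int(b)) for a, b in zip(numbers, offsets)])
# [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 4)]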
# Configuration file for the Sphinx documentation builder. # Documentation: # http://www.sphinx-doc.org/en/master/config <import_stmt>os<import_stmt>sys<import_stmt>subprocess<line_sep># -- Path setup -------------------------------------------------------------- _ROOT=os.path.join('..' '..')<line_sep>sys.path.append(os.path.abspath(os.path.join(_ROOT 'src')))<line_sep># -- Project information ----------------------------------------------------- project='GOG Galaxy Integrations API'<line_sep>copyright='2019, GOG.com'<line_sep>_author,_version=subprocess.check_output(['python' os.path.join(_ROOT 'setup.py') '--author' '--version'] universal_newlines=<true>).strip().split('\n')<line_sep>author=_author<line_sep>version=_version<line_sep>release=_version<line_sep># -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions=['sphinx.ext.autodoc' 'sphinxcontrib.asyncio' 'sphinx_autodoc_typehints' 'm2r2'# mdinclude directive for makrdown files ]<line_sep>autodoc_member_order='bysource'<line_sep>autodoc_inherit_docstrings=<false><line_sep>autodoc_mock_imports=["aiohttp"]<line_sep>set_type_checking_flag=<true><line_sep># Add any paths that contain templates here, relative to this directory. templates_path=['_templates']<line_sep># List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns=[]# type: ignore # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme="sphinx_rtd_theme"<line_sep>html_theme_options={# 'canonical_url': '', # main page to be serach in google with trailing slash 'display_version':<true> 'style_external_links':<true> # Toc options 'collapse_navigation':<false> 'sticky_navigation':<true> 'navigation_depth':4 }<line_sep># Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ["_static"] master_doc='index'<line_sep>
<import_stmt>dcase_util<line_sep># Metadata meta=dcase_util.containers.MetaDataContainer([{'filename':'test1.wav' 'event_label':'cat' 'onset':1.0 'offset':3.0} {'filename':'test1.wav' 'event_label':'dog' 'onset':2.0 'offset':6.0} {'filename':'test1.wav' 'event_label':'speech' 'onset':5.0 'offset':8.0} ])<line_sep># Initialize encoder event_roll_encoder=dcase_util.data.EventRollEncoder(label_list=meta.unique_event_labels time_resolution=0.02)<line_sep># Encode event_roll=event_roll_encoder.encode(metadata_container=meta length_seconds=10.0)<line_sep># Visualize event_roll.plot()<line_sep>
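For readers without dcase_util installed, the event roll produced above is essentially a binary time/label matrix. A rough NumPy equivalent for the same three events (dcase_util's encoder additionally handles rounding and framing details):

import numpy as np

labels = ['cat', 'dog', 'speech']
time_resolution = 0.02                 # seconds per frame
length_seconds = 10.0
events = [('cat', 1.0, 3.0), ('dog', 2.0, 6.0), ('speech', 5.0, 8.0)]

frames = int(round(length_seconds / time_resolution))
roll = np.zeros((frames, len(labels)))
for label, onset, offset in events:
    start = int(round(onset / time_resolution))
    stop = int(round(offset / time_resolution))
    roll[start:stop, labels.index(label)] = 1

print(roll.shape)         # (500, 3)
print(roll.sum(axis=0))   # [100. 200. 150.] -> 2 s, 4 s and 3 s of activity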
""" Definition of the Session class. """<import_stmt>re<import_stmt>sys<import_stmt>time<import_stmt>json<import_stmt>base64<import_stmt>random<import_stmt>hashlib<import_stmt>asyncio<import_stmt>weakref<import_stmt>datetime<import_from_stmt>http.cookies SimpleCookie<import_from_stmt>..event._component new_type<import_from_stmt>._component2 PyComponent JsComponent AppComponentMeta<import_from_stmt>._asset Asset Bundle solve_dependencies<import_from_stmt>._assetstore AssetStore INDEX<import_from_stmt>._assetstore assets<as>assetstore<import_from_stmt>._clientcore serializer<import_from_stmt>. logger<import_from_stmt>.. config<line_sep>reprs=json.dumps<line_sep># Use the system PRNG for session id generation (if possible) # NOTE: secure random string generation implementation is adapted # from the Django project. <def_stmt>get_random_string length=24 allowed_chars=<none><block_start>""" Produce a securely generated random string. With a length of 12 with the a-z, A-Z, 0-9 character set returns a 71-bit value. log_2((26+26+10)^12) =~ 71 bits """<line_sep>allowed_chars=allowed_chars<or>('abcdefghijklmnopqrstuvwxyz'+'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')<try_stmt><block_start>srandom=random.SystemRandom()<block_end><except_stmt>NotImplementedError# pragma: no cover <block_start>srandom=random<line_sep>logger.warning('Falling back to less secure Mersenne Twister random string.')<line_sep>bogus="%s%s%s"%(random.getstate() time.time() 'sdkhfbsdkfbsdbhf')<line_sep>random.seed(hashlib.sha256(bogus.encode()).digest())<block_end><return>''.join(srandom.choice(allowed_chars)<for>i range(length))<block_end><class_stmt>Session<block_start>""" A connection between Python and the client runtime (JavaScript). The session is what holds together the app widget, the web runtime, and the websocket instance that connects to it. Responsibilities: * Send messages to the client and process messages received by the client. * Keep track of PyComponent instances used by the session. * Keep track of JsComponent instances associated with the session. * Ensure that the client has all the module definitions it needs. """<line_sep>STATUS=new_type('Enum' () {'PENDING':1 'CONNECTED':2 'CLOSED':0})<def_stmt>__init__ self app_name store=<none> request=<none># Allow custom store for testing <block_start>self._store=store<if>(store<is><not><none>)<else>assetstore<assert_stmt>isinstance(self._store AssetStore)<line_sep>self._creation_time=time.time()# used by app manager # Id and name of the app self._id=get_random_string()<line_sep>self._app_name=app_name<line_sep># To keep track of what modules are defined at the client self._present_classes=set()# Component classes known by the client self._present_modules=set()# module names that, plus deps self._present_assets=set()# names of used associated assets self._assets_to_ignore=set()# user settable # Data for this session (in addition to the data provided by the store) self._data={}<line_sep># More vars self._runtime=<none># init web runtime, will be set when used self._ws=<none># init websocket, will be set when a connection is made self._closing=<false># Flag to help with shutdown # PyComponent or JsComponent instance, can be None if app_name is __default__ self._component=<none><line_sep># The session assigns component id's and keeps track of component objects self._component_counter=0<line_sep>self._component_instances=weakref.WeakValueDictionary()<line_sep>self._dead_component_ids=set()<line_sep># Keep track of roundtrips. 
The _ping_calls elements are: # [ping_count, {objects}, *(callback, args)] self._ping_calls=[]<line_sep>self._ping_counter=0<line_sep>self._eval_result={}<line_sep>self._eval_count=0<line_sep># While the client is not connected, we keep a queue of # commands, which are send to the client as soon as it connects self._pending_commands=[]<line_sep># request related information self._request=request<if_stmt>request<and>request.cookies<block_start>cookies=request.cookies<block_end><else_stmt><block_start>cookies={}<block_end>self._set_cookies(cookies)<block_end><def_stmt>__repr__ self<block_start>t='<%s for %r (%i) at 0x%x>'<line_sep><return>t%(self.__class__.__name__ self.app_name self.status id(self))<block_end>@property<def_stmt>request self<block_start>"""The tornado request that was at the origin of this session. """<line_sep><return>self._request<block_end>@property<def_stmt>id self<block_start>""" The unique identifier of this session. """<line_sep><return>self._id<block_end>@property<def_stmt>app_name self<block_start>""" The name of the application that this session represents. """<line_sep><return>self._app_name<block_end>@property<def_stmt>app self<block_start>""" The root PyComponent or JsComponent instance that represents the app. """<line_sep><return>self._component<block_end>@property<def_stmt>runtime self<block_start>""" The runtime that is rendering this app instance. Can be None if the client is a browser. """<line_sep><return>self._runtime<block_end>@property<def_stmt>status self<block_start>""" The status of this session. The lifecycle for each session is: * status 1: pending * status 2: connected * status 0: closed """<if_stmt>self._ws<is><none><block_start><return>self.STATUS.PENDING# not connected yet <block_end><elif_stmt>self._ws.close_code<is><none><block_start><return>self.STATUS.CONNECTED# alive and kicking <block_end><else_stmt><block_start><return>self.STATUS.CLOSED<block_end><block_end># connection closed @property<def_stmt>present_modules self<block_start>""" The set of module names that is (currently) available at the client. """<line_sep><return>set(self._present_modules)<block_end>@property<def_stmt>assets_to_ignore self<block_start>""" The set of names of assets that should *not* be pushed to the client, e.g. because they are already present on the page. Add names to this set to prevent them from being loaded. """<line_sep><return>self._assets_to_ignore<block_end><def_stmt>close self<block_start>""" Close the session: close websocket, close runtime, dispose app. """<line_sep># Stop guarding objects to break down any circular refs self._ping_calls=[]<line_sep>self._closing=<true># suppress warnings for session being closed. <try_stmt># Close the websocket <block_start><if_stmt>self._ws<block_start>self._ws.close_this()<block_end># Close the runtime <if_stmt>self._runtime<block_start>self._runtime.close()<block_end># Dispose the component and break the circular reference <if_stmt>self._component<is><not><none><block_start>self._component.dispose()<line_sep>self._component=<none><block_end># Discard data self._data={}<block_end><finally_stmt><block_start>self._closing=<false><block_end><block_end>## Hooking up with app, websocket, runtime <def_stmt>_set_ws self ws<block_start>""" A session is always first created, so we know what page to serve. 
The client will connect the websocket, and communicate the session_id so it can be connected to the correct Session via this method """<if_stmt>self._ws<is><not><none><block_start><raise>RuntimeError('Session is already connected.')<block_end># Set websocket object - this is what changes the status to CONNECTED self._ws=ws<line_sep>self._ws.write_command(("PRINT" "Flexx session says hi"))<line_sep># Send pending commands <for_stmt>command self._pending_commands<block_start>self._ws.write_command(command)<block_end>self._ws.write_command(('INIT_DONE' ))<block_end><def_stmt>_set_cookies self cookies=<none><block_start>""" To set cookies, must be an http.cookie.SimpleCookie object. When the app is loaded as a web app, the cookies are set *before* the main component is instantiated. Otherwise they are set when the websocket is connected. """<line_sep>self._cookies=cookies<if>cookies<else>SimpleCookie()<block_end><def_stmt>_set_runtime self runtime<block_start><if_stmt>self._runtime<is><not><none><block_start><raise>RuntimeError('Session already has a runtime.')<block_end>self._runtime=runtime<block_end>## Cookies, mmm <def_stmt>get_cookie self name default=<none> max_age_days=31 min_version=<none><block_start>""" Gets the value of the cookie with the given name, else default. Note that cookies only really work for web apps. """<import_from_stmt>tornado.web decode_signed_value<if_stmt>name<in>self._cookies<block_start>value=self._cookies[name].value<line_sep>value=decode_signed_value(config.cookie_secret name value max_age_days=max_age_days min_version=min_version)<line_sep><return>value.decode()<block_end><else_stmt><block_start><return>default<block_end><block_end><def_stmt>set_cookie self name value expires_days=30 version=<none> domain=<none> expires=<none> path="/" **kwargs<block_start>""" Sets the given cookie name/value with the given options. Set value to None to clear. The cookie value is secured using `flexx.config.cookie_secret`; don't forget to set that config value in your server. Additional keyword arguments are set on the Cookie.Morsel directly. """<line_sep># This code is taken (in modified form) from the Tornado project # Copyright 2009 Facebook # Licensed under the Apache License, Version 2.0 # Assume tornado is available ... <import_from_stmt>tornado.escape native_str<import_from_stmt>tornado.httputil format_timestamp<import_from_stmt>tornado.web create_signed_value<line_sep># Clear cookie? 
<if_stmt>value<is><none><block_start>value=""<line_sep>expires=datetime.datetime.utcnow()-datetime.timedelta(days=365)<block_end><else_stmt><block_start>secret=config.cookie_secret<line_sep>value=create_signed_value(secret name value version=version key_version=<none>)<block_end># The cookie library only accepts type str, in both python 2 and 3 name=native_str(name)<line_sep>value=native_str(value)<if_stmt>re.search(r"[\x00-\x20]" name+value)# Don't let us accidentally inject bad stuff <block_start><raise>ValueError("Invalid cookie %r: %r"%(name value))<block_end><if_stmt>name<in>self._cookies<block_start><del_stmt>self._cookies[name]<block_end>self._cookies[name]=value<line_sep>morsel=self._cookies[name]<if_stmt>domain<block_start>morsel["domain"]=domain<block_end><if_stmt>expires_days<is><not><none><and><not>expires<block_start>expires=datetime.datetime.utcnow()+datetime.timedelta(days=expires_days)<block_end><if_stmt>expires<block_start>morsel["expires"]=format_timestamp(expires)<block_end><if_stmt>path<block_start>morsel["path"]=path<block_end><for_stmt>k,v kwargs.items()<block_start><if_stmt>k<eq>'max_age'<block_start>k='max-age'<block_end># skip falsy values for httponly and secure flags because # SimpleCookie sets them regardless <if_stmt>k<in>['httponly' 'secure']<and><not>v<block_start><continue><block_end>morsel[k]=v<block_end>self.send_command('EXEC' 'document.cookie = "%s";'%morsel.OutputString().replace('"' '\\"'))<block_end>## Data <def_stmt>add_data self name data<block_start>""" Add data to serve to the client (e.g. images), specific to this session. Returns the link at which the data can be retrieved. Note that actions can be used to send (binary) data directly to the client (over the websocket). Parameters: name (str): the name of the data, e.g. 'icon.png'. If data has already been set on this name, it is overwritten. data (bytes): the data blob. Returns: str: the (relative) url at which the data can be retrieved. """<if_stmt><not>isinstance(name str)<block_start><raise>TypeError('Session.add_data() name must be a str.')<block_end><if_stmt>name<in>self._data<block_start><raise>ValueError('Session.add_data() got existing name %r.'%name)<block_end><if_stmt><not>isinstance(data bytes)<block_start><raise>TypeError('Session.add_data() data must be bytes.')<block_end>self._data[name]=data<line_sep><return>'flexx/data/%s/%s'%(self.id name)<block_end># relative path for export <def_stmt>remove_data self name<block_start>""" Remove the data associated with the given name. If you need this, consider using actions instead. Note that data is automatically released when the session is closed. """<line_sep>self._data.pop(name <none>)<block_end><def_stmt>get_data_names self<block_start>""" Get a list of names of the data provided by this session. """<line_sep><return>list(self._data.keys())<block_end><def_stmt>get_data self name<block_start>""" Get the data corresponding to the given name. This can be data local to the session, or global data. Returns None if data by that name is unknown. """<if_stmt><true><block_start>data=self._data.get(name <none>)<block_end><if_stmt>data<is><none><block_start>data=self._store.get_data(name)<block_end><return>data<block_end><def_stmt>_dump_data self<block_start>""" Get a dictionary that contains all data specific to this session. The keys represent relative paths, the values are all bytes. Private method, used by App.dump(). 
"""<line_sep>d={}<for_stmt>fname self.get_data_names()<block_start>d['flexx/data/{}/{}'.format(self.id fname)]=self.get_data(fname)<block_end><return>d<block_end>## Keeping track of component objects <def_stmt>_register_component self component id=<none><block_start>""" Called by PyComponent and JsComponent to give them an id and register with the session. """<assert_stmt>isinstance(component (PyComponent JsComponent))<assert_stmt>component.session<is>self<line_sep>cls=component.__class__<if_stmt>self._component<is><none><block_start>self._component=component# register root component (i.e. the app) <block_end># Set id <if_stmt>id<is><none><block_start>self._component_counter<augadd>1<line_sep>id=cls.__name__+'_'+str(self._component_counter)<block_end>component._id=id<line_sep>component._uid=self.id+'_'+id<line_sep># Register the instance using a weakref self._component_instances[component._id]=component<line_sep># Register the class to that the client has the needed definitions self._register_component_class(cls)<line_sep>self.keep_alive(component)<block_end><def_stmt>_unregister_component self component<block_start>self._dead_component_ids.add(component.id)<line_sep># self.keep_alive(component) # does not work on pypy; deletion in final # Because we use weak refs, and we want to be able to keep (the id of) # the object so that INVOKE on it can be silently ignored (because it # is disposed). The object id gets removed by the DISPOSE_ACK command. <block_end><def_stmt>get_component_instance self id<block_start>""" Get PyComponent or JsComponent instance that is associated with this session and has the corresponding id. The returned value can be None if it does not exist, and a returned component can be disposed. """<line_sep><return>self._component_instances.get(id <none>)<block_end>## JIT asset definitions <def_stmt>_register_component_class self cls<block_start>""" Mark the given PyComponent or JsComponent class as used; ensure that the client knows about the module that it is defined in, dependencies of this module, and associated assets of any of these modules. """<if_stmt><not>(isinstance(cls type)<and>issubclass(cls (PyComponent JsComponent)))<block_start><raise>TypeError('_register_component_class() needs a PyComponent '<concat>'or JsComponent class')<block_end># Early exit if we know the class already <if_stmt>cls<in>self._present_classes<block_start><return><block_end># Make sure that no two Component classes have the same name, or we get problems # that are difficult to debug. Unless classes are defined interactively. # The modules of classes that are re-registered are re-defined. The base # class of such a component is assumed to be either unchanged or defined # in the same module. It can also happen that a class is registered for # which the module was defined earlier (e.g. ui.html). Such modules # are redefined as well. 
same_name=[c<for>c self._present_classes<if>c.__name__<eq>cls.__name__]<if_stmt>same_name<block_start>is_interactive=self._app_name<eq>'__default__'<line_sep>same_name.append(cls)<line_sep>is_dynamic_cls=all([c.__module__<eq>'__main__'<for>c same_name])<if_stmt><not>(is_interactive<and>is_dynamic_cls)<block_start><raise>RuntimeError('Cannot have multiple Component classes with '<concat>'the same name unless using interactive session '<concat>'and the classes are dynamically defined: %r'%same_name)<block_end><block_end># Mark the class and the module as used logger.debug('Registering Component class %r'%cls.__name__)<line_sep>self._register_module(cls.__jsmodule__)<block_end><def_stmt>_register_module self mod_name<block_start>""" Register a module with the client, as well as its dependencies, and associated assests of the module and its dependencies. If the module was already defined, it is re-defined. """<if_stmt>(mod_name.startswith(('flexx.app' 'flexx.event'))<and>'.examples'<not><in>mod_name)<block_start><return><block_end># these are part of flexx core assets modules=set()<line_sep>assets=[]<def_stmt>collect_module_and_deps mod<block_start><if_stmt>mod.name.startswith(('flexx.app' 'flexx.event'))<block_start><return># these are part of flexx core assets <block_end><if_stmt>mod.name<not><in>self._present_modules<block_start>self._present_modules.add(mod.name)<for_stmt>dep mod.deps<block_start><if_stmt>dep.startswith(('flexx.app' 'flexx.event'))<block_start><continue><block_end>submod=self._store.modules[dep]<line_sep>collect_module_and_deps(submod)<block_end>modules.add(mod)<block_end><block_end># Collect module and dependent modules that are not yet defined self._store.update_modules()# Ensure up-to-date module definition mod=self._store.modules[mod_name]<line_sep>collect_module_and_deps(mod)<line_sep>f=<lambda>m:(m.name.startswith('__main__') m.name)<line_sep>modules=solve_dependencies(sorted(modules key=f))<line_sep># Collect associated assets <for_stmt>mod modules<block_start><for_stmt>asset_name self._store.get_associated_assets(mod.name)<block_start><if_stmt>asset_name<not><in>self._present_assets<block_start>self._present_assets.add(asset_name)<line_sep>assets.append(self._store.get_asset(asset_name))<block_end><block_end><block_end># If the module was already defined and thus needs to be re-defined, # we only redefine *this* module, no deps and no assoctated assets. <if_stmt><not>modules<block_start>modules.append(mod)<block_end># Collect CSS and JS assets <for_stmt>mod modules<block_start><if_stmt>mod.get_css().strip()<block_start>assets.append(self._store.get_asset(mod.name+'.css'))<block_end><block_end><for_stmt>mod modules<block_start>assets.append(self._store.get_asset(mod.name+'.js'))<block_end># Mark classes as used <for_stmt>mod modules<block_start><for_stmt>cls mod.component_classes<block_start>self._present_classes.add(cls)<block_end><block_end># Push assets over the websocket. Note how this works fine with the # notebook because we turn ws commands into display(HTML()). # JS can be defined via eval() or by adding a <script> to the DOM. # The latter allows assets that do not use strict mode, but sourceURL # does not work on FF. So we only want to eval our own assets. <for_stmt>asset assets<block_start><if_stmt>asset.name<in>self._assets_to_ignore<block_start><continue><block_end>logger.debug('Loading asset %s'%asset.name)<line_sep># Determine command suffix. All our sources come in bundles, # for which we use eval because it makes sourceURL work on FF. 
# (It does not work in Chrome in either way.) suffix=asset.name.split('.')[-1].upper()<if_stmt>suffix<eq>'JS'<and>isinstance(asset Bundle)<block_start>suffix='JS-EVAL'<block_end>self.send_command('DEFINE' suffix asset.name asset.to_string())<block_end><block_end>## Communication with the client <def_stmt>send_command self *command<block_start>""" Send a command to the other side. Commands consists of at least one argument (a string representing the type of command). """<assert_stmt>len(command)<ge>1<if_stmt>self._closing<block_start><pass><block_end><elif_stmt>self.status<eq>self.STATUS.CONNECTED<block_start>self._ws.write_command(command)<block_end><elif_stmt>self.status<eq>self.STATUS.PENDING<block_start>self._pending_commands.append(command)<block_end><else_stmt>#raise RuntimeError('Cannot send commands; app is closed') <block_start>logger.warning('Cannot send commands; app is closed')<block_end><block_end><def_stmt>_receive_command self command<block_start>""" Received a command from JS. """<line_sep>cmd=command[0]<if_stmt>cmd<eq>'EVALRESULT'<block_start>self._eval_result[command[2]]=command[1]<block_end><elif_stmt>cmd<eq>'PRINT'<block_start>print('JS:' command[1])<block_end><elif_stmt>cmd<eq>'INFO'<block_start>logger.info('JS: '+command[1])<block_end><elif_stmt>cmd<eq>'WARN'<block_start>logger.warning('JS: '+command[1])<block_end><elif_stmt>cmd<eq>'ERROR'<block_start>logger.error('JS: '+command[1]+' - stack trace in browser console (hit F12).')<block_end><elif_stmt>cmd<eq>'INVOKE'<block_start>id,name,args=command[1:]<line_sep>ob=self.get_component_instance(id)<if_stmt>ob<is><none><block_start><if_stmt>id<not><in>self._dead_component_ids<block_start>t='Cannot invoke %s.%s; session does not know it (anymore).'<line_sep>logger.warning(t%(id name))<block_end><block_end><elif_stmt>ob._disposed<block_start><pass># JS probably send something before knowing the object was dead <block_end><else_stmt><block_start>func=getattr(ob name <none>)<if_stmt>func<block_start>func(*args)<block_end><block_end><block_end><elif_stmt>cmd<eq>'PONG'<block_start>self._receive_pong(command[1])<block_end><elif_stmt>cmd<eq>'INSTANTIATE'<block_start>modulename,cname,id,args,kwargs=command[1:]<line_sep># Maybe we still have the instance? 
c=self.get_component_instance(id)<if_stmt>c<and><not>c._disposed<block_start>self.keep_alive(c)<line_sep><return><block_end># Try to find the class m,cls,e=<none> <none> 0<if_stmt>modulename<in>assetstore.modules<block_start>m=sys.modules[modulename]<line_sep>cls=getattr(m cname <none>)<if_stmt>cls<is><none><block_start>e=1<block_end><elif_stmt><not>(isinstance(cls type)<and>issubclass(cls JsComponent))<block_start>cls,e=<none> 2<block_end><elif_stmt>cls<not><in>AppComponentMeta.CLASSES<block_start>cls,e=<none> 3<block_end><block_end><if_stmt>cls<is><none><block_start><raise>RuntimeError('Cannot INSTANTIATE %s.%s (%i)'%(modulename cname e))<block_end># Instantiate kwargs['flx_session']=self<line_sep>kwargs['flx_id']=id<assert_stmt>len(args)<eq>0<line_sep>c=cls(**kwargs)# calls keep_alive via _register_component() <block_end><elif_stmt>cmd<eq>'DISPOSE'# Gets send from local to proxy <block_start>id=command[1]<line_sep>c=self.get_component_instance(id)<if_stmt>c<and><not>c._disposed# no need to warn if component does not exist <block_start>c._dispose()<block_end>self.send_command('DISPOSE_ACK' command[1])<line_sep>self._component_instances.pop(id <none>)# Drop local ref now <block_end><elif_stmt>cmd<eq>'DISPOSE_ACK'# Gets send from proxy to local <block_start>self._component_instances.pop(command[1] <none>)<line_sep>self._dead_component_ids.discard(command[1])<block_end><else_stmt><block_start>logger.error('Unknown command received from JS:\n%s'%command)<block_end><block_end><def_stmt>keep_alive self ob iters=1<block_start>""" Keep an object alive for a certain amount of time, expressed in Python-JS ping roundtrips. This is intended for making JsComponent (i.e. proxy components) survice the time between instantiation triggered from JS and their attachement to a property, though any type of object can be given. """<line_sep>ping_to_schedule_at=self._ping_counter+iters<line_sep>el=self._get_ping_call_list(ping_to_schedule_at)<line_sep>el[1][id(ob)]=ob<block_end># add to dict of objects to keep alive <def_stmt>call_after_roundtrip self callback *args<block_start>""" A variant of ``call_soon()`` that calls a callback after a py-js roundrip. This can be convenient to delay an action until after other things have settled down. """<line_sep># The ping_counter represents the ping count that is underway. # Since we want at least a full ping, we want one count further. ping_to_schedule_at=self._ping_counter+1<line_sep>el=self._get_ping_call_list(ping_to_schedule_at)<line_sep>el.append((callback args))<block_end><async_keyword><def_stmt>co_roundtrip self<block_start>""" Coroutine to wait for one Py-JS-Py roundtrip. """<line_sep>count=0<def_stmt>up <block_start><nonlocal>count<line_sep>count<augadd>1<block_end>self.call_after_roundtrip(up)<while_stmt>count<l>1<block_start><await>asyncio.sleep(0.02)<block_end><block_end><async_keyword><def_stmt>co_eval self js<block_start>""" Coroutine to evaluate JS in the client, wait for the result, and then return it. It is recomended to use this method only for testing purposes. """<line_sep>id=self._eval_count<line_sep>self._eval_count<augadd>1<line_sep>self.send_command('EVALANDRETURN' js id)<while_stmt>id<not><in>self._eval_result<block_start><await>asyncio.sleep(0.2)<block_end><return>self._eval_result.pop(id)<block_end><def_stmt>_get_ping_call_list self ping_count<block_start>""" Get an element from _ping_call for the specified ping_count. The element is a list [ping_count, {objects}, *(callback, args)] """<line_sep># No pending ping_calls? 
<if_stmt>len(self._ping_calls)<eq>0# Start pinging <block_start>send_ping_later(self)<line_sep># Append element el=[ping_count {}]<line_sep>self._ping_calls.append(el)<line_sep><return>el<block_end># Try to find existing element, or insert it <for_stmt>i reversed(range(len(self._ping_calls)))<block_start>el=self._ping_calls[i]<if_stmt>el[0]<eq>ping_count<block_start><return>el<block_end><elif_stmt>el[0]<l>ping_count<block_start>el=[ping_count {}]<line_sep>self._ping_calls.insert(i+1 el)<line_sep><return>el<block_end><block_end><else_stmt><block_start>el=[ping_count {}]<line_sep>self._ping_calls.insert(0 el)<line_sep><return>el<block_end><block_end><def_stmt>_receive_pong self count# Process ping calls <block_start><while_stmt>len(self._ping_calls)<g>0<and>self._ping_calls[0][0]<le>count<block_start>_,objects,*callbacks=self._ping_calls.pop(0)<line_sep>objects.clear()<del_stmt>objects<for_stmt>callback,args callbacks<block_start>asyncio.get_event_loop().call_soon(callback *args)<block_end><block_end># Continue pinging? <if_stmt>len(self._ping_calls)<g>0<block_start>send_ping_later(self)<block_end><block_end><block_end><def_stmt>send_ping_later session# This is to prevent the prevention of the session from being discarded due # to a ref lingering in an asyncio loop. <block_start><def_stmt>x weaksession<block_start>s=weaksession()<if_stmt>s<is><not><none><and>s.status<g>0<block_start>s._ping_counter<augadd>1<line_sep>s.send_command('PING' s._ping_counter)<block_end><block_end># asyncio.get_event_loop().call_soon(x, weakref.ref(session)) asyncio.get_event_loop().call_later(0.01 x weakref.ref(session))<block_end>## Functions to get page # These could be methods, but are only for internal use <def_stmt>get_page session<block_start>""" Get the string for the HTML page to render this session's app. Not a lot; all other JS and CSS assets are pushed over the websocket. """<line_sep>css_assets=[assetstore.get_asset('reset.css')]<line_sep>js_assets=[assetstore.get_asset('flexx-core.js')]<line_sep><return>_get_page(session js_assets css_assets 3 <false>)<block_end><def_stmt>get_page_for_export session commands link=0<block_start>""" Get the string for an exported HTML page (to run without a server). In this case, there is no websocket to push JS/CSS assets over; these need to be included inside or alongside the main html page. """<line_sep># This function basically collects all assets that the session needs, # creates a special -export.js asset that executes the given commands, # and puts it al together using _get_page(). # We start as a normal page ... 
css_assets=[assetstore.get_asset('reset.css')]<line_sep>js_assets=[assetstore.get_asset('flexx-core.js')]<line_sep># Get all the used modules modules=[assetstore.modules[name]<for>name session.present_modules]<line_sep>f=<lambda>m:(m.name.startswith('__main__') m.name)<line_sep>modules=solve_dependencies(sorted(modules key=f))<line_sep># First the associated assets asset_names=set()<for_stmt>mod modules<block_start><for_stmt>asset_name assetstore.get_associated_assets(mod.name)<block_start><if_stmt>asset_name<not><in>asset_names<block_start>asset_names.add(asset_name)<line_sep>asset=assetstore.get_asset(asset_name)<if_stmt>asset.name.lower().endswith('.js')<block_start>js_assets.append(asset)<block_end><else_stmt><block_start>css_assets.append(asset)<block_end><block_end><block_end><block_end># Then the modules themselves <for_stmt>mod modules<block_start><if_stmt>mod.get_css().strip()<block_start>css_assets.append(assetstore.get_asset(mod.name+'.css'))<block_end><block_end><for_stmt>mod modules<block_start>js_assets.append(assetstore.get_asset(mod.name+'.js'))<block_end># Create asset for launching the app (commands that normally get send # over the websocket) lines=[]<line_sep>lines.append('flexx.is_exported = true;\n')<line_sep>lines.append('flexx.run_exported_app = function () {')<line_sep>lines.append(' var commands_b64 = [')<for_stmt>command commands<block_start><if_stmt>command[0]<ne>'DEFINE'<block_start>command_str=base64.encodebytes(serializer.encode(command)).decode()<line_sep>lines.append(' "'+command_str.replace('\n' '')+'",')<block_end><block_end>lines.append(' ];')<line_sep>lines.append(' bb64 = flexx.require("bb64");')<line_sep>lines.append(' for (var i=0; i<commands_b64.length; i++) {')<line_sep>lines.append(' var command = flexx.serializer.decode('<concat>'bb64.decode(commands_b64[i]));')<line_sep>lines.append(' flexx.s1._receive_command(command);')<line_sep>lines.append(' }\n};\n')<line_sep># Create a session asset for it, "-export.js" is always embedded export_asset=Asset('flexx-export.js' '\n'.join(lines))<line_sep>js_assets.append(export_asset)<line_sep># Combine it all <return>_get_page(session js_assets css_assets link <true>)<block_end><def_stmt>_get_page session js_assets css_assets link export<block_start>""" Compose index page. Depending on the value of link and the types of assets, the assets are either embedded or linked. 
"""<line_sep>pre_path='flexx/assets'<if>export<else>'/flexx/assets'# relative / abs codes=[]<for_stmt>assets [css_assets js_assets]<block_start><for_stmt>asset assets<block_start><if_stmt>link<in>(0 1)<block_start>html=asset.to_html('{}' link)<block_end><else_stmt><block_start><if_stmt>asset.name.endswith(('-info.js' '-export.js'))# Special case, is always embedded, see get_page_for_export() <block_start>html=asset.to_html('' 0)<block_end><else_stmt><block_start>html=asset.to_html(pre_path+'/shared/{}' link)<block_end><block_end>codes.append(html)<if_stmt>export<and>assets<is>js_assets<block_start>codes.append('<script>window.flexx.spin();</script>')<block_end><block_end>codes.append('')<block_end># whitespace between css and js assets codes.append('<script>flexx.create_session("%s", "%s");</script>\n'%(session.app_name session.id))<line_sep>src=INDEX<if_stmt>link<in>(0 1)<block_start>asset_names=[a.name<for>a css_assets+js_assets]<line_sep>toc='<!-- Contents:\n\n- '+'\n- '.join(asset_names)+'\n\n-->'<line_sep>codes.insert(0 toc)<line_sep>src=src.replace('ASSET-HOOK' '\n\n\n'.join(codes))<block_end><else_stmt><block_start>src=src.replace('ASSET-HOOK' '\n'.join(codes))<block_end><return>src<block_end>
# coding: utf-8 <import_from_future_stmt> absolute_import print_function unicode_literals <import_from_stmt>pydocx.models XmlModel XmlCollection<import_from_stmt>pydocx.openxml.wordprocessing.run Run<import_from_stmt>pydocx.openxml.wordprocessing.smart_tag_run SmartTagRun<class_stmt>DeletedRun(XmlModel)<block_start>XML_TAG='del'<line_sep>children=XmlCollection(Run SmartTagRun 'wordprocessing.DeletedRun' # TODO Needs InsertedRun )<block_end>
default_app_config='messaging.apps.MessagingConfig'<line_sep>
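This one-liner is the pre-Django-3.2 way of pointing a package at its application config. The dotted path implies a messaging/apps.py roughly like the sketch below; the class body is a guess, only the path messaging.apps.MessagingConfig comes from the source:

from django.apps import AppConfig

class MessagingConfig(AppConfig):
    name = 'messaging'

    def ready(self):
        # startup hooks such as signal registration would typically go here
        pass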
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>copy<import_stmt>unittest<import_from_stmt>content_classification_lens ContentClassificationLens _RulesMatcher <import_from_stmt>request_track Request<import_stmt>test_utils<class_stmt>ContentClassificationLensTestCase(unittest.TestCase)<block_start>_DOCUMENT_URL='http://bla.com'<line_sep>_MAIN_FRAME_ID='123.1'<line_sep>_REQUEST=Request.FromJsonDict({'url':_DOCUMENT_URL 'document_url':_DOCUMENT_URL 'request_id':'1234.1' 'frame_id':_MAIN_FRAME_ID 'initiator':{'type':'other'} 'timestamp':2 'status':200 'timing':{} 'resource_type':'Document'})<line_sep>_PAGE_EVENTS=[{'method':'Page.frameStartedLoading' 'frame_id':_MAIN_FRAME_ID} {'method':'Page.frameAttached' 'frame_id':'123.13' 'parent_frame_id':_MAIN_FRAME_ID}]<line_sep>_RULES=['bla.com']<def_stmt>testGetDocumentUrl self<block_start>trace=test_utils.LoadingTraceFromEvents([self._REQUEST] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace [] [])<line_sep>self.assertEquals(self._DOCUMENT_URL lens._GetDocumentUrl())<line_sep># Don't be fooled by redirects. request=copy.deepcopy(self._REQUEST)<line_sep>request.status=302<line_sep>request.document_url='http://www.bla.com'<line_sep>trace=test_utils.LoadingTraceFromEvents([request self._REQUEST] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace [] [])<line_sep>self.assertEquals(self._DOCUMENT_URL lens._GetDocumentUrl())<block_end><def_stmt>testGetDocumentUrlSeveralChanges self<block_start>request=copy.deepcopy(self._REQUEST)<line_sep>request.status=200<line_sep>request.document_url='http://www.blabla.com'<line_sep>request2=copy.deepcopy(request)<line_sep>request2.document_url='http://www.blablabla.com'<line_sep>trace=test_utils.LoadingTraceFromEvents([self._REQUEST request request2] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace [] [])<line_sep>self.assertEquals(request2.document_url lens._GetDocumentUrl())<block_end><def_stmt>testNoRules self<block_start>trace=test_utils.LoadingTraceFromEvents([self._REQUEST] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace [] [])<line_sep>self.assertFalse(lens.IsAdRequest(self._REQUEST))<line_sep>self.assertFalse(lens.IsTrackingRequest(self._REQUEST))<block_end><def_stmt>testAdRequest self<block_start>trace=test_utils.LoadingTraceFromEvents([self._REQUEST] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace self._RULES [])<line_sep>self.assertTrue(lens.IsAdRequest(self._REQUEST))<line_sep>self.assertFalse(lens.IsTrackingRequest(self._REQUEST))<block_end><def_stmt>testTrackingRequest self<block_start>trace=test_utils.LoadingTraceFromEvents([self._REQUEST] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace [] self._RULES)<line_sep>self.assertFalse(lens.IsAdRequest(self._REQUEST))<line_sep>self.assertTrue(lens.IsTrackingRequest(self._REQUEST))<block_end><def_stmt>testMainFrameIsNotAnAdFrame self<block_start>trace=test_utils.LoadingTraceFromEvents([self._REQUEST] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace self._RULES [])<line_sep>self.assertFalse(lens.IsAdOrTrackingFrame(self._MAIN_FRAME_ID))<block_end><def_stmt>testAdFrame self<block_start>request=copy.deepcopy(self._REQUEST)<line_sep>request.request_id='1234.2'<line_sep>request.frame_id='123.123'<line_sep>trace=test_utils.LoadingTraceFromEvents([self._REQUEST request] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace 
self._RULES [])<line_sep>self.assertTrue(lens.IsAdOrTrackingFrame(request.frame_id))<block_end><def_stmt>testAdAndTrackingRequests self<block_start>ad_request=copy.deepcopy(self._REQUEST)<line_sep>ad_request.request_id='1234.2'<line_sep>ad_request.frame_id='123.123'<line_sep>non_ad_request_non_ad_frame=copy.deepcopy(self._REQUEST)<line_sep>non_ad_request_non_ad_frame.request_id='1234.3'<line_sep>non_ad_request_non_ad_frame.url='http://www.example.com'<line_sep>non_ad_request_non_ad_frame.frame_id='123.456'<line_sep>non_ad_request_ad_frame=copy.deepcopy(self._REQUEST)<line_sep>non_ad_request_ad_frame.request_id='1234.4'<line_sep>non_ad_request_ad_frame.url='http://www.example.com'<line_sep>non_ad_request_ad_frame.frame_id=ad_request.frame_id<line_sep>trace=test_utils.LoadingTraceFromEvents([self._REQUEST ad_request non_ad_request_non_ad_frame non_ad_request_ad_frame] self._PAGE_EVENTS)<line_sep>lens=ContentClassificationLens(trace self._RULES [])<line_sep>self.assertSetEqual(set([self._REQUEST ad_request non_ad_request_ad_frame]) set(lens.AdAndTrackingRequests()))<block_end><block_end><class_stmt>_MatcherTestCase(unittest.TestCase)<block_start>_RULES_WITH_WHITELIST=['/thisisanad.' '@@myadvertisingdomain.com/*' '@@||www.mydomain.com/ads/$elemhide']<line_sep>_SCRIPT_RULE='domainwithscripts.com/*$script'<line_sep>_THIRD_PARTY_RULE='domainwithscripts.com/*$third-party'<line_sep>_SCRIPT_REQUEST=Request.FromJsonDict({'url':'http://domainwithscripts.com/bla.js' 'resource_type':'Script' 'request_id':'1234.1' 'frame_id':'123.1' 'initiator':{'type':'other'} 'timestamp':2 'timing':{}})<def_stmt>testRemovesWhitelistRules self<block_start>matcher=_RulesMatcher(self._RULES_WITH_WHITELIST <false>)<line_sep>self.assertEquals(3 len(matcher._rules))<line_sep>matcher=_RulesMatcher(self._RULES_WITH_WHITELIST <true>)<line_sep>self.assertEquals(1 len(matcher._rules))<block_end><def_stmt>testScriptRule self<block_start>matcher=_RulesMatcher([self._SCRIPT_RULE] <false>)<line_sep>request=copy.deepcopy(self._SCRIPT_REQUEST)<line_sep>request.resource_type='Stylesheet'<line_sep>self.assertFalse(matcher.Matches(request ContentClassificationLensTestCase._DOCUMENT_URL))<line_sep>self.assertTrue(matcher.Matches(self._SCRIPT_REQUEST ContentClassificationLensTestCase._DOCUMENT_URL))<block_end><def_stmt>testGetTldPlusOne self<block_start>self.assertEquals('easy.com' _RulesMatcher._GetTldPlusOne('http://www.easy.com/hello/you'))<line_sep>self.assertEquals('not-so-easy.co.uk' _RulesMatcher._GetTldPlusOne('http://www.not-so-easy.co.uk/hello/you'))<line_sep>self.assertEquals('hard.co.uk' _RulesMatcher._GetTldPlusOne('http://hard.co.uk/'))<block_end><def_stmt>testThirdPartyRule self<block_start>matcher=_RulesMatcher([self._THIRD_PARTY_RULE] <false>)<line_sep>request=copy.deepcopy(self._SCRIPT_REQUEST)<line_sep>document_url='http://www.domainwithscripts.com/good-morning'<line_sep>self.assertFalse(matcher.Matches(request document_url))<line_sep>document_url='http://anotherdomain.com/good-morning'<line_sep>self.assertTrue(matcher.Matches(request document_url))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
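testThirdPartyRule above hinges on comparing the TLD+1 of the request and document URLs. The sketch below makes the decision with a deliberately naive host comparison (last two labels only), which is exactly what breaks for suffixes like co.uk and why the real matcher computes a proper TLD+1; it is an illustration, not the production code:

from urllib.parse import urlparse

def _last_two_labels(url):
    return '.'.join(urlparse(url).hostname.split('.')[-2:])

def is_third_party_naive(request_url, document_url):
    # Compares only the last two host labels. This misclassifies hosts under
    # multi-part suffixes such as 'co.uk', which is why a proper TLD+1 is
    # needed (see testGetTldPlusOne above).
    return _last_two_labels(request_url) != _last_two_labels(document_url)

print(is_third_party_naive('http://domainwithscripts.com/bla.js',
                           'http://anotherdomain.com/good-morning'))           # True
print(is_third_party_naive('http://domainwithscripts.com/bla.js',
                           'http://www.domainwithscripts.com/good-morning'))   # False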
# Generated by Django 2.0.3 on 2018-06-29 15:55 <import_stmt>django.db.models.deletion<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("order" "0047_order_line_name_length")]<line_sep>operations=[migrations.AlterField(model_name="order" name="token" field=models.CharField(blank=<true> max_length=36 unique=<true>) ) migrations.AlterField(model_name="order" name="voucher" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="discount.Voucher" ) ) ]<block_end>
# # # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Logical units for querying instances."""<import_stmt>itertools<import_from_stmt>ganeti constants<import_from_stmt>ganeti locking<import_from_stmt>ganeti utils<import_from_stmt>ganeti.cmdlib.base NoHooksLU<import_from_stmt>ganeti.cmdlib.common ShareAll GetWantedInstances CheckInstancesNodeGroups AnnotateDiskParams<import_from_stmt>ganeti.cmdlib.instance_utils NICListToTuple<import_from_stmt>ganeti.hypervisor hv_base<class_stmt>LUInstanceQueryData(NoHooksLU)<block_start>"""Query runtime instance data. 
"""<line_sep>REQ_BGL=<false><def_stmt>ExpandNames self<block_start>self.needed_locks={}<line_sep># Use locking if requested or when non-static information is wanted <if_stmt><not>(self.op.static<or>self.op.use_locking)<block_start>self.LogWarning("Non-static data requested, locks need to be acquired")<line_sep>self.op.use_locking=<true><block_end><if_stmt>self.op.instances<or><not>self.op.use_locking# Expand instance names right here <block_start>(_ self.wanted_names)=GetWantedInstances(self self.op.instances)<block_end><else_stmt># Will use acquired locks <block_start>self.wanted_names=<none><block_end><if_stmt>self.op.use_locking<block_start>self.share_locks=ShareAll()<if_stmt>self.wanted_names<is><none><block_start>self.needed_locks[locking.LEVEL_INSTANCE]=locking.ALL_SET<block_end><else_stmt><block_start>self.needed_locks[locking.LEVEL_INSTANCE]=self.wanted_names<block_end>self.needed_locks[locking.LEVEL_NODEGROUP]=[]<line_sep>self.needed_locks[locking.LEVEL_NODE]=[]<line_sep>self.needed_locks[locking.LEVEL_NETWORK]=[]<line_sep>self.recalculate_locks[locking.LEVEL_NODE]=constants.LOCKS_REPLACE<line_sep>self.dont_collate_locks[locking.LEVEL_NODEGROUP]=<true><line_sep>self.dont_collate_locks[locking.LEVEL_NODE]=<true><line_sep>self.dont_collate_locks[locking.LEVEL_NETWORK]=<true><block_end><block_end><def_stmt>DeclareLocks self level<block_start><if_stmt>self.op.use_locking<block_start>owned_instances=dict(self.cfg.GetMultiInstanceInfoByName(self.owned_locks(locking.LEVEL_INSTANCE)))<if_stmt>level<eq>locking.LEVEL_NODEGROUP# Lock all groups used by instances optimistically; this requires going # via the node before it's locked, requiring verification later on <block_start>self.needed_locks[locking.LEVEL_NODEGROUP]=frozenset(group_uuid<for>instance_uuid owned_instances<for>group_uuid self.cfg.GetInstanceNodeGroups(instance_uuid))<block_end><elif_stmt>level<eq>locking.LEVEL_NODE<block_start>self._LockInstancesNodes()<block_end><elif_stmt>level<eq>locking.LEVEL_NETWORK<block_start>self.needed_locks[locking.LEVEL_NETWORK]=frozenset(net_uuid<for>instance_uuid owned_instances.keys()<for>net_uuid self.cfg.GetInstanceNetworks(instance_uuid))<block_end><block_end><block_end><def_stmt>CheckPrereq self<block_start>"""Check prerequisites. This only checks the optional instance list against the existing names. 
"""<line_sep>owned_instances=frozenset(self.owned_locks(locking.LEVEL_INSTANCE))<line_sep>owned_groups=frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))<line_sep>owned_node_uuids=frozenset(self.owned_locks(locking.LEVEL_NODE))<line_sep>owned_networks=frozenset(self.owned_locks(locking.LEVEL_NETWORK))<if_stmt>self.wanted_names<is><none><block_start><assert_stmt>self.op.use_locking "Locking was not used"<line_sep>self.wanted_names=owned_instances<block_end>instances=dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))<if_stmt>self.op.use_locking<block_start>CheckInstancesNodeGroups(self.cfg instances owned_groups owned_node_uuids <none>)<block_end><else_stmt><block_start><assert_stmt><not>(owned_instances<or>owned_groups<or>owned_node_uuids<or>owned_networks)<block_end>self.wanted_instances=list(instances.values())<block_end><def_stmt>_ComputeBlockdevStatus self node_uuid instance dev<block_start>"""Returns the status of a block device """<if_stmt>self.op.static<or><not>node_uuid<block_start><return><none><block_end>result=self.rpc.call_blockdev_find(node_uuid (dev instance))<if_stmt>result.offline<block_start><return><none><block_end>result.Raise("Can't compute disk status for %s"%instance.name)<line_sep>status=result.payload<if_stmt>status<is><none><block_start><return><none><block_end><return>(status.dev_path status.major status.minor status.sync_percent status.estimated_time status.is_degraded status.ldisk_status)<block_end><def_stmt>_ComputeDiskStatus self instance node_uuid2name_fn dev<block_start>"""Compute block device status. """<line_sep>(anno_dev )=AnnotateDiskParams(instance [dev] self.cfg)<line_sep><return>self._ComputeDiskStatusInner(instance <none> node_uuid2name_fn anno_dev)<block_end><def_stmt>_ComputeDiskStatusInner self instance snode_uuid node_uuid2name_fn dev<block_start>"""Compute block device status. @attention: The device has to be annotated already. 
"""<line_sep>drbd_info=<none><line_sep>output_logical_id=dev.logical_id<if_stmt>dev.dev_type<in>constants.DTS_DRBD# we change the snode then (otherwise we use the one passed in) <block_start><if_stmt>dev.logical_id[0]<eq>instance.primary_node<block_start>snode_uuid=dev.logical_id[1]<line_sep>snode_minor=dev.logical_id[4]<line_sep>pnode_minor=dev.logical_id[3]<block_end><else_stmt><block_start>snode_uuid=dev.logical_id[0]<line_sep>snode_minor=dev.logical_id[3]<line_sep>pnode_minor=dev.logical_id[4]<block_end>drbd_info={"primary_node":node_uuid2name_fn(instance.primary_node) "primary_minor":pnode_minor "secondary_node":node_uuid2name_fn(snode_uuid) "secondary_minor":snode_minor "port":dev.logical_id[2] }<line_sep># replace the secret present at the end of the ids with None output_logical_id=dev.logical_id[:-1]+(<none> )<block_end>dev_pstatus=self._ComputeBlockdevStatus(instance.primary_node instance dev)<line_sep>dev_sstatus=self._ComputeBlockdevStatus(snode_uuid instance dev)<if_stmt>dev.children<block_start>dev_children=[self._ComputeDiskStatusInner(instance snode_uuid node_uuid2name_fn d)<for>d dev.children]<block_end><else_stmt><block_start>dev_children=[]<block_end><return>{"iv_name":dev.iv_name "dev_type":dev.dev_type "logical_id":output_logical_id "drbd_info":drbd_info "pstatus":dev_pstatus "sstatus":dev_sstatus "children":dev_children "mode":dev.mode "size":dev.size "spindles":dev.spindles "name":dev.name "uuid":dev.uuid }<block_end><def_stmt>Exec self feedback_fn<block_start>"""Gather and return data"""<line_sep>result={}<line_sep>cluster=self.cfg.GetClusterInfo()<line_sep>node_uuids=itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)<for>i self.wanted_instances))<line_sep>nodes=dict(self.cfg.GetMultiNodeInfo(node_uuids))<line_sep>groups=dict(self.cfg.GetMultiNodeGroupInfo(node.group<for>node nodes.values()))<for_stmt>instance self.wanted_instances<block_start>pnode=nodes[instance.primary_node]<line_sep>hvparams=cluster.FillHV(instance skip_globals=<true>)<if_stmt>self.op.static<or>pnode.offline<block_start>remote_state=<none><if_stmt>pnode.offline<block_start>self.LogWarning("Primary node %s is marked offline, returning static"<concat>" information only for instance %s"%(pnode.name instance.name))<block_end><block_end><else_stmt><block_start>remote_info=self.rpc.call_instance_info(instance.primary_node instance.name instance.hypervisor cluster.hvparams[instance.hypervisor])<line_sep>remote_info.Raise("Error checking node %s"%pnode.name)<line_sep>remote_info=remote_info.payload<line_sep>allow_userdown=cluster.enabled_user_shutdown<and>(instance.hypervisor<ne>constants.HT_KVM<or>hvparams[constants.HV_KVM_USER_SHUTDOWN])<if_stmt>remote_info<and>"state"<in>remote_info<block_start><if_stmt>hv_base.HvInstanceState.IsShutdown(remote_info["state"])<block_start><if_stmt>allow_userdown<block_start>remote_state="user down"<block_end><else_stmt><block_start>remote_state="down"<block_end><block_end><else_stmt><block_start>remote_state="up"<block_end><block_end><else_stmt><block_start><if_stmt>instance.admin_state<eq>constants.ADMINST_UP<block_start>remote_state="down"<block_end><elif_stmt>instance.admin_state<eq>constants.ADMINST_DOWN<block_start><if_stmt>instance.admin_state_source<eq>constants.USER_SOURCE<block_start>remote_state="user 
down"<block_end><else_stmt><block_start>remote_state="down"<block_end><block_end><else_stmt><block_start>remote_state="offline"<block_end><block_end><block_end>group2name_fn=<lambda>uuid:groups[uuid].name<line_sep>node_uuid2name_fn=<lambda>uuid:nodes[uuid].name<line_sep>disk_objects=self.cfg.GetInstanceDisks(instance.uuid)<line_sep>output_disks=[self._ComputeDiskStatus(instance node_uuid2name_fn d)<for>d disk_objects]<line_sep>secondary_nodes=self.cfg.GetInstanceSecondaryNodes(instance.uuid)<line_sep>snodes_group_uuids=[nodes[snode_uuid].group<for>snode_uuid secondary_nodes]<line_sep>result[instance.name]={"name":instance.name "config_state":instance.admin_state "run_state":remote_state "pnode":pnode.name "pnode_group_uuid":pnode.group "pnode_group_name":group2name_fn(pnode.group) "snodes":[node_uuid2name_fn(n)<for>n secondary_nodes] "snodes_group_uuids":snodes_group_uuids "snodes_group_names":[group2name_fn(u)<for>u snodes_group_uuids] "os":instance.os # this happens to be the same format used for hooks "nics":NICListToTuple(self instance.nics) "disk_template":utils.GetDiskTemplate(disk_objects) "disks":output_disks "hypervisor":instance.hypervisor "network_port":instance.network_port "hv_instance":instance.hvparams "hv_actual":hvparams "be_instance":instance.beparams "be_actual":cluster.FillBE(instance) "os_instance":instance.osparams "os_actual":cluster.SimpleFillOS(instance.os instance.osparams) "serial_no":instance.serial_no "mtime":instance.mtime "ctime":instance.ctime "uuid":instance.uuid }<block_end><return>result<block_end><block_end>
<import_from_stmt>moto.core.exceptions JsonRESTError<class_stmt>DataSyncClientError(JsonRESTError)<block_start>code=400<block_end><class_stmt>InvalidRequestException(DataSyncClientError)<block_start><def_stmt>__init__ self msg=<none><block_start>self.code=400<line_sep>super().__init__("InvalidRequestException" msg<or>"The request is not valid.")<block_end><block_end>
<import_stmt>dgl<import_stmt>mxnet<as>mx<import_stmt>numpy<as>np<import_stmt>logging time<import_from_stmt>operator attrgetter itemgetter<import_from_stmt>mxnet nd gluon<import_from_stmt>mxnet.gluon nn<import_from_stmt>dgl.utils toindex<import_from_stmt>dgl.nn.mxnet GraphConv<import_from_stmt>gluoncv.model_zoo get_model<import_from_stmt>gluoncv.data.batchify Pad<def_stmt>iou boxA boxB# determine the (x, y)-coordinates of the intersection rectangle <block_start>xA=max(boxA[0] boxB[0])<line_sep>yA=max(boxA[1] boxB[1])<line_sep>xB=min(boxA[2] boxB[2])<line_sep>yB=min(boxA[3] boxB[3])<line_sep>interArea=max(0 xB-xA)<times>max(0 yB-yA)<if_stmt>interArea<l>1e-7<block_start><return>0<block_end>boxAArea=(boxA[2]-boxA[0])<times>(boxA[3]-boxA[1])<line_sep>boxBArea=(boxB[2]-boxB[0])<times>(boxB[3]-boxB[1])<if_stmt>boxAArea+boxBArea-interArea<l>1e-7<block_start><return>0<block_end>iou_val=interArea/float(boxAArea+boxBArea-interArea)<line_sep><return>iou_val<block_end><def_stmt>object_iou_thresh gt_object pred_object iou_thresh=0.5<block_start>obj_iou=iou(gt_object[1:5] pred_object[1:5])<if_stmt>obj_iou<ge>iou_thresh<block_start><return><true><block_end><return><false><block_end><def_stmt>triplet_iou_thresh pred_triplet gt_triplet iou_thresh=0.5<block_start>sub_iou=iou(gt_triplet[5:9] pred_triplet[5:9])<if_stmt>sub_iou<ge>iou_thresh<block_start>ob_iou=iou(gt_triplet[9:13] pred_triplet[9:13])<if_stmt>ob_iou<ge>iou_thresh<block_start><return><true><block_end><block_end><return><false><block_end>@[email protected]('auc')<class_stmt>AUCMetric(mx.metric.EvalMetric)<block_start><def_stmt>__init__ self name='auc' eps=1e-12<block_start>super(AUCMetric self).__init__(name)<line_sep>self.eps=eps<block_end><def_stmt>update self labels preds<block_start>mx.metric.check_label_shapes(labels preds)<line_sep>label_weight=labels[0].asnumpy()<line_sep>preds=preds[0].asnumpy()<line_sep>tmp=[]<for_stmt>i range(preds.shape[0])<block_start>tmp.append((label_weight[i] preds[i][1]))<block_end>tmp=sorted(tmp key=itemgetter(1) reverse=<true>)<line_sep>label_sum=label_weight.sum()<if_stmt>label_sum<eq>0<or>label_sum<eq>label_weight.size<block_start><return><block_end>label_one_num=np.count_nonzero(label_weight)<line_sep>label_zero_num=len(label_weight)-label_one_num<line_sep>total_area=label_zero_num<times>label_one_num<line_sep>height=0<line_sep>width=0<line_sep>area=0<for_stmt>a,_ tmp<block_start><if_stmt>a<eq>1.0<block_start>height<augadd>1.0<block_end><else_stmt><block_start>width<augadd>1.0<line_sep>area<augadd>height<block_end><block_end>self.sum_metric<augadd>area/total_area<line_sep>self.num_inst<augadd>1<block_end><block_end>@[email protected]('predcls')<class_stmt>PredCls(mx.metric.EvalMetric)<block_start>'''Metric with ground truth object location and label'''<def_stmt>__init__ self topk=20 iou_thresh=0.99<block_start>super(PredCls self).__init__('predcls@%d'%(topk))<line_sep>self.topk=topk<line_sep>self.iou_thresh=iou_thresh<block_end><def_stmt>update self labels preds<block_start><if_stmt>labels<is><none><or>preds<is><none><block_start>self.num_inst<augadd>1<line_sep><return><block_end>preds=preds[preds[: 0].argsort()[::-1]]<line_sep>m=min(self.topk preds.shape[0])<line_sep>count=0<line_sep>gt_edge_num=labels.shape[0]<line_sep>label_matched=[<false><for>label labels]<for_stmt>i range(m)<block_start>pred=preds[i]<for_stmt>j range(gt_edge_num)<block_start><if_stmt>label_matched[j]<block_start><continue><block_end>label=labels[j]<if_stmt>int(label[2])<eq>int(pred[2])<and>triplet_iou_thresh(pred label 
self.iou_thresh)<block_start>count<augadd>1<line_sep>label_matched[j]=<true><block_end><block_end><block_end>total=labels.shape[0]<line_sep>self.sum_metric<augadd>count/total<line_sep>self.num_inst<augadd>1<block_end><block_end>@[email protected]('phrcls')<class_stmt>PhrCls(mx.metric.EvalMetric)<block_start>'''Metric with ground truth object location and predicted object label from detector'''<def_stmt>__init__ self topk=20 iou_thresh=0.99<block_start>super(PhrCls self).__init__('phrcls@%d'%(topk))<line_sep>self.topk=topk<line_sep>self.iou_thresh=iou_thresh<block_end><def_stmt>update self labels preds<block_start><if_stmt>labels<is><none><or>preds<is><none><block_start>self.num_inst<augadd>1<line_sep><return><block_end>preds=preds[preds[: 1].argsort()[::-1]]<line_sep>m=min(self.topk preds.shape[0])<line_sep>count=0<line_sep>gt_edge_num=labels.shape[0]<line_sep>label_matched=[<false><for>label labels]<for_stmt>i range(m)<block_start>pred=preds[i]<for_stmt>j range(gt_edge_num)<block_start><if_stmt>label_matched[j]<block_start><continue><block_end>label=labels[j]<if_stmt>int(label[2])<eq>int(pred[2])<and>int(label[3])<eq>int(pred[3])<and>int(label[4])<eq>int(pred[4])<and>triplet_iou_thresh(pred label self.iou_thresh)<block_start>count<augadd>1<line_sep>label_matched[j]=<true><block_end><block_end><block_end>total=labels.shape[0]<line_sep>self.sum_metric<augadd>count/total<line_sep>self.num_inst<augadd>1<block_end><block_end>@[email protected]('sgdet')<class_stmt>SGDet(mx.metric.EvalMetric)<block_start>'''Metric with predicted object information by the detector'''<def_stmt>__init__ self topk=20 iou_thresh=0.5<block_start>super(SGDet self).__init__('sgdet@%d'%(topk))<line_sep>self.topk=topk<line_sep>self.iou_thresh=iou_thresh<block_end><def_stmt>update self labels preds<block_start><if_stmt>labels<is><none><or>preds<is><none><block_start>self.num_inst<augadd>1<line_sep><return><block_end>preds=preds[preds[: 1].argsort()[::-1]]<line_sep>m=min(self.topk len(preds))<line_sep>count=0<line_sep>gt_edge_num=labels.shape[0]<line_sep>label_matched=[<false><for>label labels]<for_stmt>i range(m)<block_start>pred=preds[i]<for_stmt>j range(gt_edge_num)<block_start><if_stmt>label_matched[j]<block_start><continue><block_end>label=labels[j]<if_stmt>int(label[2])<eq>int(pred[2])<and>int(label[3])<eq>int(pred[3])<and>int(label[4])<eq>int(pred[4])<and>triplet_iou_thresh(pred label self.iou_thresh)<block_start>count<augadd>1<line_sep>label_matched[j]=<true><block_end><block_end><block_end>total=labels.shape[0]<line_sep>self.sum_metric<augadd>count/total<line_sep>self.num_inst<augadd>1<block_end><block_end>@[email protected]('sgdet+')<class_stmt>SGDetPlus(mx.metric.EvalMetric)<block_start>'''Metric proposed by `Graph R-CNN for Scene Graph Generation`'''<def_stmt>__init__ self topk=20 iou_thresh=0.5<block_start>super(SGDetPlus self).__init__('sgdet+@%d'%(topk))<line_sep>self.topk=topk<line_sep>self.iou_thresh=iou_thresh<block_end><def_stmt>update self labels preds<block_start>label_objects,label_triplets=labels<line_sep>pred_objects,pred_triplets=preds<if_stmt>label_objects<is><none><or>pred_objects<is><none><block_start>self.num_inst<augadd>1<line_sep><return><block_end>count=0<line_sep># count objects object_matched=[<false><for>obj label_objects]<line_sep>m=len(pred_objects)<line_sep>gt_obj_num=label_objects.shape[0]<for_stmt>i range(m)<block_start>pred=pred_objects[i]<for_stmt>j 
range(gt_obj_num)<block_start><if_stmt>object_matched[j]<block_start><continue><block_end>label=label_objects[j]<if_stmt>int(label[0])<eq>int(pred[0])<and>object_iou_thresh(pred label self.iou_thresh)<block_start>count<augadd>1<line_sep>object_matched[j]=<true><block_end><block_end><block_end># count predicate and triplet pred_triplets=pred_triplets[pred_triplets[: 1].argsort()[::-1]]<line_sep>m=min(self.topk len(pred_triplets))<line_sep>gt_triplet_num=label_triplets.shape[0]<line_sep>triplet_matched=[<false><for>label label_triplets]<line_sep>predicate_matched=[<false><for>label label_triplets]<for_stmt>i range(m)<block_start>pred=pred_triplets[i]<for_stmt>j range(gt_triplet_num)<block_start>label=label_triplets[j]<if_stmt><not>predicate_matched<block_start><if_stmt>int(label[2])<eq>int(pred[2])<and>triplet_iou_thresh(pred label self.iou_thresh)<block_start>count<augadd>label[3]<line_sep>predicate_matched[j]=<true><block_end><block_end><if_stmt><not>triplet_matched[j]<block_start><if_stmt>int(label[2])<eq>int(pred[2])<and>int(label[3])<eq>int(pred[3])<and>int(label[4])<eq>int(pred[4])<and>triplet_iou_thresh(pred label self.iou_thresh)<block_start>count<augadd>1<line_sep>triplet_matched[j]=<true><block_end><block_end><block_end><block_end># compute sum total=labels.shape[0]<line_sep>N=gt_obj_num+2<times>total<line_sep>self.sum_metric<augadd>count/N<line_sep>self.num_inst<augadd>1<block_end><block_end><def_stmt>extract_gt g img_size<block_start>'''extract prediction from ground truth graph'''<if_stmt>g<is><none><or>g.number_of_nodes()<eq>0<block_start><return><none> <none><block_end>gt_eids=np.where(g.edata['rel_class'].asnumpy()<g>0)[0]<if_stmt>len(gt_eids)<eq>0<block_start><return><none> <none><block_end>gt_class=g.ndata['node_class'][: 0].asnumpy()<line_sep>gt_bbox=g.ndata['bbox'].asnumpy()<line_sep>gt_bbox[: 0]<augdiv>img_size[1]<line_sep>gt_bbox[: 1]<augdiv>img_size[0]<line_sep>gt_bbox[: 2]<augdiv>img_size[1]<line_sep>gt_bbox[: 3]<augdiv>img_size[0]<line_sep>gt_objects=np.vstack([gt_class gt_bbox.transpose(1 0)]).transpose(1 0)<line_sep>gt_node_ids=g.find_edges(gt_eids)<line_sep>gt_node_sub=gt_node_ids[0].asnumpy()<line_sep>gt_node_ob=gt_node_ids[1].asnumpy()<line_sep>gt_rel_class=g.edata['rel_class'][gt_eids 0].asnumpy()-1<line_sep>gt_sub_class=gt_class[gt_node_sub]<line_sep>gt_ob_class=gt_class[gt_node_ob]<line_sep>gt_sub_bbox=gt_bbox[gt_node_sub]<line_sep>gt_ob_bbox=gt_bbox[gt_node_ob]<line_sep>n=len(gt_eids)<line_sep>gt_triplets=np.vstack([np.ones(n) np.ones(n) gt_rel_class gt_sub_class gt_ob_class gt_sub_bbox.transpose(1 0) gt_ob_bbox.transpose(1 0)]).transpose(1 0)<line_sep><return>gt_objects gt_triplets<block_end><def_stmt>extract_pred g topk=100 joint_preds=<false><block_start>'''extract prediction from prediction graph for validation and visualization'''<if_stmt>g<is><none><or>g.number_of_nodes()<eq>0<block_start><return><none> <none><block_end>pred_class=g.ndata['node_class_pred'].asnumpy()<line_sep>pred_class_prob=g.ndata['node_class_logit'].asnumpy()<line_sep>pred_bbox=g.ndata['pred_bbox'][: 0:4].asnumpy()<line_sep>pred_objects=np.vstack([pred_class pred_bbox.transpose(1 0)]).transpose(1 
0)<line_sep>score_pred=g.edata['score_pred'].asnumpy()<line_sep>score_phr=g.edata['score_phr'].asnumpy()<line_sep>score_pred_topk_eids=(-score_pred).argsort()[0:topk].tolist()<line_sep>score_phr_topk_eids=(-score_phr).argsort()[0:topk].tolist()<line_sep>topk_eids=sorted(list(set(score_pred_topk_eids+score_phr_topk_eids)))<line_sep>pred_rel_prob=g.edata['preds'][topk_eids].asnumpy()<if_stmt>joint_preds<block_start>pred_rel_class=pred_rel_prob[: 1:].argmax(axis=1)<block_end><else_stmt><block_start>pred_rel_class=pred_rel_prob.argmax(axis=1)<block_end>pred_node_ids=g.find_edges(topk_eids)<line_sep>pred_node_sub=pred_node_ids[0].asnumpy()<line_sep>pred_node_ob=pred_node_ids[1].asnumpy()<line_sep>pred_sub_class=pred_class[pred_node_sub]<line_sep>pred_sub_class_prob=pred_class_prob[pred_node_sub]<line_sep>pred_sub_bbox=pred_bbox[pred_node_sub]<line_sep>pred_ob_class=pred_class[pred_node_ob]<line_sep>pred_ob_class_prob=pred_class_prob[pred_node_ob]<line_sep>pred_ob_bbox=pred_bbox[pred_node_ob]<line_sep>pred_triplets=np.vstack([score_pred[topk_eids] score_phr[topk_eids] pred_rel_class pred_sub_class pred_ob_class pred_sub_bbox.transpose(1 0) pred_ob_bbox.transpose(1 0)]).transpose(1 0)<line_sep><return>pred_objects pred_triplets<block_end>
string="a"<times>ITERATIONS<line_sep># --- <for_stmt>char string<block_start><pass><block_end>
<import_from_stmt>talon.voice Context Key<line_sep>ctx=Context("symbol")<line_sep>keymap={# simple "(question [mark] | questo)":"?" "plus":"+" "tilde":"~" "(bang | exclamation point | clamor)":"!" "(dollar [sign] | dolly)":"$" "(downscore | crunder)":"_" "colon":":" "(lparen | [left] paren | precorp )":"(" "(rparen | are paren | right paren | precose)":")" "(brace | left brace | kirksorp)":"{" "(rbrace | are brace | right brace | kirkos)":"}" "(angle | left angle | less than)":"<" "(rangle | are angle | right angle | greater than)":">" "(star | asterisk)":"*" "(pound | hash [sign] | octo | number sign)":"#" "percent [sign]":"%" "caret":"^" "at sign":"@" "(and sign | ampersand | amper)":"&" "(pipe | spike)":"|" "(dubquote | double quote | quatches)":'"' # compound "mintwice":"--" "plustwice":"++" "minquall":"-=" "pluqual":"+=" "starqual":"*=" "triple quote":"'''" "triple tick":"```" "[forward] dubslash":"//" "coal twice":"::" "(dot dot | dotdot)":".." "(ellipsis | dot dot dot | dotdotdot)":"..." # unnecessary: use repetition commands? }<line_sep>ctx.keymap(keymap)<line_sep>
<import_from_future_stmt> print_function<import_stmt>akumulid_test_tools<as>att<import_stmt>datetime<import_stmt>itertools<import_stmt>json<import_stmt>math<import_stmt>multiprocessing<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>traceback<try_stmt><block_start><import_from_stmt>urllib2 urlopen<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib urlopen<block_end>HOST='127.0.0.1'<line_sep>TCPPORT=8282<line_sep>HTTPPORT=8181<line_sep>""" Test plan: Process 1 (reader). - Start process 2 (writer). - Read all data in fwd direction in range [begin, end-window]. Process 2 (writer). - Write data in range [begin, mid] in a loop. - Long pause. - Write data in range (mid, end] in a loop. - Exit. """<def_stmt>writer dt delta N<block_start><try_stmt><block_start>chan=att.TCPChan(HOST TCPPORT)<line_sep># fill data in print("Sending {0} messages through TCP...".format(N))<line_sep>tags={"tag":['Foo'] }<line_sep>print("Generating first {0} messages...".format(N/2))<line_sep>messages=att.generate_messages(dt delta N 'test' **tags)<for_stmt>it itertools.islice(messages N/2)<block_start>chan.send(it)<block_end>time.sleep(10)<line_sep>print("Generating last {0} messages...".format(N/2))<for_stmt>it messages<block_start>chan.send(it)<block_end>print("{0} messages sent".format(N))<line_sep>time.sleep(10)<block_end><except_stmt><block_start>print("Exception in writer")<line_sep>traceback.print_exc()<line_sep>sys.exit(1)<block_end><block_end><def_stmt>reader dtstart delta N# Start writer process <block_start>wproc=multiprocessing.Process(name='Writer' target=writer args=[dtstart delta N])<line_sep>wproc.start()<try_stmt><block_start>window=att.get_window_width()<line_sep>end=dtstart+delta<times>(N-1)-2<times>window<line_sep>begin=dtstart<line_sep>timedelta=end-begin<line_sep>points_required=int(math.ceil((timedelta.seconds<times>1000000.0+timedelta.microseconds)/(delta.seconds<times>1000000.0+delta.microseconds)))+1<line_sep>query_params={"output":{"format":"csv"}}<line_sep>query=att.makequery("test" begin end **query_params)<line_sep>queryurl="http://{0}:{1}/api/query".format(HOST HTTPPORT)<line_sep>response=urlopen(queryurl json.dumps(query))<line_sep>exp_ts=begin<line_sep>exp_value=0<line_sep>iterations=0<line_sep>print("Test #1 - continuous queries")<for_stmt>line response<block_start><try_stmt><block_start>columns=line.split(',')<line_sep>tagline=columns[0].strip()<line_sep>timestamp=att.parse_timestamp(columns[1].strip())<line_sep>value=float(columns[2].strip())<line_sep>exp_tags='test tag=Foo'<line_sep>att.check_values(exp_tags tagline 'ENDS' exp_ts timestamp exp_value<times>1.0 value iterations)<line_sep>exp_ts<augadd>delta<line_sep>exp_value<augadd>1<line_sep>iterations<augadd>1<block_end><except_stmt><block_start>print("Error at line: {0}".format(line))<line_sep><raise><block_end><block_end>print("Query completed")<line_sep># Check that we received all values <if_stmt>iterations<ne>points_required<block_start><raise>ValueError("Expect {0} data points, get {1} data points".format(points_required iterations))<block_end>print("Test #1 passed")<block_end><finally_stmt><block_start>wproc.join()<block_end><block_end><def_stmt>main path debug=<false><block_start><if_stmt><not>os.path.exists(path)<block_start>print("Path {0} doesn't exists".format(path))<line_sep>sys.exit(1)<block_end>akumulid=att.Akumulid(path)<if_stmt><not>debug# Reset database <block_start>akumulid.delete_database()<line_sep>akumulid.create_database()<line_sep># start ./akumulid server print("Starting 
server...")<line_sep>akumulid.serve()<line_sep>time.sleep(5)<block_end><else_stmt><block_start>print("Akumulid should be started first")<block_end><try_stmt><block_start>dt=datetime.datetime.utcnow()<line_sep>delta=datetime.timedelta(milliseconds=1)<line_sep>nmsgs=100000<line_sep>rproc=multiprocessing.Process(name='Reader' target=reader args=[dt delta nmsgs])<line_sep>rproc.start()<line_sep>rproc.join()<block_end><except_stmt><block_start>traceback.print_exc()<line_sep>sys.exit(1)<block_end><finally_stmt><block_start><if_stmt><not>debug<block_start>print("Stopping server...")<line_sep>akumulid.stop()<line_sep>time.sleep(5)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>print(' '.join(sys.argv))<if_stmt>len(sys.argv)<l>2<block_start>print("Not enough arguments")<line_sep>sys.exit(1)<block_end>main(sys.argv[1] sys.argv[2]<eq>'debug'<if>len(sys.argv)<eq>3<else><false>)<block_end><else_stmt><block_start><raise>ImportError("This module shouldn't be imported")<block_end>
# -*- coding: utf-8 -*- <import_stmt>os<import_stmt>re<import_from_stmt>collections OrderedDict<import_from_stmt>pygments highlight<import_from_stmt>pygments.formatters.terminal TerminalFormatter<import_from_stmt>.index_command IndexCommand<import_from_stmt>...version_parser VersionParser<import_from_stmt>...version_selector VersionSelector<import_from_stmt>...utils.lexers TOMLLexer<import_from_stmt>...utils.helpers call template<import_from_stmt>...build Builder<class_stmt>InitCommand(IndexCommand)<block_start>""" Creates a basic <comment>poetry.toml</> file in current directory. init { template? : Template to use } {--name= : Name of the package} {--description= : Description of the package} {--author= : Author name of the package} {--dependency=* : Package to require with a version constraint, e.g. requests:^2.10.0 or requests==2.11.1} {--dev-dependency=* : Package to require for development with a version constraint, e.g. requests:^2.10.0 or requests==2.11.1} {--l|license= : License of the package} """<line_sep>help=""" The <info>init</info> command creates a basic <comment>poetry.toml</> file in the current directory. <info>poet init</info> """<def_stmt>__init__ self<block_start>self._git_config=<none><line_sep>super(InitCommand self).__init__()<block_end><def_stmt>handle self<block_start>formatter=self.get_helper('formatter')<line_sep>self.line(['' formatter.format_block('Welcome to the Poet config generator' 'bg=blue;fg=white' <true>) ''])<line_sep>template_name=self.argument('template')<if_stmt>template_name<block_start>self.line(['' 'Using <comment>{}</> template to create '<concat>'your <info>poetry.toml</> config.'.format(template_name) ''])<if_stmt>template_name<eq>'default'<block_start>output=template('poetry.toml').render()<with_stmt>open(self.poet_file 'w')<as>fd<block_start>fd.write(output)<block_end><block_end><return><block_end>self.line(['' 'This command will guide you through creating your <info>poetry.toml</> config.' 
''])<line_sep>poet_file=self.poet_file<line_sep>git_config=self.git_config()<line_sep>name=self.option('name')<if_stmt><not>name<block_start>name=os.path.basename(os.path.dirname(poet_file))<line_sep>name=name.lower()<block_end>question=self.create_question('Package name [<comment>{}</comment>]: '.format(name) default=name)<line_sep>name=self.ask(question)<line_sep>version='0.1.0'<line_sep>question=self.create_question('Version [<comment>{}</comment>]: '.format(version) default=version)<line_sep>version=self.ask(question)<line_sep>description=self.option('description')<or>''<line_sep>question=self.create_question('Description [<comment>{}</comment>]: '.format(description) default=description)<line_sep>description=self.ask(question)<line_sep>author=self.option('author')<if_stmt><not>author<and>git_config.get('user.name')<and>git_config.get('user.email')<block_start>author='{} <{}>'.format(git_config['user.name'] git_config['user.email'])<block_end>question=self.create_question('Author [<comment>{}</comment>, n to skip]: '.format(author) default=author)<line_sep>question.validator=<lambda>v:self._validate_author(v author)<line_sep>author=self.ask(question)<if_stmt><not>author<block_start>authors=[]<block_end><else_stmt><block_start>authors=[author]<block_end>license=self.option('license')<or>''<line_sep>question=self.create_question('License [<comment>{}</comment>]: '.format(license) default=license)<line_sep>license=self.ask(question)<line_sep>self.line('')<line_sep>requirements=[]<line_sep>question='Would you like to define your dependencies'<concat>' (require) interactively?'<if_stmt>self.confirm(question <true>)<block_start>requirements=self._format_requirements(self._determine_requirements(self.option('dependency')))<block_end>dev_requirements=[]<line_sep>question='<question>Would you like to define your dev dependencies'<concat>' (require-dev) interactively'<if_stmt>self.confirm(question <true>)<block_start>dev_requirements=self._format_requirements(self._determine_requirements(self.option('dev-dependency')))<block_end>output=template('poetry.toml.jinja2').render(name=name version=version description=description authors=authors license=license dependencies=requirements dev_dependencies=dev_requirements)<if_stmt>self.input.is_interactive()<block_start>self.line('<info>Generated file</>')<if_stmt>self.output.is_decorated()<block_start>self.line(['' highlight(output TOMLLexer() TerminalFormatter()) ''])<block_end><else_stmt><block_start>self.line(['' output ''])<block_end><if_stmt><not>self.confirm('Do you confirm generation?' 
<true>)<block_start>self.line('<error>Command aborted</error>')<line_sep><return>1<block_end><with_stmt>open(self.poet_file 'w')<as>fd<block_start>fd.write(output)<block_end><block_end><block_end><def_stmt>_determine_requirements self requires<block_start><if_stmt>requires<block_start>requires=self._normalize_requirements(requires)<line_sep>result=[]<for_stmt>requirement requires<block_start><if_stmt>'version'<not><in>requirement# determine the best version automatically <block_start>version=self._find_best_version_for_package(requirement['name'])<line_sep>requirement['version']=version<line_sep>self.line('Using version <info>{}</info> for <info{}</info>'.format(requirement['version'] requirement['name']))<block_end>result.append(requirement['name']+' '+requirement['version'])<block_end><block_end>version_parser=VersionParser()<line_sep>question=self.create_question('Search for a package:')<line_sep>package=self.ask(question)<while_stmt>package<is><not><none><block_start>matches=self._find_packages(package)<if_stmt><not>matches<block_start>self.line('<error>Unable to find package</>')<line_sep>package=<false><block_end><else_stmt><block_start>exact_match=<none><line_sep>choices=[]<for_stmt>found_package matches<block_start>choices.append(found_package['name'])<line_sep># Removing exact match feature for now # if found_package['name'] == package: # exact_match = True # break <block_end><if_stmt><not>exact_match<block_start>self.line('Found <info>{}</info> packages matching <info>{}</info>'.format(len(matches) package))<line_sep>package=self.choice('\nEnter package # to add, or the complete package name if it is not listed' choices attempts=3)<block_end><block_end># no constraint yet, determine the best version automatically <if_stmt>package<is><not><false><and>' '<not><in>package<block_start>question=self.create_question('Enter the version constraint to require '<concat>'(or leave blank to use the latest version):')<line_sep>question.attempts=3<line_sep>question.validator=<lambda>x:(x<or>'').strip()<or><false><line_sep>constraint=self.ask(question)<if_stmt>constraint<is><false><block_start>constraint=self._find_best_version_for_package(package)<line_sep>self.line('Using version <info>{}</info> for <info>{}</info>'.format(constraint package))<block_end>package<augadd>' {}'.format(constraint)<block_end><if_stmt>package<is><not><false><block_start>requires.append(package)<block_end>package=self.ask('\nSearch for a package:')<block_end><return>requires<block_end><def_stmt>_validate_author self author default<block_start>author=author<or>default<if_stmt>author<in>['n' 'no']<block_start><return><block_end>m=Builder.AUTHOR_REGEX.match(author)<if_stmt><not>m<block_start><raise>ValueError('Invalid author string. 
Must be in the format: '<concat>'<NAME> <<EMAIL>>')<block_end><return>author<block_end><def_stmt>_find_packages self package<block_start><return>self._repository.search(package 1)<block_end><def_stmt>_find_best_version_for_package self package<block_start>selector=VersionSelector(self._repository)<line_sep>package=selector.find_best_candidate(package)<line_sep><return>selector.find_recommended_require_version(package)<block_end><def_stmt>_format_requirements self requirements<block_start>requires=OrderedDict()<line_sep>requirements=self._normalize_requirements(requirements)<for_stmt>requirement requirements<block_start>requires[requirement['name']]=requirement['version']<block_end><return>requires<block_end><def_stmt>_normalize_requirements self requirements<block_start>parser=VersionParser()<line_sep><return>parser.parse_name_version_pairs(requirements)<block_end><def_stmt>git_config self<block_start>config_list=call(['git' 'config' '-l'])<line_sep>git_config={}<line_sep>m=re.findall('(?ms)^([^=]+)=(.*?)$' config_list)<if_stmt>m<block_start><for_stmt>group m<block_start>git_config[group[0]]=group[1]<block_end><block_end><return>git_config<block_end><block_end>
<import_stmt>hashlib<import_stmt>json<import_from_stmt>.client CacheAction<import_from_stmt>services.utils get_traceback_str<import_from_stmt>.utils error_event_msg progress_event_msg artifact_cache_id unpack_pathspec_with_attempt_id MAX_S3_SIZE <import_from_stmt>..refiner.refinery unpack_processed_value<import_from_stmt>services.ui_backend_service.api.utils operators_to_filters<import_from_stmt>metaflow DataArtifact<class_stmt>SearchArtifacts(CacheAction)<block_start>""" Fetches artifacts by pathspecs and performs a search against the object contents. Caches artifacts based on pathspec, and search results based on a combination of query&artifacts searched Parameters ---------- pathspecs : List[str] A list of artifact pathspecs (with attempt id as last component) to fetch and match the search term against: ["FlowId/RunNumber/StepName/TaskId/ArtifactName/0"] searchterm : str A searchterm to match against the fetched S3 artifacts contents. Returns ------- Dict or None example: { "pathspec": { "included": boolean, "matches": boolean } } matches: determines whether object content matched search term included: denotes if the object content was able to be included in the search (accessible or not) """<line_sep>@classmethod<def_stmt>format_request cls pathspecs searchterm operator="eq" invalidate_cache=<false><block_start>msg={'pathspecs':list(frozenset(sorted(pathspecs))) 'searchterm':searchterm 'operator':operator}<line_sep>artifact_keys=[]<for_stmt>pathspec pathspecs<block_start>artifact_keys.append(artifact_cache_id(pathspec))<block_end>request_id=lookup_id(pathspecs searchterm operator)<line_sep>stream_key='search:stream:%s'%request_id<line_sep>result_key='search:result:%s'%request_id<line_sep><return>msg [result_key *artifact_keys] stream_key [stream_key result_key] invalidate_cache<block_end>@classmethod<def_stmt>response cls keys_objs<block_start>""" Action should respond with a dictionary of { "pathspec": { "matches": boolean, "included": boolean } } that tells the client whether the search term matches in the given pathspec, or if performing search was impossible """<line_sep><return>[json.loads(val)<for>key,val keys_objs.items()<if>key.startswith('search:result')][0]<block_end>@classmethod<def_stmt>stream_response cls it<block_start><for_stmt>msg it<block_start><if_stmt>msg<is><none><block_start><yield>msg<block_end><else_stmt><block_start><yield>{'event':msg}<block_end><block_end><block_end>@classmethod<def_stmt>execute cls message=<none> keys=<none> existing_keys={} stream_output=<none> invalidate_cache=<false> **kwargs<block_start>pathspecs=message['pathspecs']<if_stmt>invalidate_cache<block_start>results={}<line_sep>pathspecs_to_fetch=[loc<for>loc pathspecs]<block_end><else_stmt># make a copy of already existing results, as the cache action has to produce all keys it promised # in the format_request response. <block_start>results={**existing_keys}<line_sep># Make a list of artifact pathspecs that require fetching (not cached previously) pathspecs_to_fetch=[loc<for>loc pathspecs<if><not>artifact_cache_id(loc)<in>existing_keys]<block_end>artifact_keys=[key<for>key keys<if>key.startswith('search:artifactdata')]<line_sep>result_key=[key<for>key keys<if>key.startswith('search:result')][0]<line_sep># Helper functions for streaming status updates. 
<def_stmt>stream_progress num<block_start><return>stream_output(progress_event_msg(num))<block_end><def_stmt>stream_error err id traceback=<none><block_start><return>stream_output(error_event_msg(err id traceback))<block_end># Fetch artifacts that are not cached already <for_stmt>idx,pathspec enumerate(pathspecs_to_fetch)<block_start>stream_progress((idx+1)/len(pathspecs_to_fetch))<try_stmt><block_start>pathspec_without_attempt,attempt_id=unpack_pathspec_with_attempt_id(pathspec)<line_sep>artifact_key="search:artifactdata:{}".format(pathspec)<line_sep>artifact=DataArtifact(pathspec_without_attempt attempt=attempt_id)<if_stmt>artifact.size<l>MAX_S3_SIZE<block_start>results[artifact_key]=json.dumps([<true> artifact.data])<block_end><else_stmt><block_start>results[artifact_key]=json.dumps([<false> 'artifact-too-large' "{}: {} bytes".format(artifact.pathspec artifact.size)])<block_end><block_end><except_stmt>Exception<as>ex<block_start>stream_error(str(ex) ex.__class__.__name__ get_traceback_str())<line_sep>results[artifact_key]=json.dumps([<false> ex.__class__.__name__ get_traceback_str()])<block_end><block_end># Perform search on loaded artifacts. search_results={}<line_sep>searchterm=message['searchterm']<line_sep>operator=message['operator']<line_sep>filter_fn=operators_to_filters[operator]<if>operator<in>operators_to_filters<else>operators_to_filters["eq"]<def_stmt>format_loc x<block_start>"extract pathspec from the artifact cache key"<line_sep><return>x[len("search:artifactdata:"):]<block_end><for_stmt>key artifact_keys<block_start><if_stmt>key<in>results<block_start>load_success,value,detail=unpack_processed_value(json.loads(results[key]))<block_end><else_stmt><block_start>load_success,value,_=<false> <none> <none><block_end># keep the matching case-insensitive matches=filter_fn(str(value).lower() searchterm.lower())<line_sep>search_results[format_loc(key)]={"included":load_success "matches":matches "error":<none><if>load_success<else>{"id":value<or>"artifact-handle-failed" "detail":detail<or>"Unknown error during artifact processing"}}<block_end>results[result_key]=json.dumps(search_results)<line_sep><return>results<block_end><block_end><def_stmt>lookup_id locations searchterm operator<block_start>"construct a unique id to be used with stream_key and result_key"<line_sep>_string="-".join(list(frozenset(sorted(locations))))+searchterm+operator<line_sep><return>hashlib.sha1(_string.encode('utf-8')).hexdigest()<block_end>
<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>sys<import_stmt>matplotlib.pyplot<as>plt<line_sep># load the two input images (paths given on the command line) as grayscale img1=cv2.imread(sys.argv[1] 0)<line_sep>img2=cv2.imread(sys.argv[2] 0)<line_sep># detect ORB keypoints and compute binary descriptors for both images orb=cv2.ORB_create()<line_sep>kp1,des1=orb.detectAndCompute(img1 <none>)<line_sep>kp2,des2=orb.detectAndCompute(img2 <none>)<line_sep># brute-force match the descriptors using Hamming distance with cross-checking bf=cv2.BFMatcher(cv2.NORM_HAMMING crossCheck=<true>)<line_sep>matches=bf.match(des1 des2)<line_sep># sort matches by distance so the best matches come first matches=sorted(matches key=<lambda>x:x.distance)<line_sep># draw the ten best matches and display the result img3=cv2.drawMatches(img1 kp1 img2 kp2 matches[:10] <none> flags=2)<line_sep>cv2.imshow('window' img3)<line_sep>cv2.waitKey(0)<line_sep>
<import_from_stmt>docassemble.base.config s3_config S3_ENABLED azure_config AZURE_ENABLED<def_stmt>get_cloud <block_start>"""Return a cloud storage object for whichever backend (S3 or Azure) is enabled in the server-wide configuration, or None if neither is enabled."""<if_stmt>S3_ENABLED<block_start><import_stmt>docassemble.webapp.amazon<line_sep>cloud=docassemble.webapp.amazon.s3object(s3_config)<block_end><elif_stmt>AZURE_ENABLED<block_start><import_stmt>docassemble.webapp.microsoft<line_sep>cloud=docassemble.webapp.microsoft.azureobject(azure_config)<block_end><else_stmt><block_start>cloud=<none><block_end><return>cloud<block_end><def_stmt>get_custom_cloud provider config<block_start>"""Return a cloud storage object for an explicitly requested provider ('s3' or 'azure') and configuration, or None if arguments are missing or the provider is unknown."""<if_stmt>provider<is><none><or>config<is><none><block_start><return><none><block_end><if_stmt>provider<eq>'s3'<block_start><import_stmt>docassemble.webapp.amazon<line_sep>cloud=docassemble.webapp.amazon.s3object(config)<block_end><elif_stmt>provider<eq>'azure'<block_start><import_stmt>docassemble.webapp.microsoft<line_sep>cloud=docassemble.webapp.microsoft.azureobject(config)<block_end><else_stmt><block_start>cloud=<none><block_end><return>cloud<block_end>
"""PyUSB virtual FTDI device."""<line_sep># Copyright (c) 2020, <NAME> <<EMAIL>> # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause <import_from_stmt>collections deque<import_from_stmt>logging getLogger<import_from_stmt>struct unpack<as>sunpack<import_from_stmt>typing Union<import_from_stmt>pyftdi.tracer FtdiMpsseEngine FtdiMpsseTracer<class_stmt>VirtMpsseTracer(FtdiMpsseTracer)<block_start>"""Reuse MPSSE tracer as a MPSSE command decoder engine. """<def_stmt>__init__ self port:'VirtFtdiPort' version:int<block_start>super().__init__(version)<line_sep>self.log=getLogger('pyftdi.virt.mpsse.{port.iface}')<line_sep>self._port=port<block_end><def_stmt>_get_engine self iface:int<block_start>iface<augsub>1<try_stmt><block_start>self._engines[iface]<block_end><except_stmt>IndexError<as>exc<block_start><raise>ValueError('No MPSSE engine available on interface %d'%iface)<from>exc<block_end><if_stmt><not>self._engines[iface]<block_start>self._engines[iface]=VirtMpsseEngine(self self._port)<block_end><return>self._engines[iface]<block_end><block_end><class_stmt>VirtMpsseEngine(FtdiMpsseEngine)<block_start>"""Virtual implementation of a MPSSE. Far from being complete for now :-) """<def_stmt>__init__ self tracer:VirtMpsseTracer port:'VirtFtdiPort'<block_start>super().__init__(port.iface)<line_sep>self.log=getLogger(f'pyftdi.virt.mpsse.{port.iface}')<line_sep>self._tracer=tracer<line_sep>self._port=port<line_sep>self._width=port.width<line_sep>self._mask=(1<lshift>self._width)-1<line_sep>self._reply_q=deque()<block_end><def_stmt>send self buf:Union[bytes bytearray]<arrow><none><block_start>super().send(buf)<line_sep># cannot post the response before the send() method has completed # see FtdiMpsseEngine.send() for execution steps: expected reply size # is only known (stored) once the command execution has completed self.reply()<block_end><def_stmt>reply self<arrow><none><block_start>"""Post the reply to a command back into the virtual FTDI FIFO."""<while_stmt>self._reply_q<block_start>self._port.write_from_mpsse(self self._reply_q.popleft())<block_end><block_end><def_stmt>_cmd_get_bits_low self<block_start>super()._cmd_get_bits_low()<line_sep>byte=self._port.gpio&0xff<line_sep>buf=bytes([byte])<line_sep>self._reply_q.append(buf)<line_sep><return><true><block_end><def_stmt>_cmd_get_bits_high self<block_start>super()._cmd_get_bits_high()<line_sep>byte=(self._port.gpio<rshift>8)&0xff<line_sep>buf=bytes([byte])<line_sep>self._reply_q.append(buf)<line_sep><return><true><block_end><def_stmt>_cmd_set_bits_low self<block_start>buf=self._trace_tx[1:3]<if_stmt><not>super()._cmd_set_bits_low()<block_start><return><false><block_end>port=self._port<line_sep>byte,direction=sunpack('BB' buf)<line_sep>gpi=port.gpio&~direction&self._mask<line_sep>gpo=byte&direction&self._mask<line_sep>msb=port.gpio&~0xFF<line_sep>gpio=gpi|gpo|msb<line_sep>port.update_gpio(self <false> direction gpio)<line_sep>self.log.debug('. bbwl %04x: %s' port.gpio f'{port.gpio:016b}')<line_sep><return><true><block_end><def_stmt>_cmd_set_bits_high self<block_start>buf=self._trace_tx[1:3]<if_stmt><not>super()._cmd_set_bits_high()<block_start><return><false><block_end>port=self._port<line_sep>byte,direction=sunpack('BB' buf)<line_sep>byte<auglshift>8<line_sep>direction<auglshift>8<line_sep>gpi=port.gpio&~direction&self._mask<line_sep>gpo=byte&direction&self._mask<line_sep>lsb=port.gpio&0xFF<line_sep>gpio=gpi|gpo|lsb<line_sep>port.update_gpio(self <false> direction gpio)<line_sep>self.log.debug('. 
bbwh %04x: %s' port.gpio f'{port.gpio:016b}')<line_sep><return><true><block_end><block_end>
<import_from_stmt>django.conf.urls url include<import_from_stmt>django.contrib admin<import_from_stmt>django.conf.urls.static static<import_from_stmt>. settings<import_from_stmt>django.urls path<line_sep>admin.autodiscover()<line_sep>urlpatterns=[path('admin' admin.site.urls) path('' include('ltc.web.urls') name='index') path('analyzer' include('ltc.analyzer.urls') name='analyzer') path('online' include('ltc.online.urls') name='online') path('controller' include('ltc.controller.urls') name='controller') path('administrator' include('ltc.administrator.urls') name='administrator')]+static(settings.STATIC_URL document_root=settings.STATIC_URL)<line_sep>
""" Copyright 2015-2016 @_rc0r <<EMAIL>> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>argparse<import_stmt>datetime<try_stmt><block_start><import_stmt>simplejson<as>json<block_end><except_stmt>ImportError<block_start><import_stmt>json<block_end><import_stmt>sys<import_stmt>time<import_stmt>afl_utils<import_from_stmt>afl_utils.AflPrettyPrint clr print_ok print_warn print_err<class_stmt>AflCronDaemon(object)<block_start><def_stmt>__init__ self config_file quiet=<false><block_start>self.config=self.load_config(config_file)<line_sep>self.quiet=quiet<block_end><def_stmt>load_config self config_file<block_start><with_stmt>open(config_file 'r')<as>raw_config<block_start>config=json.load(raw_config)<block_end><return>config<block_end><def_stmt>get_module self module_path<block_start>module_name=module_path.rsplit('.' 1)[1]<try_stmt><block_start>module=__import__(module_path fromlist=[module_name])<block_end><except_stmt>ImportError<block_start><raise>ValueError('Module \'{}\' could not be imported'.format(module_path ))<block_end><return>module<block_end><def_stmt>get_member self module member_name<block_start><try_stmt><block_start>cls=getattr(module member_name)<block_end><except_stmt>AttributeError<block_start><raise>ValueError('Module \'{}\' has no member \'{}\''.format(module member_name ))<block_end><return>cls<block_end><def_stmt>run_job self job<block_start>job_module=self.get_module(job['module'])<line_sep>job_func=self.get_member(job_module job['function'])<line_sep>job_args=[job['module'].rsplit('.' 1)[1]]+job['params'].split()<if_stmt><not>self.quiet<block_start>print_ok('Executing \'{}\' ({}.{})'.format(job['name'] job['module'] job['function']))<block_end>job_func(job_args)<block_end><def_stmt>run self<block_start>doExit=<false><while_stmt><not>doExit<block_start><try_stmt><block_start>time_start=datetime.datetime.now()<for_stmt>job self.config['jobs']<block_start>self.run_job(job)<block_end>print_ok('All jobs done [{}]'.format(datetime.datetime.now()-time_start))<if_stmt>float(self.config['interval'])<l>0<block_start>doExit=<true><block_end><else_stmt><block_start>time.sleep(float(self.config['interval'])<times>60)<block_end><block_end><except_stmt>KeyboardInterrupt<block_start>print('\b\b')<line_sep>print_ok('Aborted by user. Good bye!')<line_sep>doExit=<true><block_end><block_end><block_end><block_end><def_stmt>show_info <block_start>print(clr.CYA+'afl-cron '+clr.BRI+'%s'%afl_utils.__version__+clr.RST+' by %s'%afl_utils.__author__)<line_sep>print('Periodically run tools from the afl-utils collection.')<line_sep>print('')<block_end><def_stmt>main argv<block_start>parser=argparse.ArgumentParser(description='Post selected contents of fuzzer_stats to Twitter.' usage='afl-stats [-c config] [-d] [-h] [-q]\n')<line_sep>parser.add_argument('-c' '--config' dest='config_file' help='afl-stats config file (Default: afl-stats.conf)!' 
default='afl-cron.conf')<line_sep>parser.add_argument('-d' '--daemon' dest='daemon' action='store_const' const=<true> help='Daemon mode: run in background' default=<false>)<line_sep>parser.add_argument('-q' '--quiet' dest='quiet' action='store_const' const=<true> help='Suppress any output' default=<false>)<line_sep>args=parser.parse_args(argv[1:])<if_stmt><not>args.quiet<and><not>args.daemon<block_start>show_info()<block_end>cron=AflCronDaemon(args.config_file quiet=args.quiet)<line_sep>cron.run()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main(sys.argv)<block_end>