""" Tests for molecule creation and file i/o
"""<import_stmt>io<import_stmt>os<import_stmt>subprocess<import_from_stmt>future.utils PY2 native_str<import_from_stmt>builtins str<import_stmt>collections<import_stmt>pathlib<import_stmt>gzip<import_stmt>bz2<import_stmt>pickle<import_stmt>numpy<import_stmt>pytest<import_stmt>moldesign<as>mdt<line_sep>mdt.compute.config.engine_type='docker'<import_from_stmt>moldesign units<as>u<import_from_stmt>.helpers get_data_path native_str_buffer requires_internet_connection<import_from_stmt>.object_fixtures h2_trajectory h2_harmonic h2<line_sep>__PYTEST_MARK__='io'<line_sep>@pytest.fixture<def_stmt>bipyridine_sdf <block_start><return>mdt.read(get_data_path('bipyridine.sdf'))<block_end>@pytest.fixture<def_stmt>bipyridine_xyz <block_start><return>mdt.read(get_data_path('bipyridine.xyz'))<block_end>@pytest.fixture<def_stmt>bipyridine_mol2 <block_start><return>mdt.read(get_data_path('bipyridine.mol2'))<block_end>@pytest.fixture<def_stmt>bipyridine_iupac <block_start><return>mdt.from_name('bipyridine')<block_end>@pytest.fixture<def_stmt>bipyridine_inchi <block_start><return>mdt.from_inchi('InChI=1S/C10H8N2/c1-3-7-11-9(5-1)10-6-2-4-8-12-10/h1-8H')<block_end>@pytest.fixture<def_stmt>bipyridine_smiles <block_start><return>mdt.from_smiles('c1ccnc(c1)c2ccccn2')<block_end>ATOMDATA={# (symbol, valence, mass)
1:('H' 1 1.008<times>u.amu) 6:('C' 4 12.000<times>u.amu) 7:('N' 3 14.003<times>u.amu) 8:('O' 2 15.995<times>u.amu)}<line_sep>@pytest.mark.parametrize('key' 'iupac smiles inchi xyz sdf'.split())@pytest.mark.screening<def_stmt>test_auto_unique_atom_names key request<block_start>mol=request.getfixturevalue('bipyridine_'+key)<line_sep>atomnames=set(atom.name<for>atom mol.atoms)<assert_stmt>len(atomnames)<eq>mol.num_atoms<block_end><def_stmt>test_atom_names_preserved_from_input_file_mol2 bipyridine_mol2<block_start>mol=bipyridine_mol2<for_stmt>atom mol.atoms<block_start><assert_stmt>atom.name<eq>atom.symbol+str(atom.index)<block_end><block_end>@pytest.fixture<def_stmt>propane_pdb <block_start><return>mdt.read(get_data_path('propane.pdb'))<block_end><def_stmt>test_pdb_with_missing_chains propane_pdb<block_start>""" In response to an observed bug where various conversions would fail with a PDB file
that's missing chain data
"""<line_sep>mol=propane_pdb<if_stmt><not>mdt.compute.packages.openbabel.force_remote<block_start>pbmol=mdt.interfaces.mol_to_pybel(mol)<assert_stmt>len(pbmol.atoms)<eq>mol.num_atoms<block_end>pmedmol=mdt.interfaces.mol_to_parmed(mol)<assert_stmt>len(pmedmol.atoms)<eq>mol.num_atoms<block_end>@pytest.mark.parametrize('key' 'mol2 xyz sdf iupac smiles inchi'.split())@pytest.mark.screening<def_stmt>test_read_bipyridine_from_format key request<block_start>mol=request.getfixturevalue('bipyridine_'+key)<line_sep>atomcounts=collections.Counter(atom.symbol<for>atom mol.atoms)<assert_stmt>len(atomcounts)<eq>3<assert_stmt>atomcounts['C']<eq>10<assert_stmt>atomcounts['N']<eq>2<assert_stmt>atomcounts['H']<eq>8<assert_stmt>mol.charge<eq>0<assert_stmt>abs(mol.mass-156.069<times>u.amu)<l>0.001<times>u.amu<for_stmt>atom mol.atoms<block_start><assert_stmt>atom.formal_charge<eq>0.0<line_sep>symb,val,mss=ATOMDATA[atom.atnum]<assert_stmt>atom.symbol<eq>symb<assert_stmt>atom.valence<eq>val<assert_stmt>abs(atom.mass-mss)<l>0.001<times>u.amu<block_end><assert_stmt>mol.num_bonds<eq>21<line_sep>bondorders=collections.Counter(bond.order<for>bond mol.bonds)<assert_stmt>bondorders[2]<eq>6<assert_stmt>bondorders[1]<eq>15<assert_stmt>len(bondorders)<eq>2<block_end>@pytest.mark.parametrize('suffix' ['gz' 'bz2'])<def_stmt>test_compressed_write bipyridine_xyz tmpdir suffix# Note: compressed read is tested elsewhere when reading test data files
<block_start>path=pathlib.Path(native_str(tmpdir))<line_sep>dest=path/('bipyr.xyz.'+suffix)<line_sep>bipyridine_xyz.write(dest)<line_sep># don't use MDT's reader here! Need to make sure it's really gzip'd
<if_stmt>suffix<eq>'gz'<block_start>opener=gzip.open<block_end><elif_stmt>suffix<eq>'bz2'<block_start>opener=bz2.BZ2File<block_end><else_stmt><block_start><raise>ValueError('Unrecognized suffix "%s"'%suffix)<block_end><if_stmt>PY2<block_start>mode='r'<block_end><else_stmt><block_start>mode='rt'<if_stmt>suffix<eq>'bz2'<block_start>opener=bz2.open<block_end><block_end><with_stmt>opener(str(dest) mode)<as>infile<block_start>content=infile.read()<block_end>mol=mdt.read(content format='xyz')<assert_stmt>mol.num_atoms<eq>bipyridine_xyz.num_atoms<block_end>@pytest.fixture<def_stmt>dna_pdb <block_start><return>mdt.read(pathlib.Path(get_data_path('ACTG.pdb')))<block_end>@pytest.fixture<def_stmt>dna_mmcif <block_start><return>mdt.read(get_data_path('ACTG.cif'))<block_end>@pytest.fixture<def_stmt>dna_sequence <block_start><return>mdt.build_bdna('ACTG')<block_end>@pytest.fixture<def_stmt>pdb_1kbu <block_start><return>mdt.read(pathlib.Path(get_data_path('1KBU.pdb.bz2')))<block_end>@pytest.fixture<def_stmt>mmcif_1kbu <block_start><return>mdt.read(get_data_path('1KBU.cif.bz2'))<block_end>@requires_internet_connection<def_stmt>test_from_pdb_pdb_format <block_start>mol=mdt.from_pdb('3aid')<assert_stmt>mol.metadata.pdbid<eq>'3aid'<assert_stmt>mol.metadata.sourceformat<eq>'pdb'<assert_stmt>mol.num_atoms<eq>1912<block_end>@requires_internet_connection<def_stmt>test_from_pdb_mmcif_format <block_start>mol=mdt.from_pdb('3aid' usecif=<true>)<assert_stmt>mol.metadata.pdbid<eq>'3aid'<assert_stmt>mol.metadata.sourceformat<eq>'mmcif'<assert_stmt>mol.metadata.sourceurl.split('.')[-1]<eq>'cif'<assert_stmt>mol.num_atoms<eq>1912<block_end>@[email protected]("Takes over 10 minutes right now ...")<def_stmt>test_mmcif_fallback_if_no_pdb_file <block_start>mol=mdt.from_pdb('4V5X')<assert_stmt>mol.metadata.pdbid.lower()<eq>'4v5x'<assert_stmt>mol.metadata.sourceformat<eq>'mmcif'<assert_stmt>mol.metadata.sourceurl.split('.')[-1]<eq>'cif'<block_end>@pytest.mark.parametrize('key' 'pdb mmcif sequence'.split())<def_stmt>test_read_dna_from_format key request<block_start><if_stmt>key<eq>'mmcif'<block_start>pytest.xfail(reason='Known mmcif parser bug, fix this by 0.7.4')<block_end>mol=request.getfixturevalue('dna_'+key)<block_end><def_stmt>test_write_file_to_buffer bipyridine_smiles<block_start>mol=bipyridine_smiles<line_sep>buffer=native_str_buffer()<line_sep>mol.write(buffer format='pdb')<line_sep>buffer.seek(0)<line_sep>newmol=mdt.read(buffer.getvalue() format='pdb')<assert_stmt>mol.num_atoms<eq>newmol.num_atoms<block_end><def_stmt>test_write_pickle_to_buffer bipyridine_smiles<block_start>mol=bipyridine_smiles<line_sep>buffer=io.BytesIO()<line_sep>mol.write(buffer format='pkl')<line_sep>newmol=pickle.loads(buffer.getvalue())<assert_stmt>newmol.is_identical(mol verbose=<true>)<block_end><def_stmt>test_read_from_buffer <block_start>s=native_str("2\nmy xyz file\n H 1.0 1.0 1.0\n H 1.0 2.0 1.0\n")<line_sep>buffer=native_str_buffer(s)<line_sep>h2=mdt.read(buffer format='xyz')<assert_stmt>h2.num_atoms<eq>2<block_end>@pytest.mark.parametrize('key' '<KEY>'.split())@pytest.mark.screening<def_stmt>test_1kbu_assembly_data key request<block_start>mol=request.getfixturevalue('%s_1kbu'%key)<assert_stmt>len(mol.properties.bioassemblies)<eq>1<assert_stmt>'1'<in>mol.properties.bioassemblies<line_sep>assembly=mol.properties.bioassemblies['1']<assert_stmt>len(assembly.transforms)<eq>2<assert_stmt>set(assembly.chains)<eq>set(c.name<for>c mol.chains)<line_sep># first transform is identity
numpy.testing.assert_allclose(assembly.transforms[0] numpy.identity(4))<line_sep># second transform's rotation is unitary
rot=assembly.transforms[1][:3 :3]<line_sep>numpy.testing.assert_allclose(rot.dot(rot.T) numpy.identity(3))<block_end>@pytest.mark.parametrize('key' '<KEY>'.split())<def_stmt>test_1kbu_assembly_build key request<block_start>asym=request.getfixturevalue('%s_1kbu'%key)<line_sep>original=mdt.Molecule(asym)<line_sep>assembly=asym.properties.bioassemblies['1']<line_sep>rot=assembly.transforms[1][:3 :3]<line_sep>move=assembly.transforms[1][:3 3]<times>u.angstrom<line_sep>mol=mdt.build_assembly(asym 1)<assert_stmt>mol.num_chains<eq>2<times>asym.num_chains<line_sep># test that original is unaffected
<assert_stmt>original.is_identical(asym)<line_sep>testchain=assembly.chains[0]<line_sep>new_chain_pos=mol.chains[testchain].positions.T.ldot(rot).T+move[<none> :]<line_sep>numpy.testing.assert_allclose(new_chain_pos.defunits_value() mol.chains[asym.num_chains].positions.defunits_value())<block_end>@pytest.mark.parametrize('fmt' 'smiles pdb mol2 sdf inchi mmcif pkl'.split())<def_stmt>test_topology_preserved_in_serialization bipyridine_smiles fmt<block_start>""" Test that bond topology is preserved even if it doesn't make sense from distances
"""<if_stmt>fmt<ne>'pkl'<block_start>pytest.xfail("We are currently unable to get an unambiguous representation of a molecular "<concat>"sructure with ANY current file formats or parsers.")<block_end>mol=bipyridine_smiles.copy()# don't screw up the fixture object
mol.bond_graph[mol.atoms[3]][mol.atoms[5]]=3<line_sep>mol.bond_graph[mol.atoms[5]][mol.atoms[3]]=3<line_sep>mol.atoms[3].x<augadd>10.0<times>u.angstrom<line_sep>newmol=mdt.read(mol.write(format=fmt) format=fmt)<assert_stmt>mol.same_bonds(newmol verbose=<true>)<block_end><def_stmt>test_write_traj h2_trajectory tmpdir<block_start>path=os.path.join(str(tmpdir) 'traj.xyz')<line_sep>h2_trajectory.write(path)<assert_stmt>int(subprocess.check_output(['wc' '-l' path]).split()[0])<eq>((h2_trajectory.mol.num_atoms+2)<times>h2_trajectory.num_frames)<block_end>
|
# -*- coding: utf-8 -*-
<import_stmt>pytest<import_from_stmt>subfinder.subsearcher SubHDSubSearcher<import_from_stmt>subfinder.subfinder SubFinder<import_from_stmt>subfinder.subsearcher.exceptions LanguageError ExtError<line_sep>@pytest.fixture(scope='module')<def_stmt>subhd <block_start>s=SubFinder()<line_sep>z=SubHDSubSearcher(s)<line_sep><return>z<block_end><def_stmt>test_languages subhd<block_start>subhd._check_languages(['zh_chs'])<with_stmt>pytest.raises(LanguageError)<block_start>subhd._check_languages(['fake_lang'])<block_end><block_end><def_stmt>test_exts subhd<block_start>subhd._check_exts(['ass'])<with_stmt>pytest.raises(ExtError)<block_start>subhd._check_exts(['fage_ext'])<block_end><block_end>
|
<import_from_stmt>selenium.webdriver.support.wait WebDriverWait<import_from_stmt>selenium.webdriver.support expected_conditions<as>EC<import_from_stmt>selenium.webdriver.common.by By<import_from_stmt>selenium.webdriver.common.keys Keys<import_from_stmt>selenium.common.exceptions NoSuchElementException<import_from_stmt>selenium.webdriver Chrome<import_stmt>os<import_stmt>speech_recognition<as>sr<import_from_stmt>time sleep<import_from_stmt>typing Type<import_from_stmt>pypasser.exceptions IpBlock<import_from_stmt>pypasser.utils download_audio convert_to_wav<class_stmt>reCaptchaV2(object)<block_start>"""
reCaptchaV2 bypass
-----------------
Solving reCaptcha V2 using speech to text
Attributes
----------
driver: webdriver
play: bool
default is True
attempts: int
default is 3 times
Returns
----------
bool: result of solver
"""<def_stmt>__new__ cls *args **kwargs<arrow>bool<block_start>instance=super(reCaptchaV2 cls).__new__(cls)<line_sep>instance.__init__(*args **kwargs)<line_sep>remaining_attempts=instance.attempts<line_sep>file_path=<none><try_stmt><block_start>cls.__click_check_box__(instance.driver)<if_stmt>cls.__is_checked__(instance.driver)<block_start><return><true><block_end>cls.__click_audio_button__(instance.driver)<while_stmt>remaining_attempts<block_start>remaining_attempts<augsub>1<line_sep>link=cls.__get_audio_link__(instance.driver instance.play)<line_sep>file_path=convert_to_wav(download_audio(link))<line_sep>cls.__type_text__(instance.driver cls.speech_to_text(file_path))<line_sep>os.remove(file_path)<line_sep>checked=cls.__is_checked__(instance.driver)<if_stmt>checked<or><not>remaining_attempts<block_start><return>checked<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt>file_path<block_start>os.remove(file_path)<block_end><if_stmt>'rc-doscaptcha-header'<in>instance.driver.page_source<block_start><raise>IpBlock()<block_end><else_stmt><block_start><raise>e<block_end><block_end><block_end><def_stmt>__init__ self driver:Type[Chrome] play:bool=<true> attempts:int=3<block_start>self.driver=driver<line_sep>self.play=play<line_sep>self.attempts=attempts<block_end><def_stmt>__click_check_box__ driver<block_start>driver.switch_to.frame(driver.find_element(By.TAG_NAME "iframe"))<line_sep>check_box=WebDriverWait(driver 10).until(EC.presence_of_element_located((By.CSS_SELECTOR "#recaptcha-anchor")))<line_sep>check_box.click()<line_sep>driver.switch_to.default_content()<block_end><def_stmt>__click_audio_button__ driver<block_start>driver.switch_to.frame(driver.find_elements(By.TAG_NAME "iframe")[2])<line_sep>audio_btn=WebDriverWait(driver 10).until(EC.presence_of_element_located((By.CSS_SELECTOR "#recaptcha-audio-button")))<line_sep>audio_btn.click()<line_sep>driver.switch_to.default_content()<block_end><def_stmt>__get_audio_link__ driver play<block_start>voice=driver.find_elements(By.TAG_NAME "iframe")[2]<line_sep>driver.switch_to.frame(voice)<line_sep>download_btn=WebDriverWait(driver 10).until(EC.presence_of_element_located((By.CSS_SELECTOR ".rc-audiochallenge-tdownload-link")))<line_sep>link=download_btn.get_attribute('href')<if_stmt>play<block_start>play_button=WebDriverWait(driver 10).until(EC.presence_of_element_located((By.CSS_SELECTOR ".rc-audiochallenge-play-button > button")))<line_sep>play_button.click()<block_end><return>link<block_end><def_stmt>__type_text__ driver text<block_start>text_field=WebDriverWait(driver 10).until(EC.presence_of_element_located((By.CSS_SELECTOR "#audio-response")))<line_sep>text_field.send_keys(text Keys.ENTER)<line_sep>driver.switch_to.default_content()<block_end><def_stmt>__is_checked__ driver<block_start>sleep(3)<line_sep>driver.switch_to.frame(WebDriverWait(driver 10).until(EC.presence_of_element_located((By.CSS_SELECTOR 'iframe[name^=a]'))))<try_stmt><block_start>driver.find_element(By.CSS_SELECTOR '.recaptcha-checkbox-checked')<line_sep>driver.switch_to.default_content()<line_sep><return><true><block_end><except_stmt>NoSuchElementException<block_start>driver.switch_to.default_content()<line_sep><return><false><block_end><block_end><def_stmt>speech_to_text audio_path:str<arrow>str<block_start>r=sr.Recognizer()<with_stmt>sr.AudioFile(audio_path)<as>source<block_start>audio=r.record(source)<block_end><return>r.recognize_sphinx(audio)<block_end><block_end>
|
"""
Copyright (c) 2019 Cisco Systems, Inc. All rights reserved.
License at https://github.com/cisco/mercury/blob/master/LICENSE
"""<import_stmt>os<import_stmt>sys<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__)))<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')<import_from_stmt>pmercury.protocols.protocol Protocol<class_stmt>HTTP_Server(Protocol)<block_start><def_stmt>__init__ self fp_database=<none> config=<none># populate fingerprint databases
<block_start>self.fp_db={}<line_sep># configuration
HTTP_Server.all_headers=<false><line_sep>HTTP_Server.all_headers_and_data=<false><if_stmt>config<eq><none><or>'http_server'<not><in>config<block_start>HTTP_Server.static_names=set([b'appex-activity-id' b'cdnuuid' b'cf-ray' b'content-range' b'content-type' b'date' b'etag' b'expires' b'flow_context' b'ms-cv' b'msregion' b'ms-requestid' b'request-id' b'vary' b'x-amz-cf-pop' b'x-amz-request-id' b'x-azure-ref-originshield' b'x-cache' b'x-cache-hits' b'x-ccc' b'x-diagnostic-s' b'x-feserver' b'x-hw' b'x-msedge-ref' b'x-ocsp-responder-id' b'x-requestid' b'x-served-by' b'x-timer' b'x-trace-context'])<line_sep>HTTP_Server.static_names_and_values=set([b'access-control-allow-credentials' b'access-control-allow-headers' b'access-control-allow-methods' b'access-control-expose-headers' b'cache-control' b'connection' b'content-language' b'content-transfer-encoding' b'p3p' b'pragma' b'server' b'strict-transport-security' b'x-aspnetmvc-version' b'x-aspnet-version' b'x-cid' b'x-ms-version' b'x-xss-protection'])<line_sep>HTTP_Server.headers_data=[0 1 2]<line_sep>HTTP_Server.contextual_data={b'via':'via'}<block_end><else_stmt><block_start>HTTP_Server.static_names=set([])<line_sep>HTTP_Server.static_names_and_values=set([])<line_sep>HTTP_Server.headers_data=[]<line_sep>HTTP_Server.contextual_data={}<if_stmt>'static_names'<in>config['http_server']<block_start><if_stmt>config['http_server']['static_names']<eq>['*']<block_start>HTTP_Server.all_headers=<true><block_end>HTTP_Server.static_names=set(map(<lambda>x:x.encode() config['http_server']['static_names']))<block_end><if_stmt>'static_names_and_values'<in>config['http_server']<block_start><if_stmt>config['http_server']['static_names_and_values']<eq>['*']<block_start>HTTP_Server.all_headers_and_data=<true><block_end>HTTP_Server.static_names_and_values=set(map(<lambda>x:x.encode() config['http_server']['static_names_and_values']))<block_end><if_stmt>'preamble'<in>config['http_server']<block_start><if_stmt>'version'<in>config['http_server']['preamble']<block_start>HTTP_Server.headers_data.append(0)<block_end><if_stmt>'code'<in>config['http_server']['preamble']<block_start>HTTP_Server.headers_data.append(1)<block_end><if_stmt>'reason'<in>config['http_server']['preamble']<block_start>HTTP_Server.headers_data.append(2)<block_end><if_stmt>'*'<in>config['http_server']['preamble']<block_start>HTTP_Server.headers_data=[0 1 2]<block_end><block_end><if_stmt>'context'<in>config['http_server']<block_start><for_stmt>c config['http_server']['context']<block_start>HTTP_Server.contextual_data[c.encode()]=c.lower().replace('-' '_')<block_end><block_end><block_end><block_end>@staticmethod<def_stmt>proto_identify data offset data_len<block_start><if_stmt>data_len-offset<l>16<block_start><return><false><block_end><if_stmt>(data[offset]<eq>72<and>data[offset+1]<eq>84<and>data[offset+2]<eq>84<and>data[offset+3]<eq>80<and>data[offset+4]<eq>47<and>data[offset+5]<eq>49)<block_start><return><true><block_end><return><false><block_end>@staticmethod<def_stmt>fingerprint data offset data_len<block_start>t_=data[offset:].split(b'\x0d\x0a' 1)<line_sep>response=t_[0].split(b'\x20' 2)<if_stmt>len(response)<l>2<block_start><return><none> <none><block_end>c=[]<for_stmt>rh HTTP_Server.headers_data<block_start><try_stmt><block_start>c.append('(%s)'%response[rh].hex())<block_end><except_stmt>IndexError<block_start>c.append('()')<block_end><block_end><if_stmt>len(t_)<eq>1<block_start><return>''.join(c) 
<none><block_end>headers=t_[1].split(b'\x0d\x0a')<if_stmt>headers[0]<eq>''<block_start>headers=headers[1:]<block_end>http_ah=HTTP_Server.all_headers<line_sep>http_ahd=HTTP_Server.all_headers_and_data<line_sep>http_sn=HTTP_Server.static_names<line_sep>http_snv=HTTP_Server.static_names_and_values<line_sep>http_ctx=HTTP_Server.contextual_data<line_sep>context=[]<for_stmt>h_ headers<block_start><if_stmt>h_<eq>b''<block_start><break><block_end>t0_=h_.split(b'\x3a\x20' 1)[0]<line_sep>t0_lower=t0_.lower()<line_sep>h_c=''<if_stmt>http_ahd<block_start>h_c=h_.hex()<block_end><elif_stmt>t0_lower<in>http_snv<block_start>h_c=h_.hex()<block_end><elif_stmt>t0_lower<in>http_sn<block_start>h_c=t0_.hex()<block_end><elif_stmt>http_ah<block_start>h_c=t0_.hex()<block_end><if_stmt>h_c<ne>''<block_start>c.append('(%s)'%h_c)<block_end><if_stmt>t0_lower<in>http_ctx<block_start><if_stmt>b'\x3a\x20'<in>h_<block_start><try_stmt><block_start>context.append({'name':http_ctx[t0_lower] 'data':h_.split(b'\x3a\x20' 1)[1].decode()})<block_end><except_stmt>UnicodeDecodeError<block_start>context.append({'name':http_ctx[t0_lower] 'data':h_.split(b'\x3a\x20' 1)[1].hex()})<block_end><block_end><else_stmt><block_start>context.append({'name':http_ctx[t0_lower] 'data':''})<block_end><block_end><block_end><return>''.join(c) context<block_end><def_stmt>get_human_readable self fp_str_<block_start>t_=[bytes.fromhex(x[1:])<for>x fp_str_.split(')')[:-1]]<try_stmt><block_start>fp_h=[{'version':t_[0].decode()} {'code':t_[1].decode()} {'response':t_[2].decode()}]<block_end><except_stmt><block_start>fp_h=[{'version':t_[0].hex()} {'code':t_[1].hex()} {'response':t_[2].hex()}]<block_end><for_stmt>i range(3 len(t_)-1)<block_start>field=t_[i].split(b': ')<if_stmt>len(field)<eq>2<block_start><try_stmt><block_start>fp_h.append({field[0].decode():field[1].decode()})<block_end><except_stmt><block_start>fp_h.append({field[0].hex():field[1].hex()})<block_end><block_end><else_stmt><block_start><try_stmt><block_start>fp_h.append({field[0].decode():''})<block_end><except_stmt><block_start>fp_h.append({field[0].hex():''})<block_end><block_end><block_end><return>fp_h<block_end><block_end>
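A hedged sketch of exercising the fingerprint routine above; the response bytes are made up, and the class is instantiated once only because __init__ populates the class-level header sets.
hs = HTTP_Server()  # fills HTTP_Server.static_names etc. with the default configuration
data = b"HTTP/1.1 200 OK\r\nServer: nginx\r\nContent-Type: text/html\r\n\r\n"
if HTTP_Server.proto_identify(data, 0, len(data)):
    fp_str, context = HTTP_Server.fingerprint(data, 0, len(data))
    # fp_str is a string of hex-encoded tokens, e.g. "(485454502f312e31)(323030)(4f4b)..."
    print(fp_str, context)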
|
<import_stmt>unittest<import_from_stmt>vaurien.util chunked<class_stmt>TestUtil(unittest.TestCase)<block_start><def_stmt>test_chunked self<block_start>self.assertEqual(sum(list(chunked(7634 2049))) 7634)<block_end><block_end>
|
<import_stmt>nltk<import_stmt>glob<import_stmt>json<import_stmt>os<line_sep>nltk.download('punkt')<class_stmt>NLTKSegmenter<block_start><def_stmt>__init self<block_start><pass><block_end>@staticmethod<def_stmt>segment_string article<block_start><return>nltk.tokenize.sent_tokenize(article)<block_end><block_end>wiki_path="data/extracted"<line_sep>output_path="formatted/wiki-key.txt"<line_sep>segmenter=NLTKSegmenter()<with_stmt>open(output_path "w")<as>output<block_start><for_stmt>dirname glob.glob(os.path.join(wiki_path '*') recursive=<false>)<block_start><for_stmt>filename glob.glob(os.path.join(dirname 'wiki_*') recursive=<true>)<block_start>print(filename)<line_sep>article_lines=[]<line_sep>article_open=<false><with_stmt>open(filename mode='r' newline='\n')<as>file<block_start><for_stmt>line file<block_start>line=line.rstrip()<if_stmt>'<doc id='<in>line<block_start>article_open=<true><block_end><elif_stmt>'</doc>'<in>line<block_start>key_sentences,contents=[] []<line_sep>key,content=<none> []<for_stmt>sentences article_lines[1:]<block_start><if_stmt>len(sentences)<g>1<block_start><if_stmt>key<block_start><if_stmt>len(content)<g>0<or>len(contents)<eq>0<block_start>key_sentences.append(key)<line_sep>contents.append(content)<block_end><else_stmt><block_start>contents[-1].append(key)<block_end>key,content=<none> []<block_end>key_sentences.append(sentences[0])<line_sep>contents.append(sentences[1:])<block_end><elif_stmt>len(sentences)<g>0<block_start><if_stmt>key<block_start>content.append(sentences[0])<block_end><else_stmt><block_start>key=sentences[0]<block_end><block_end><block_end><if_stmt>key<block_start><if_stmt>len(content)<g>0<or>len(contents)<eq>0<block_start>key_sentences.append(key)<line_sep>contents.append(content)<block_end><else_stmt><block_start>contents[-1].append(key)<block_end><block_end>contents=[" ".join(content)<for>content contents]<line_sep>article={"key":key_sentences "content":contents}<line_sep>output.write(json.dumps(article))<line_sep>output.write("\n")<line_sep>article_open=<false><line_sep>article_lines=[]<block_end><else_stmt><block_start><if_stmt>article_open<and>line<block_start>sentences=segmenter.segment_string(line)<line_sep>article_lines.append(sentences)<block_end><block_end><block_end><block_end><block_end><block_end><block_end>
|
<import_from_stmt>distutils.core setup<import_from_stmt>distutils.extension Extension<import_from_stmt>Cython.Distutils build_ext<line_sep>setup(cmdclass={'build_ext':build_ext} ext_modules=[Extension("bg_decoder" ["bg_decoder.pyx"]) Extension("clm_decoder" ["clm_decoder.pyx"]) Extension("clm_decoder2" ["clm_decoder2.pyx"])])<line_sep>
|
# Leo colorizer control file for eiffel mode.
# This file is in the public domain.
# Properties for eiffel mode.
properties={"lineComment":"--" }<line_sep># Attributes dict for eiffel_main ruleset.
eiffel_main_attributes_dict={"default":"null" "digit_re":"" "escape":"\\" "highlight_digits":"true" "ignore_case":"true" "no_word_sep":"" }<line_sep># Dictionary of attributes dictionaries for eiffel mode.
attributesDictDict={"eiffel_main":eiffel_main_attributes_dict }<line_sep># Keywords dict for eiffel_main ruleset.
eiffel_main_keywords_dict={"alias":"keyword1" "all":"keyword1" "and":"keyword1" "as":"keyword1" "check":"keyword1" "class":"keyword1" "creation":"keyword1" "current":"literal2" "debug":"keyword1" "deferred":"keyword1" "do":"keyword1" "else":"keyword1" "elseif":"keyword1" "end":"keyword1" "ensure":"keyword1" "expanded":"keyword1" "export":"keyword1" "external":"keyword1" "false":"literal2" "feature":"keyword1" "from":"keyword1" "frozen":"keyword1" "if":"keyword1" "implies":"keyword1" "indexing":"keyword1" "infix":"keyword1" "inherit":"keyword1" "inspect":"keyword1" "invariant":"keyword1" "is":"keyword1" "like":"keyword1" "local":"keyword1" "loop":"keyword1" "not":"keyword1" "obsolete":"keyword1" "old":"keyword1" "once":"keyword1" "or":"keyword1" "precursor":"literal2" "prefix":"keyword1" "redefine":"keyword1" "rename":"keyword1" "require":"keyword1" "rescue":"keyword1" "result":"literal2" "retry":"keyword1" "select":"keyword1" "separate":"keyword1" "strip":"literal2" "then":"keyword1" "true":"literal2" "undefine":"keyword1" "unique":"literal2" "until":"keyword1" "variant":"keyword1" "void":"literal2" "when":"keyword1" "xor":"keyword1" }<line_sep># Dictionary of keywords dictionaries for eiffel mode.
keywordsDictDict={"eiffel_main":eiffel_main_keywords_dict }<line_sep># Rules for eiffel_main ruleset.
<def_stmt>eiffel_rule0 colorer s i<block_start><return>colorer.match_eol_span(s i kind="comment1" seq="--" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false>)<block_end><def_stmt>eiffel_rule1 colorer s i<block_start><return>colorer.match_span(s i kind="literal1" begin="\"" end="\"" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false> no_escape=<false> no_line_break=<true> no_word_break=<false>)<block_end><def_stmt>eiffel_rule2 colorer s i<block_start><return>colorer.match_span(s i kind="literal1" begin="'" end="'" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false> no_escape=<false> no_line_break=<true> no_word_break=<false>)<block_end><def_stmt>eiffel_rule3 colorer s i<block_start><return>colorer.match_keywords(s i)<block_end># Rules dict for eiffel_main ruleset.
rulesDict1={"\"":[eiffel_rule1 ] "'":[eiffel_rule2 ] "-":[eiffel_rule0 ] "0":[eiffel_rule3 ] "1":[eiffel_rule3 ] "2":[eiffel_rule3 ] "3":[eiffel_rule3 ] "4":[eiffel_rule3 ] "5":[eiffel_rule3 ] "6":[eiffel_rule3 ] "7":[eiffel_rule3 ] "8":[eiffel_rule3 ] "9":[eiffel_rule3 ] "@":[eiffel_rule3 ] "A":[eiffel_rule3 ] "B":[eiffel_rule3 ] "C":[eiffel_rule3 ] "D":[eiffel_rule3 ] "E":[eiffel_rule3 ] "F":[eiffel_rule3 ] "G":[eiffel_rule3 ] "H":[eiffel_rule3 ] "I":[eiffel_rule3 ] "J":[eiffel_rule3 ] "K":[eiffel_rule3 ] "L":[eiffel_rule3 ] "M":[eiffel_rule3 ] "N":[eiffel_rule3 ] "O":[eiffel_rule3 ] "P":[eiffel_rule3 ] "Q":[eiffel_rule3 ] "R":[eiffel_rule3 ] "S":[eiffel_rule3 ] "T":[eiffel_rule3 ] "U":[eiffel_rule3 ] "V":[eiffel_rule3 ] "W":[eiffel_rule3 ] "X":[eiffel_rule3 ] "Y":[eiffel_rule3 ] "Z":[eiffel_rule3 ] "a":[eiffel_rule3 ] "b":[eiffel_rule3 ] "c":[eiffel_rule3 ] "d":[eiffel_rule3 ] "e":[eiffel_rule3 ] "f":[eiffel_rule3 ] "g":[eiffel_rule3 ] "h":[eiffel_rule3 ] "i":[eiffel_rule3 ] "j":[eiffel_rule3 ] "k":[eiffel_rule3 ] "l":[eiffel_rule3 ] "m":[eiffel_rule3 ] "n":[eiffel_rule3 ] "o":[eiffel_rule3 ] "p":[eiffel_rule3 ] "q":[eiffel_rule3 ] "r":[eiffel_rule3 ] "s":[eiffel_rule3 ] "t":[eiffel_rule3 ] "u":[eiffel_rule3 ] "v":[eiffel_rule3 ] "w":[eiffel_rule3 ] "x":[eiffel_rule3 ] "y":[eiffel_rule3 ] "z":[eiffel_rule3 ] }<line_sep># x.rulesDictDict for eiffel mode.
rulesDictDict={"eiffel_main":rulesDict1 }<line_sep># Import dict for eiffel mode.
importDict={}<line_sep>
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
<import_from_stmt>...pipeline_logger pipeline_logger<import_stmt>pandas<as>pd<import_from_stmt>enum Enum<class_stmt>FilterOutReason(Enum)<block_start>TrainDataset="train-dataset"<line_sep>DevDataset="dev-dataset"<line_sep>EmptyModelName="empty-model-name"<line_sep>ModelCompeting="model-competing"<block_end><class_stmt>ProposalsFilter<block_start>step="proposals_filtering"<def_stmt>_filter self proposals<block_start><raise>NotImplementedError<block_end><def_stmt>filter self proposals<block_start>which,reason=self._filter(proposals)<line_sep>self.log(proposals=proposals which=which reason=reason)<line_sep><return>which reason<block_end><def_stmt>__rshift__ self other<block_start><return>CompoundFilter([self other])<block_end><def_stmt>__call__ self proposals<block_start>which,reason=self.filter(proposals)<line_sep><return>proposals[which]<block_end><def_stmt>log self **kwargs<block_start>pipeline_logger(f"filtering::{self.step}::filtered" **kwargs)<block_end><block_end><class_stmt>CompoundFilter(ProposalsFilter)<block_start>step="compound_filtering"<def_stmt>__init__ self filters<block_start>self.filters=filters<block_end><def_stmt>_filter self proposals<block_start>agg_which=pd.Series(data=<true> index=proposals.index)<line_sep>agg_reason=pd.Series(data="" index=proposals.index)<for_stmt>f self.filters<block_start>which,reason=f.filter(proposals)<line_sep>agg_reason[agg_which&~which]=reason<line_sep>agg_which<augand>which<line_sep>proposals=proposals[which]<block_end><return>agg_which agg_reason[~agg_which]<block_end><block_end><class_stmt>NopFilter(ProposalsFilter)<block_start>step="nop_filtering"<def_stmt>_filter self proposals<block_start>which=pd.Series(data=<true> index=proposals.index)<line_sep>reason=pd.Series()<line_sep><return>which reason<block_end><block_end># filter proposals for which structure prediction
# * was unable to find model type or
# * found dataset cell containing "dev" or "train"
# this filter could be applied before taxonomy linking,
# but to make error analysis easier it's applied after
<class_stmt>StructurePredictionFilter(ProposalsFilter)<block_start>step="structure_filtering"<def_stmt>_filter self proposals<block_start>which=(proposals.struct_model_type<ne>'')&~proposals.struct_dataset.str.contains('dev')&~proposals.struct_dataset.str.contains('train')<line_sep>reason=pd.Series(data="" index=proposals.index)<line_sep>reason[proposals.struct_dataset.str.contains('train')]="train-dataset"<line_sep>reason[proposals.struct_dataset.str.contains('dev')]="dev-dataset"<line_sep>reason[proposals.struct_model_type<eq>'']="empty-model-type"<line_sep><return>which reason[~which]<block_end><block_end><class_stmt>ConfidenceFilter(ProposalsFilter)<block_start>step="confidence_filtering"<def_stmt>__init__ self confidence=-1<block_start>self.confidence=confidence<block_end><def_stmt>_filter self proposals<block_start>which=proposals.confidence<ge>self.confidence<line_sep>reason="confidence "+proposals[~which].confidence.round(2).astype(str)+f" < {self.confidence}"<line_sep><return>which reason[~which]<block_end><def_stmt>log self **kwargs<block_start>super().log(**kwargs confidence=self.confidence)<block_end><block_end><class_stmt>BestResultFilter(ProposalsFilter)<block_start>step="best_result_filtering"<def_stmt>__init__ self taxonomy context="paper"<block_start><assert_stmt>context<in>["paper" "table"]<line_sep>self.metrics_info=taxonomy.metrics_info<line_sep>self.context=context<block_end><def_stmt>_filter self proposals<block_start>reason=pd.Series(data="" index=proposals.index)<line_sep>indices=[]<if_stmt>self.context<eq>"paper"<block_start>context_column=proposals.index.to_series().str.split('/' expand=<false>).apply(<lambda>x:x[0])<block_end><else_stmt><block_start>context_column=proposals.index.to_series().str.split('/' expand=<false>).apply(<lambda>x:x[0]+"/"+x[1])<block_end><for_stmt>key_all,group proposals[(proposals.model_type<eq>'model-best')&~proposals.parsed.isna()].groupby(by=["dataset" "metric" "task" context_column])<block_start>dataset,metric,task,paper=key_all<line_sep>key=(task dataset metric)<line_sep>d=0<if_stmt>key<in>self.metrics_info<block_start>d=self.metrics_info[key]<block_end><elif_stmt>metric<in>self.metrics_info<block_start>d=self.metrics_info[metric]<block_end><elif_stmt>'error'<in>metric.lower()<block_start>d=-1<block_end><elif_stmt>'accuracy'<in>metric.lower()<block_start>d=1<block_end><if_stmt>d<ge>0<block_start>index=group.parsed.idxmax()<block_end><else_stmt><block_start>index=group.parsed.idxmin()<block_end>indices.append(index)<line_sep>reason[group.index[group.index<ne>index]]="replaced by "+str(index)<block_end>reason[proposals.struct_model_type<eq>'model-competing']="model-competing"<line_sep>which=proposals.index.to_series().isin(indices)<line_sep><return>which reason[~which]<block_end><def_stmt>log self **kwargs<block_start>super().log(**kwargs context=self.context)<block_end><block_end>
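A brief sketch of how the filters above are meant to compose via ProposalsFilter.__rshift__; the `proposals` DataFrame and the 0.8 confidence threshold are placeholders, not values from the original code.
# Illustrative only: chaining builds a CompoundFilter, and calling it returns
# the rows of `proposals` that survive every stage.
pipeline = StructurePredictionFilter() >> ConfidenceFilter(confidence=0.8)
surviving = pipeline(proposals)  # `proposals` is a pandas DataFrame of raw proposals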
|
<import_from_stmt>fixtures *<import_stmt>Evtx.Evtx<as>evtx<def_stmt>test_file_header system<block_start>'''
regression test parsing some known fields in the file header.
Args:
system (bytes): the system.evtx test file contents. pytest fixture.
'''<line_sep>fh=evtx.FileHeader(system 0x0)<line_sep># collected empirically
<assert_stmt>fh.magic()<eq>'ElfFile\x00'<assert_stmt>fh.major_version()<eq>0x3<assert_stmt>fh.minor_version()<eq>0x1<assert_stmt>fh.flags()<eq>0x1<assert_stmt>fh.is_dirty()<is><true><assert_stmt>fh.is_full()<is><false><assert_stmt>fh.current_chunk_number()<eq>0x8<assert_stmt>fh.chunk_count()<eq>0x9<assert_stmt>fh.oldest_chunk()<eq>0x0<assert_stmt>fh.next_record_number()<eq>0x34d8<assert_stmt>fh.checksum()<eq>0x41b4b1ec<assert_stmt>fh.calculate_checksum()<eq>fh.checksum()<block_end><def_stmt>test_file_header2 security<block_start>'''
regression test parsing some known fields in the file header.
Args:
security (bytes): the security.evtx test file contents. pytest fixture.
'''<line_sep>fh=evtx.FileHeader(security 0x0)<line_sep># collected empirically
<assert_stmt>fh.magic()<eq>'ElfFile\x00'<assert_stmt>fh.major_version()<eq>0x3<assert_stmt>fh.minor_version()<eq>0x1<assert_stmt>fh.flags()<eq>0x1<assert_stmt>fh.is_dirty()<is><true><assert_stmt>fh.is_full()<is><false><assert_stmt>fh.current_chunk_number()<eq>0x19<assert_stmt>fh.chunk_count()<eq>0x1a<assert_stmt>fh.oldest_chunk()<eq>0x0<assert_stmt>fh.next_record_number()<eq>0x8b2<assert_stmt>fh.checksum()<eq>0x3f6e33d5<assert_stmt>fh.calculate_checksum()<eq>fh.checksum()<block_end>
|
<import_from_stmt>datetime datetime timedelta<import_from_stmt>typing Optional Tuple<import_from_stmt>monitor.database.events BlockchainStateEvent ConnectionsEvent FarmingInfoEvent HarvesterPlotsEvent SignagePointEvent WalletBalanceEvent <import_from_stmt>sqlalchemy.orm Session<import_from_stmt>sqlalchemy.sql.expression select<import_from_stmt>sqlalchemy.sql.functions func<def_stmt>get_proofs_found session:Session<arrow>Optional[int]<block_start>result=session.execute(select(func.sum(FarmingInfoEvent.proofs)))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_harvester_count session:Session<arrow>Optional[int]<block_start>result=session.execute(select(ConnectionsEvent.harvester_count).order_by(ConnectionsEvent.ts.desc()))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_sync_status session:Session<arrow>Optional[bool]<block_start>result=session.execute(select(BlockchainStateEvent.synced).order_by(BlockchainStateEvent.ts.desc()))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_blockchain_state session:Session<arrow>Optional[BlockchainStateEvent]<block_start>result=session.execute(select(BlockchainStateEvent).order_by(BlockchainStateEvent.ts.desc()))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_wallet_balance session:Session<arrow>Optional[WalletBalanceEvent]<block_start>result=session.execute(select(WalletBalanceEvent).order_by(WalletBalanceEvent.ts.desc()))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_connections session:Session<arrow>Optional[ConnectionsEvent]<block_start>result=session.execute(select(ConnectionsEvent).order_by(ConnectionsEvent.ts.desc()))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_farming_start session:Session<arrow>Optional[datetime]<block_start>result=session.execute(select(func.min(FarmingInfoEvent.ts)))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_previous_signage_point session:Session<arrow>Optional[str]<block_start>result=session.execute(select(FarmingInfoEvent.signage_point).order_by(FarmingInfoEvent.ts.desc()).distinct(FarmingInfoEvent.signage_point).limit(2))<line_sep><return>result.all()[-1][0]<block_end><def_stmt>get_plot_delta session:Session period=timedelta(hours=24)<arrow>Tuple[int int]<block_start>result=session.execute(select(func.min(HarvesterPlotsEvent.ts)))<line_sep>first_ts=result.scalars().first()<if_stmt>first_ts<is><none><block_start><return>0 0<block_end>initial_ts=max(first_ts datetime.now()-period)<line_sep>sub_query=select([HarvesterPlotsEvent.plot_count HarvesterPlotsEvent.portable_plot_count HarvesterPlotsEvent.plot_size HarvesterPlotsEvent.portable_plot_size]).where(HarvesterPlotsEvent.ts<g>initial_ts).order_by(HarvesterPlotsEvent.ts).group_by(HarvesterPlotsEvent.host)<line_sep>result=session.execute(select([func.sum(sub_query.c.plot_count) func.sum(sub_query.c.portable_plot_count) func.sum(sub_query.c.plot_size) func.sum(sub_query.c.portable_plot_size)]))<line_sep>initial_plots=result.one()<if_stmt>initial_plots<is><none><block_start><return>0 0<block_end>initial_og_plot_count,initial_portable_plot_count,initial_og_plot_size,initial_portable_plot_size=initial_plots<line_sep>initial_plot_count=initial_og_plot_count+initial_portable_plot_count<line_sep>initial_plot_size=initial_og_plot_size+initial_portable_plot_size<line_sep>current_plot_count=get_plot_count(session)<if_stmt>current_plot_count<is><none><block_start><return>0 
0<block_end>current_plot_size=get_plot_size(session)<if_stmt>current_plot_size<is><none><block_start><return>0 0<block_end><return>current_plot_count-initial_plot_count current_plot_size-initial_plot_size<block_end><def_stmt>get_plot_count session:Session<arrow>Optional[int]<block_start>og_plot_count=get_og_plot_count(session)<line_sep>portable_plot_count=get_portable_plot_count(session)<if_stmt>og_plot_count<is><not><none><and>portable_plot_count<is><not><none><block_start><return>og_plot_count+portable_plot_count<block_end><elif_stmt>og_plot_count<is><not><none><and>portable_plot_count<is><none><block_start><return>og_plot_count<block_end><elif_stmt>og_plot_count<is><none><and>portable_plot_count<is><not><none><block_start><return>portable_plot_count<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>get_plot_size session:Session<arrow>Optional[int]<block_start>og_plot_size=get_og_plot_size(session)<line_sep>portable_plot_size=get_portable_plot_size(session)<if_stmt>og_plot_size<is><not><none><and>portable_plot_size<is><not><none><block_start><return>og_plot_size+portable_plot_size<block_end><elif_stmt>og_plot_size<is><not><none><and>portable_plot_size<is><none><block_start><return>og_plot_size<block_end><elif_stmt>og_plot_size<is><none><and>portable_plot_size<is><not><none><block_start><return>portable_plot_size<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>get_og_plot_size session:Session<arrow>Optional[int]<block_start>sub_query=select([func.max(HarvesterPlotsEvent.plot_size).label("plot_size")]).where(HarvesterPlotsEvent.ts<g>datetime.now()-timedelta(seconds=30)).group_by(HarvesterPlotsEvent.host)<line_sep>result=session.execute(select(func.sum(sub_query.c.plot_size)))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_og_plot_count session:Session<arrow>Optional[int]<block_start>sub_query=select([func.max(HarvesterPlotsEvent.plot_count).label("plot_count")]).where(HarvesterPlotsEvent.ts<g>datetime.now()-timedelta(seconds=30)).group_by(HarvesterPlotsEvent.host)<line_sep>result=session.execute(select(func.sum(sub_query.c.plot_count)))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_portable_plot_size session:Session<arrow>Optional[int]<block_start>sub_query=select([func.max(HarvesterPlotsEvent.portable_plot_size).label("portable_plot_size")]).where(HarvesterPlotsEvent.ts<g>datetime.now()-timedelta(seconds=30)).group_by(HarvesterPlotsEvent.host)<line_sep>result=session.execute(select(func.sum(sub_query.c.portable_plot_size)))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_portable_plot_count session:Session<arrow>Optional[int]<block_start>sub_query=select([func.max(HarvesterPlotsEvent.portable_plot_count).label("portable_plot_count")]).where(HarvesterPlotsEvent.ts<g>datetime.now()-timedelta(seconds=30)).group_by(HarvesterPlotsEvent.host)<line_sep>result=session.execute(select(func.sum(sub_query.c.portable_plot_count)))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_signage_points_per_minute session:Session interval:timedelta<arrow>Optional[float]<block_start>result=session.execute(select(func.count(SignagePointEvent.ts)).where(SignagePointEvent.ts<ge>datetime.now()-interval))<line_sep>num_signage_points=result.scalars().first()<if_stmt>num_signage_points<is><none><block_start><return><none><block_end><return>num_signage_points/(interval.seconds/60)<block_end><def_stmt>get_passed_filters_per_minute session:Session 
interval:timedelta<arrow>Optional[float]<block_start>result=session.execute(select(func.sum(FarmingInfoEvent.passed_filter)).where(FarmingInfoEvent.ts<ge>datetime.now()-interval))<line_sep>passed_filters=result.scalars().first()<if_stmt>passed_filters<is><none><block_start><return><none><block_end><return>passed_filters/(interval.seconds/60)<block_end><def_stmt>get_current_balance session:Session<arrow>int<block_start>result=session.execute(select(WalletBalanceEvent.confirmed).order_by(WalletBalanceEvent.ts.desc()))<line_sep><return>result.scalars().first()<block_end><def_stmt>get_last_payment session:Session<arrow>int<block_start>current_balance=get_current_balance(session)<line_sep>previous_balance_query=session.execute(select(WalletBalanceEvent.confirmed).where(WalletBalanceEvent.confirmed<ne>current_balance).order_by(WalletBalanceEvent.ts.desc()))<line_sep>last_balance=previous_balance_query.scalars().first()<line_sep><return>int(current_balance)-int(last_balance)<block_end>
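A hedged example of driving the query helpers above; the SQLite URL is a placeholder and it assumes the events tables already exist in that database.
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

engine = create_engine("sqlite:///monitor.db")  # placeholder database path
with Session(engine) as session:
    print("plot count:", get_plot_count(session))
    print("synced:", get_sync_status(session))
    print("signage points/min:", get_signage_points_per_minute(session, timedelta(minutes=10)))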
|
# -*- coding: utf-8 -*-
"""
=============================
OT for image color adaptation
=============================
This example presents a way of transferring colors between two images
with Optimal Transport as introduced in [6]
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2014).
Regularized discrete optimal transport.
SIAM Journal on Imaging Sciences, 7(3), 1853-1882.
"""<line_sep># Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 2
<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>numpy<as>np<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>ot<line_sep>rng=np.random.RandomState(42)<def_stmt>im2mat img<block_start>"""Converts an image to matrix (one pixel per line)"""<line_sep><return>img.reshape((img.shape[0]<times>img.shape[1] img.shape[2]))<block_end><def_stmt>mat2im X shape<block_start>"""Converts back a matrix to an image"""<line_sep><return>X.reshape(shape)<block_end><def_stmt>minmax img<block_start><return>np.clip(img 0 1)<block_end>##############################################################################
# Generate data
# -------------
# Loading images
this_file=os.path.realpath('__file__')<line_sep>data_path=os.path.join(Path(this_file).parent.parent.parent 'data')<line_sep>I1=plt.imread(os.path.join(data_path 'ocean_day.jpg')).astype(np.float64)/256<line_sep>I2=plt.imread(os.path.join(data_path 'ocean_sunset.jpg')).astype(np.float64)/256<line_sep>X1=im2mat(I1)<line_sep>X2=im2mat(I2)<line_sep># training samples
nb=500<line_sep>idx1=rng.randint(X1.shape[0] size=(nb ))<line_sep>idx2=rng.randint(X2.shape[0] size=(nb ))<line_sep>Xs=X1[idx1 :]<line_sep>Xt=X2[idx2 :]<line_sep>##############################################################################
# Plot original image
# -------------------
plt.figure(1 figsize=(6.4 3))<line_sep>plt.subplot(1 2 1)<line_sep>plt.imshow(I1)<line_sep>plt.axis('off')<line_sep>plt.title('Image 1')<line_sep>plt.subplot(1 2 2)<line_sep>plt.imshow(I2)<line_sep>plt.axis('off')<line_sep>plt.title('Image 2')<line_sep>##############################################################################
# Scatter plot of colors
# ----------------------
plt.figure(2 figsize=(6.4 3))<line_sep>plt.subplot(1 2 1)<line_sep>plt.scatter(Xs[: 0] Xs[: 2] c=Xs)<line_sep>plt.axis([0 1 0 1])<line_sep>plt.xlabel('Red')<line_sep>plt.ylabel('Blue')<line_sep>plt.title('Image 1')<line_sep>plt.subplot(1 2 2)<line_sep>plt.scatter(Xt[: 0] Xt[: 2] c=Xt)<line_sep>plt.axis([0 1 0 1])<line_sep>plt.xlabel('Red')<line_sep>plt.ylabel('Blue')<line_sep>plt.title('Image 2')<line_sep>plt.tight_layout()<line_sep>##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# EMDTransport
ot_emd=ot.da.EMDTransport()<line_sep>ot_emd.fit(Xs=Xs Xt=Xt)<line_sep># SinkhornTransport
ot_sinkhorn=ot.da.SinkhornTransport(reg_e=1e-1)<line_sep>ot_sinkhorn.fit(Xs=Xs Xt=Xt)<line_sep># prediction between images (using out of sample prediction as in [6])
transp_Xs_emd=ot_emd.transform(Xs=X1)<line_sep>transp_Xt_emd=ot_emd.inverse_transform(Xt=X2)<line_sep>transp_Xs_sinkhorn=ot_sinkhorn.transform(Xs=X1)<line_sep>transp_Xt_sinkhorn=ot_sinkhorn.inverse_transform(Xt=X2)<line_sep>I1t=minmax(mat2im(transp_Xs_emd I1.shape))<line_sep>I2t=minmax(mat2im(transp_Xt_emd I2.shape))<line_sep>I1te=minmax(mat2im(transp_Xs_sinkhorn I1.shape))<line_sep>I2te=minmax(mat2im(transp_Xt_sinkhorn I2.shape))<line_sep>##############################################################################
# Plot new images
# ---------------
plt.figure(3 figsize=(8 4))<line_sep>plt.subplot(2 3 1)<line_sep>plt.imshow(I1)<line_sep>plt.axis('off')<line_sep>plt.title('Image 1')<line_sep>plt.subplot(2 3 2)<line_sep>plt.imshow(I1t)<line_sep>plt.axis('off')<line_sep>plt.title('Image 1 Adapt')<line_sep>plt.subplot(2 3 3)<line_sep>plt.imshow(I1te)<line_sep>plt.axis('off')<line_sep>plt.title('Image 1 Adapt (reg)')<line_sep>plt.subplot(2 3 4)<line_sep>plt.imshow(I2)<line_sep>plt.axis('off')<line_sep>plt.title('Image 2')<line_sep>plt.subplot(2 3 5)<line_sep>plt.imshow(I2t)<line_sep>plt.axis('off')<line_sep>plt.title('Image 2 Adapt')<line_sep>plt.subplot(2 3 6)<line_sep>plt.imshow(I2te)<line_sep>plt.axis('off')<line_sep>plt.title('Image 2 Adapt (reg)')<line_sep>plt.tight_layout()<line_sep>plt.show()<line_sep>
|
<import_from_stmt>janome.version JANOME_VERSION<as>__version__<line_sep>__all__=["__version__" ]<line_sep>
|
<import_from_stmt>PyObjCTools.TestSupport *<import_stmt>objc<import_from_stmt>Foundation *<import_stmt>Foundation<class_stmt>TestNSEnumeratorInteraction(TestCase)<block_start><def_stmt>setUp self<block_start>self.arrayContainer=NSArray.arrayWithArray_(range(100))<block_end><def_stmt>testNoFastEnumeration self<block_start>self.assertNotHasAttr(Foundation 'NSFastEnumerationState')<block_end><def_stmt>testInOperator self<block_start>y=[]<for_stmt>x self.arrayContainer.objectEnumerator()<block_start>y.append(x)<block_end>self.assertEqual(len(y) len(self.arrayContainer))<for_stmt>i range(len(y))<block_start>self.assertEqual(y[i] self.arrayContainer[i])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
<import_from_stmt>enum Enum EnumMeta<import_from_stmt>six with_metaclass<class_stmt>_CaseInsensitiveEnumMeta(EnumMeta)<block_start><def_stmt>__getitem__ self name<block_start><return>super().__getitem__(name.upper())<block_end><def_stmt>__getattr__ cls name<block_start>"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""<try_stmt><block_start><return>cls._member_map_[name.upper()]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(name)<block_end><block_end><block_end><class_stmt>AgentConfigurationRebootStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>UNKNOWN="unknown"<line_sep>REBOOTED="rebooted"<line_sep>NOT_REBOOTED="notRebooted"<block_end><class_stmt>CredentialType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Credential type of the run as account.
"""<line_sep>V_MWARE_FABRIC="VMwareFabric"<line_sep>HYPER_V_FABRIC="HyperVFabric"<line_sep>LINUX_GUEST="LinuxGuest"<line_sep>WINDOWS_GUEST="WindowsGuest"<line_sep>LINUX_SERVER="LinuxServer"<line_sep>WINDOWS_SERVER="WindowsServer"<block_end><class_stmt>HighlyAvailable(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Value indicating whether the VM is highly available.
"""<line_sep>UNKNOWN="Unknown"<line_sep>NO="No"<line_sep>YES="Yes"<block_end><class_stmt>HypervisorConfigurationHypervisorType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>UNKNOWN="unknown"<line_sep>HYPERV="hyperv"<block_end><class_stmt>MachinePropertiesMonitoringState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>MONITORED="monitored"<line_sep>DISCOVERED="discovered"<block_end><class_stmt>MachinePropertiesVirtualizationState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>UNKNOWN="unknown"<line_sep>PHYSICAL="physical"<line_sep>VIRTUAL="virtual"<line_sep>HYPERVISOR="hypervisor"<block_end><class_stmt>MachineResourcesConfigurationCpuSpeedAccuracy(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>ACTUAL="actual"<line_sep>ESTIMATED="estimated"<block_end><class_stmt>OperatingSystemConfigurationBitness(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>THIRTY_TWO_BIT="32bit"<line_sep>SIXTY_FOUR_BIT="64bit"<block_end><class_stmt>OperatingSystemConfigurationFamily(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>UNKNOWN="unknown"<line_sep>WINDOWS="windows"<line_sep>LINUX="linux"<line_sep>SOLARIS="solaris"<line_sep>AIX="aix"<block_end><class_stmt>VirtualDiskMode(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Disk mode property used for identifying independent disks.
"""<line_sep>PERSISTENT="persistent"<line_sep>INDEPENDENT_PERSISTENT="independent_persistent"<line_sep>INDEPENDENT_NONPERSISTENT="independent_nonpersistent"<line_sep>NONPERSISTENT="nonpersistent"<line_sep>UNDOABLE="undoable"<line_sep>APPEND="append"<block_end><class_stmt>VirtualMachineConfigurationVirtualMachineType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>UNKNOWN="unknown"<line_sep>HYPERV="hyperv"<line_sep>LDOM="ldom"<line_sep>LPAR="lpar"<line_sep>VMWARE="vmware"<line_sep>VIRTUAL_PC="virtualPc"<line_sep>XEN="xen"<block_end>
|
<import_stmt>pytest<import_from_stmt>tests.conftest MockMixin<class_stmt>A<block_start><def_stmt>method self<block_start>"""aaaa"""<block_end><def_stmt>method2 self<block_start><return>2<block_end><block_end><class_stmt>B(A MockMixin)<block_start><def_stmt>method self<block_start><return>1<block_end><block_end><def_stmt>test_mock_mixin <block_start>b=B()<assert_stmt>b.method()<eq>1<line_sep>b.method.assert_called()<assert_stmt>b.method2()<eq>2<line_sep>b.method2.assert_called()<block_end><def_stmt>test_mock_mixin__2_instances <block_start>b1=B()<line_sep>b2=B()<assert_stmt>b1.method()<eq>1<line_sep>b1.method.assert_called()<line_sep>b2.method.assert_not_called()<block_end><def_stmt>test_mock_call_context <block_start>b1=B()<with_stmt>pytest.raises(AssertionError)<block_start><with_stmt>b1.method.called_within_context()<block_start><pass><block_end><block_end><with_stmt>b1.method.called_within_context()<block_start>b1.method()<block_end><with_stmt>pytest.raises(AssertionError)<block_start><with_stmt>b1.method.called_within_context()<block_start>b1.method()<block_end><block_end><with_stmt>b1.method.called_within_context(first=<false>)<block_start>b1.method()<block_end><with_stmt>pytest.raises(AssertionError)<block_start><with_stmt>b1.method.called_within_context(first=<false> times=2)<block_start>b1.method()<block_end><block_end><with_stmt>b1.method.called_within_context(first=<false> times=2)<block_start>b1.method()<line_sep>b1.method()<block_end><block_end>
|
# -*- coding: utf-8 -*-
"""
sphinxcontrib.napoleon._upstream
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions to help compatibility with upstream sphinx.ext.napoleon.
:copyright: Copyright 2013-2018 by <NAME>, see AUTHORS.
:license: BSD, see LICENSE for details.
"""<def_stmt>_ message *args<block_start>"""
NOOP implementation of sphinx.locale.get_translation shortcut.
"""<line_sep><return>message<block_end>
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing various utility functions for MindMeld NLP Components.
"""<import_stmt>importlib<import_stmt>logging<import_stmt>enum<import_from_stmt>typing Union Optional List<import_from_stmt>collections defaultdict<import_from_stmt>..exceptions InvalidMaskError<line_sep>logger=logging.getLogger(__name__)<def_stmt>_is_module_available module_name:str<block_start>"""
checks if a module is available or not (eg. _is_module_available("sentence_transformers"))
Args:
module_name (str): name of the module to check
Returns:
bool, if or not the given module exists
"""<line_sep><return>bool(importlib.util.find_spec(module_name)<is><not><none>)<block_end><def_stmt>_get_module_or_attr module_name:str func_name:str=<none><block_start>"""
Loads an attribute from a module or a module itself
(check if the module exists before calling this function)
"""<line_sep>m=importlib.import_module(module_name)<if_stmt><not>func_name<block_start><return>m<block_end><if_stmt>func_name<not><in>dir(m)<block_start><raise>ImportError(f"Cannot import {func_name} from {module_name}")<block_end><return>getattr(m func_name)<block_end><class_stmt>MaskState(enum.Enum)<block_start>"""
This class encodes three NLP states:
unset: state when the user has neither allowed nor denied the NLP component.
This state is needed to propagate state up/down the tree since we only
propagate state to unset nodes, never to user-defined nodes
allow: state when the user has explicitly allowed a node.
deny: state when the user has explicitly denied a node.
"""<line_sep>unset=enum.auto()<line_sep>allow=enum.auto()<line_sep>deny=enum.auto()<def_stmt>__bool__ self<block_start><return>self<eq>self.allow<block_end><block_end><class_stmt>TreeNode<block_start><def_stmt>__init__ self nlp_name:str parent:Optional['TreeNode']=<none> children:Optional[List['TreeNode']]=<none> mask_state:Optional[MaskState]=<none><block_start>"""
Constructor for the tree node
Args:
nlp_name: The name of the NLP component. eg. "weather"
is a name for a domain
parent: The parent of the NLP component. eg. parent of
an intent is a domain
children: The children of the NLP component. eg.
children of an intent are entities
mask_state: The mask state of the NLP component
"""<line_sep>self.nlp_name=nlp_name<line_sep>self.mask_state=mask_state<line_sep>self.parent=parent<line_sep>self.children=children<or>[]<block_end><block_end><class_stmt>TreeNlp<block_start>"""
This data structure encodes an NLP tree hierarchy where each node
carries a mask state, based on which certain NLP components are allowed
or denied according to user input.
"""<def_stmt>__init__ self nlp mask_state=MaskState.unset# root
<block_start>self.root=TreeNode('root' mask_state=mask_state)<line_sep># construct NLP tree
<for_stmt>domain nlp.domains<block_start>domain_node=TreeNode(domain parent=self.root mask_state=mask_state)<line_sep>self.root.children.append(domain_node)<for_stmt>intent nlp.domains[domain].intents<block_start>intent_node=TreeNode(intent parent=domain_node mask_state=mask_state)<line_sep>domain_node.children.append(intent_node)<line_sep>entities=nlp.domains[domain].intents[intent].entities<for_stmt>entity entities<block_start>entity_node=TreeNode(entity parent=intent_node mask_state=mask_state)<line_sep>intent_node.children.append(entity_node)<for_stmt>role entities[entity].role_classifier.roles<block_start>role_node=TreeNode(role parent=intent_node mask_state=mask_state)<line_sep>entity_node.children.append(role_node)<block_end><block_end><block_end><block_end><block_end>@staticmethod<def_stmt>_convert_tree_node_to_values *nlp_components<block_start>result=[<none><for>_ ['domain' 'intent' 'entity' 'role']]<for_stmt>idx,component enumerate(nlp_components)<block_start>component_name=component.nlp_name<if>isinstance(component TreeNode)<else>component<line_sep>result[idx]=component_name<block_end><return>result<block_end><def_stmt>get_domain_nodes self<block_start><return>self.root.children<or>[]<block_end><def_stmt>get_intent_nodes self domain:Union[str TreeNode]<block_start>domain,_,_,_=self._convert_tree_node_to_values(domain)<for_stmt>domain_node self.root.children<block_start><if_stmt>domain_node.nlp_name<eq>domain<block_start><return>domain_node.children<block_end><block_end><return>[]<block_end><def_stmt>get_entity_nodes self domain:Union[str TreeNode] intent:Union[str TreeNode]<block_start>domain,intent,_,_=self._convert_tree_node_to_values(domain intent)<for_stmt>intent_node self.get_intent_nodes(domain)<block_start><if_stmt>intent_node.nlp_name<eq>intent<block_start><return>intent_node.children<block_end><block_end><return>[]<block_end><def_stmt>get_role_nodes self domain:Union[str TreeNode] intent:Union[str TreeNode] entity:Union[str TreeNode]<block_start>domain,intent,entity,_=self._convert_tree_node_to_values(domain intent entity)<for_stmt>entity_node self.get_entity_nodes(domain intent)<block_start><if_stmt>entity_node.nlp_name<eq>entity<block_start><return>entity_node.children<block_end><block_end><return>[]<block_end><def_stmt>update self mask_state:bool domain:Union[str TreeNode] intent:Optional[Union[str TreeNode]]=<none> entity:Optional[Union[str TreeNode]]=<none> role:Optional[Union[str TreeNode]]=<none><block_start>"""
This function updates the NLP tree with the given mask state
(see the usage sketch after this class).
Args:
mask_state: The mask state to apply; allow turns masking off, deny turns it on
domain: domain of the NLP component
intent: intent of the NLP component
entity: entity of the NLP component
role: role of the NLP component
"""<line_sep>domain_name,intent_name,entity_name,role_name=self._convert_tree_node_to_values(domain intent entity role)<line_sep># validation check
nlp_components=[domain_name intent_name entity_name role_name]<for_stmt>i range(1 len(nlp_components))<block_start><if_stmt>any(<not>component<for>component nlp_components[:i])<and>nlp_components[i]<block_start><raise>InvalidMaskError(f"Unable to resolve NLP hierarchy since "<concat>f"{str(nlp_components[i])} does not have a valid ancestor")<block_end><block_end><for_stmt>domain_node self.get_domain_nodes()<block_start><if_stmt>domain_node.nlp_name<ne>domain_name<block_start><continue><block_end><if_stmt><not>intent_name<block_start>domain_node.mask_state=mask_state<line_sep><return><block_end><for_stmt>intent_node self.get_intent_nodes(domain_node.nlp_name)<block_start><if_stmt>intent_name<not><in>('*' intent_node.nlp_name)<block_start><continue><block_end><if_stmt><not>entity_name<block_start>intent_node.mask_state=mask_state<line_sep># If the intent is * and it's terminal, e.g. "domain.*", then
# we mask the intent AND continue to iterate through the other
# intents of the domain
<if_stmt>intent_name<eq>'*'<block_start><continue><block_end># If the intent is not *, then it's terminal, eg. "domain.intent",
# then we mask the intent and end the function's operations
<return><block_end><for_stmt>entity_node self.get_entity_nodes(domain_node.nlp_name intent_node.nlp_name)<block_start><if_stmt>entity_name<not><in>('*' entity_node.nlp_name)<block_start><continue><block_end><if_stmt><not>role_name<block_start>entity_node.mask_state=mask_state<line_sep># If the entity is * and it's terminal, eg. "domain.intent.*", then
# we mask the entity AND continue to iterate through the other
# entities of the intent
<if_stmt>entity_name<eq>'*'<block_start><continue><block_end># If the entity is not *, then it's terminal, eg. "domain.intent.entity",
# then we mask the entity and end the function's operations
<return><block_end><for_stmt>role_node self.get_role_nodes(domain_node.nlp_name intent_node.nlp_name entity_node.nlp_name)<block_start><if_stmt>role_name<not><in>('*' role_node.nlp_name)<block_start><continue><block_end>role_node.mask_state=mask_state<if_stmt>role_name<eq>'*'<block_start><continue><block_end><return><block_end><block_end><block_end><block_end><block_end><def_stmt>_sync_nodes self<block_start>"""
This function performs two passes in sequence:
1. down-flow: propagate mask decisions down the tree
2. up-flow: propagate mask decisions up the tree
Each node has three mask states: allow, deny and unset. Allow and deny
are explicitly set by the user while unset is the default state.
For 1., if a parent has an explicit mask state, then all of its "eligible"
descendant components inherit that state. An "eligible" component is a node
still set to unset (i.e. not user-defined), since a user might have
explicitly set a child.
For 2., if all children of an NLP component are denied, then the parent
is denied as well. When we do an up-flow, we update nodes regardless of whether
they were explicitly set. This is because of the rule that if all the descendants
are masked, the parent should be masked as well, even if it was explicitly set
to the contrary.
"""<for_stmt>domain self.get_domain_nodes()<block_start>intents=self.get_intent_nodes(domain)<for_stmt>intent intents# sync down
<block_start><if_stmt>domain.mask_state<ne>MaskState.unset<and>intent.mask_state<eq>MaskState.unset<block_start>intent.mask_state=domain.mask_state<block_end>entities=self.get_entity_nodes(domain intent)<for_stmt>entity entities# sync down
<block_start><if_stmt>intent.mask_state<ne>MaskState.unset<and>entity.mask_state<eq>MaskState.unset<block_start>entity.mask_state=intent.mask_state<block_end>roles=self.get_role_nodes(domain intent entity)<for_stmt>role roles# sync down
<block_start><if_stmt>entity.mask_state<ne>MaskState.unset<and>role.mask_state<eq>MaskState.unset<block_start>role.mask_state=entity.mask_state<block_end><block_end># sync up entity-role
<if_stmt>roles<and>all(role.mask_state<eq>MaskState.deny<for>role roles)<block_start>entity.mask_state=MaskState.deny<block_end><block_end># We do not perform sync ups for entities since tagger models cannot
# deny their parent text classification models. For example,
# just because the developer wants to deny all the entities in a particular
# intent, doesn't mean the intent should be denied as well.
<block_end># sync up domain-intent
<if_stmt>intents<and>all(intent.mask_state<eq>MaskState.deny<for>intent intents)<block_start>domain.mask_state=MaskState.deny<block_end><block_end><block_end><def_stmt>_default_to_regular self d<block_start><if_stmt>isinstance(d defaultdict)<block_start>d={k:self._default_to_regular(v)<for>k,v d.items()}<block_end><return>d<block_end><def_stmt>to_dict self<arrow>dict<block_start>"""
This function serializes the TreeNlp into a dict, adding keys only for nodes
whose mask state is allow; deny and unset nodes are omitted.
"""<line_sep>self._sync_nodes()<line_sep># The result nests dicts as {domain: {intent: {entity: {role: {}}}}}
result=defaultdict(<lambda>:defaultdict(<lambda>:defaultdict(dict)))<for_stmt>domain self.get_domain_nodes()<block_start><if_stmt>domain.mask_state<block_start>result[domain.nlp_name]=defaultdict(<lambda>:defaultdict(dict))<block_end><for_stmt>intent self.get_intent_nodes(domain.nlp_name)<block_start><if_stmt>intent.mask_state<block_start>result[domain.nlp_name][intent.nlp_name]=defaultdict(dict)<block_end><for_stmt>entity self.get_entity_nodes(domain.nlp_name intent.nlp_name)<block_start><if_stmt>entity.mask_state<block_start>result[domain.nlp_name][intent.nlp_name][entity.nlp_name]={}<block_end><for_stmt>role self.get_role_nodes(domain.nlp_name intent.nlp_name entity.nlp_name)<block_start><if_stmt>role.mask_state<block_start>result[domain.nlp_name][intent.nlp_name][entity.nlp_name][role.nlp_name]={}<block_end><block_end><block_end><block_end><block_end>serialize_results=self._default_to_regular(result)<line_sep><return>serialize_results<block_end><block_end>
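# Hedged usage sketch (not part of the original module): the domain/intent
# names and the _StubNlp helper below are invented purely to illustrate the
# update()/to_dict() flow described in the docstrings above.
if __name__ == '__main__':
    class _StubNlp:
        """Minimal stand-in exposing the attributes TreeNlp.__init__ reads."""
        def __init__(self, **attrs):
            self.__dict__.update(attrs)

    _nlp = _StubNlp(domains={
        'weather': _StubNlp(intents={
            'get_forecast': _StubNlp(entities={}),
            'get_temperature': _StubNlp(entities={}),
        }),
    })
    _tree = TreeNlp(_nlp)
    # Allow a single intent; its siblings stay unset and are omitted from the dict.
    _tree.update(MaskState.allow, 'weather', 'get_forecast')
    print(_tree.to_dict())  # expected: {'weather': {'get_forecast': {}}}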
|
<import_stmt>base64<import_stmt>logging<import_stmt>queue<import_stmt>sys<import_from_stmt>urllib.parse urlparse<import_stmt>requests.exceptions<as>req_exc<import_from_stmt>libs jenkinslib<def_stmt>_logging_fatal msg *args **kwargs<block_start>logging.critical(msg *args **kwargs)<line_sep>exit(1)<block_end><class_stmt>HijackStdOut<block_start><def_stmt>__enter__ self# Preserve old stdout because we may already have hijacked it
<block_start>self.old_stdout=sys.stdout<line_sep>sys.stdout=sys.stderr<line_sep><return>sys.stdout<block_end><def_stmt>__exit__ self _type value traceback<block_start>sys.stdout=self.old_stdout<block_end><block_end><class_stmt>BasePlugin<block_start>"""JAF Plugin Base Class"""<line_sep>results_queue=queue.Queue()<line_sep>jobs_queue=queue.Queue()<def_stmt>__init__ self args<block_start>self.args=args<line_sep>logging.basicConfig(format="%(asctime)s - %(message)s")<line_sep>self.logging=logging.getLogger()<line_sep>self.logging.fatal=_logging_fatal<line_sep>self.server_url=urlparse(self.args.server)<if_stmt>args.output_file<block_start><try_stmt><block_start>sys.stdout=open(args.output_file "w")<block_end><except_stmt>Exception<block_start>self.logging.fatal("Specified Output File Path is invalid or inaccessible.")<block_end><block_end><block_end><def_stmt>_get_jenkins_server self cred<block_start>"""Setup initial connection to the jenkins server and handle authentication
:param cred: Credential dict"""<try_stmt><block_start><if_stmt>cred<block_start><if_stmt>"cookie"<in>cred<block_start><return>jenkinslib.Jenkins(self.args.server cookie=cred["cookie"] crumb=cred["crumb"] timeout=self.args.timeout headers={"User-Agent":self.args.user_agent} )<block_end><elif_stmt>"authheader"<in>cred<block_start><return>jenkinslib.Jenkins(self.args.server authheader="Basic "+base64.b64encode(cred["authheader"].encode("utf8")).decode("ascii") timeout=self.args.timeout headers={"User-Agent":self.args.user_agent} )<block_end><else_stmt><block_start><return>jenkinslib.Jenkins(self.args.server username=cred["username"] password=cred["password"] timeout=self.args.timeout headers={"User-Agent":self.args.user_agent} )<block_end><block_end><else_stmt><block_start><return>jenkinslib.Jenkins(self.args.server timeout=self.args.timeout headers={"User-Agent":self.args.user_agent} )<block_end><block_end><except_stmt>jenkinslib.JenkinsException<as>ex<block_start><if_stmt>"[403]"<in>str(ex).split("\n")[0]<block_start>self.logging.fatal("%s authentication failed or no access" self._get_username(cred))<block_end><else_stmt><block_start>self.logging.fatal("Unable to access Jenkins at: %s With User: %s For Reason:\n\t%s"%((self.server_url.netloc<if>len(self.server_url.netloc)<g>0<else>self.args.server) self._get_username(cred) str(ex).split("\n")[0] ))<block_end><block_end><except_stmt>(req_exc.SSLError req_exc.ConnectionError)<block_start>self.logging.fatal("Unable to connect to: "+(self.server_url.netloc<if>len(self.server_url.netloc)<g>0<else>self.args.server))<block_end><except_stmt>Exception<block_start>self.logging.exception("")<block_end><block_end><def_stmt>_get_username self cred<block_start>"""Utility function to return the user based on the cred type to display in error messages."""<if_stmt><not>cred<block_start><return>"Anonymous"<block_end><elif_stmt>"username"<in>cred<block_start><return>cred["username"]<block_end><elif_stmt>"authheader"<in>cred<block_start><return>cred["authheader"].split(":")[0]<block_end><elif_stmt><not>cred<block_start><return>"Anonymous"<block_end><else_stmt><block_start><return>"Cookie (User Unknown)"<block_end><block_end><def_stmt>_validate_jenkins_server_accessible self<block_start>"""Utility function to return if we appear to have access to the jenkins server or not"""<line_sep># Catch inaccessible server before slamming a bunch of threads at it.
cred=<none><line_sep>server=self._get_jenkins_server(cred)<if_stmt>server.basic_access_check()<ne>500<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end>
|
<import_stmt>os<import_stmt>tempfile<import_stmt>time<import_from_stmt>dusty.systems.virtualbox asset_is_set run_command_on_vm<import_from_stmt>dusty constants<import_from_stmt>dusty.source Repo<import_from_stmt>dusty.memoize reset_memoize_cache<import_from_stmt>...testcases DustyIntegrationTestCase<import_from_stmt>...fixtures assets_fixture<class_stmt>TestAssetsCLI(DustyIntegrationTestCase)<block_start><def_stmt>setUp self<block_start>super(TestAssetsCLI self).setUp()<line_sep>assets_fixture()<line_sep>self.run_command('repos override github.com/lib/a {}'.format(self.fake_local_repo_location))<line_sep>self.required_app_file=tempfile.mkstemp()[1]<with_stmt>open(self.required_app_file 'w')<as>f<block_start>f.write('required_app_contents')<block_end>self.optional_app_file=tempfile.mkstemp()[1]<with_stmt>open(self.optional_app_file 'w')<as>f<block_start>f.write('optional_app_contents')<block_end>self.required_lib_file=tempfile.mkstemp()[1]<with_stmt>open(self.required_lib_file 'w')<as>f<block_start>f.write('required_lib_contents')<block_end>self.optional_lib_file=tempfile.mkstemp()[1]<with_stmt>open(self.optional_lib_file 'w')<as>f<block_start>f.write('optional_lib_contents')<block_end>self.run_command('bundles activate bundle-a')<line_sep>self.run_command('assets set required_app_asset {}'.format(self.required_app_file))<line_sep>self.run_command('assets set required_lib_asset {}'.format(self.required_lib_file))<block_end><def_stmt>tearDown self<block_start>os.remove(self.required_app_file)<line_sep>os.remove(self.required_lib_file)<line_sep>os.remove(self.optional_app_file)<line_sep>os.remove(self.optional_lib_file)<line_sep>run_command_on_vm('sudo rm -rf {}'.format(constants.VM_ASSETS_DIR))<try_stmt><block_start>self.run_command('stop --rm')<block_end><except_stmt><block_start><pass><block_end>super(TestAssetsCLI self).tearDown()<block_end>@DustyIntegrationTestCase.retriable_assertion(.1 5)<def_stmt>assertAssetContentsRetriable self container_path asset_contents<block_start>self.assertFileContentsInContainer('app-a' container_path asset_contents)<block_end><def_stmt>test_asset_in_container self<block_start>self.run_command('up --no-pull')<line_sep>self.assertAssetContentsRetriable('/required_app_path' 'required_app_contents')<line_sep>self.assertAssetContentsRetriable('/required_lib_path' 'required_lib_contents')<block_end><def_stmt>test_required_asset_fail self<block_start>self.run_command('bundles activate bundle-a')<line_sep>self.run_command('assets unset required_app_asset')<with_stmt>self.assertRaises(self.CommandError)<block_start>output=self.run_command('up --no-pull')<block_end><block_end><def_stmt>test_optional_asset self<block_start>self.run_command('assets set optional_app_asset {}'.format(self.optional_app_file))<line_sep>self.run_command('assets set optional_lib_asset {}'.format(self.optional_lib_file))<line_sep>self.run_command('up --no-pull')<line_sep>self.assertAssetContentsRetriable('/optional_app_path' 'optional_app_contents')<line_sep>self.assertAssetContentsRetriable('/optional_lib_path' 'optional_lib_contents')<block_end><def_stmt>test_unset self<block_start>self.run_command('assets unset required_app_asset')<line_sep>self.run_command('assets unset required_lib_asset')<line_sep>reset_memoize_cache()<line_sep>self.assertFalse(asset_is_set('required_app_asset'))<line_sep>self.assertFalse(asset_is_set('required_lib_asset'))<block_end><def_stmt>test_read self<block_start><with_stmt>self.assertLogToClientOutput('required_app_contents')<block_start>self.run_command('assets 
read required_app_asset')<block_end><with_stmt>self.assertLogToClientOutput('required_lib_contents')<block_start>self.run_command('assets read required_lib_asset')<block_end><block_end><block_end>
|
# Program 18a: Generating a multifractal image.
# Save the image.
# See Figure 18.1(b).
<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>skimage exposure io img_as_uint<line_sep>p1,p2,p3,p4=0.3 0.4 0.25 0.05<line_sep>p=[[p1 p2] [p3 p4]]<for_stmt>k range(1 9 1)<block_start>M=np.zeros([2<power>(k+1) 2<power>(k+1)])<line_sep>M.tolist()<for_stmt>i range(2<power>k)<block_start><for_stmt>j range(2<power>k)<block_start>M[i][j]=p1<times>p[i][j]<line_sep>M[i][j+2<power>k]=p2<times>p[i][j]<line_sep>M[i+2<power>k][j]=p3<times>p[i][j]<line_sep>M[i+2<power>k][j+2<power>k]=p4<times>p[i][j]<block_end><block_end>p=M<block_end># Plot the multifractal image.
M=exposure.adjust_gamma(M 0.2)<line_sep>plt.imshow(M cmap='gray' interpolation='nearest')<line_sep># Save the image as a portable network graphics (png) image.
im=np.array(M dtype='float64')<line_sep>im=exposure.rescale_intensity(im out_range='float')<line_sep>im=img_as_uint(im)<line_sep>io.imsave('Multifractal.png' im)<line_sep>io.show()<line_sep>
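# Hedged sanity check (not part of the original program): each pass of the loop
# replaces p with the block matrix of pairwise products, so before the gamma
# adjustment above the 512x512 result has corner value p1**9 and total mass 1.
# A one-step version of the same construction, using invented names:
#
#   q = np.array([[0.3, 0.4], [0.25, 0.05]])
#   step = np.block([[0.3 * q, 0.4 * q], [0.25 * q, 0.05 * q]])
#   assert np.isclose(step.sum(), 1.0)       # probabilities still sum to one
#   assert np.isclose(step[0, 0], 0.3 ** 2)  # corner is p1 squared after one pass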
|
<import_stmt>json<import_from_stmt>ipaddress IPv4Address IPv4Interface IPv6Address IPv6Interface<import_from_stmt>django.contrib.auth.models Permission User<import_from_stmt>django.contrib.contenttypes.models ContentType<import_from_stmt>django.core.exceptions FieldDoesNotExist<import_from_stmt>django.db.models ManyToManyField<import_from_stmt>django.forms.models model_to_dict<import_from_stmt>django.test Client<import_from_stmt>django.test TestCase<as>_TestCase<import_from_stmt>requests.models HTTPError<import_from_stmt>rest_framework status<import_from_stmt>taggit.managers TaggableManager<import_from_stmt>.functions extract_form_failures<class_stmt>MockedResponse(object)<block_start><def_stmt>__init__ self status_code=status.HTTP_200_OK ok=<true> fixture=<none> content=<none><block_start>self.status_code=status_code<if_stmt>fixture<block_start>self.content=self.load_fixture(fixture)<block_end><elif_stmt>content<block_start>self.content=json.dumps(content)<block_end><else_stmt><block_start>self.content=<none><block_end>self.ok=ok<block_end><def_stmt>load_fixture self path<block_start><with_stmt>open(path "r")<as>f<block_start><return>f.read()<block_end><block_end><def_stmt>json self<block_start><return>json.loads(self.content)<block_end><def_stmt>raise_for_status self<block_start><if_stmt>(status.HTTP_400_BAD_REQUEST<le>self.status_code<le>status.HTTP_511_NETWORK_AUTHENTICATION_REQUIRED)<block_start><raise>HTTPError("" response=self)<block_end><block_end><block_end><class_stmt>TestCase(_TestCase)<block_start>user_permissions=()<def_stmt>setUp self# Create the test user and assign permissions
<block_start>self.user=User.objects.create_user(username="testuser")<line_sep>self.add_permissions(*self.user_permissions)<line_sep># Initialize the test client
self.client=Client()<line_sep>self.client.force_login(self.user)<block_end><def_stmt>add_permissions self *names<block_start>"""
Assign a set of permissions to the test user.
Accepts permission names in the form <app>.<action>_<model>.
"""<for_stmt>name names<block_start>app,codename=name.split(".")<line_sep>perm=Permission.objects.get(content_type__app_label=app codename=codename)<line_sep>self.user.user_permissions.add(perm)<block_end><block_end><def_stmt>remove_permissions self *names<block_start>"""
Remove a set of permissions from the test user, if assigned.
"""<for_stmt>name names<block_start>app,codename=name.split(".")<line_sep>perm=Permission.objects.get(content_type__app_label=app codename=codename)<line_sep>self.user.user_permissions.remove(perm)<block_end><block_end><def_stmt>assertHttpStatus self response expected_status<block_start>"""
Provide detail when receiving an unexpected HTTP response.
"""<line_sep>err_message=<none><line_sep># Construct an error message only if the test is going to fail
<if_stmt>response.status_code<ne>expected_status<block_start><if_stmt>hasattr(response "data")# REST API response; pass the response data through directly
<block_start>err=response.data<block_end><else_stmt># Try to extract form validation errors from the response HTML
<block_start>form_errors=extract_form_failures(response.content)<line_sep>err=form_errors<or>response.content<or>"No data"<block_end>err_message=f"Expected HTTP status {expected_status}; received {response.status_code}: {err}"<block_end>self.assertEqual(response.status_code expected_status err_message)<block_end><block_end><class_stmt>ModelTestCase(TestCase)<block_start>"""
Parent class for test cases which deal with models.
"""<line_sep>model=<none><def_stmt>add_permissions self *names<block_start>perms=[]<for_stmt>name names<block_start>perms.append(f"{self.model._meta.app_label}.{name}_{self.model._meta.model_name}")<block_end>super().add_permissions(*perms)<block_end><def_stmt>remove_permissions self *names<block_start>perms=[]<for_stmt>name names<block_start>perms.append(f"{self.model._meta.app_label}.{name}_{self.model._meta.model_name}")<block_end>super().remove_permissions(*perms)<block_end><def_stmt>_get_queryset self<block_start>"""
Returns a base queryset suitable for use in test methods.
"""<line_sep><return>self.model.objects.all()<block_end><def_stmt>prepare_instance self instance<block_start>"""
Override this method to perform manipulation of an instance prior to its
evaluation against test data.
"""<line_sep><return>instance<block_end><def_stmt>model_to_dict self instance fields api=<false><block_start>"""
Returns a dictionary representation of an instance.
"""<line_sep># Prepare the instance and call Django's model_to_dict() to extract all fields
model_dict=model_to_dict(self.prepare_instance(instance) fields=fields)<line_sep># Map any additional (non-field) instance attributes that were specified
<for_stmt>attr fields<block_start><if_stmt>hasattr(instance attr)<and>attr<not><in>model_dict<block_start>model_dict[attr]=getattr(instance attr)<block_end><block_end><for_stmt>key,value list(model_dict.items())<block_start><try_stmt><block_start>field=instance._meta.get_field(key)<block_end><except_stmt>FieldDoesNotExist# Attribute is not a model field
<block_start><continue><block_end># Handle ManyToManyFields
<if_stmt>value<and>type(field)<in>(ManyToManyField TaggableManager)<block_start><if_stmt>field.related_model<is>ContentType<block_start>model_dict[key]=sorted([f"{ct.app_label}.{ct.model}"<for>ct value])<block_end><else_stmt><block_start>model_dict[key]=sorted([obj.pk<for>obj value])<block_end><block_end><if_stmt>api<and>type(value)<in>(IPv4Address IPv6Address IPv4Interface IPv6Interface )<block_start>model_dict[key]=str(value)<block_end><if_stmt>api# Replace ContentType numeric IDs with <app_label>.<model>
<block_start><if_stmt>type(getattr(instance key))<is>ContentType<block_start>ct=ContentType.objects.get(pk=value)<line_sep>model_dict[key]=f"{ct.app_label}.{ct.model}"<block_end><block_end><block_end><return>model_dict<block_end><def_stmt>assertInstanceEqual self instance data exclude=<none> api=<false><block_start>"""
Compares a model instance to a dictionary, checking that its attribute values
match those specified in the dictionary.
"""<if_stmt>exclude<is><none><block_start>exclude=[]<block_end>fields=[k<for>k data.keys()<if>k<not><in>exclude]<line_sep>model_dict=self.model_to_dict(instance fields=fields api=api)<line_sep># Omit any dictionary keys which are not instance attributes or have been excluded
relevant_data={k:v<for>k,v data.items()<if>hasattr(instance k)<and>k<not><in>exclude}<line_sep>self.assertDictEqual(model_dict relevant_data)<block_end><block_end>
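# Hypothetical usage sketch (the Site model, the "dcim" app label and the URL
# are invented for illustration; only ModelTestCase and its helpers above are real):
#
#   class SiteTestCase(ModelTestCase):
#       model = Site
#
#       def test_can_view(self):
#           self.add_permissions("view")  # expands to "dcim.view_site"
#           site = self._get_queryset().first()
#           response = self.client.get(f"/dcim/sites/{site.pk}/")
#           self.assertHttpStatus(response, 200)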
|
# -*- coding: utf-8 -*-
"""
Integration tests for solidspy
"""<import_stmt>numpy<as>np<import_from_stmt>scipy.sparse.linalg eigsh<import_stmt>solidspy.postprocesor<as>pos<import_stmt>solidspy.assemutil<as>ass<import_stmt>solidspy.solutil<as>sol<def_stmt>test_4_elements <block_start>"""2×2 mesh with uniaxial load"""<line_sep>nodes=np.array([[0 0 0] [1 2 0] [2 2 2] [3 0 2] [4 1 0] [5 2 1] [6 1 2] [7 0 1] [8 1 1]])<line_sep>cons=np.array([[0 -1] [0 -1] [0 0] [0 0] [-1 -1] [0 0] [0 0] [0 0] [0 0]])<line_sep>eles=np.array([[0 1 0 0 4 8 7] [1 1 0 4 1 5 8] [2 1 0 7 8 6 3] [3 1 0 8 5 2 6]])<line_sep>loads=np.array([[3 0 1] [6 0 2] [2 0 1]])<line_sep>mater=np.array([[1.0 0.3]])<line_sep>assem_op,bc_array,neq=ass.DME(cons eles)<line_sep>stiff,_=ass.assembler(eles mater nodes neq assem_op)<line_sep>load_vec=ass.loadasem(loads bc_array neq)<line_sep>disp=sol.static_sol(stiff load_vec)<line_sep>disp_complete=pos.complete_disp(bc_array nodes disp)<line_sep>disp_analytic=np.array([[0.6 0.0] [-0.6 0.0] [-0.6 4.0] [0.6 4.0] [0.0 0.0] [-0.6 2.0] [0.0 4.0] [0.6 2.0] [0.0 2.0]])<assert_stmt>np.allclose(disp_complete disp_analytic)<block_end><def_stmt>test_2_elements <block_start>"""2x1 mesh cantilever beam"""<line_sep>nodes=np.array([[0 0 0] [1 1 0] [2 2 0] [3 0 1] [4 1 1] [5 2 1]])<line_sep>cons=np.array([[-1 -1] [0 0] [0 0] [-1 -1] [0 0] [0 0]])<line_sep>eles=np.array([[0 1 0 0 1 4 3] [1 1 0 1 2 5 4]])<line_sep>loads=np.array([[2 0 -0.5] [5 0 -0.5]])<line_sep>mater=np.array([[1.0 0.3]])<line_sep>assem_op,bc_array,neq=ass.DME(cons eles)<line_sep>stiff,_=ass.assembler(eles mater nodes neq assem_op)<line_sep>load_vec=ass.loadasem(loads bc_array neq)<line_sep>disp=sol.static_sol(stiff load_vec)<line_sep>disp_complete=pos.complete_disp(bc_array nodes disp)<line_sep>disp_analytic=1/45<times>np.array([[0 0] [-273 -390] [-364 -1144] [0 0] [273 -390] [364 -1144]])<assert_stmt>np.allclose(disp_complete disp_analytic)<block_end><def_stmt>test_beams <block_start>"""Beams with axial force"""<line_sep># Analytic problem
nodes=np.array([[0 0.0 0.0] [1 0.0 6.0] [2 4.0 6.0]])<line_sep>cons=np.array([[-1 -1 -1] [0 0 0] [-1 -1 -1]])<line_sep>mats=np.array([[200e9 1.33e-4 0.04]])<line_sep>elements=np.array([[0 8 0 0 1] [1 8 0 1 2]])<line_sep>loads=np.array([[1 -12000 -24000 -6000]])<line_sep>assem_op,bc_array,neq=ass.DME(cons elements ndof_node=3)<line_sep>stiff,_=ass.assembler(elements mats nodes neq assem_op sparse=<false>)<line_sep>load_vec=ass.loadasem(loads bc_array neq ndof_node=3)<line_sep>solution=sol.static_sol(stiff load_vec)<line_sep>solution_analytic=np.array([-6.29e-6 -1.695e-5 -0.13e-3])<assert_stmt>np.allclose(solution solution_analytic rtol=1e-1)<block_end><def_stmt>test_eigs_truss <block_start>"""Eigenvalues of a bar"""<line_sep>nnodes=513<line_sep>x=np.linspace(0 np.pi nnodes)<line_sep>nodes=np.zeros((nnodes 3))<line_sep>nodes[: 0]=range(nnodes)<line_sep>nodes[: 1]=x<line_sep>cons=np.zeros((nnodes 2))<line_sep>cons[: 1]=-1<line_sep>cons[0 0]=-1<line_sep>cons[-1 0]=-1<line_sep>mats=np.array([[1.0 1.0 1.0]])<line_sep>elements=np.zeros((nnodes-1 5) dtype=int)<line_sep>elements[: 0]=range(nnodes-1)<line_sep>elements[: 1]=6<line_sep>elements[: 3]=range(nnodes-1)<line_sep>elements[: 4]=range(1 nnodes)<line_sep>assem_op,bc_array,neq=ass.DME(cons elements)<line_sep>stiff,mass=ass.assembler(elements mats nodes neq assem_op)<line_sep>vals,_=eigsh(stiff M=mass which="SM")<assert_stmt>np.allclose(vals np.linspace(1 6 6)<power>2 rtol=1e-2)<block_end><def_stmt>test_eigs_beam <block_start>"""Eigenvalues of a cantilever beam"""<line_sep>nnodes=10<line_sep>x=np.linspace(0 np.pi nnodes)<line_sep>nodes=np.zeros((nnodes 3))<line_sep>nodes[: 0]=range(nnodes)<line_sep>nodes[: 1]=x<line_sep>cons=np.zeros((nnodes 3))<line_sep>cons[0 :]=-1<line_sep>cons[: 0]=-1<line_sep>mats=np.array([[1.0 1.0 1.0 1.0]])<line_sep>elements=np.zeros((nnodes-1 5) dtype=int)<line_sep>elements[: 0]=range(nnodes-1)<line_sep>elements[: 1]=7<line_sep>elements[: 3]=range(nnodes-1)<line_sep>elements[: 4]=range(1 nnodes)<line_sep>assem_op,bc_array,neq=ass.DME(cons elements ndof_node=3)<line_sep>stiff,mass=ass.assembler(elements mats nodes neq assem_op)<line_sep>vals,_=eigsh(stiff M=mass which="SM")<line_sep>vals_analytic=np.array([0.596864162694467 1.49417561427335 2.50024694616670 3.49998931984744 4.50000046151508 5.49999998005609])<assert_stmt>np.allclose(vals<power>0.25 vals_analytic rtol=1e-2)<block_end>
|
<import_stmt>tensorflow<as>tf<line_sep>_INITIAL_SIGMA2_VALUE=0.1<class_stmt>CoreRNN(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self observation_dim=256 rnn_hidden_size=512 rnn_depth=1 rnn_dropout=0.0 rnn_cell=tf.keras.layers.GRU **kwargs <block_start>super(CoreRNN self).__init__(name='CoreRNN' **kwargs)<line_sep># self.lstm = tf.keras.Sequential()
# for i in range(rnn_depth):
# self.lstm.add(
# tf.keras.layers.LSTM(
# rnn_hidden_size,
# return_sequences = True,
# return_state = True,
# kernel_regularizer = tf.keras.regularizers.l2(1e-5),
# recurrent_regularizer = tf.keras.regularizers.l2(1e-5),
# )
# )
self.lstm=tf.keras.layers.LSTM(rnn_hidden_size return_sequences=<true> return_state=<true> kernel_regularizer=tf.keras.regularizers.l2(1e-5) recurrent_regularizer=tf.keras.regularizers.l2(1e-5) )<line_sep>self.linear_mean1=tf.keras.layers.Dense(units=rnn_hidden_size dtype=tf.float32 activation=tf.nn.relu kernel_regularizer=tf.keras.regularizers.l2(1e-5) )<line_sep>self.linear_mean2=tf.keras.layers.Dense(units=observation_dim dtype=tf.float32 kernel_regularizer=tf.keras.regularizers.l2(1e-5) )<block_end><def_stmt>call self x hidden=<none> training=<true><block_start>output_seq=self.lstm(x initial_state=hidden training=training)<line_sep>mean=self.linear_mean2(self.linear_mean1(output_seq[0]))<line_sep><return>mean output_seq[1:]<block_end><block_end><class_stmt>BeamState<block_start>"""Structure that contains necessary states for beam search."""<def_stmt>__init__ self source=<none><block_start><if_stmt><not>source<block_start>self.mean_set=[]<line_sep>self.hidden_set=[]<line_sep>self.neg_likelihood=0<line_sep>self.trace=[]<line_sep>self.block_counts=[]<block_end><else_stmt><block_start>self.mean_set=source.mean_set.copy()<line_sep>self.hidden_set=source.hidden_set.copy()<line_sep>self.trace=source.trace.copy()<line_sep>self.block_counts=source.block_counts.copy()<line_sep>self.neg_likelihood=source.neg_likelihood<block_end><block_end><def_stmt>append self mean hidden cluster<block_start>"""Append new item to the BeamState."""<line_sep>self.mean_set.append(mean.clone())<line_sep>self.hidden_set.append(hidden.clone())<line_sep>self.block_counts.append(1)<line_sep>self.trace.append(cluster)<block_end><block_end><class_stmt>Model(tf.keras.Model)<block_start><def_stmt>__init__ self observation_dim=256 rnn_hidden_size=512 rnn_depth=1 rnn_dropout=0.0 sigma2=<none> transition_bias=<none> crp_alpha=1.0 **kwargs <block_start>super(Model self).__init__(name='uis-rnn' **kwargs)<line_sep>self.rnn_model=CoreRNN(observation_dim rnn_hidden_size rnn_depth rnn_dropout)<line_sep>self.estimate_sigma2=sigma2<is><none><line_sep>self.estimate_transition_bias=transition_bias<is><none><line_sep>sigma2=_INITIAL_SIGMA2_VALUE<if>self.estimate_sigma2<else>args.sigma2<line_sep>self.sigma2=sigma2<times>tf.get_variable(name='sigma2' shape=[observation_dim] initializer=tf.ones_initializer() )<line_sep>self.transition_bias=transition_bias<line_sep>self.transition_bias_denominator=0.0<line_sep>self.crp_alpha=crp_alpha<block_end><def_stmt>call self x hidden=<none> training=<true><block_start><return>self.rnn_model(x hidden=hidden training=training)<block_end><block_end>
|
# -*- coding: utf-8 -*-
INVALID_URLS=['http://' 'http://.' 'http://..' 'http://../' 'http://?' 'http://??' 'http://??/' 'http://#' 'http://##' 'http://##/' 'http://foo.bar?q=Spaces should be encoded' '//' '//a' '///a' '///' 'http:///a' 'foo.com' 'rdar://1234' 'h://test' 'http:// shouldfail.com' ':// should fail' 'http://foo.bar/foo(bar)baz quux' 'htto://foo.bar/' 'http://-error-.invalid/' 'http://-a.b.co' 'http://a.b-.co' 'http://.www.foo.bar/' ]<line_sep>VALID_URLS=['http://foo.com/blah_blah' 'http://foo.com/blah_blah/' 'http://foo.com/blah_blah_(wikipedia)' 'http://foo.com/blah_blah_(wikipedia)_(again)' 'http://www.example.com/wpstyle/?p=364' 'https://www.example.com/foo/?bar=baz&inga=42&quux' 'http://172.16.31.10/' 'http://172.16.31.10:8080/' 'http://foo.com/blah_(wikipedia)#cite-132' 'http://foo.com/blah_(wikipedia)_blah#cite-1' u'http://foo.com/unicode_(✪)_in_parens' 'http://foo.com/(something)?after=parens' 'http://sub.damowmow.com/' 'http://code.google.com/events/#&product=browser' 'http://j.mp' 'ftp://foo.bar/baz' 'http://foo.bar/?q=Test%20URL-encoded%20stuff' 'http://1337.net' 'http://a.b-c.de' 'http://172.16.58.3' 'http://a.b--c.de/' ]<line_sep>
|
"""Commands for running python functions and scripts"""<import_stmt>inspect<import_stmt>shlex<import_stmt>sys<import_stmt>json<import_from_stmt>html escape<import_from_stmt>typing Union Callable List<import_from_stmt>..incremental_processing file_dependencies<import_from_stmt>..logging logger<import_from_stmt>mara_page html _<import_from_stmt>.. pipelines<class_stmt>RunFunction(pipelines.Command)<block_start><def_stmt>__init__ self function:Callable=<none> args:[str]=<none> file_dependencies:[str]=<none><arrow><none><block_start>"""
Runs an arbitrary Python function
Args:
function: The function to run (called as function(*args))
args: A list of arguments to be passed to the function
file_dependencies: Only run when at least one of these files has changed since the last pipeline run
Note:
if you want to bind arguments at definition time, wrap the call in a lambda function
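Example (illustrative; load_customers and the file path are invented names,
not part of this module):
RunFunction(lambda: load_customers('full'))
RunFunction(load_customers, args=['full'], file_dependencies=['data/customers.csv'])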
"""<line_sep>self.function=function<line_sep>self.args=args<or>[]<line_sep>self.file_dependencies=file_dependencies<or>[]<block_end><def_stmt>run self<arrow>bool<block_start>dependency_type='RunFunction '+self.function.__name__<if_stmt>self.file_dependencies<block_start><assert_stmt>(self.parent)<line_sep>pipeline_base_path=self.parent.parent.base_path()<if_stmt><not>file_dependencies.is_modified(self.node_path() dependency_type pipeline_base_path self.file_dependencies)<block_start>logger.log('no changes')<line_sep><return><true><block_end><block_end><if_stmt><not>self.function(*self.args)<block_start><return><false><block_end><if_stmt>self.file_dependencies<block_start>file_dependencies.update(self.node_path() dependency_type pipeline_base_path self.file_dependencies)<block_end><return><true><block_end><def_stmt>html_doc_items self<arrow>[(str str)]<block_start><return>[('function' _.pre[escape(str(self.function))]) ('args' _.tt[repr(self.args)]) (_.i['implementation'] html.highlight_syntax(inspect.getsource(self.function) 'python')) ('file dependencies' [_.i[dependency _.br]<for>dependency self.file_dependencies])]<block_end><block_end><class_stmt>ExecutePython(pipelines.Command)<block_start><def_stmt>__init__ self file_name:Union[Callable str] args:Union[Callable List[str]]=<none> file_dependencies:[str]=<none><arrow><none><block_start>"""
Runs a Python script in a separate interpreter process
Args:
file_name: the path of the file to run, relative to the pipeline directory
args: A list of arguments to be passed to the script
file_dependencies: Only run when at least one of these files has changed since the last pipeline run
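Example (illustrative; the script path and arguments are invented):
ExecutePython('load_customers.py', args=['--full'], file_dependencies=['load_customers.py'])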
"""<line_sep>self._file_name=file_name<line_sep>self._args=args<or>[]<line_sep>self.file_dependencies=file_dependencies<or>[]<block_end>@property<def_stmt>file_name self<block_start><return>self._file_name()<if>callable(self._file_name)<else>self._file_name<block_end>@property<def_stmt>args self<block_start><return>self._args()<if>callable(self._args)<else>self._args<block_end><def_stmt>run self<arrow>bool<block_start>dependency_type='ExecutePython '+self.file_name<if_stmt>self.file_dependencies<block_start><assert_stmt>(self.parent)<line_sep>pipeline_base_path=self.parent.parent.base_path()<if_stmt><not>file_dependencies.is_modified(self.node_path() dependency_type pipeline_base_path self.file_dependencies)<block_start>logger.log('no changes')<line_sep><return><true><block_end><block_end><if_stmt><not>super().run()<block_start><return><false><block_end><if_stmt>self.file_dependencies<block_start>file_dependencies.update(self.node_path() dependency_type pipeline_base_path self.file_dependencies)<block_end><return><true><block_end><def_stmt>shell_command self<block_start><return>f'{shlex.quote(sys.executable)} -u "{self.parent.parent.base_path()/self.file_name}" {" ".join(map(str self.args))}'<block_end><def_stmt>html_doc_items self<block_start>path=self.parent.parent.base_path()/self.file_name<line_sep><return>[('file name' _.i[self.file_name]) ('args' _.tt[json.dumps(self.args)]) (_.i['content'] html.highlight_syntax(path.read_text().strip('\n')<if>path.exists()<else>'' 'python')) (_.i['shell command'] html.highlight_syntax(self.shell_command() 'bash')) ('file dependencies' [_.i[dependency _.br]<for>dependency self.file_dependencies])]<block_end><block_end>
|
# Time: O(n + log c), c is the number of candies
# Space: O(1)
<class_stmt>Solution(object)<block_start><def_stmt>distributeCandies self candies num_people<block_start>"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""<line_sep># find max integer p s.t. sum(1 + 2 + ... + p) <= C
# => remaining : 0 <= C-(1+p)*p/2 < p+1
# => -2p-2 < p^2+p-2C <= 0
# => 2C+1/4 < (p+3/2)^2 and (p+1/2)^2 <= 2C+1/4
# => sqrt(2C+1/4)-3/2 < p <= sqrt(2C+1/4)-1/2
# => p = floor(sqrt(2C+1/4)-1/2)
p=int((2<times>candies+0.25)<power>0.5-0.5)<line_sep>remaining=candies-(p+1)<times>p<floordiv>2<line_sep>rows,cols=divmod(p num_people)<line_sep>result=[0]<times>num_people<for_stmt>i xrange(num_people)<block_start>result[i]=(i+1)<times>(rows+1)+(rows<times>(rows+1)<floordiv>2)<times>num_people<if>i<l>cols<else>(i+1)<times>rows+((rows-1)<times>rows<floordiv>2)<times>num_people<block_end>result[cols]<augadd>remaining<line_sep><return>result<block_end><block_end># Time: O(n + logc), c is the number of candies
# Space: O(1)
<class_stmt>Solution2(object)<block_start><def_stmt>distributeCandies self candies num_people<block_start>"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""<line_sep># find max integer p s.t. sum(1 + 2 + ... + p) <= C
left,right=1 candies<while_stmt>left<le>right<block_start>mid=left+(right-left)<floordiv>2<if_stmt><not>((mid<le>candies<times>2<floordiv>(mid+1)))<block_start>right=mid-1<block_end><else_stmt><block_start>left=mid+1<block_end><block_end>p=right<line_sep>remaining=candies-(p+1)<times>p<floordiv>2<line_sep>rows,cols=divmod(p num_people)<line_sep>result=[0]<times>num_people<for_stmt>i xrange(num_people)<block_start>result[i]=(i+1)<times>(rows+1)+(rows<times>(rows+1)<floordiv>2)<times>num_people<if>i<l>cols<else>(i+1)<times>rows+((rows-1)<times>rows<floordiv>2)<times>num_people<block_end>result[cols]<augadd>remaining<line_sep><return>result<block_end><block_end># Time: O(sqrt(c)), c is the number of candies
# Space: O(1)
<class_stmt>Solution3(object)<block_start><def_stmt>distributeCandies self candies num_people<block_start>"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""<line_sep>result=[0]<times>num_people<line_sep>i=0<while_stmt>candies<ne>0<block_start>result[i%num_people]<augadd>min(candies i+1)<line_sep>candies<augsub>min(candies i+1)<line_sep>i<augadd>1<block_end><return>result<block_end><block_end>
|
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>cape.client.exceptions CapeException<import_from_stmt>cape_webservices.tests.test_api.conftest CapeClient API_URL<import_stmt>pytest time<line_sep># pytest automatically imports cape_client fixture in conftest.py
<def_stmt>test_token cape_client<block_start>token=cape_client.get_user_token()<assert_stmt>token<block_end><def_stmt>test_admin_token cape_client<block_start>admin_token=cape_client.get_admin_token()<line_sep># Authenticate another client using the admin token
cape_client2=CapeClient(API_URL admin_token)<line_sep>token=cape_client2.get_user_token()<assert_stmt>token<eq>cape_client.get_user_token()<block_end><def_stmt>test_saved_replies cape_client# Get Saved Replies
<block_start>saved_replies=cape_client.get_saved_replies()['items']<line_sep># Delete all existing saved replies
<for_stmt>saved_reply saved_replies<block_start>cape_client.delete_saved_reply(saved_reply['id'])<block_end><assert_stmt>cape_client.get_saved_replies()['totalItems']<eq>0<line_sep># Create saved replies
reply_id=cape_client.create_saved_reply(question='Question' answer='Answer')['replyId']<line_sep>cape_client.create_saved_reply(question='Another Question' answer='Another Answer')<line_sep>saved_replies=cape_client.get_saved_replies()['items']<assert_stmt>len(saved_replies)<eq>2<line_sep># Check number_of_items and offset
saved_replies=cape_client.get_saved_replies(number_of_items=1)['items']<assert_stmt>len(saved_replies)<eq>1<line_sep>saved_replies=cape_client.get_saved_replies(offset=1)['items']<assert_stmt>len(saved_replies)<eq>1<line_sep># Search
saved_replies=cape_client.get_saved_replies(search_term='another')<assert_stmt>saved_replies['totalItems']<eq>1<line_sep># Check searchReplyId
specific_replies=cape_client.get_saved_replies(saved_reply_ids=[saved_replies['items'][0]['id']])<assert_stmt>specific_replies['items'][0]['id']<eq>saved_replies['items'][0]['id']<line_sep># Add paraphrase questions
paraphrase_id=cape_client.add_paraphrase_question(reply_id question='Paraphrase Question')<line_sep>cape_client.add_paraphrase_question(reply_id question='Another Paraphrase Question')<line_sep>cape_client.add_paraphrase_question(reply_id question='Yet Another Paraphrase Question')<for_stmt>saved_reply cape_client.get_saved_replies()['items']<block_start><if_stmt>saved_reply['id']<eq>reply_id<block_start><assert_stmt>len(saved_reply['paraphraseQuestions'])<eq>3<block_end><else_stmt><block_start><assert_stmt>len(saved_reply['paraphraseQuestions'])<eq>0<block_end><block_end># Modify paraphrase question
modified_paraphrase_text='Modified Paraphrase Question'<line_sep>cape_client.edit_paraphrase_question(paraphrase_id modified_paraphrase_text)<line_sep>saved_reply=[saved_reply<for>saved_reply cape_client.get_saved_replies()['items']<if>saved_reply['id']<eq>reply_id][0]<for_stmt>paraphrase_question saved_reply['paraphraseQuestions']<block_start><if_stmt>paraphrase_question['id']<eq>paraphrase_id<block_start><assert_stmt>paraphrase_question['question']<eq>modified_paraphrase_text<block_end><else_stmt><block_start><assert_stmt>paraphrase_question['question']<ne>modified_paraphrase_text<block_end><block_end># Delete paraphrase question
cape_client.delete_paraphrase_question(paraphrase_id)<for_stmt>saved_reply cape_client.get_saved_replies()['items']<block_start><if_stmt>saved_reply['id']<eq>reply_id<block_start><assert_stmt>len(saved_reply['paraphraseQuestions'])<eq>2<block_end><else_stmt><block_start><assert_stmt>len(saved_reply['paraphraseQuestions'])<eq>0<block_end><block_end># Modify the canonical question
modified_canonical_question='Modified Canonical Question'<line_sep>cape_client.edit_canonical_question(reply_id modified_canonical_question)<line_sep>saved_reply=[saved_reply<for>saved_reply cape_client.get_saved_replies()['items']<if>saved_reply['id']<eq>reply_id][0]<assert_stmt>saved_reply['canonicalQuestion']<eq>modified_canonical_question<line_sep># Add answers
answer_id=cape_client.add_answer(reply_id answer='Added Answer')<line_sep>cape_client.add_answer(reply_id answer='Another Answer')<line_sep>cape_client.add_answer(reply_id answer='Yet Another Answer')<line_sep>cape_client.add_answer(reply_id answer='You guessed right, another answer')<for_stmt>saved_reply cape_client.get_saved_replies()['items']<block_start><if_stmt>saved_reply['id']<eq>reply_id<block_start><assert_stmt>len(saved_reply['answers'])<eq>5<block_end><else_stmt><block_start><assert_stmt>len(saved_reply['answers'])<eq>1<block_end><block_end># Modify answer
modified_answer_text='Modified Answer Text'<line_sep>cape_client.edit_answer(answer_id modified_answer_text)<line_sep>saved_reply=[saved_reply<for>saved_reply cape_client.get_saved_replies()['items']<if>saved_reply['id']<eq>reply_id][0]<for_stmt>answer saved_reply['answers']<block_start><if_stmt>answer['id']<eq>answer_id<block_start><assert_stmt>answer['answer']<eq>modified_answer_text<block_end><else_stmt><block_start><assert_stmt>answer['answer']<ne>modified_answer_text<block_end><block_end># Delete answer
cape_client.delete_answer(answer_id)<for_stmt>saved_reply cape_client.get_saved_replies()['items']<block_start><if_stmt>saved_reply['id']<eq>reply_id<block_start><assert_stmt>len(saved_reply['answers'])<eq>4<block_end><else_stmt><block_start><assert_stmt>len(saved_reply['answers'])<eq>1<block_end><block_end># Try to delete an answer from a saved reply with only 1 answer
reply_id=cape_client.create_saved_reply('New Question' 'New Answer')['replyId']<line_sep>saved_reply=[saved_reply<for>saved_reply cape_client.get_saved_replies()['items']<if>saved_reply['id']<eq>reply_id][0]<line_sep>answer_id=saved_reply['answers'][0]['id']<with_stmt>pytest.raises(CapeException)<block_start>cape_client.delete_answer(answer_id)<block_end>saved_reply=[saved_reply<for>saved_reply cape_client.get_saved_replies()['items']<if>saved_reply['id']<eq>reply_id][0]<assert_stmt>len(saved_reply['answers'])<eq>1<block_end><def_stmt>test_annotations cape_client<block_start>cape_client.add_annotation('Where is the cat?' 'On the mat' 'Animals' start_offset=12 end_offset=24)<line_sep>annotations=cape_client.get_annotations(search_term='cat')<assert_stmt>annotations['totalItems']<eq>1<line_sep>response=cape_client.add_annotation('Where is the dog?' 'On the log' 'Animals' start_offset=34 end_offset=58)<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>annotations['totalItems']<eq>1<line_sep>answers=cape_client.answer('Where is the dog?')<assert_stmt>answers[0]['answerText']<eq>'On the log'<assert_stmt>answers[0]['sourceType']<eq>'annotation'<assert_stmt>answers[0]['metadata']['startOffset']<eq>34<line_sep>annotations=cape_client.get_annotations(document_ids=['Animals'])<assert_stmt>annotations['totalItems']<eq>2<line_sep>answer_id=cape_client.add_annotation_answer(response['annotationId'] 'Another answer')<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>len(annotations['items'][0]['answers'])<eq>2<line_sep>cape_client.edit_annotation_answer(answer_id 'Yet another answer')<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>annotations['items'][0]['answers'][1]['answer']<eq>'Yet another answer'<line_sep>cape_client.delete_annotation_answer(answer_id)<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>len(annotations['items'][0]['answers'])<eq>1<line_sep>cape_client.edit_annotation_canonical_question(response['annotationId'] "New question?")<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>annotations['items'][0]['canonicalQuestion']<eq>"New question?"<line_sep>question_id=cape_client.add_annotation_paraphrase_question(response['annotationId'] "Another question?")<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>annotations['items'][0]['paraphraseQuestions'][0]['question']<eq>"Another question?"<line_sep>cape_client.edit_annotation_paraphrase_question(question_id "Yet another question?")<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>annotations['items'][0]['paraphraseQuestions'][0]['question']<eq>"Yet another question?"<line_sep>cape_client.delete_annotation_paraphrase_question(question_id)<line_sep>annotations=cape_client.get_annotations(annotation_ids=[response['annotationId']])<assert_stmt>len(annotations['items'][0]['paraphraseQuestions'])<eq>0<line_sep>cape_client.delete_annotation(response['annotationId'])<line_sep>annotations=cape_client.get_annotations(document_ids=['Animals'])<assert_stmt>annotations['totalItems']<eq>1<line_sep>cape_client.add_annotation('Where is the cat?' 'On my hat' 'Strange Animals' start_offset=12 end_offset=24)<line_sep>answers=cape_client.answer('Where is the cat?' 
document_ids=['Animals'])<assert_stmt>answers[0]['answerText']<eq>'On the mat'<line_sep>answers=cape_client.answer('Where is the cat?' document_ids=['Strange Animals'])<assert_stmt>answers[0]['answerText']<eq>'On my hat'<line_sep>cape_client.add_annotation('Does this have metadata?' 'Yes' 'Custom Stuff' start_offset=0 end_offset=3 metadata={'custom_field':'testing'})<line_sep>answers=cape_client.answer('Does this have metadata?' document_ids=['Custom Stuff'])<assert_stmt>answers[0]['metadata']['custom_field']<eq>'testing'<for_stmt>annotation cape_client.get_annotations()['items']<block_start>cape_client.delete_annotation(annotation['id'])<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.delete_annotation('fakeid')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.add_annotation_answer('fakeid' 'fake answer')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.delete_annotation_answer('fakeid')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.edit_annotation_answer('fakeid' 'fake answer')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.edit_annotation_canonical_question('fakeid' 'fake question')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.add_annotation_paraphrase_question('fakeid' 'fake question')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.edit_annotation_paraphrase_question('fakeid' 'fake question')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.delete_annotation_paraphrase_question('fakeid')<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.add_annotation('Do we have both a start and end offset?' 'No' 'Failures' end_offset=43)<block_end><with_stmt>pytest.raises(CapeException)<block_start>cape_client.add_annotation('Do we have both a start and end offset?' 
'No' 'Failures' start_offset=12)<block_end><block_end><def_stmt>test_invalid_delete_reply cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.delete_saved_reply('fake')<block_end><block_end><def_stmt>test_invalid_edit_canonical_question cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.edit_canonical_question('fake' 'Test')<block_end><block_end><def_stmt>test_invalid_add_paraphrase_question cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.add_paraphrase_question('fake' 'Test')<block_end><block_end><def_stmt>test_invalid_edit_paraphrase_question cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.edit_paraphrase_question('fake' 'Test')<block_end><block_end><def_stmt>test_invalid_delete_paraphrase_question cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.delete_paraphrase_question('fake')<block_end><block_end><def_stmt>test_invalid_add_answer cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.add_answer('fake' 'Test')<block_end><block_end><def_stmt>test_invalid_edit_answer cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.edit_answer('fake' 'Test')<block_end><block_end><def_stmt>test_documents cape_client<block_start>cape_client.upload_document(title='Test' text='Testing' origin='A test' replace=<true>)<line_sep>documents=cape_client.get_documents()['items']<assert_stmt>len(documents)<g>0<for_stmt>document documents<block_start>cape_client.delete_document(document['id'])<block_end>documents=cape_client.get_documents()['items']<assert_stmt>len(documents)<eq>0<block_end><def_stmt>test_answer cape_client<block_start>documents=cape_client.get_documents()['items']<for_stmt>document documents<block_start>cape_client.delete_document(document['id'])<block_end>cape_client.upload_document(title='Sky' text='The sky is blue.' origin='sky.txt' replace=<true>)<line_sep>answers=cape_client.answer('What colour is the sky?' source_type="document")<assert_stmt>answers[0]['answerText']<eq>'blue'<block_end><def_stmt>test_answer_inline cape_client<block_start>documents=cape_client.get_documents()['items']<for_stmt>document documents<block_start>cape_client.delete_document(document['id'])<block_end>answers=cape_client.answer('What colour is the sky?' source_type="document" text="The sky is blue")<assert_stmt>answers[0]['answerText']<eq>'blue'<block_end><def_stmt>test_answer_from_saved_replies cape_client_answer<block_start>cape_client=cape_client_answer<line_sep>print(cape_client.get_saved_replies()['totalItems'])<line_sep>cape_client.create_saved_reply(question='What is a dog?' answer='A dog is a pet')<line_sep>print(cape_client.get_saved_replies()['totalItems'])<line_sep>cape_client.create_saved_reply(question='What is a horse?' answer='A horse is a pet')<line_sep>print(cape_client.get_saved_replies()['totalItems'])<line_sep>cape_client.create_saved_reply(question='What is a cat?' answer='A cat is a pet')<line_sep>print(cape_client.get_saved_replies()['totalItems'])<line_sep>cape_client.create_saved_reply(question='What is a fish?' answer='A fish is a pet')<line_sep>print(cape_client.get_saved_replies()['totalItems'])<line_sep>cape_client.create_saved_reply(question='What is a potato?' 
answer='A potato is a vegetable')<line_sep>print(cape_client.get_saved_replies()['totalItems'])<assert_stmt>cape_client.get_saved_replies()['totalItems']<eq>5<line_sep># Answer
answers=cape_client.answer('What is a fish?' source_type="saved_reply")<assert_stmt>answers[0]['answerText']<eq>'A fish is a pet'<block_end><def_stmt>test_inbox cape_client<block_start>events=cape_client.get_inbox()['items']<for_stmt>event events<block_start>cape_client.archive_inbox(event['id'])<block_end>events=cape_client.get_inbox()['items']<assert_stmt>len(events)<eq>0<line_sep>cape_client.answer('What colour is the sky?')<line_sep># HACK: Saving inbox events is sent to a worker and doesn't block, so we can't know for sure when it finishes
time.sleep(1)<line_sep>events=cape_client.get_inbox()['items']<line_sep>item=events[0]<assert_stmt>len(events)<eq>1<line_sep>events=cape_client.get_inbox(read=<true>)['items']<assert_stmt>len(events)<eq>0<line_sep>cape_client.mark_inbox_read(item['id'])<line_sep>events=cape_client.get_inbox(read=<true>)['items']<assert_stmt>len(events)<eq>1<line_sep>cape_client.archive_inbox(item['id'])<line_sep>events=cape_client.get_inbox(read=<true>)['items']<assert_stmt>len(events)<eq>0<block_end><def_stmt>test_default_threshold cape_client<block_start>cape_client.set_default_threshold('high')<line_sep>threshold=cape_client.get_default_threshold()<assert_stmt>threshold<eq>'high'<line_sep>cape_client.set_default_threshold('medium')<line_sep>threshold=cape_client.get_default_threshold()<assert_stmt>threshold<eq>'medium'<block_end><def_stmt>test_invalid_threshold cape_client<block_start><with_stmt>pytest.raises(CapeException)<block_start>cape_client.set_default_threshold('potato')<block_end><block_end><def_stmt>test_user_profile cape_client<block_start>profile=cape_client.get_profile()<assert_stmt>profile<eq>{'username':'testuser' 'plan':'free' 'termsAgreed':<false> 'onboardingCompleted':<false> 'forwardEmail':<none> 'forwardEmailVerified':<false>}<block_end><def_stmt>test_spans cape_client:CapeClient<block_start><for_stmt>document cape_client.get_documents()['items']<block_start>cape_client.delete_document(document['id'])<block_end><for_stmt>saved_reply cape_client.get_saved_replies()['items']<block_start>cape_client.delete_saved_reply(saved_reply['id'])<block_end>texts={}<line_sep>texts['Pizza']='I like pizzas.'<line_sep>texts['Sky']="The sky is blue."<line_sep>texts['Colour']="My favorite colour is red"<line_sep>questions={"Do you like pizzas ?" "What is red?" "what is sky?"}<for_stmt>title,text texts.items()<block_start>cape_client.upload_document(title text document_id=title)<block_end><for_stmt>question questions<block_start>answer=cape_client.answer(question)[0]<assert_stmt>answer['answerText']<in>answer['answerContext']<assert_stmt>answer['answerText']<eq>texts[answer['sourceId']][answer['answerTextStartOffset']:answer['answerTextEndOffset']]<assert_stmt>answer['answerContext']<eq>texts[answer['sourceId']][answer['answerContextStartOffset']:answer['answerContextEndOffset']]<block_end><for_stmt>document cape_client.get_documents()['items']<block_start>cape_client.delete_document(document['id'])<block_end><for_stmt>saved_reply cape_client.get_saved_replies()['items']<block_start>cape_client.delete_saved_reply(saved_reply['id'])<block_end><block_end>
|
<import_from_stmt>argparse Namespace<import_stmt>os<import_stmt>time<import_from_stmt>tqdm tqdm<import_from_stmt>PIL Image<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader<import_stmt>sys<line_sep>sys.path.append(".")<line_sep>sys.path.append("..")<import_from_stmt>configs data_configs<import_from_stmt>datasets.inference_dataset InferenceDataset<import_from_stmt>datasets.augmentations AgeTransformer<import_from_stmt>utils.common tensor2im log_image<import_from_stmt>options.test_options TestOptions<import_from_stmt>models.psp pSp<def_stmt>run <block_start>test_opts=TestOptions().parse()<line_sep>out_path_results=os.path.join(test_opts.exp_dir 'inference_results')<line_sep>out_path_coupled=os.path.join(test_opts.exp_dir 'inference_coupled')<line_sep>os.makedirs(out_path_results exist_ok=<true>)<line_sep>os.makedirs(out_path_coupled exist_ok=<true>)<line_sep># update test options with options used during training
ckpt=torch.load(test_opts.checkpoint_path map_location='cpu')<line_sep>opts=ckpt['opts']<line_sep>opts.update(vars(test_opts))<line_sep>opts=Namespace(**opts)<line_sep>net=pSp(opts)<line_sep>net.eval()<line_sep>net.cuda()<line_sep>age_transformers=[AgeTransformer(target_age=age)<for>age opts.target_age.split(',')]<line_sep>print(f'Loading dataset for {opts.dataset_type}')<line_sep>dataset_args=data_configs.DATASETS[opts.dataset_type]<line_sep>transforms_dict=dataset_args['transforms'](opts).get_transforms()<line_sep>dataset=InferenceDataset(root=opts.data_path transform=transforms_dict['transform_inference'] opts=opts)<line_sep>dataloader=DataLoader(dataset batch_size=opts.test_batch_size shuffle=<false> num_workers=int(opts.test_workers) drop_last=<false>)<if_stmt>opts.n_images<is><none><block_start>opts.n_images=len(dataset)<block_end>global_time=[]<for_stmt>age_transformer age_transformers<block_start>print(f"Running on target age: {age_transformer.target_age}")<line_sep>global_i=0<for_stmt>input_batch tqdm(dataloader)<block_start><if_stmt>global_i<ge>opts.n_images<block_start><break><block_end><with_stmt>torch.no_grad()<block_start>input_age_batch=[age_transformer(img.cpu()).to('cuda')<for>img input_batch]<line_sep>input_age_batch=torch.stack(input_age_batch)<line_sep>input_cuda=input_age_batch.cuda().float()<line_sep>tic=time.time()<line_sep>result_batch=run_on_batch(input_cuda net opts)<line_sep>toc=time.time()<line_sep>global_time.append(toc-tic)<for_stmt>i range(len(input_batch))<block_start>result=tensor2im(result_batch[i])<line_sep>im_path=dataset.paths[global_i]<if_stmt>opts.couple_outputs<or>global_i%100<eq>0<block_start>input_im=log_image(input_batch[i] opts)<line_sep>resize_amount=(256 256)<if>opts.resize_outputs<else>(1024 1024)<line_sep>res=np.concatenate([np.array(input_im.resize(resize_amount)) np.array(result.resize(resize_amount))] axis=1)<line_sep>age_out_path_coupled=os.path.join(out_path_coupled age_transformer.target_age)<line_sep>os.makedirs(age_out_path_coupled exist_ok=<true>)<line_sep>Image.fromarray(res).save(os.path.join(age_out_path_coupled os.path.basename(im_path)))<block_end>age_out_path_results=os.path.join(out_path_results age_transformer.target_age)<line_sep>os.makedirs(age_out_path_results exist_ok=<true>)<line_sep>image_name=os.path.basename(im_path)<line_sep>im_save_path=os.path.join(age_out_path_results image_name)<line_sep>Image.fromarray(np.array(result.resize(resize_amount))).save(im_save_path)<line_sep>global_i<augadd>1<block_end><block_end><block_end><block_end>stats_path=os.path.join(opts.exp_dir 'stats.txt')<line_sep>result_str='Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time) np.std(global_time))<line_sep>print(result_str)<with_stmt>open(stats_path 'w')<as>f<block_start>f.write(result_str)<block_end><block_end><def_stmt>run_on_batch inputs net opts<block_start>result_batch=net(inputs randomize_noise=<false> resize=opts.resize_outputs)<line_sep><return>result_batch<block_end><if_stmt>__name__<eq>'__main__'<block_start>run()<block_end>
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
<import_from_stmt>unittest TestCase<import_stmt>mock<import_from_stmt>hpOneView.connection connection<import_from_stmt>hpOneView.resources.activity.alerts Alerts<import_from_stmt>hpOneView.resources.resource ResourceClient<class_stmt>AlertsTest(TestCase)<block_start><def_stmt>setUp self<block_start>self.host='127.0.0.1'<line_sep>self.connection=connection(self.host)<line_sep>self._client=Alerts(self.connection)<block_end>@mock.patch.object(ResourceClient 'get_all')<def_stmt>test_get_all self mock_get<block_start>self._client.get_all(filter="name='name'" sort='name:ascending' view='day')<line_sep>mock_get.assert_called_once_with(count=-1 filter="name='name'" query='' sort='name:ascending' start=0 view='day')<block_end>@mock.patch.object(ResourceClient 'get')<def_stmt>test_get_specific self mock_get<block_start>self._client.get('35323930-4936-4450-5531-303153474820')<line_sep>mock_get.assert_called_once_with('35323930-4936-4450-5531-303153474820')<block_end>@mock.patch.object(ResourceClient 'get_by')<def_stmt>test_get_by_called_once self mock_get_by<block_start>self._client.get_by('alertState' 'Active')<line_sep>mock_get_by.assert_called_once_with('alertState' 'Active')<block_end>@mock.patch.object(ResourceClient 'update')<def_stmt>test_update_should_fail_when_no_uri_is_provided self mock_update<block_start>resource={'alertState':'Cleared' 'assignedToUser':'Paul' 'alertUrgency':'None' 'notes':'Problem fixed' 'eTag':'2014-03-28T04:40:06.831Z'}<line_sep>self.assertRaises(ValueError self._client.update resource)<block_end>@mock.patch.object(ResourceClient 'update')<def_stmt>test_update_should_use_given_values_by_resource_uri self mock_update<block_start>resource={'uri':'/rest/alerts/26' 'alertState':'Cleared' 'assignedToUser':'Paul' 'alertUrgency':'None' 'notes':'Problem fixed' 'eTag':'2014-03-28T04:40:06.831Z'}<line_sep>self._client.update(resource.copy() '/rest/alerts/26')<line_sep>resource_test=resource.copy()<del_stmt>resource_test["uri"]<line_sep>mock_update.assert_called_once_with(resource=resource_test timeout=-1 uri='/rest/alerts/26')<block_end>@mock.patch.object(ResourceClient 'update')<def_stmt>test_update_should_use_given_values_by_uri_param self mock_update<block_start>resource={'alertState':'Cleared' 'assignedToUser':'Paul' 'alertUrgency':'None' 'notes':'Problem fixed' 'eTag':'2014-03-28T04:40:06.831Z'}<line_sep>self._client.update(resource '/rest/alerts/26')<line_sep>mock_update.assert_called_once_with(resource=resource.copy() timeout=-1 uri='/rest/alerts/26')<block_end>@mock.patch.object(ResourceClient 'delete')<def_stmt>test_delete_called_once self mock_delete<block_start>id_alert='35323930-4936-4450-5531-303153474820'<line_sep>self._client.delete(id_alert)<line_sep>mock_delete.assert_called_once_with(id_alert)<block_end>@mock.patch.object(ResourceClient 'delete')<def_stmt>test_delete_alert_change_log_called_once_by_id self mock_delete<block_start>id_alert='20'<line_sep>self._client.delete_alert_change_log(id_alert)<line_sep>mock_delete.assert_called_once_with({'uri':'/rest/alerts/AlertChangeLog/20'})<block_end>@mock.patch.object(ResourceClient 'delete_all')<def_stmt>test_delete_all_called_once self mock_delete<block_start>self._client.delete_all('name="name"')<line_sep>mock_delete.assert_called_once_with(filter='name="name"' timeout=-1)<block_end>@mock.patch.object(ResourceClient 'delete')<def_stmt>test_delete_alert_change_log_called_once_by_uri self 
mock_delete<block_start>uri='/rest/alerts/AlertChangeLog/20'<line_sep>self._client.delete_alert_change_log(uri)<line_sep>mock_delete.assert_called_once_with({'uri':uri})<block_end><block_end>
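# The tests above all follow the same mock.patch.object pattern; a minimal self-contained
# version of it (Dummy and do_work are made-up names, not part of hpOneView) looks like this.
from unittest import mock

class Dummy:
    def do_work(self, value):
        return value * 2

with mock.patch.object(Dummy, 'do_work') as mocked:
    Dummy().do_work(21)                 # the real method is replaced by a MagicMock
    mocked.assert_called_once_with(21)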
|
"""Test Common Process"""<import_stmt>logging<import_stmt>time<import_stmt>unittest<import_from_stmt>loopchain.baseservice CommonSubprocess<import_from_stmt>loopchain.utils loggers<line_sep>loggers.set_preset_type(loggers.PresetType.develop)<line_sep>loggers.update_preset()<class_stmt>TestCommonSubprocess(unittest.TestCase)<block_start><def_stmt>test_common_subprocess self# GIVEN
<block_start>process_args=['ls']<line_sep>logging.debug(f"run common subprocess....")<line_sep>subprocess=CommonSubprocess(process_args)<line_sep>logging.debug(f"after run common subprocess....")<line_sep>subprocess.start()<line_sep>subprocess.start()<line_sep>subprocess.start()<line_sep>self.assertTrue(subprocess.is_run())<line_sep># WHEN
time.sleep(2)<line_sep>subprocess.stop()<line_sep>subprocess.wait()<line_sep>subprocess.wait()<line_sep>subprocess.stop()<line_sep># THEN
self.assertFalse(subprocess.is_run())<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
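# A rough standard-library analogue of the start/is_run/stop/wait lifecycle exercised above
# (illustrative only, not loopchain's CommonSubprocess; assumes a POSIX 'sleep' binary).
import subprocess as sp
import time

proc = sp.Popen(['sleep', '5'])
print(proc.poll() is None)   # True while the child is still running
time.sleep(1)
proc.terminate()             # roughly what stop() does
proc.wait()                  # roughly what wait() does
print(proc.poll() is None)   # False once the child has exited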
|
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>collections<import_from_stmt>oslo_config cfg<import_from_stmt>oslo_log log<as>logging<import_from_stmt>stevedore extension<import_from_stmt>mistral_lib actions<as>ml_actions<import_from_stmt>mistral_lib.utils inspect_utils<as>i_utils<line_sep>CONF=cfg.CONF<line_sep>LOG=logging.getLogger(__name__)<class_stmt>GeneratedPythonActionDescriptor(ml_actions.PythonActionDescriptor)<block_start>"""Represents a legacy python action generated by a generator.
It's needed temporarily until we fully refactor OpenStack actions in the
'mistral-extra' project. The difference between this descriptor and the standard
PythonActionDescriptor class is how they initialize the parameter spec
and the description.
"""<def_stmt>__init__ self name action_cls action_cls_attrs=<none> namespace='' project_id=<none> scope=<none> desc=<none> params_spec=<none><block_start>super(GeneratedPythonActionDescriptor self).__init__(name action_cls action_cls_attrs namespace project_id scope)<if_stmt>desc<block_start>self._desc=desc<block_end><if_stmt>params_spec<block_start>self._params_spec=params_spec<block_end><block_end><def_stmt>__repr__ self<block_start><return>'Generated Python action [name=%s, cls=%s, params_spec=%s]'%(self.name self.action_class self.params_spec)<block_end><block_end><class_stmt>LegacyActionProvider(ml_actions.ActionProvider)<block_start>"""Represents the old way of configuring actions.
There are two sources where this action provider loads actions
from:
* Action classes configured in the entry point "mistral.actions"
* Action classes generated by generators configured in the
entry point "mistral.generators" as a function returning a
collection of them.
"""<def_stmt>__init__ self name='legacy'<block_start>super().__init__(name)<line_sep># TODO(rakhmerov): Come up with a convenient structure to keep action
# classes indexed so that we could search and filter easily.
self._action_descs=collections.OrderedDict()<line_sep>self._load_actions()<block_end><def_stmt>_load_actions self<block_start>self._load_action_plugins()<line_sep>self._load_action_generators()<block_end><def_stmt>_load_action_plugins self<block_start><if_stmt><not>CONF.legacy_action_provider.load_action_plugins<block_start><return><block_end>LOG.info("Loading actions plugged in with the entry point "<concat>"'mistral.actions'...")<line_sep>ext_mgr=extension.ExtensionManager(namespace='mistral.actions' invoke_on_load=<false>)<for_stmt>action_name ext_mgr.names()<block_start>action_cls=ext_mgr[action_name].plugin<if_stmt>CONF.legacy_action_provider.only_builtin_actions<block_start><if_stmt><not>action_cls.__module__.startswith('mistral.')<block_start><continue><block_end><block_end>action_desc=ml_actions.PythonActionDescriptor(action_name action_cls namespace='')<line_sep>self._action_descs[action_name]=action_desc<line_sep>LOG.debug('Registered action: %s' action_desc)<block_end><block_end><def_stmt>_load_action_generators self<block_start><if_stmt><not>CONF.legacy_action_provider.load_action_generators<block_start><return><block_end>LOG.info("Loading actions from the action generators plugged in "<concat>"with the entry point 'mistral.generators'")<for_stmt>gen self._get_action_generators()<block_start>self._register_generator_actions(gen)<block_end><block_end>@staticmethod<def_stmt>_get_action_generators <block_start>res=[]<line_sep>ext_mgr=extension.ExtensionManager(namespace='mistral.generators' invoke_on_load=<true>)<line_sep># TODO(rakhmerov): this is all ugly. It turns out that the only
# way to register actions via generators is to register a special
# function in the entry point that returns a list of generators.
# But we can't directly register a generator.
<for_stmt>ext ext_mgr<block_start><if_stmt>ext.obj<is><not><none><block_start><for_stmt>gen ext.obj<block_start>res.append(gen)<block_end><block_end><block_end><return>res<block_end><def_stmt>_register_generator_actions self generator# TODO(rakhmerov): Here we have an implicit dependency on
# "mistral-extra" because ActionGenerator class is defined
# in "mistral-extra". Of course, it works because of duck
# typing but architecture wise it's just very bad. "mistral"
# must not depend on "mistral-extra" because the latter is
# just a project with mistral extensions. In fact, we can't
# even extend ActionGenerator class within "mistral" for
# testing purposes.
# So it's all done this way for compatibility until all
# OpenStack actions are redesigned with action providers.
<block_start><for_stmt>action generator.create_actions()<block_start>action_desc=GeneratedPythonActionDescriptor(action['name'] generator.base_action_class i_utils.get_public_fields(action['class']) desc=action['description'] params_spec=action['arg_list'])<line_sep>LOG.debug('Registered action: %s' action_desc)<line_sep>self._action_descs[action['name']]=action_desc<block_end><block_end><def_stmt>find self action_name namespace=<none><block_start><return>self._action_descs.get(action_name)<block_end><def_stmt>find_all self namespace=<none> limit=<none> sort_fields=<none> sort_dirs=<none> **filters# TODO(rakhmerov): Apply sort_keys, sort_dirs, and filters.
<block_start><return>self._action_descs.values()<block_end><block_end>
|
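# A minimal sketch of the stevedore lookup that _load_action_plugins() in the provider above
# relies on (assumes stevedore is installed; if nothing is registered under the namespace
# the loop is simply a no-op).
from stevedore import extension

ext_mgr = extension.ExtensionManager(namespace='mistral.actions', invoke_on_load=False)
for action_name in ext_mgr.names():
    action_cls = ext_mgr[action_name].plugin  # class registered under the entry point
    print(action_name, action_cls)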
<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>sklearn.ensemble RandomForestClassifier<line_sep>RSEED=50<line_sep># Load in data
df=pd.read_csv('https://s3.amazonaws.com/projects-rf/clean_data.csv')<line_sep># Full dataset: https://www.kaggle.com/cdc/behavioral-risk-factor-surveillance-system
# Extract the labels
labels=np.array(df.pop('label'))<line_sep># 30% examples in test data
train,test,train_labels,test_labels=train_test_split(df labels stratify=labels test_size=0.3 random_state=RSEED)<line_sep># Imputation of missing values
train=train.fillna(train.mean())<line_sep>test=test.fillna(test.mean())<line_sep># Features for feature importances
features=list(train.columns)<line_sep># Create the model with 100 trees
model=RandomForestClassifier(n_estimators=100 random_state=RSEED max_features='sqrt' n_jobs=-1 verbose=1)<line_sep># Fit on training data
model.fit(train train_labels)<line_sep>n_nodes=[]<line_sep>max_depths=[]<line_sep># Stats about the trees in random forest
<for_stmt>ind_tree model.estimators_<block_start>n_nodes.append(ind_tree.tree_.node_count)<line_sep>max_depths.append(ind_tree.tree_.max_depth)<block_end>print(f'Average number of nodes {int(np.mean(n_nodes))}')<line_sep>print(f'Average maximum depth {int(np.mean(max_depths))}')<line_sep># Training predictions (to demonstrate overfitting)
train_rf_predictions=model.predict(train)<line_sep>train_rf_probs=model.predict_proba(train)[: 1]<line_sep># Testing predictions (to determine performance)
rf_predictions=model.predict(test)<line_sep>rf_probs=model.predict_proba(test)[: 1]<import_from_stmt>sklearn.metrics precision_score recall_score roc_auc_score roc_curve<import_stmt>matplotlib.pyplot<as>plt<line_sep># Plot formatting
plt.style.use('fivethirtyeight')<line_sep>plt.rcParams['font.size']=18<def_stmt>evaluate_model predictions probs train_predictions train_probs<block_start>"""Compare machine learning model to baseline performance.
Computes statistics and shows ROC curve."""<line_sep>baseline={}<line_sep>baseline['recall']=recall_score(test_labels [1<for>_ range(len(test_labels))])<line_sep>baseline['precision']=precision_score(test_labels [1<for>_ range(len(test_labels))])<line_sep>baseline['roc']=0.5<line_sep>results={}<line_sep>results['recall']=recall_score(test_labels predictions)<line_sep>results['precision']=precision_score(test_labels predictions)<line_sep>results['roc']=roc_auc_score(test_labels probs)<line_sep>train_results={}<line_sep>train_results['recall']=recall_score(train_labels train_predictions)<line_sep>train_results['precision']=precision_score(train_labels train_predictions)<line_sep>train_results['roc']=roc_auc_score(train_labels train_probs)<for_stmt>metric ['recall' 'precision' 'roc']<block_start>print(f'{metric.capitalize()} Baseline: {round(baseline[metric] 2)} Test: {round(results[metric] 2)} Train: {round(train_results[metric] 2)}')<block_end># Calculate false positive rates and true positive rates
base_fpr,base_tpr,_=roc_curve(test_labels [1<for>_ range(len(test_labels))])<line_sep>model_fpr,model_tpr,_=roc_curve(test_labels probs)<line_sep>plt.figure(figsize=(8 6))<line_sep>plt.rcParams['font.size']=16<line_sep># Plot both curves
plt.plot(base_fpr base_tpr 'b' label='baseline')<line_sep>plt.plot(model_fpr model_tpr 'r' label='model')<line_sep>plt.legend()<line_sep>plt.xlabel('False Positive Rate')<line_sep>plt.ylabel('True Positive Rate')<line_sep>plt.title('ROC Curves')<line_sep>plt.show()<line_sep><block_end>evaluate_model(rf_predictions rf_probs train_rf_predictions train_rf_probs)<line_sep>plt.savefig('roc_auc_curve.png')<import_from_stmt>sklearn.metrics confusion_matrix<import_stmt>itertools<def_stmt>plot_confusion_matrix cm classes normalize=<false> title='Confusion matrix' cmap=plt.cm.Oranges<block_start>"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Source: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""<if_stmt>normalize<block_start>cm=cm.astype('float')/cm.sum(axis=1)[: np.newaxis]<line_sep>print("Normalized confusion matrix")<block_end><else_stmt><block_start>print('Confusion matrix, without normalization')<block_end>print(cm)<line_sep># Plot the confusion matrix
plt.figure(figsize=(10 10))<line_sep>plt.imshow(cm interpolation='nearest' cmap=cmap)<line_sep>plt.title(title size=24)<line_sep>plt.colorbar(aspect=4)<line_sep>tick_marks=np.arange(len(classes))<line_sep>plt.xticks(tick_marks classes rotation=45 size=14)<line_sep>plt.yticks(tick_marks classes size=14)<line_sep>fmt='.2f'<if>normalize<else>'d'<line_sep>thresh=cm.max()/2.<line_sep># Labeling the plot
<for_stmt>i,j itertools.product(range(cm.shape[0]) range(cm.shape[1]))<block_start>plt.text(j i format(cm[i j] fmt) fontsize=20 horizontalalignment="center" color="white"<if>cm[i j]<g>thresh<else>"black")<block_end>plt.grid(<none>)<line_sep>plt.tight_layout()<line_sep>plt.ylabel('True label' size=18)<line_sep>plt.xlabel('Predicted label' size=18)<block_end># Confusion matrix
cm=confusion_matrix(test_labels rf_predictions)<line_sep>plot_confusion_matrix(cm classes=['Poor Health' 'Good Health'] title='Health Confusion Matrix')<line_sep>plt.savefig('cm.png')<line_sep>
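# The `features` list above is prepared "for feature importances" but never used; a short
# follow-on that pairs it with the fitted model could look like this (runs in the same
# script, after model.fit).
fi = pd.DataFrame({'feature': features,
                   'importance': model.feature_importances_})
print(fi.sort_values('importance', ascending=False).head(10))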
|
<import_from_stmt>.models Folder MEDIA_MODELS<def_stmt>handle_uploaded_file file folder=<none> is_public=<true><block_start>'''handle uploaded file to folder
matches the first media type, creates the media object, and returns it
file: File object
folder: str or Folder instance
is_public: boolean
'''<line_sep>_folder=<none><if_stmt>folder<and>isinstance(folder Folder)<block_start>_folder=folder<block_end><elif_stmt>folder<block_start>_folder,folder_created=Folder.objects.get_or_create(name=folder)<block_end><for_stmt>cls MEDIA_MODELS<block_start><if_stmt>cls.matches_file_type(file.name)<block_start>obj,created=cls.objects.get_or_create(original_filename=file.name file=file folder=_folder is_public=is_public)<if_stmt>created<block_start><return>obj<block_end><block_end><block_end><return><none><block_end><def_stmt>handle_uploaded_files files folder=<none> is_public=<true><block_start>'''handle uploaded files to folder
files: array of File objects or single object
folder: str or Folder instance
is_public: boolean
'''<line_sep>results=[]<for_stmt>f files<block_start>result=handle_uploaded_file(f folder is_public)<line_sep>results.append(result)<block_end><return>results<block_end>
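# A self-contained sketch of the "first matching media type wins" dispatch that
# handle_uploaded_file() performs over MEDIA_MODELS (FakeImage/FakeVideo are hypothetical
# stand-ins, not the real model classes).
class FakeImage:
    @staticmethod
    def matches_file_type(name):
        return name.lower().endswith(('.png', '.jpg'))

class FakeVideo:
    @staticmethod
    def matches_file_type(name):
        return name.lower().endswith('.mp4')

def pick_media_model(filename, models=(FakeImage, FakeVideo)):
    for cls in models:
        if cls.matches_file_type(filename):
            return cls
    return None

print(pick_media_model('photo.jpg'))   # FakeImage
print(pick_media_model('notes.txt'))   # None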
|
""" contains data utils """<import_stmt>warnings<import_stmt>numpy<as>np<def_stmt>make_rng seed=<none><block_start>""" Create a random number generator
Parameters
----------
seed : bool, None, int, SeedSequence, Generator, BitGenerator or RandomState
a random state
- False - returns None
- None or True - creates a new SFC64 generator with random entropy
- int - creates a new SFC64 generator with the seed given
- SeedSequence - creates a new SFC64 generator with the seed given
- Generator - returns it
- BitGenerator - creates a new generator
- RandomState - returns it
Notes
-----
Do not use a legacy RandomState except for backward compatibility.
Returns
-------
numpy.random.Generator
"""<if_stmt>seed<is><false><block_start>rng=<none><block_end><elif_stmt>seed<is><none><or>seed<is><true><block_start>rng=np.random.default_rng(np.random.SFC64())<block_end><elif_stmt>isinstance(seed np.random.SeedSequence)<block_start>rng=np.random.default_rng(np.random.SFC64(seed))<block_end><elif_stmt>isinstance(seed int)<block_start>rng=np.random.default_rng(np.random.SFC64(seed))<block_end><elif_stmt>isinstance(seed np.random.Generator)<block_start>rng=seed<block_end><elif_stmt>isinstance(seed np.random.BitGenerator)<block_start>rng=np.random.default_rng(seed)<block_end><elif_stmt>isinstance(seed np.random.RandomState)<block_start>rng=seed<block_end><else_stmt><block_start>warnings.warn("Unknown seed type: %s"%seed)<line_sep>rng=<none><block_end><return>rng<block_end><def_stmt>make_seed_sequence shuffle=<false><block_start>""" Create a seed sequence for random number generation
Parameters
----------
shuffle : bool or int or object with a random_seed attribute
a random state
- False or True - creates a new seed sequence with random entropy
- int - creates a new seed sequence with the given entropy
Returns
-------
numpy.random.SeedSequence
"""<if_stmt>isinstance(getattr(shuffle 'random_seed' <none>) np.random.SeedSequence)<block_start><return>shuffle.random_seed<block_end><if_stmt>shuffle<is><none><or>isinstance(shuffle bool)<block_start>seed=np.random.SeedSequence()<block_end><elif_stmt>isinstance(shuffle int)<block_start><if_stmt>shuffle<ge>0<block_start>seed=np.random.SeedSequence(shuffle)<block_end><else_stmt># if shuffle is negative, do not shuffle the dataset, but use the seed for randomization
<block_start>seed=np.random.SeedSequence(-shuffle)<block_end><block_end><else_stmt><block_start><raise>TypeError('shuffle can be bool or int' shuffle)<block_end><return>seed<block_end><def_stmt>spawn_seed_sequence source<block_start>""" Return a new seed sequence or None
Parameters
----------
source : numpy.random.SeedSequence or Batch or Pipeline
Returns
-------
numpy.random.SeedSequence
"""<if_stmt>isinstance(source np.random.SeedSequence)<block_start><pass><block_end><elif_stmt>isinstance(getattr(source 'random_seed' <none>) np.random.SeedSequence)<block_start>source=source.random_seed<block_end><else_stmt><block_start><raise>ValueError('source should be SeedSequence, Batch or Pipeline, but given %s'%type(source))<block_end><return>source.spawn(1)[0]<block_end>
|
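# A small runnable illustration of the numpy primitives that make_rng, make_seed_sequence
# and spawn_seed_sequence above are built on (plain numpy, no batch or pipeline objects).
import numpy as np

rng = np.random.default_rng(np.random.SFC64(42))          # what make_rng(42) returns
print(rng.integers(0, 10, size=3))

seq = np.random.SeedSequence(7)                           # what make_seed_sequence(7) returns
child = seq.spawn(1)[0]                                   # what spawn_seed_sequence() does
child_rng = np.random.default_rng(np.random.SFC64(child))
print(child_rng.random())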
# colorsystem.py is the full list of colors that can be used to easily create themes.
<class_stmt>Gray<block_start>B0='#000000'<line_sep>B10='#19232D'<line_sep>B20='#293544'<line_sep>B30='#37414F'<line_sep>B40='#455364'<line_sep>B50='#54687A'<line_sep>B60='#60798B'<line_sep>B70='#788D9C'<line_sep>B80='#9DA9B5'<line_sep>B90='#ACB1B6'<line_sep>B100='#B9BDC1'<line_sep>B110='#C9CDD0'<line_sep>B120='#CED1D4'<line_sep>B130='#E0E1E3'<line_sep>B140='#FAFAFA'<line_sep>B150='#FFFFFF'<block_end><class_stmt>Blue<block_start>B0='#000000'<line_sep>B10='#062647'<line_sep>B20='#26486B'<line_sep>B30='#375A7F'<line_sep>B40='#346792'<line_sep>B50='#1A72BB'<line_sep>B60='#057DCE'<line_sep>B70='#259AE9'<line_sep>B80='#37AEFE'<line_sep>B90='#73C7FF'<line_sep>B100='#9FCBFF'<line_sep>B110='#C2DFFA'<line_sep>B120='#CEE8FF'<line_sep>B130='#DAEDFF'<line_sep>B140='#F5FAFF'<line_sep>B150='#FFFFFF'<block_end>
|
""" ntp
This module contains definitions
for the Calvados model objects.
This module contains a collection of YANG definitions
for Cisco IOS\-XR sysadmin NTP configuration.
This module contains definitions
for the following management objects\:
NTP configuration data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
Copyright (c) 2012\-2018 by Cisco Systems, Inc.
All rights reserved.
"""<import_stmt>sys<import_from_stmt>collections OrderedDict<import_from_stmt>ydk.types Entity<as>_Entity_<import_from_stmt>ydk.types EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.types Entity EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.filters YFilter<import_from_stmt>ydk.errors YError YModelError<import_from_stmt>ydk.errors.error_handler handle_type_error<as>_handle_type_error<class_stmt>Ntp(_Entity_)<block_start>"""
.. attribute:: peer
**type**\: list of :py:class:`Peer <ydk.models.cisco_ios_xr.ntp.Ntp.Peer>`
.. attribute:: server
**type**\: list of :py:class:`Server <ydk.models.cisco_ios_xr.ntp.Ntp.Server>`
.. attribute:: trusted_key
**type**\: list of int
**range:** 1..65534
.. attribute:: authenticate
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: authentication_key
**type**\: list of :py:class:`AuthenticationKey <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey>`
.. attribute:: trace
**type**\: :py:class:`Trace <ydk.models.cisco_ios_xr.ntp.Ntp.Trace>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp self).__init__()<block_end>self._top_entity=<none><line_sep>self.yang_name="ntp"<line_sep>self.yang_parent_name="ntp"<line_sep>self.is_top_level_class=<true><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("peer" ("peer" Ntp.Peer)) ("server" ("server" Ntp.Server)) ("authentication-key" ("authentication_key" Ntp.AuthenticationKey)) ("trace" ("trace" Ntp.Trace))])<line_sep>self._leafs=OrderedDict([('trusted_key' (YLeafList(YType.int32 'trusted-key') ['int'])) ('authenticate' (YLeaf(YType.empty 'authenticate') ['Empty'])) ])<line_sep>self.trusted_key=[]<line_sep>self.authenticate=<none><line_sep>self.trace=Ntp.Trace()<line_sep>self.trace.parent=self<line_sep>self._children_name_map["trace"]="trace"<line_sep>self.peer=YList(self)<line_sep>self.server=YList(self)<line_sep>self.authentication_key=YList(self)<line_sep>self._segment_path=<lambda>:"ntp:ntp"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp ['trusted_key' 'authenticate'] name value)<block_end><class_stmt>Peer(_Entity_)<block_start>"""
.. attribute:: name (key)
**type**\: str
.. attribute:: version
**type**\: int
**range:** 1..4
.. attribute:: key_id
**type**\: int
**range:** 1..65534
.. attribute:: prefer
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Peer self).__init__()<block_end>self.yang_name="peer"<line_sep>self.yang_parent_name="ntp"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=['name']<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('name' (YLeaf(YType.str 'name') ['str'])) ('version' (YLeaf(YType.int32 'version') ['int'])) ('key_id' (YLeaf(YType.int32 'key-id') ['int'])) ('prefer' (YLeaf(YType.empty 'prefer') ['Empty'])) ])<line_sep>self.name=<none><line_sep>self.version=<none><line_sep>self.key_id=<none><line_sep>self.prefer=<none><line_sep>self._segment_path=<lambda>:"peer"+"[name='"+str(self.name)+"']"<line_sep>self._absolute_path=<lambda>:"ntp:ntp/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Peer ['name' 'version' 'key_id' 'prefer'] name value)<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Peer']['meta_info']<block_end><block_end><class_stmt>Server(_Entity_)<block_start>"""
.. attribute:: name (key)
**type**\: str
.. attribute:: version
**type**\: int
**range:** 1..4
.. attribute:: key_id
**type**\: int
**range:** 1..65534
.. attribute:: prefer
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Server self).__init__()<block_end>self.yang_name="server"<line_sep>self.yang_parent_name="ntp"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=['name']<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('name' (YLeaf(YType.str 'name') ['str'])) ('version' (YLeaf(YType.int32 'version') ['int'])) ('key_id' (YLeaf(YType.int32 'key-id') ['int'])) ('prefer' (YLeaf(YType.empty 'prefer') ['Empty'])) ])<line_sep>self.name=<none><line_sep>self.version=<none><line_sep>self.key_id=<none><line_sep>self.prefer=<none><line_sep>self._segment_path=<lambda>:"server"+"[name='"+str(self.name)+"']"<line_sep>self._absolute_path=<lambda>:"ntp:ntp/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Server ['name' 'version' 'key_id' 'prefer'] name value)<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Server']['meta_info']<block_end><block_end><class_stmt>AuthenticationKey(_Entity_)<block_start>"""
.. attribute:: key_number (key)
**type**\: int
**range:** 1..65534
.. attribute:: md5_keyword
**type**\: :py:class:`Md5Keyword <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey.Md5Keyword>`
**mandatory**\: True
.. attribute:: encryption
**type**\: :py:class:`Encryption <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey.Encryption>`
.. attribute:: keyname
**type**\: str
**length:** 0..32
**mandatory**\: True
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.AuthenticationKey self).__init__()<block_end>self.yang_name="authentication-key"<line_sep>self.yang_parent_name="ntp"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=['key_number']<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('key_number' (YLeaf(YType.int32 'key-number') ['int'])) ('md5_keyword' (YLeaf(YType.enumeration 'md5-keyword') [('ydk.models.cisco_ios_xr.ntp' 'Ntp' 'AuthenticationKey.Md5Keyword')])) ('encryption' (YLeaf(YType.enumeration 'encryption') [('ydk.models.cisco_ios_xr.ntp' 'Ntp' 'AuthenticationKey.Encryption')])) ('keyname' (YLeaf(YType.str 'keyname') ['str'])) ])<line_sep>self.key_number=<none><line_sep>self.md5_keyword=<none><line_sep>self.encryption=<none><line_sep>self.keyname=<none><line_sep>self._segment_path=<lambda>:"authentication-key"+"[key-number='"+str(self.key_number)+"']"<line_sep>self._absolute_path=<lambda>:"ntp:ntp/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.AuthenticationKey ['key_number' 'md5_keyword' 'encryption' 'keyname'] name value)<block_end><class_stmt>Encryption(Enum)<block_start>"""
Encryption (Enum Class)
.. data:: clear = 0
.. data:: encrypted = 1
"""<line_sep>clear=Enum.YLeaf(0 "clear")<line_sep>encrypted=Enum.YLeaf(1 "encrypted")<line_sep>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.AuthenticationKey.Encryption']<block_end><block_end><class_stmt>Md5Keyword(Enum)<block_start>"""
Md5Keyword (Enum Class)
.. data:: md5 = 0
"""<line_sep>md5=Enum.YLeaf(0 "md5")<line_sep>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.AuthenticationKey.Md5Keyword']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.AuthenticationKey']['meta_info']<block_end><block_end><class_stmt>Trace(_Entity_)<block_start>"""
.. attribute:: ntp_helper
**type**\: :py:class:`NtpHelper <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Trace self).__init__()<block_end>self.yang_name="trace"<line_sep>self.yang_parent_name="ntp"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("ntp_helper" ("ntp_helper" Ntp.Trace.NtpHelper))])<line_sep>self._leafs=OrderedDict()<line_sep>self.ntp_helper=Ntp.Trace.NtpHelper()<line_sep>self.ntp_helper.parent=self<line_sep>self._children_name_map["ntp_helper"]="ntp_helper"<line_sep>self._segment_path=<lambda>:"trace"<line_sep>self._absolute_path=<lambda>:"ntp:ntp/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Trace [] name value)<block_end><class_stmt>NtpHelper(_Entity_)<block_start>"""
.. attribute:: trace
show traceable processes
**type**\: list of :py:class:`Trace_ <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Trace.NtpHelper self).__init__()<block_end>self.yang_name="ntp_helper"<line_sep>self.yang_parent_name="trace"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("trace" ("trace" Ntp.Trace.NtpHelper.Trace_))])<line_sep>self._leafs=OrderedDict()<line_sep>self.trace=YList(self)<line_sep>self._segment_path=<lambda>:"ntp_helper"<line_sep>self._absolute_path=<lambda>:"ntp:ntp/trace/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Trace.NtpHelper [] name value)<block_end><class_stmt>Trace_(_Entity_)<block_start>"""
show traceable processes
.. attribute:: buffer (key)
**type**\: str
**config**\: False
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Trace.NtpHelper.Trace_ self).__init__()<block_end>self.yang_name="trace"<line_sep>self.yang_parent_name="ntp_helper"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=['buffer']<line_sep>self._child_classes=OrderedDict([("location" ("location" Ntp.Trace.NtpHelper.Trace_.Location))])<line_sep>self._leafs=OrderedDict([('buffer' (YLeaf(YType.str 'buffer') ['str'])) ])<line_sep>self.buffer=<none><line_sep>self.location=YList(self)<line_sep>self._segment_path=<lambda>:"trace"+"[buffer='"+str(self.buffer)+"']"<line_sep>self._absolute_path=<lambda>:"ntp:ntp/trace/ntp_helper/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Trace.NtpHelper.Trace_ ['buffer'] name value)<block_end><class_stmt>Location(_Entity_)<block_start>"""
.. attribute:: location_name (key)
**type**\: str
**config**\: False
.. attribute:: all_options
**type**\: list of :py:class:`AllOptions <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location.AllOptions>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Trace.NtpHelper.Trace_.Location self).__init__()<block_end>self.yang_name="location"<line_sep>self.yang_parent_name="trace"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['location_name']<line_sep>self._child_classes=OrderedDict([("all-options" ("all_options" Ntp.Trace.NtpHelper.Trace_.Location.AllOptions))])<line_sep>self._leafs=OrderedDict([('location_name' (YLeaf(YType.str 'location_name') ['str'])) ])<line_sep>self.location_name=<none><line_sep>self.all_options=YList(self)<line_sep>self._segment_path=<lambda>:"location"+"[location_name='"+str(self.location_name)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location ['location_name'] name value)<block_end><class_stmt>AllOptions(_Entity_)<block_start>"""
.. attribute:: option (key)
**type**\: str
**config**\: False
.. attribute:: trace_blocks
**type**\: list of :py:class:`TraceBlocks <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions self).__init__()<block_end>self.yang_name="all-options"<line_sep>self.yang_parent_name="location"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=['option']<line_sep>self._child_classes=OrderedDict([("trace-blocks" ("trace_blocks" Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks))])<line_sep>self._leafs=OrderedDict([('option' (YLeaf(YType.str 'option') ['str'])) ])<line_sep>self.option=<none><line_sep>self.trace_blocks=YList(self)<line_sep>self._segment_path=<lambda>:"all-options"+"[option='"+str(self.option)+"']"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions ['option'] name value)<block_end><class_stmt>TraceBlocks(_Entity_)<block_start>"""
.. attribute:: data
Trace output block
**type**\: str
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks self).__init__()<block_end>self.yang_name="trace-blocks"<line_sep>self.yang_parent_name="all-options"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<true><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('data' (YLeaf(YType.str 'data') ['str'])) ])<line_sep>self.data=<none><line_sep>self._segment_path=<lambda>:"trace-blocks"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks ['data'] name value)<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location.AllOptions']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Trace.NtpHelper.Trace_']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Trace.NtpHelper']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp.Trace']['meta_info']<block_end><block_end><def_stmt>clone_ptr self<block_start>self._top_entity=Ntp()<line_sep><return>self._top_entity<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['Ntp']['meta_info']<block_end><block_end><class_stmt>ClockAction(_Entity_)<block_start>"""
.. attribute:: clock
**type**\: :py:class:`Clock <ydk.models.cisco_ios_xr.ntp.ClockAction.Clock>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(ClockAction self).__init__()<block_end>self._top_entity=<none><line_sep>self.yang_name="clock-action"<line_sep>self.yang_parent_name="ntp"<line_sep>self.is_top_level_class=<true><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("clock" ("clock" ClockAction.Clock))])<line_sep>self._leafs=OrderedDict()<line_sep>self.clock=ClockAction.Clock()<line_sep>self.clock.parent=self<line_sep>self._children_name_map["clock"]="clock"<line_sep>self._segment_path=<lambda>:"ntp:clock-action"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(ClockAction [] name value)<block_end><class_stmt>Clock(_Entity_)<block_start>"""
.. attribute:: action
**type**\: :py:class:`Action <ydk.models.cisco_ios_xr.ntp.ClockAction.Clock.Action>`
**config**\: False
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(ClockAction.Clock self).__init__()<block_end>self.yang_name="clock"<line_sep>self.yang_parent_name="clock-action"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([("Action" ("action" ClockAction.Clock.Action))])<line_sep>self._leafs=OrderedDict()<line_sep>self.action=ClockAction.Clock.Action()<line_sep>self.action.parent=self<line_sep>self._children_name_map["action"]="Action"<line_sep>self._segment_path=<lambda>:"clock"<line_sep>self._absolute_path=<lambda>:"ntp:clock-action/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(ClockAction.Clock [] name value)<block_end><class_stmt>Action(_Entity_)<block_start>"""
"""<line_sep>_prefix='ntp'<line_sep>_revision='2016-07-04'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(ClockAction.Clock.Action self).__init__()<block_end>self.yang_name="Action"<line_sep>self.yang_parent_name="clock"<line_sep>self.is_top_level_class=<false><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict()<line_sep>self._segment_path=<lambda>:"Action"<line_sep>self._absolute_path=<lambda>:"ntp:clock-action/clock/%s"%self._segment_path()<line_sep>self._is_frozen=<true><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['ClockAction.Clock.Action']['meta_info']<block_end><block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['ClockAction.Clock']['meta_info']<block_end><block_end><def_stmt>clone_ptr self<block_start>self._top_entity=ClockAction()<line_sep><return>self._top_entity<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _ntp<as>meta<line_sep><return>meta._meta_table['ClockAction']['meta_info']<block_end><block_end>
|
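# A hedged usage sketch for the generated NTP bindings above; it assumes the ydk runtime and
# the cisco_ios_xr model bundle are installed, and it only populates the in-memory model
# (nothing is sent to a device here).
from ydk.models.cisco_ios_xr.ntp import Ntp

ntp = Ntp()
server = Ntp.Server()
server.name = '192.0.2.1'
server.version = 4
ntp.server.append(server)
ntp.trusted_key.append(1)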
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Some utility methods which were removed but are still required for some unit-tests."""<import_from_stmt>typing List Tuple<def_stmt>_create_labels boson_hamilt_harm_basis:List[List[Tuple[List[List[int]] complex]]]<arrow>List[Tuple[str complex]]<block_start>all_labels=[]<for_stmt>num_body_data boson_hamilt_harm_basis<block_start>num_body_labels=_create_num_body_labels(num_body_data)<line_sep>all_labels.extend(num_body_labels)<block_end><return>all_labels<block_end><def_stmt>_create_num_body_labels num_body_data:List[Tuple[List[List[int]] complex]]<arrow>List[Tuple[str complex]]<block_start>num_body_labels=[]<for_stmt>indices,coeff num_body_data<block_start>indices.sort()<line_sep>coeff_label=_create_label_for_coeff(indices)<line_sep>num_body_labels.append((coeff_label coeff))<block_end><return>num_body_labels<block_end><def_stmt>_create_label_for_coeff indices:List[List[int]]<arrow>str<block_start>complete_labels_list=[]<for_stmt>mode,modal_raise,modal_lower indices<block_start><if_stmt>modal_raise<le>modal_lower<block_start>complete_labels_list.append(f"+_{mode}*{modal_raise}")<line_sep>complete_labels_list.append(f"-_{mode}*{modal_lower}")<block_end><else_stmt><block_start>complete_labels_list.append(f"-_{mode}*{modal_lower}")<line_sep>complete_labels_list.append(f"+_{mode}*{modal_raise}")<block_end><block_end>complete_label=" ".join(complete_labels_list)<line_sep><return>complete_label<block_end>
|
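# A tiny worked example of the label convention built by _create_label_for_coeff above
# (inlined because the helpers are module-private; the mode and modal numbers are arbitrary).
mode, modal_raise, modal_lower = 0, 2, 1
# modal_raise (2) > modal_lower (1), so the lowering operator is emitted first:
label = f"-_{mode}*{modal_lower} +_{mode}*{modal_raise}"
print(label)   # -_0*1 +_0*2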
# coding=utf8
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>glob<import_stmt>re<import_stmt>sys<import_from_stmt>tensorflow.python.platform gfile<import_from_stmt>settings VOCAB_DICT_FILE<line_sep>_PAD="_PAD"<line_sep>_GO="_GO"<line_sep>_EOS="_EOS"<line_sep>_UNK="_UNK"<line_sep>_START_VOCAB=[_PAD _GO _EOS _UNK]<line_sep>PAD_ID=0<line_sep>GO_ID=1<line_sep>EOS_ID=2<line_sep>UNK_ID=3<line_sep># Regular expressions used to tokenize.
_WORD_SPLIT=re.compile("([.,!?\"':;)(])")<line_sep>_DIGIT_RE=re.compile(r"\d")<def_stmt>basic_tokenizer sentence<block_start>"""Very basic tokenizer: split the sentence into a list of tokens."""<line_sep>words=[]<for_stmt>space_separated_fragment sentence.strip().split()<block_start>words.extend(re.split(_WORD_SPLIT space_separated_fragment))<block_end><return>[w.decode('utf8')<for>w words<if>w]<block_end># forward maximum matching word segmentation
_DICT=<none><line_sep>_MAX_WORD_LENGTH=0<def_stmt>fmm_tokenizer sentence<block_start><global>_DICT<line_sep><global>_MAX_WORD_LENGTH<if_stmt><not>_DICT<block_start>_DICT,_=initialize_vocabulary(VOCAB_DICT_FILE)<for_stmt>v _DICT<block_start><if_stmt>len(v)<g>_MAX_WORD_LENGTH<block_start>_MAX_WORD_LENGTH=len(v)<block_end><block_end>print(_MAX_WORD_LENGTH)<block_end>words=[]<line_sep>begin=0<while_stmt>begin<l>len(sentence)<block_start>end=min(begin+_MAX_WORD_LENGTH len(sentence))<while_stmt>end<g>begin+1<block_start>word=sentence[begin:end]<line_sep># print (word)
<if_stmt>word<in>_DICT<block_start><break><block_end>end<augsub>1<block_end>word=sentence[begin:end]<line_sep>words.append(word.encode('utf8'))<line_sep>begin=end<block_end><return>words<block_end><def_stmt>create_vocabulary vocabulary_path data_path_patterns max_vocabulary_size tokenizer=<none> normalize_digits=<true><block_start>"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that the
token in the first line gets id=0, the token in the second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path_patterns: glob pattern of the data files that will be used to create the vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""<line_sep>vocab={}<if_stmt>gfile.Exists(vocabulary_path)<block_start>sys.stderr.write('vocabulary path %s exists. we will use the existing one\n'%vocabulary_path)<line_sep><return><block_end><for_stmt>data_f glob.glob(data_path_patterns)<block_start>print("Creating vocabulary %s from data %s"%(vocabulary_path data_f))<with_stmt>gfile.GFile(data_f mode="r")<as>f<block_start>counter=0<for_stmt>line f<block_start>counter<augadd>1<if_stmt>counter%100000<eq>0<block_start>print(" processing line %d"%counter)<block_end>tokens=tokenizer(line)<if>tokenizer<else>basic_tokenizer(line)<for_stmt>w tokens<block_start>word=re.sub(_DIGIT_RE "0" w)<if>normalize_digits<else>w<if_stmt>word<in>vocab<block_start>vocab[word]<augadd>1<block_end><else_stmt><block_start>vocab[word]=1<block_end><block_end><block_end><block_end><block_end>print('total vocab size: %s'%len(vocab))<line_sep>vocab_list=_START_VOCAB+sorted(vocab key=vocab.get reverse=<true>)<if_stmt>len(vocab_list)<g>max_vocabulary_size<block_start>vocab_list=vocab_list[:max_vocabulary_size]<block_end><with_stmt>gfile.GFile(vocabulary_path mode="w")<as>vocab_file<block_start><for_stmt>w vocab_list<block_start>vocab_file.write(w+"\n")<block_end><block_end><block_end><def_stmt>initialize_vocabulary vocabulary_path<block_start>"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""<if_stmt>gfile.Exists(vocabulary_path)<block_start>rev_vocab=[]<with_stmt>gfile.GFile(vocabulary_path mode="r")<as>f<block_start>rev_vocab.extend(f.readlines())<block_end>rev_vocab=[line.strip().decode('utf8')<for>line rev_vocab]<line_sep>vocab=dict([(x y)<for>(y x) enumerate(rev_vocab)])<line_sep><return>vocab rev_vocab<block_end><else_stmt><block_start><raise>ValueError("Vocabulary file %s not found." vocabulary_path)<block_end><block_end>
|
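# A self-contained sketch of the forward-maximum-matching idea behind fmm_tokenizer above,
# using a toy vocabulary instead of VOCAB_DICT_FILE.
def fmm_demo(sentence, vocab):
    max_len = max(len(w) for w in vocab)
    words, begin = [], 0
    while begin < len(sentence):
        end = min(begin + max_len, len(sentence))
        while end > begin + 1 and sentence[begin:end] not in vocab:
            end -= 1                 # shrink the window until a dictionary word matches
        words.append(sentence[begin:end])
        begin = end
    return words

print(fmm_demo('北京大学生', {'北京', '北京大学', '大学生', '学生'}))   # ['北京大学', '生']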
'''
Copyright 2021 D3M Team
Copyright (c) 2021 DATA Lab at Texas A&M University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''<import_stmt>os<import_stmt>typing<import_stmt>itertools<import_stmt>numpy# type: ignore
<import_stmt>pandas# type: ignore
<import_from_stmt>d3m container exceptions utils<as>d3m_utils<import_from_stmt>d3m.base utils<as>base_utils<import_from_stmt>d3m.metadata base<as>metadata_base hyperparams<import_from_stmt>d3m.primitive_interfaces base transformer<line_sep>__all__=('DenormalizePrimitive' )<line_sep>Inputs=container.Dataset<line_sep>Outputs=container.Dataset<class_stmt>Hyperparams(hyperparams.Hyperparams)<block_start>starting_resource=hyperparams.Hyperparameter[typing.Union[str <none>]](default=<none> semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'] description="From which resource to start denormalizing. If \"None\" then it starts from the dataset entry point." )<line_sep>recursive=hyperparams.UniformBool(default=<true> semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'] description="Denormalize recursively?" )<line_sep>many_to_many=hyperparams.UniformBool(default=<false> semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'] description="Denormalize also many-to-many relations?" )<line_sep>discard_not_joined_tabular_resources=hyperparams.UniformBool(default=<false> semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'] description="Should tabular resources which were not joined be discarded?" )<block_end># TODO: Implement support for M2M relations.
# TODO: Consider the case where there are loops in foreign keys.
# TODO: Add all column names together to "other names" metadata for column.
# TODO: Consider denormalizing deep-first instead of current iterative approach.
# It seems it might be better because when one table is referencing the second one twice,
# which might reference other tables further, then currently we first join the second table
# and then have to repeat joining other tables twice. But we could first join other tables
# once to the second table, and then just do the join with already joined second table.
# Not sure how to behave in "recursive == False" case then.
# TODO: Add a test where main table has a foreign key twice to same table (for example, person 1 and person 2 to table of persons).
<class_stmt>DenormalizePrimitive(transformer.TransformerPrimitiveBase[Inputs Outputs Hyperparams])<block_start>"""
A primitive which converts a Dataset with multiple tabular resources into a Dataset with only one tabular resource,
based on known relations between tabular resources. Any resource which can be joined is joined (thus the resource
itself is removed), and tabular resources which were not joined are by default kept (controlled by the ``discard_not_joined_tabular_resources`` hyper-parameter).
If hyper-parameter ``recursive`` is set to ``True``, the primitive will join tables recursively. For example,
if table 1 (main table) has a foreign key that points to table 2, and table 2 has a foreign key that points to table 3,
then after table 2 is joined into table 1, table 1 will have a foreign key that points to table 3. So now the
primitive continues to join table 3 into the main table.
"""<line_sep>__author__='<NAME> <<EMAIL>>'<line_sep>metadata=metadata_base.PrimitiveMetadata({'id':'f31f8c1f-d1c5-43e5-a4b2-2ae4a761ef2e' 'version':'0.2.0' 'name':"Denormalize datasets" 'python_path':'d3m.primitives.autovideo.common.denormalize' 'source':{'name':'<NAME> - <NAME>' 'contact':'mailto:<EMAIL>' 'uris':['https://gitlab.com/datadrivendiscovery/common-primitives/blob/master/common_primitives/denormalize.py' 'https://gitlab.com/datadrivendiscovery/common-primitives.git' ] } 'algorithm_types':[metadata_base.PrimitiveAlgorithmType.DATA_DENORMALIZATION ] 'primitive_family':metadata_base.PrimitiveFamily.DATA_TRANSFORMATION } )<def_stmt>__init__ self * hyperparams:Hyperparams<arrow><none><block_start>super().__init__(hyperparams=hyperparams)<block_end><def_stmt>produce self * inputs:Inputs timeout:float=<none> iterations:int=<none><arrow>base.CallResult[Outputs]# If only one tabular resource is in the dataset, we do not have anything to do.
<block_start>tabular_resource_ids=[dataset_resource_id<for>dataset_resource_id,dataset_resource inputs.items()<if>isinstance(dataset_resource container.DataFrame)]<if_stmt>len(tabular_resource_ids)<eq>1<block_start><return>base.CallResult(inputs)<block_end># We could set "pick_one" to "False" because we already checked for that, but we leave it
# as "True" because then error messages are more meaningful for this case.
main_resource_id,main_resource=base_utils.get_tabular_resource(inputs self.hyperparams['starting_resource'])<line_sep># Graph is the adjacency representation for the relations graph.
graph=inputs.get_relations_graph()<line_sep>resources=dict(inputs)<line_sep>metadata=inputs.metadata<line_sep>all_resources_joined=set()<while_stmt>self._has_forward_edges(graph main_resource_id)# "resources" and "graph" are modified in-place.
<block_start>metadata,resources_joined=self._denormalize(resources metadata main_resource_id graph)<line_sep>all_resources_joined.update(resources_joined)<if_stmt><not>self.hyperparams['recursive']<block_start><break><block_end><block_end># Do we discard all other tabular resources (including joined ones)?
<if_stmt>self.hyperparams['discard_not_joined_tabular_resources']<block_start>resources_to_remove=[]<for_stmt>resource_id,resource resources.items()<block_start><if_stmt>resource_id<eq>main_resource_id<block_start><continue><block_end><if_stmt><not>isinstance(resource container.DataFrame)<block_start><continue><block_end>resources_to_remove.append(resource_id)<block_end><block_end># Discard only joined tabular resources and which no other resource depends on.
<else_stmt># We deal only with tabular resources here.
<block_start>dependent_upon_resources=self._get_dependent_upon_resources(graph)<line_sep>resources_to_remove=[resource_id<for>resource_id sorted(all_resources_joined-dependent_upon_resources)<if>resource_id<ne>main_resource_id]<block_end><for_stmt>resource_id resources_to_remove<block_start><assert_stmt>resource_id<ne>main_resource_id<del_stmt>resources[resource_id]<line_sep>metadata=metadata.remove((resource_id ) recursive=<true>)<block_end>metadata=metadata.update(() {'dimension':{'length':len(resources) } })<line_sep><return>base.CallResult(container.Dataset(resources metadata))<block_end><def_stmt>_has_forward_edges self graph:typing.Dict[str typing.List[typing.Tuple[str bool int int typing.Dict]]] resource_id:str<arrow>bool# We check first to not create a list in "graph" when accessing it.
<block_start><if_stmt>resource_id<not><in>graph<block_start><return><false><block_end><for_stmt>edge_resource_id,edge_direction,edge_from_index,edge_to_index,custom_state graph[resource_id]<block_start><if_stmt>edge_direction<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>_has_edges_to_process self graph:typing.Dict[str typing.List[typing.Tuple[str bool int int typing.Dict]]] resource_id:str<arrow>bool# We check first to not create a list in "graph" when accessing it.
<block_start><if_stmt>resource_id<not><in>graph<block_start><return><false><block_end><for_stmt>edge_resource_id,edge_direction,edge_from_index,edge_to_index,custom_state graph[resource_id]<block_start><if_stmt>custom_state.get('process' <false>)<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>_denormalize self resources:typing.Dict metadata:metadata_base.DataMetadata main_resource_id:str graph:typing.Dict[str typing.List[typing.Tuple[str bool int int typing.Dict]]]<arrow>typing.Tuple[metadata_base.DataMetadata typing.Set[str]]<block_start>"""
Finds all tables which are pointed to by the main resource and joins them into the main table.
``resources`` and ``graph`` are modified in-place.
"""<line_sep>resources_joined:typing.Set[str]=set()<line_sep>main_resource=resources[main_resource_id]<line_sep># Should not really happen.
<if_stmt>main_resource_id<not><in>graph<block_start><return>metadata resources_joined<block_end># We mark all current edges to be processed. We might be adding more edges to the list,
# but for this call we only want to process those which existed at the beginning.
<for_stmt>edge_resource_id,edge_direction,edge_from_index,edge_to_index,custom_state graph[main_resource_id]<block_start>custom_state['process']=<true><block_end><while_stmt>self._has_edges_to_process(graph main_resource_id)<block_start>edge_resource_id,edge_direction,edge_from_index,edge_to_index,custom_state=graph[main_resource_id][0]<if_stmt><not>custom_state.get('process' <false>)<block_start><continue><block_end><del_stmt>custom_state['process']<if_stmt><not>edge_direction# For now we just remove this relation.
# TODO: Support M2M relations.
# We remove the relation we would have joined, backward.
<block_start>self._remove_graph_edge(graph main_resource_id edge_resource_id <false> edge_from_index edge_to_index)<line_sep># We remove the relation we would have joined, forward.
self._remove_graph_edge(graph edge_resource_id main_resource_id <true> edge_to_index edge_from_index)<line_sep><continue><block_end><if_stmt>main_resource_id<eq>edge_resource_id# TODO: Implement.
<block_start><raise>NotImplementedError("Support for loops is not implemented yet.")<block_end># Calling "_join" updates column indices in "graph" and "metadata"
# and also removes the current joined edge from "graph"
main_resource,metadata=self._join(main_resource_id main_resource edge_from_index edge_resource_id resources[edge_resource_id] edge_to_index metadata graph )<line_sep>resources_joined.add(edge_resource_id)<block_end>resources[main_resource_id]=main_resource<line_sep><return>metadata resources_joined<block_end><def_stmt>_row_of_missing_values self resource:container.DataFrame metadata:metadata_base.DataMetadata resource_id:str<arrow>typing.List[typing.Any]<block_start>row=[]<for_stmt>column_index,dtype enumerate(resource.dtypes)<block_start><if_stmt>dtype.kind<in>['b' 'i' 'u' 'f' 'c']<block_start>row.append(numpy.nan)<block_end><elif_stmt>dtype.kind<eq>'O'<and>issubclass(metadata.query_column_field(column_index 'structural_type' at=(resource_id )) str)<block_start>row.append('')<block_end><else_stmt><block_start>row.append(<none>)<block_end><block_end><return>row<block_end><def_stmt>_join self main_resource_id:str main_resource:container.DataFrame main_column_index:int foreign_resource_id:str foreign_resource:container.DataFrame foreign_column_index:int metadata:metadata_base.DataMetadata graph:typing.Dict[str typing.List[typing.Tuple[str bool int int typing.Dict]]]<arrow>typing.Tuple[container.DataFrame metadata_base.DataMetadata]<block_start><if_stmt>main_resource_id<eq>foreign_resource_id# TODO: Implement.
<block_start><raise>NotImplementedError("Support for loops is not implemented yet.")<block_end># We use this information later on.
one_to_one_relation=foreign_resource.iloc[: foreign_column_index].sort_values().equals(main_resource.iloc[: main_column_index].sort_values())<line_sep>foreign_indexer=pandas.Index(foreign_resource.iloc[: foreign_column_index]).get_indexer(main_resource.iloc[: main_column_index])<line_sep># "get_indexer" sets all unresolved values to -1.
unresolved_rows=foreign_indexer<eq>-1<line_sep># We store dtypes so that we can later on compare.
foreign_resource_dtypes=foreign_resource.dtypes<line_sep># -1 is converted into the last row, but we set it to a row of missing values if any rows are unresolved.
resolved_foreign_resource=foreign_resource.take(foreign_indexer).reset_index(drop=<true>)<if_stmt>unresolved_rows.any()# Set all unresolved rows to a row of missing values.
<block_start>resolved_foreign_resource.iloc[unresolved_rows :]=self._row_of_missing_values(foreign_resource metadata foreign_resource_id)<block_end># And store final dtypes so that we can later on compare.
resolved_foreign_resource_dtypes=resolved_foreign_resource.dtypes<line_sep># This makes a copy so that we can modify metadata in-place.
metadata=metadata.update((metadata_base.ALL_ELEMENTS ) {} )<line_sep># TODO: Move this to metadata API.
# We reorder metadata for rows.
<for_stmt>element_metadata_entry [metadata._current_metadata.all_elements metadata._current_metadata.elements[foreign_resource_id] ]<block_start><if_stmt>element_metadata_entry<is><none><block_start><continue><block_end>elements=element_metadata_entry.elements<line_sep>new_elements_evolver=d3m_utils.EMPTY_PMAP.evolver()<for_stmt>i,row_index enumerate(foreign_indexer)<block_start><if_stmt>row_index<eq>-1<block_start><continue><block_end><if_stmt>row_index<in>elements<block_start>new_elements_evolver.set(i elements[row_index])<block_end><block_end>element_metadata_entry.elements=new_elements_evolver.persistent()<line_sep>element_metadata_entry.is_elements_empty=<not>element_metadata_entry.elements<line_sep>element_metadata_entry.update_is_empty()<block_end><assert_stmt>resolved_foreign_resource.shape[1]<g>0<line_sep>main_resource=pandas.concat([main_resource.iloc[: 0:main_column_index] resolved_foreign_resource main_resource.iloc[: main_column_index+1:] ] axis=1)<line_sep>old_semantic_types=metadata.query_column(main_column_index at=(main_resource_id )).get('semantic_types' [])<line_sep># First we remove metadata for the existing column.
# This makes a copy so that we can modify metadata in-place.
metadata=metadata.remove_column(main_column_index at=(main_resource_id ) recursive=<true>)<line_sep># TODO: Move this to metadata API.
# Move columns and make space for foreign metadata to be inserted.
# We iterate over a list so that we can change dict while iterating.
<for_stmt>element_metadata_entry itertools.chain([metadata._current_metadata.all_elements.all_elements<if>metadata._current_metadata.all_elements<is><not><none><else><none>] metadata._current_metadata.all_elements.elements.values()<if>metadata._current_metadata.all_elements<is><not><none><else>iter([<none>]) [metadata._current_metadata.elements[main_resource_id].all_elements] metadata._current_metadata.elements[main_resource_id].elements.values() )<block_start><if_stmt>element_metadata_entry<is><none><block_start><continue><block_end>new_elements_evolver=element_metadata_entry.elements.evolver()<for_stmt>element_index element_metadata_entry.elements.keys(reverse=<true>)# We removed metadata for "main_column_index".
<block_start><assert_stmt>element_index<ne>main_column_index<line_sep>element_index=typing.cast(int element_index)<if_stmt>main_column_index<l>element_index<block_start>metadata_dict=new_elements_evolver[element_index]<line_sep>new_elements_evolver.remove(element_index)<line_sep>new_elements_evolver.set(element_index+resolved_foreign_resource.shape[1]-1 metadata_dict)<block_end><block_end>element_metadata_entry.elements=new_elements_evolver.persistent()<line_sep>element_metadata_entry.is_elements_empty=<not>element_metadata_entry.elements<line_sep>element_metadata_entry.update_is_empty()<block_end># And copy over metadata for new (replaced) columns in place of the existing column.
<for_stmt>column_index range(resolved_foreign_resource.shape[1])# To go over "ALL_ELEMENTS" and all rows.
<block_start><for_stmt>element metadata.get_elements((foreign_resource_id ))<block_start>metadata=metadata.copy_to(metadata [foreign_resource_id element metadata_base.ALL_ELEMENTS] [main_resource_id element main_column_index+column_index] ignore_all_elements=<true> )<line_sep>metadata=metadata.copy_to(metadata [foreign_resource_id element column_index] [main_resource_id element main_column_index+column_index] ignore_all_elements=<true> )<block_end><block_end># Update metadata for new (replaced) columns.
<for_stmt>column_index range(main_column_index main_column_index+resolved_foreign_resource.shape[1])# We copy semantic types describing the role of the column from the original column to all new (replaced) columns.
# TODO: Do not hard-code this list here but maybe extract it from "definitions.json"?
<block_start><for_stmt>semantic_type ['https://metadata.datadrivendiscovery.org/types/Attribute' 'https://metadata.datadrivendiscovery.org/types/Boundary' 'https://metadata.datadrivendiscovery.org/types/BoundingPolygon' 'https://metadata.datadrivendiscovery.org/types/Interval' 'https://metadata.datadrivendiscovery.org/types/IntervalEnd' 'https://metadata.datadrivendiscovery.org/types/IntervalStart' 'https://metadata.datadrivendiscovery.org/types/InstanceWeight' 'https://metadata.datadrivendiscovery.org/types/PrivilegedData' 'https://metadata.datadrivendiscovery.org/types/RedactedPrivilegedData' 'https://metadata.datadrivendiscovery.org/types/RedactedTarget' 'https://metadata.datadrivendiscovery.org/types/SuggestedPrivilegedData' 'https://metadata.datadrivendiscovery.org/types/SuggestedTarget' 'https://metadata.datadrivendiscovery.org/types/Target' 'https://metadata.datadrivendiscovery.org/types/PredictedTarget' 'https://metadata.datadrivendiscovery.org/types/TrueTarget' 'https://metadata.datadrivendiscovery.org/types/Score' 'https://metadata.datadrivendiscovery.org/types/Confidence' 'https://metadata.datadrivendiscovery.org/types/Time' 'https://metadata.datadrivendiscovery.org/types/Location' ]<block_start><if_stmt>semantic_type<in>old_semantic_types<block_start>metadata=metadata.add_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS column_index) semantic_type)<block_end><block_end>is_column_unique=main_resource.iloc[: column_index].is_unique<line_sep>column_semantic_types=metadata.query_column(column_index at=(main_resource_id )).get('semantic_types' [])<line_sep>was_column_unique='https://metadata.datadrivendiscovery.org/types/PrimaryKey'<in>column_semantic_types<or>'https://metadata.datadrivendiscovery.org/types/UniqueKey'<in>column_semantic_types<line_sep># Foreign keys can reference same foreign row multiple times, so values in this column might not be even
# unique anymore, nor are they necessarily a primary key. So we remove the semantic types marking a column as such.
# We re-set semantic type for any real primary key later on.
metadata=metadata.remove_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS column_index) 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')<line_sep>metadata=metadata.remove_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS column_index) 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')<line_sep>metadata=metadata.remove_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS column_index) 'https://metadata.datadrivendiscovery.org/types/UniqueKey')<line_sep># We re-set semantic type for column which was and is still unique.
<if_stmt>was_column_unique<and>is_column_unique<block_start>metadata=metadata.add_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS column_index) 'https://metadata.datadrivendiscovery.org/types/UniqueKey')<block_end>old_dtype=foreign_resource_dtypes.iloc[column_index-main_column_index]<line_sep>new_dtype=resolved_foreign_resource_dtypes.iloc[column_index-main_column_index]<if_stmt>old_dtype<is><not>new_dtype# Not a nice way to convert a dtype to Python type, but it works.
<block_start>old_type=type(numpy.zeros(1 old_dtype).tolist()[0])<line_sep>new_type=type(numpy.zeros(1 new_dtype).tolist()[0])<if_stmt>old_type<is><not>new_type# Type changed, we have to update metadata about the structural type.
<block_start>metadata=metadata.update((main_resource_id metadata_base.ALL_ELEMENTS column_index) {'structural_type':new_type })<block_end><block_end><block_end># If the original column was a primary key, we should re-set it back.
<if_stmt>'https://metadata.datadrivendiscovery.org/types/PrimaryKey'<in>old_semantic_types<and>(one_to_one_relation<or><not>unresolved_rows.any())<block_start><if_stmt>main_resource.iloc[: main_column_index].is_unique<block_start>metadata=metadata.add_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS main_column_index) 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')<line_sep># Removing "UniqueKey" if it was set before, "PrimaryKey" surpasses it.
metadata=metadata.remove_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS main_column_index) 'https://metadata.datadrivendiscovery.org/types/UniqueKey')<block_end><else_stmt><block_start>metadata=metadata.add_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS main_column_index) 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')<block_end><block_end><elif_stmt>'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey'<in>old_semantic_types<and>(one_to_one_relation<or><not>unresolved_rows.any())<block_start>metadata=metadata.add_semantic_type((main_resource_id metadata_base.ALL_ELEMENTS main_column_index) 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')<block_end># TODO: Update boundary columns and "confidence for" references.
# This is not currently needed because all file collections are just one column so they do not
# move the column indices. But as a general case we should be updating all standard column references.
# Update the number of columns in the main resource.
metadata=metadata.update((main_resource_id metadata_base.ALL_ELEMENTS) {'dimension':{'length':main_resource.shape[1] } })<line_sep># We remove the relation we just joined, forward.
self._remove_graph_edge(graph main_resource_id foreign_resource_id <true> main_column_index foreign_column_index)<line_sep># We remove the relation we just joined, backward.
self._remove_graph_edge(graph foreign_resource_id main_resource_id <false> foreign_column_index main_column_index)<line_sep># We have to update column indices if they have changed because we inserted new columns.
<for_stmt>resource_id,edges graph.items()<block_start><if_stmt>resource_id<eq>main_resource_id<block_start><for_stmt>i,(edge_resource_id edge_direction edge_from_index edge_to_index custom_state) enumerate(edges)<block_start><if_stmt>edge_direction<and>main_column_index<l>edge_from_index# We replaced one column with "resolved_foreign_resource.shape[1]" columns, so there is
# "resolved_foreign_resource.shape[1] - 1" new columns to shift indices for.
<block_start>edges[i]=(edge_resource_id edge_direction edge_from_index+resolved_foreign_resource.shape[1]-1 edge_to_index custom_state)<block_end><block_end><block_end><else_stmt><block_start><for_stmt>i,(edge_resource_id edge_direction edge_from_index edge_to_index custom_state) enumerate(edges)<block_start><if_stmt>edge_resource_id<eq>main_resource_id<and><not>edge_direction<and>main_column_index<l>edge_to_index# We replaced one column with "resolved_foreign_resource.shape[1]" columns, so there is
# "resolved_foreign_resource.shape[1] - 1" new columns to shift indices for.
<block_start>edges[i]=(edge_resource_id edge_direction edge_from_index edge_to_index+resolved_foreign_resource.shape[1]-1 custom_state)<block_end><block_end><block_end><block_end># If foreign resource has any additional relations, we copy them to new columns in the main resource.
<if_stmt>foreign_resource_id<in>graph# We iterate over a list so that we can change graph while iterating.
<block_start><for_stmt>edge_resource_id,edge_direction,edge_from_index,edge_to_index,custom_state list(graph[foreign_resource_id])<block_start><if_stmt>edge_resource_id<in>[main_resource_id foreign_resource_id]# TODO: Implement.
<block_start><raise>NotImplementedError("Support for loops is not implemented yet.")<block_end><if_stmt>edge_direction<block_start>graph[main_resource_id].append((edge_resource_id <true> main_column_index+edge_from_index edge_to_index {}))<line_sep>graph[edge_resource_id].append((main_resource_id <false> edge_to_index main_column_index+edge_from_index {}))<block_end><else_stmt># TODO: What should we do about backward relations?
# For now we just ignore backward relations because we do not support M2M relations.
# For the foreign resource we just joined, we could change all relations to instead point
# to the main resource. This might be tricky though if we have a situation where main table
# includes table 1 twice, and table 1 has a relation to table 2. If, after joining table 1
# the first time, we rewrite all backward relations from table 2 to table 1 to point to the main table,
# then when we get to join table 1 the second time we might have issues. This is why it might
# be better to start joining depth-first. See another TODO.
# TODO: We might have to also update foreign key metadata in this case.
# We might want to update metadata so that if table 1 is joined to the main table, and there is
# also table 2 which has a foreign key that points to table 1, then the foreign key in table 2
# should point to the main table after joining. But what if main table has a foreign key to
# table 1 twice? How do we then update metadata in table 2 to point twice to table 1?
# Metadata does not support that.
# A special case for now. If the relation is one-to-one, then we can move backward relations to the
# main resource without complications mentioned in TODOs above. Maybe some additional columns might
# be joined through M2M relations in this case, once that is supported, but generally this should not
# be a problem. It might add some duplicated columns at that point. This special case is useful
# when "learningData" with only targets is pointing to some other table with real attributes.
<block_start><if_stmt>one_to_one_relation<block_start>self._remove_graph_edge(graph edge_resource_id foreign_resource_id <true> edge_to_index edge_from_index)<line_sep>self._remove_graph_edge(graph foreign_resource_id edge_resource_id <false> edge_from_index edge_to_index)<line_sep>graph[main_resource_id].append((edge_resource_id <false> main_column_index+edge_from_index edge_to_index custom_state))<line_sep>graph[edge_resource_id].append((main_resource_id <true> edge_to_index main_column_index+edge_from_index custom_state))<line_sep># We override metadata for foreign key to make it point to the main resource (and not to foreign resource anymore).
metadata=metadata.update((edge_resource_id metadata_base.ALL_ELEMENTS edge_to_index) {'foreign_key':{'type':'COLUMN' 'resource_id':main_resource_id 'column_index':main_column_index+edge_from_index 'column_name':metadata_base.NO_VALUE } })<block_end><block_end><block_end><block_end><return>main_resource metadata<block_end><def_stmt>_get_dependent_upon_resources self graph:typing.Dict[str typing.List[typing.Tuple[str bool int int typing.Dict]]]<arrow>typing.Set[str]<block_start>"""
Returns a set of resources which other resources depend on.
"""<line_sep>dependent_upon_resources=set()<for_stmt>resource_id,edges graph.items()<block_start><for_stmt>edge_resource_id,edge_direction,edge_from_index,edge_to_index,custom_state edges<block_start><if_stmt>edge_direction<block_start>dependent_upon_resources.add(edge_resource_id)<block_end><block_end><block_end><return>dependent_upon_resources<block_end><def_stmt>_remove_graph_edge self graph:typing.Dict[str typing.List[typing.Tuple[str bool int int typing.Dict]]] resource_id:str edge_resource_id:str edge_direction:bool edge_from_index:int edge_to_index:int<arrow><none><block_start><assert_stmt>resource_id<in>graph<for_stmt>i,edge enumerate(graph[resource_id])<block_start><if_stmt>edge[0:4]<eq>(edge_resource_id edge_direction edge_from_index edge_to_index)<block_start><del_stmt>graph[resource_id][i]<line_sep><break><block_end><block_end><if_stmt><not>graph[resource_id]<block_start><del_stmt>graph[resource_id]<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>logging<import_stmt>pprint<import_stmt>sys<line_sep>logging.basicConfig()<for_stmt>dataset_file_path sys.argv[1:]<block_start><try_stmt><block_start>dataset=container.Dataset.load('file://{dataset_doc_path}'.format(dataset_doc_path=os.path.abspath(dataset_file_path)))<block_end><except_stmt>Exception<as>error<block_start><raise>Exception("Unable to load dataset: {dataset_doc_path}".format(dataset_doc_path=dataset_file_path))<from>error<block_end>primitive=DenormalizePrimitive(hyperparams=Hyperparams.defaults().replace({'recursive':<true> 'discard_not_joined_tabular_resources':<false> }))<try_stmt><block_start>denormalized_dataset=primitive.produce(inputs=dataset).value<line_sep>pprint.pprint(denormalized_dataset)<line_sep>denormalized_dataset.metadata.pretty_print()<block_end><except_stmt>Exception<as>error<block_start><raise>Exception("Unable to denormalize dataset: {dataset_doc_path}".format(dataset_doc_path=dataset_file_path))<from>error<block_end><block_end><block_end>
|
# compare Brython stdlib and CPython stdlib
<import_stmt>os<import_stmt>filecmp<import_stmt>shutil<line_sep>bdir=os.path.join(os.path.dirname(os.getcwd()) "www" "src" "Lib")<line_sep>p_old_dir=r'c:\Python39\Lib'<line_sep>p_new_dir=r'c:\Python310\Lib'<for_stmt>dirpath,dirnames,filenames os.walk(bdir)<block_start><if_stmt>"site-packages"<in>dirnames<block_start>dirnames.remove("site-packages")<block_end>prefix=dirpath[len(bdir)+1:]<line_sep>print(prefix)<for_stmt>filename filenames<block_start><if_stmt><not>filename.endswith(".py")<block_start><continue><block_end>ppath=p_old_dir+"\\"+prefix+"\\"+filename<if_stmt>os.path.exists(ppath)<block_start>brython_path=os.path.join(dirpath filename)<line_sep>brython_short=brython_path[len(bdir)+1:]<if_stmt>filecmp.cmp(brython_path ppath shallow=<false>)<block_start>p_new_path=p_new_dir+"\\"+prefix+"\\"+filename<if_stmt>os.path.exists(p_new_path)<block_start><if_stmt>filecmp.cmp(brython_path p_new_path shallow=<false>)#print(brython_short, "same as CPython 3.9",
# "not changed in Python 3.10")
<block_start><pass><block_end><else_stmt><block_start>print(brython_short "same as CPython 3.9" "replace by 3.10 version")<line_sep>shutil.copyfile(p_new_path brython_path)<block_end><block_end><else_stmt><block_start>print('***' brython_short "same as CPython 3.9" "not in Python 3.10")<block_end><block_end><else_stmt><block_start>p_new_path=p_new_dir+"\\"+prefix+"\\"+filename<if_stmt>os.path.exists(p_new_path)<block_start><if_stmt>filecmp.cmp(brython_path p_new_path shallow=<false>)#print(brython_short, "already changed to Python 3.10")
<block_start><pass><block_end><else_stmt><block_start>print('***' brython_short 'not the same as CPython 3.9')<block_end><block_end><else_stmt><block_start>print('***' brython_short "not in Python 3.10")<block_end><block_end><block_end><else_stmt><block_start>p_new_path=p_new_dir+"\\"+prefix+"\\"+filename<if_stmt>os.path.exists(p_new_path)<block_start>print(ppath "not in CPython 3.9, but present in 3.10")<block_end><else_stmt><block_start>print(ppath "not in CPython 3.9 and 3.10")<block_end><block_end><block_end><block_end>
|
<import_from_stmt>.base_network *<class_stmt>StudentNetwork(BaseNetwork)<block_start><def_stmt>__init__ self in_dim:int out_dim:int width:int <block_start>super(StudentNetwork self).__init__()<line_sep>self.in_dim=in_dim<line_sep>self.out_dim=out_dim<line_sep>self.width=width<line_sep>self.layers=nn.Sequential(nn.Linear(in_dim width) nn.ELU() nn.Linear(width width) nn.ELU() nn.Linear(width out_dim) )<block_end><def_stmt>forward self x:torch.Tensor <arrow>torch.Tensor<block_start><return>self.layers(x)<block_end><block_end>
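# Hedged usage sketch (not part of the original module): a forward pass through StudentNetwork,
# assuming BaseNetwork is a torch.nn.Module subclass. The dimensions below are illustrative only.
def _example_forward():
    import torch

    net = StudentNetwork(in_dim=4, out_dim=2, width=32)
    batch = torch.randn(8, 4)   # 8 samples with 4 features each
    return net(batch)           # ELU MLP: (8, 4) -> (8, 32) -> (8, 32) -> (8, 2)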
|
<import_from_future_stmt> nested_scopes generators division absolute_import<import_from_future_stmt> with_statement print_function<import_stmt>os sys inspect<import_from_stmt>io StringIO<line_sep>currentdir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))<line_sep>parentdir=os.path.dirname(currentdir)<line_sep>sys.path.insert(0 parentdir)<line_sep># here, before importing suitename, is an opportunity to set command line
# options and to redirect output.
# sys.argv.extend(["--noinc", "--chart", "--causes"])
# sys.stdout = open("output.txt", "w")
<import_stmt>suitename suites regression<line_sep># The test data:
#
# The first portion is to test membership in every cluster.
# The second portion is to test triage by out of bounds angle
# for each dihedral angle. The unnecessary part of each residue is damaged so
# the extra suite generated will be incomplete and will vanish.
input1=u''' :1a: : : : : Z: 9999.000: 9999.000: 9999.000: 81.495: 212.250: 288.831: 180.000
:1a: : : : : Z: 294.967: 173.990: 53.550: 81.035: 9999.000: 9999.000: 180.000
:1m: : : : : Z: 9999.000: 9999.000: 9999.000: 83.513: 218.120: 291.593: 180.000
:1m: : : : : Z: 292.247: 222.300: 58.067: 86.093: 9999.000: 9999.000: 180.000
:1L: : : : : Z: 9999.000: 9999.000: 9999.000: 85.664: 245.014: 268.257: 180.000
:1L: : : : : Z: 303.879: 138.164: 61.950: 79.457: 9999.000: 9999.000: 180.000
:&a: : : : : Z: 9999.000: 9999.000: 9999.000: 82.112: 190.682: 264.945: 180.000
:&a: : : : : Z: 295.967: 181.839: 51.455: 81.512: 9999.000: 9999.000: 180.000
:7a: : : : : Z: 9999.000: 9999.000: 9999.000: 83.414: 217.400: 222.006: 180.000
:7a: : : : : Z: 302.856: 160.719: 49.097: 82.444: 9999.000: 9999.000: 180.000
:3a: : : : : Z: 9999.000: 9999.000: 9999.000: 85.072: 216.324: 173.276: 180.000
:3a: : : : : Z: 289.320: 164.132: 45.876: 84.956: 9999.000: 9999.000: 180.000
:9a: : : : : Z: 9999.000: 9999.000: 9999.000: 83.179: 210.347: 121.474: 180.000
:9a: : : : : Z: 288.568: 157.268: 49.347: 81.047: 9999.000: 9999.000: 180.000
:1g: : : : : Z: 9999.000: 9999.000: 9999.000: 80.888: 218.636: 290.735: 180.000
:1g: : : : : Z: 167.447: 159.565: 51.326: 85.213: 9999.000: 9999.000: 180.000
:7d: : : : : Z: 9999.000: 9999.000: 9999.000: 83.856: 238.750: 256.875: 180.000
:7d: : : : : Z: 69.562: 170.200: 52.800: 85.287: 9999.000: 9999.000: 180.000
:3d: : : : : Z: 9999.000: 9999.000: 9999.000: 85.295: 244.085: 203.815: 180.000
:3d: : : : : Z: 65.880: 181.130: 54.680: 86.035: 9999.000: 9999.000: 180.000
:5d: : : : : Z: 9999.000: 9999.000: 9999.000: 79.671: 202.471: 63.064: 180.000
:5d: : : : : Z: 68.164: 143.450: 49.664: 82.757: 9999.000: 9999.000: 180.000
:3g: : : : : Z: 9999.000: 9999.000: 9999.000: 84.000: 195.000: 146.000: 180.000
:3g: : : : : Z: 170.000: 170.000: 52.000: 84.000: 9999.000: 9999.000: 180.000
:1e: : : : : Z: 9999.000: 9999.000: 9999.000: 80.514: 200.545: 280.510: 180.000
:1e: : : : : Z: 249.314: 82.662: 167.890: 85.507: 9999.000: 9999.000: 180.000
:1c: : : : : Z: 9999.000: 9999.000: 9999.000: 80.223: 196.591: 291.299: 180.000
:1c: : : : : Z: 153.060: 194.379: 179.061: 83.648: 9999.000: 9999.000: 180.000
:1f: : : : : Z: 9999.000: 9999.000: 9999.000: 81.395: 203.030: 294.445: 180.000
:1f: : : : : Z: 172.195: 138.540: 175.565: 84.470: 9999.000: 9999.000: 180.000
:5j: : : : : Z: 9999.000: 9999.000: 9999.000: 87.417: 223.558: 80.175: 180.000
:5j: : : : : Z: 66.667: 109.150: 176.475: 83.833: 9999.000: 9999.000: 180.000
:5n: : : : : Z: 9999.000: 9999.000: 9999.000: 86.055: 246.502: 100.392: 180.000
:5n: : : : : Z: 73.595: 213.752: 183.395: 85.483: 9999.000: 9999.000: 180.000
:!!: : : : : Z: 9999.000: 9999.000: 9999.000: 0.000: 0.000: 0.000: 0.000
:!!: : : : : Z: 0.000: 0.000: 0.000: 0.000: 9999.000: 9999.000: 0.000
:1b: : : : : Z: 9999.000: 9999.000: 9999.000: 84.215: 215.014: 288.672: 180.000
:1b: : : : : Z: 300.420: 177.476: 58.307: 144.841: 9999.000: 9999.000: 180.000
:1[: : : : : Z: 9999.000: 9999.000: 9999.000: 82.731: 220.463: 288.665: 180.000
:1[: : : : : Z: 296.983: 221.654: 54.213: 143.771: 9999.000: 9999.000: 180.000
:3b: : : : : Z: 9999.000: 9999.000: 9999.000: 84.700: 226.400: 168.336: 180.000
:3b: : : : : Z: 292.771: 177.629: 48.629: 147.950: 9999.000: 9999.000: 180.000
:1z: : : : : Z: 9999.000: 9999.000: 9999.000: 83.358: 206.042: 277.567: 180.000
:1z: : : : : Z: 195.700: 161.600: 50.750: 145.258: 9999.000: 9999.000: 180.000
:5z: : : : : Z: 9999.000: 9999.000: 9999.000: 82.614: 206.440: 52.524: 180.000
:5z: : : : : Z: 163.669: 148.421: 50.176: 147.590: 9999.000: 9999.000: 180.000
:7p: : : : : Z: 9999.000: 9999.000: 9999.000: 84.285: 236.600: 220.400: 180.000
:7p: : : : : Z: 68.300: 200.122: 53.693: 145.730: 9999.000: 9999.000: 180.000
:5p: : : : : Z: 9999.000: 9999.000: 9999.000: 84.457: 213.286: 69.086: 180.000
:5p: : : : : Z: 75.500: 156.671: 57.486: 147.686: 9999.000: 9999.000: 180.000
:1t: : : : : Z: 9999.000: 9999.000: 9999.000: 81.200: 199.243: 288.986: 180.000
:1t: : : : : Z: 180.286: 194.743: 178.200: 147.386: 9999.000: 9999.000: 180.000
:5q: : : : : Z: 9999.000: 9999.000: 9999.000: 82.133: 204.933: 69.483: 180.000
:5q: : : : : Z: 63.417: 115.233: 176.283: 145.733: 9999.000: 9999.000: 180.000
:1o: : : : : Z: 9999.000: 9999.000: 9999.000: 83.977: 216.508: 287.192: 180.000
:1o: : : : : Z: 297.254: 225.154: 293.738: 150.677: 9999.000: 9999.000: 180.000
:7r: : : : : Z: 9999.000: 9999.000: 9999.000: 84.606: 232.856: 248.125: 180.000
:7r: : : : : Z: 63.269: 181.975: 295.744: 149.744: 9999.000: 9999.000: 180.000
:5r: : : : : Z: 9999.000: 9999.000: 9999.000: 83.000: 196.900: 65.350: 180.000
:5r: : : : : Z: 60.150: 138.425: 292.550: 154.275: 9999.000: 9999.000: 180.000
:2a: : : : : Z: 9999.000: 9999.000: 9999.000: 145.399: 260.339: 288.756: 180.000
:2a: : : : : Z: 288.444: 192.733: 53.097: 84.067: 9999.000: 9999.000: 180.000
:4a: : : : : Z: 9999.000: 9999.000: 9999.000: 146.275: 259.783: 169.958: 180.000
:4a: : : : : Z: 298.450: 169.583: 50.908: 83.967: 9999.000: 9999.000: 180.000
:0a: : : : : Z: 9999.000: 9999.000: 9999.000: 149.286: 223.159: 139.421: 180.000
:0a: : : : : Z: 284.559: 158.107: 47.900: 84.424: 9999.000: 9999.000: 180.000
:#a: : : : : Z: 9999.000: 9999.000: 9999.000: 148.006: 191.944: 146.231: 180.000
:#a: : : : : Z: 289.288: 150.781: 42.419: 84.956: 9999.000: 9999.000: 180.000
:4g: : : : : Z: 9999.000: 9999.000: 9999.000: 148.028: 256.922: 165.194: 180.000
:4g: : : : : Z: 204.961: 165.194: 49.383: 82.983: 9999.000: 9999.000: 180.000
:6g: : : : : Z: 9999.000: 9999.000: 9999.000: 145.337: 262.869: 79.588: 180.000
:6g: : : : : Z: 203.863: 189.688: 58.000: 84.900: 9999.000: 9999.000: 180.000
:8d: : : : : Z: 9999.000: 9999.000: 9999.000: 148.992: 270.596: 240.892: 180.000
:8d: : : : : Z: 62.225: 176.271: 53.600: 87.262: 9999.000: 9999.000: 180.000
:4d: : : : : Z: 9999.000: 9999.000: 9999.000: 149.822: 249.956: 187.678: 180.000
:4d: : : : : Z: 80.433: 198.133: 61.000: 89.378: 9999.000: 9999.000: 180.000
:6d: : : : : Z: 9999.000: 9999.000: 9999.000: 146.922: 241.222: 88.894: 180.000
:6d: : : : : Z: 59.344: 160.683: 52.333: 83.417: 9999.000: 9999.000: 180.000
:2g: : : : : Z: 9999.000: 9999.000: 9999.000: 141.900: 258.383: 286.517: 180.000
:2g: : : : : Z: 178.267: 165.217: 48.350: 84.783: 9999.000: 9999.000: 180.000
:2h: : : : : Z: 9999.000: 9999.000: 9999.000: 147.782: 260.712: 290.424: 180.000
:2h: : : : : Z: 296.200: 177.282: 175.594: 86.565: 9999.000: 9999.000: 180.000
:4n: : : : : Z: 9999.000: 9999.000: 9999.000: 143.722: 227.256: 203.789: 180.000
:4n: : : : : Z: 73.856: 216.733: 194.444: 80.911: 9999.000: 9999.000: 180.000
:0i: : : : : Z: 9999.000: 9999.000: 9999.000: 148.717: 274.683: 100.283: 180.000
:0i: : : : : Z: 80.600: 248.133: 181.817: 82.600: 9999.000: 9999.000: 180.000
:6n: : : : : Z: 9999.000: 9999.000: 9999.000: 150.311: 268.383: 84.972: 180.000
:6n: : : : : Z: 63.811: 191.483: 176.644: 85.600: 9999.000: 9999.000: 180.000
:6j: : : : : Z: 9999.000: 9999.000: 9999.000: 141.633: 244.100: 66.056: 180.000
:6j: : : : : Z: 71.667: 122.167: 182.200: 83.622: 9999.000: 9999.000: 180.000
:0k: : : : : Z: 9999.000: 9999.000: 9999.000: 149.070: 249.780: 111.520: 180.000
:0k: : : : : Z: 278.370: 207.780: 287.820: 86.650: 9999.000: 9999.000: 180.000
:2[: : : : : Z: 9999.000: 9999.000: 9999.000: 146.383: 259.402: 291.275: 180.000
:2[: : : : : Z: 291.982: 210.048: 54.412: 147.760: 9999.000: 9999.000: 180.000
:4b: : : : : Z: 9999.000: 9999.000: 9999.000: 145.256: 244.622: 162.822: 180.000
:4b: : : : : Z: 294.159: 171.630: 45.900: 145.804: 9999.000: 9999.000: 180.000
:0b: : : : : Z: 9999.000: 9999.000: 9999.000: 147.593: 248.421: 112.086: 180.000
:0b: : : : : Z: 274.943: 164.764: 56.843: 146.264: 9999.000: 9999.000: 180.000
:4p: : : : : Z: 9999.000: 9999.000: 9999.000: 150.077: 260.246: 213.785: 180.000
:4p: : : : : Z: 71.900: 207.638: 56.715: 148.131: 9999.000: 9999.000: 180.000
:6p: : : : : Z: 9999.000: 9999.000: 9999.000: 146.415: 257.831: 89.597: 180.000
:6p: : : : : Z: 67.923: 173.051: 55.513: 147.623: 9999.000: 9999.000: 180.000
:2z: : : : : Z: 9999.000: 9999.000: 9999.000: 142.900: 236.550: 268.800: 180.000
:2z: : : : : Z: 180.783: 185.133: 54.467: 143.350: 9999.000: 9999.000: 180.000
:4s: : : : : Z: 9999.000: 9999.000: 9999.000: 149.863: 247.562: 170.488: 180.000
:4s: : : : : Z: 277.938: 84.425: 176.413: 148.087: 9999.000: 9999.000: 180.000
:2u: : : : : Z: 9999.000: 9999.000: 9999.000: 143.940: 258.200: 298.240: 180.000
:2u: : : : : Z: 279.640: 183.680: 183.080: 145.120: 9999.000: 9999.000: 180.000
:2o: : : : : Z: 9999.000: 9999.000: 9999.000: 147.342: 256.475: 295.508: 180.000
:2o: : : : : Z: 287.408: 194.525: 293.725: 150.458: 9999.000: 9999.000: 180.000
:epsilon: : : : : Z: 294.967: 173.990: 53.550: 81.495: 154.000: 288.831: 180.000
:epsilon: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:alpha: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:alpha: : : : : Z: 24.000: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:beta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:beta: : : : : Z: 294.967: 49.000: 53.550: 81.495: 212.250: 288.831: 180.000
:zeta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 24.000: 180.000
:zeta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:delta-1: : : : : Z: 294.967: 173.990: 53.550: 59.000: 212.250: 288.831: 180.000
:delta-1: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:gamma: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:gamma: : : : : Z: 294.967: 173.990: 139.000: 81.495: 212.250: 288.831: 180.000
:delta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:delta: : : : : Z: 294.967: 173.990: 53.550: 59.000: 212.250: 288.831: 180.000
'''<line_sep># A manually selected group of test cases from real files
# designed to test each code path through membership(). The first residue from
# each pair has intentionally been damaged so that it will not produce separate
# output from a report. We use the causes option to illustrate what code path
# is being used.
input2=u'''2xLk:1: C: 11: : : G:__?__:__?__:__?__:81.132:-127.583:-70.677
2xLk:1: C: 12: : : U:169.008:153.891:51.391:80.277:-135.347:-70.614
3cgp:1: B: 19: : : U:__?__:__?__:__?__:82.839:-147.528:-179.087
3cgp:1: B: 20: : : A:139.983:-154.445:63.134:88.055:-145.599:70.874
4pco:1: B: 3: : : U:__?__:__?__:__?__:77.659:-165.227:-68.525
4pco:1: B: 4: : : G:151.914:-179.903:176.058:83.039:-148.171:-66.728
5b2q:1: B: 62: : : G:__?__:__?__:__?__:83.537:-131.816:-116.417
5b2q:1: B: 63: : : U:-69.320:-146.615:47.107:147.038:-148.815:45.665
6gc5:1: F: 2: : : U:__?__:__?__:__?__:144.610:-116.227:152.694
6gc5:1: F: 3: : : U:-66.167:162.580:41.697:145.644:-122.673:127.881
3bns:1: A: 21: : : C:__?__:__?__:__?__:76.224:-166.174:-73.594
3bns:1: A: 22: : : G:150.784:-158.788:175.706:87.605:-146.172:-63.516
3gm7:1: H: 5: : : U:__?__:__?__:__?__:68.910:-153.989:-56.381
3gm7:1: H: 6: : : G:-105.747:164.057:92.120:74.597:-150.523:-79.724
6qit:1: A: 2: : : C:__?__:__?__:__?__:82.169:-138.695:-63.417
6qit:1: A: 3: : : A:-71.504:-131.618:54.061:144.409:-95.827:-140.754
3rer:1: K: 7: : : U:__?__:__?__:__?__:87.510:-99.276:-118.108
3rer:1: K: 8: : : A:-66.924:-158.118:48.287:81.250:__?__:__?__
3diL:1: A: 59: : : C:__?__:__?__:__?__:80.668:-145.667:-36.026
3diL:1: A: 60: : : G:-143.441:115.188:149.951:86.379:-141.567:-69.901
5ho4:1: B: 3: : : G:__?__:__?__:__?__:160.213:-123.685:-174.677
5ho4:1: B: 4: : : G:-107.676:163.883:39.081:85.911:-157.392:-71.638
4mcf:1: E: 4: : : U:__?__:__?__:__?__:78.239:-156.881:-70.399
4mcf:1: E: 5: : : G:-91.794:163.594:87.552:70.675:-141.886:-72.556
3pdr:1: A: 59: : : C:__?__:__?__:__?__:80.441:-149.674:-76.690
3pdr:1: A: 60: : : A:-62.415:171.383:47.537:79.461:-145.680:-71.359
3gm7:1: G: 1: : : C:__?__:__?__:__?__:84.065:-128.784:-61.905
3gm7:1: G: 2: : : U:-76.914:-166.398:55.279:74.218:-157.766:-64.720
6h0r:1: B: 15: : : U:__?__:__?__:__?__:83.971:-122.349:-103.636
6h0r:1: B: 16: : : U:-30.804:145.657:33.314:81.109:-141.719:-75.527
2zko:1: C: 13: : : G:__?__:__?__:__?__:76.629:-150.027:-67.298
2zko:1: C: 14: : : C:-70.016:164.567:71.735:76.499:-160.106:-73.474
3pdr:1: X: 138: : : U:__?__:__?__:__?__:77.324:-177.192:-105.412
3pdr:1: X: 139: : : A:-46.950:179.570:49.599:71.442:-143.233:-61.461
4jah:1: B: 10: : : U:__?__:__?__:__?__:85.890:-164.804:-95.055
4jah:1: B: 11: : : G:-64.134:178.767:49.773:77.067:-152.496:-70.128
3diL:1: A: 13: : : C:__?__:__?__:__?__:135.303:-125.074:-69.725
3diL:1: A: 14: : : G:75.452:147.741:32.719:83.048:-146.012:-75.223
3pdr:1: X: 132: : : U:__?__:__?__:__?__:77.469:-157.795:-115.458
3pdr:1: X: 133: : : U:47.309:136.943:-25.259:83.460:-150.210:-61.763
'''<line_sep>output1='''
:1a: : : : : Z 33 p 1a 1.000
:1m: : : : : Z 33 p 1m 1.000
:1L: : : : : Z 33 p 1L 1.000
:&a: : : : : Z 33 p &a 1.000
:7a: : : : : Z 33 p 7a 1.000
:3a: : : : : Z 33 p 3a 1.000
:9a: : : : : Z 33 p 9a 1.000
:1g: : : : : Z 33 p 1g 1.000
:7d: : : : : Z 33 p 7d 1.000
:3d: : : : : Z 33 p 3d 1.000
:5d: : : : : Z 33 p 5d 1.000
:3g: : : : : Z 33 p 3g 1.000 wannabe
:1e: : : : : Z 33 t 1e 1.000
:1c: : : : : Z 33 t 1c 1.000
:1f: : : : : Z 33 t 1f 1.000
:5j: : : : : Z 33 t 5j 1.000
:5n: : : : : Z 33 t 5n 1.000 wannabe
:!!: : : : : Z trig !! 0.000 epsilon-1
:1b: : : : : Z 32 p 1b 1.000
:1[: : : : : Z 32 p 1[ 1.000
:3b: : : : : Z 32 p 3b 1.000
:1z: : : : : Z 32 p 1z 1.000
:5z: : : : : Z 32 p 5z 1.000
:7p: : : : : Z 32 p 7p 1.000
:5p: : : : : Z 32 p 5p 1.000 wannabe
:1t: : : : : Z 32 t 1t 1.000
:5q: : : : : Z 32 t 5q 1.000
:1o: : : : : Z 32 m 1o 1.000
:7r: : : : : Z 32 m 7r 1.000
:5r: : : : : Z 32 m 5r 1.000 wannabe
:2a: : : : : Z 23 p 2a 1.000
:4a: : : : : Z 23 p 4a 1.000
:0a: : : : : Z 23 p 0a 1.000
:#a: : : : : Z 23 p #a 1.000
:4g: : : : : Z 23 p 4g 1.000
:6g: : : : : Z 23 p 6g 1.000
:8d: : : : : Z 23 p 8d 1.000
:4d: : : : : Z 23 p 4d 1.000
:6d: : : : : Z 23 p 6d 1.000
:2g: : : : : Z 23 p 2g 1.000 wannabe
:2h: : : : : Z 23 t 2h 1.000
:4n: : : : : Z 23 t 4n 1.000
:0i: : : : : Z 23 t 0i 1.000
:6n: : : : : Z 23 t 6n 1.000
:6j: : : : : Z 23 t 6j 1.000
:0k: : : : : Z 23 m 0k 1.000 wannabe
:2[: : : : : Z 22 p 2[ 1.000
:4b: : : : : Z 22 p 4b 1.000
:0b: : : : : Z 22 p 0b 1.000
:4p: : : : : Z 22 p 4p 1.000
:6p: : : : : Z 22 p 6p 1.000
:2z: : : : : Z 22 p 2z 1.000 wannabe
:4s: : : : : Z 22 t 4s 1.000
:2u: : : : : Z 22 t 2u 1.000 wannabe
:2o: : : : : Z 22 m 2o 1.000
:epsilon: : : : : Z trig !! 0.000 epsilon-1
:alpha: : : : : Z 33 p 1a 0.999
:alpha: : : : : Z trig !! 0.000 alpha
:beta: : : : : Z 33 p 1a 0.999
:beta: : : : : Z trig !! 0.000 beta
:zeta: : : : : Z 33 p 1a 0.999
:zeta: : : : : Z trig !! 0.000 zeta-1
:delta-1: : : : : Z trig !! 0.000 delta
:delta-1: : : : : Z trig !! 0.000 delta-1
:gamma: : : : : Z 33 p 1a 0.999
:gamma: : : : : Z trig !! 0.000 gamma
:delta: : : : : Z 33 p 1a 0.999
:delta: : : : : Z trig !! 0.000 delta
'''<line_sep>output2='''2xLk:1: C: 12: : : U 33 p 1g 0.839 1-only-one
3cgp:1: B: 20: : : A 33 p 3g 0.040 1-only-one wannabe
4pco:1: B: 4: : : G 33 t 1c 0.890 2-BETWEEN-dom-sat( 0.22| 0.913)
5b2q:1: B: 63: : : U 32 p 1[ 0.072 2-BETWEEN-dom-sat( 0.941| 0.829)
6gc5:1: F: 3: : : U 22 p 4b 0.889 2-None-dom
3bns:1: A: 22: : : G 33 t 1c 0.901 2-OUTSIDE-dom
3gm7:1: H: 6: : : G 33 p !! 0.000 7D dist 1a
6qit:1: A: 3: : : A 32 p 1[ 0.899 2-OUTSIDE-sat
3rer:1: K: 8: : : A 33 p 7a 0.047 2-None-dom
3diL:1: A: 60: : : G 33 t !! 0.000 7D dist 1e
5ho4:1: B: 4: : : G 23 p !! 0.000 7D dist 4a
4mcf:1: E: 5: : : G 33 p !! 0.000 7D dist 1a
3pdr:1: A: 60: : : A 33 p 1a 0.916 4-BETWEEN-dom-sat( 0.1| 1.2)
3gm7:1: G: 2: : : U 33 p 1a 0.589 4-BETWEEN-dom-sat( 0.428| 0.904)
6h0r:1: B: 16: : : U 33 p 1L 0.033 4-BETWEEN-dom-sat( 0.862| 0.655)
2zko:1: C: 14: : : C 33 p 1a 0.444 4-OUTSIDE-dom
3pdr:1: X: 139: : : A 33 p &a 0.555 4-OUTSIDE-sat
4jah:1: B: 11: : : G 33 p &a 0.912 5-BETWEEN-dom-sat( 0.442| 0.226)
3diL:1: A: 14: : : G 23 p !! 0.000 outlier distance 1.01
3pdr:1: X: 133: : : U 33 m !! 0.000 vacant bin
'''<def_stmt>test input canonicalOutput options identity<block_start>opt=suites.parseOptions(options)<line_sep>stream=StringIO(input)<line_sep>outFile=StringIO()<line_sep>suitename.clearStats()<line_sep>suitename.main(stream outFile=outFile optionsIn=opt)<line_sep>suitename.clearStats()<line_sep>output=outFile.getvalue()<assert_stmt>output.strip()<eq>canonicalOutput.strip() identity<block_end><def_stmt>testAll <block_start>test(input1 output1 "chart=true noinc=true" "cluster and triage test")<line_sep>test(input2 output2 "chart=true noinc=true causes=true" "code paths test")<line_sep>test(regression.in_1ehz regression.out_1ehz "" "1ehz regression test")<block_end># Not normally used, but useful for diagnosing failures
<def_stmt>testVerbose input canonicalOutput options identity<block_start>opt=suites.parseOptions(options)<line_sep>stream=StringIO(input)<line_sep>outFile=StringIO("")<line_sep>suitename.clearStats()<line_sep>suitename.main(stream outFile=outFile optionsIn=opt)<line_sep>output=outFile.getvalue()<if_stmt>output.strip()<eq>canonicalOutput.strip()<block_start>result=<true><line_sep>sys.stderr.write("Success\n")<block_end><else_stmt><block_start>result=<false><line_sep>sys.stderr.write("Failed\n")<line_sep>sys.stderr.write("========================================\n")<line_sep>sys.stderr.write(canonicalOutput.strip())<line_sep>sys.stderr.write("\n\n=========================================\n")<line_sep>sys.stderr.write(output.strip())<line_sep>out2=open("UnitTest-output.txt" "w")<line_sep>out2.write(output.strip())<line_sep>out2.close()<line_sep><return>result<block_end><block_end>testAll()<line_sep>
|
<import_from_stmt>mdecr2dec R2decService<import_from_stmt>mdecbase mdec_main<if_stmt>__name__<eq>'__main__'<block_start>mdec_main(R2decService)<block_end>
|
<import_from_stmt>asciimatics.effects Effect<import_from_stmt>asciimatics.exceptions StopApplication NextScene<class_stmt>MockEffect(Effect)<block_start>"""
Dummy Effect used for some UTs.
"""<def_stmt>__init__ self count=10 stop=<true> swallow=<false> next_scene=<none> frame_rate=1 stop_frame=5 **kwargs<block_start>"""
:param count: When to stop the effect
:param stop: Whether to stop the application or skip to the next scene.
:param swallow: Whether to swallow any events or not.
:param next_scene: The next scene to move to (if stop=False)
:param frame_rate: The frame rate for updates.
"""<line_sep>super(MockEffect self).__init__(<none> **kwargs)<line_sep>self.stop_called=<false><line_sep>self.reset_called=<false><line_sep>self.event_called=<false><line_sep>self.save_called=<false><line_sep>self.update_called=<false><line_sep>self._count=count<line_sep>self._stop=stop<line_sep>self._swallow=swallow<line_sep>self._next_scene=next_scene<line_sep>self._frame_rate=frame_rate<line_sep># Ugly hack to stop clash with underlying Effect definition. Sorry.
self._my_stop_frame=stop_frame<block_end>@property<def_stmt>stop_frame self<block_start>self.stop_called=<true><line_sep><return>self._my_stop_frame<block_end>@property<def_stmt>frame_update_count self<block_start><return>self._frame_rate<block_end><def_stmt>_update self frame_no<block_start>self.update_called=<true><line_sep>self._count<augsub>1<if_stmt>self._count<le>0<block_start><if_stmt>self._stop<block_start><raise>StopApplication("End of test")<block_end><else_stmt><block_start><raise>NextScene(self._next_scene)<block_end><block_end><block_end><def_stmt>reset self<block_start>self.reset_called=<true><block_end><def_stmt>process_event self event<block_start>self.event_called=<true><line_sep><return><none><if>self._swallow<else>event<block_end><def_stmt>save self<block_start>self.save_called=<true><block_end><block_end>
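# Hedged usage sketch (not part of the original module): MockEffect is normally driven by an
# asciimatics Screen inside a unit test. The pattern below assumes the standard asciimatics
# Screen.wrapper / Scene / Screen.play API and needs a real terminal to run.
if __name__ == '__main__':
    from asciimatics.scene import Scene
    from asciimatics.screen import Screen

    def _demo(screen):
        effect = MockEffect(count=3, stop=True)
        # play() returns once the effect raises StopApplication after three updates.
        screen.play([Scene([effect], duration=20)])
        assert effect.update_called

    Screen.wrapper(_demo)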
|
"""Constants for EQ3 Bluetooth Smart Radiator Valves."""<line_sep>PRESET_PERMANENT_HOLD="permanent_hold"<line_sep>PRESET_NO_HOLD="no_hold"<line_sep>PRESET_OPEN="open"<line_sep>PRESET_CLOSED="closed"<line_sep>
|
"""add generic id support
Revision ID: 3bc50ecd0bb
Revises: <PASSWORD>
Create Date: 2015-10-28 19:25:26.378971
"""<line_sep># chunk size to process tv/movies
PROCESS_CHUNK_SIZE=5000<line_sep># revision identifiers, used by Alembic.
revision='<KEY>'<line_sep>down_revision='30688404cda'<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.schema Sequence CreateSequence MetaData<import_stmt>config<line_sep>meta=MetaData()<def_stmt>upgrade ### commands auto generated by Alembic - please adjust! ###
<block_start>dbid=op.create_table('dbids' sa.Column('id' sa.BigInteger() nullable=<false>) sa.Column('db_id' sa.String(length=50) nullable=<true>) sa.Column('db' sa.Enum('TVRAGE' 'TVMAZE' 'OMDB' 'IMDB' name='enum_dbid_name') nullable=<true>) sa.Column('tvshow_id' sa.Integer() nullable=<true>) sa.Column('movie_id' sa.Integer() nullable=<true>) sa.PrimaryKeyConstraint('id') mysql_charset='utf8' mysql_engine='InnoDB' mysql_row_format='DYNAMIC')<line_sep>op.create_index('idx_db_id_db' 'dbids' ['db_id' 'db'] unique=<false>)<line_sep>op.create_index(op.f('ix_dbids_movie_id') 'dbids' ['movie_id'] unique=<false>)<line_sep>op.create_index(op.f('ix_dbids_tvshow_id') 'dbids' ['tvshow_id'] unique=<false>)<line_sep>bind=op.get_bind()<line_sep>i=0<line_sep>releases=sa.Table('releases' meta autoload=<true> autoload_with=bind)<line_sep>movies=sa.Table('movies' meta autoload=<true> autoload_with=bind)<line_sep>tvshows=sa.Table('tvshows' meta autoload=<true> autoload_with=bind)<line_sep>episodes=sa.Table('episodes' meta autoload=<true> autoload_with=bind)<line_sep>op.drop_constraint('releases_movie_id_fkey' 'releases')<line_sep>op.drop_constraint('releases_tvshow_id_fkey' 'releases')<line_sep>op.drop_constraint('episodes_tvshow_id_fkey' 'episodes')<line_sep>print('Starting ID conversion.')<for_stmt>show bind.execute(tvshows.select().order_by(tvshows.c.id))# Small chance that the new id might conflict with an existing
# id. If so just increment and try again.
<block_start>new_id_ok=<false><while_stmt><not>new_id_ok<block_start><if_stmt>bind.execute(tvshows.select(tvshows.c.id<eq>i)).first()<block_start>print('Found dupe id, incrementing new id')<line_sep>i<augadd>1<block_end><else_stmt><block_start>new_id_ok=<true><block_end><block_end><try_stmt><block_start>print('TVRAGE: {} ({}) -> {}'.format(show[tvshows.c.name] show[tvshows.c.id] i))<block_end><except_stmt># it's just for show, it doesn't matter
<block_start><pass><block_end>bind.execute(dbid.insert().values(id=i db='TVRAGE' db_id=show[tvshows.c.id] tvshow_id=i))<line_sep>bind.execute(releases.update().where(releases.c.tvshow_id<eq>show[tvshows.c.id]).values(tvshow_id=i))<line_sep>bind.execute(episodes.update().where(episodes.c.tvshow_id<eq>show[tvshows.c.id]).values(tvshow_id=i))<line_sep>bind.execute(tvshows.update().where(tvshows.c.id<eq>show[tvshows.c.id]).values(id=i))<line_sep>i<augadd>1<block_end><for_stmt>movie bind.execute(movies.select().order_by(movies.c.id))# Small chance that the new id might conflict with an existing
# id. If so just increment and try again.
<block_start>new_id_ok=<false><while_stmt><not>new_id_ok# movies.id is a character string
<block_start><if_stmt>bind.execute(movies.select(movies.c.id<eq>str(i))).first()<block_start>print('Found dupe id, incrementing new id')<line_sep>i<augadd>1<block_end><else_stmt><block_start>new_id_ok=<true><block_end><block_end><try_stmt><block_start>print('IMDB: {} ({}) -> {}'.format(movie[movies.c.name] movie[movies.c.id] i))<block_end><except_stmt><block_start><pass><block_end>bind.execute(dbid.insert().values(id=i db='IMDB' db_id='tt{}'.format(movie[movies.c.id]) movie_id=i))<line_sep>bind.execute(releases.update().where(releases.c.movie_id<eq>movie[movies.c.id]).values(movie_id=i))<line_sep>bind.execute(movies.update().where(movies.c.id<eq>movie[movies.c.id]).values(id=i))<line_sep>i<augadd>1<block_end>bind.execute(CreateSequence(Sequence('movies_id_seq' start=i)))<if_stmt>config.db.get('engine')<eq>'postgresql'<block_start>bind.execute('ALTER TABLE movies ALTER COLUMN id TYPE INTEGER USING id::integer')<line_sep>bind.execute('ALTER TABLE releases ALTER COLUMN movie_id TYPE INTEGER USING movie_id::integer')<block_end><else_stmt><block_start>op.alter_column('movies' 'id' existing_type=sa.VARCHAR(length=20) type_=sa.Integer() existing_nullable=<false> server_default=sa.text('nextval(\'movies_id_seq\'::regclass)'))<line_sep>op.alter_column('releases' 'movie_id' existing_type=sa.VARCHAR(length=20) type_=sa.Integer() existing_nullable=<false>)<block_end>op.create_foreign_key('releases_movie_id_fkey' 'releases' 'movies' ['movie_id'] ['id'])<line_sep>op.create_foreign_key('releases_tvshow_id_fkey' 'releases' 'tvshows' ['tvshow_id'] ['id'])<line_sep>op.create_foreign_key('episodes_tvshow_id_fkey' 'episodes' 'tvshows' ['tvshow_id'] ['id'])<line_sep>op.create_foreign_key('dbids_tvshow_id_fkey' 'dbids' 'tvshows' ['tvshow_id'] ['id'])<line_sep>op.create_foreign_key('dbids_movie_id_fkey' 'dbids' 'movies' ['movie_id'] ['id'])<line_sep>bind.execute("select setval('dbids_id_seq', (select max(id) from dbids));")<line_sep>bind.execute("select setval('tvshows_id_seq', (select max(id) from tvshows));")<line_sep>bind.execute("select setval('movies_id_seq', (select max(id) from movies));")<line_sep>### end Alembic commands ###
<block_end><def_stmt>downgrade ### commands auto generated by Alembic - please adjust! ###
<block_start>op.alter_column('movies' 'id' existing_type=sa.Integer() type_=sa.VARCHAR(length=20) existing_nullable=<false>)<line_sep>op.drop_index(op.f('ix_dbids_tvshow_id') table_name='dbids')<line_sep>op.drop_index(op.f('ix_dbids_movie_id') table_name='dbids')<line_sep>op.drop_index('idx_db_id_db' table_name='dbids')<line_sep>op.drop_table('dbids')<line_sep>### end Alembic commands ###
<block_end>
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2015 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
<import_stmt>api<class_stmt>AccessControlProfileError(api.APIError)<block_start>"""Base exception for all errors related to the AccessControlProfile
class"""<line_sep><pass><block_end><class_stmt>InvalidAccessControlProfileId(AccessControlProfileError)<block_start>"""Raised when an invalid access control profile id is used"""<def_stmt>__init__ self value<block_start>"""Constructor"""<line_sep>super(InvalidAccessControlProfileId self).__init__("Invalid access control profile id: %d"%value)<line_sep>self.value=value<block_end><block_end><class_stmt>AccessControlProfile(api.APIObject)<block_start>"""Representation of a an access control profile"""<line_sep>RULE_VALUES=frozenset(["allow" "deny"])<line_sep>@property<def_stmt>id self<block_start>"""The profile's unique id"""<line_sep><return>self._impl.id<block_end>@property<def_stmt>title self<block_start>"""The profile's title, or None"""<line_sep><return>self._impl.title<block_end>@property<def_stmt>access_token self<block_start>"""The access token that owns this profile, or None"""<line_sep><return>self._impl.getAccessToken(self.critic)<block_end><class_stmt>Category(object)<block_start>"""Representation of an access control category
Each category is controlled by a rule ("allow" or "deny") and a list
of exceptions (possibly empty). The effective policy is the rule,
unless an exception applies, in which case it's the opposite of the
rule."""<def_stmt>__init__ self rule exceptions<block_start>self.rule=rule<line_sep>self.exceptions=exceptions<block_end><block_end><class_stmt>HTTPException(object)<block_start>"""Representation of an exception for the "http" category
The exception consists of the HTTP request method and a regular
expression that must match the entire request path."""<line_sep>REQUEST_METHODS=frozenset(["GET" "HEAD" "OPTIONS" "POST" "PUT" "DELETE"])<def_stmt>__init__ self exception_id request_method path_pattern<block_start>self.id=exception_id<line_sep>self.request_method=request_method<line_sep>self.path_pattern=path_pattern<block_end><block_end>@property<def_stmt>http self<block_start>"""Access control category "http"
This category controls web frontend requests.
Exceptions are of the type HTTPException."""<line_sep><return>self._impl.getHTTP(self.critic)<block_end><class_stmt>RepositoryException(object)<block_start>"""Representation of an exception for the "repositories" category
The exception consists of the access type ("read" or "modify") and the
repository."""<line_sep>ACCESS_TYPES=frozenset(["read" "modify"])<def_stmt>__init__ self exception_id access_type repository<block_start>self.id=exception_id<line_sep>self.access_type=access_type<line_sep>self.repository=repository<block_end><block_end>@property<def_stmt>repositories self<block_start>"""Access control category "repositories"
This category controls access to Git repositories, both via the web
frontend and the Git hook. Note that read-only Git access over SSH
is not controlled by access control.
Exceptions are of the type RepositoryException."""<line_sep><return>self._impl.getRepositories(self.critic)<block_end><class_stmt>ExtensionException(object)<block_start>"""Representation of an exception for the "extensions" category
The exception consists of the access type ("install" or "execute")
and the extension."""<line_sep>ACCESS_TYPES=frozenset(["install" "execute"])<def_stmt>__init__ self exception_id access_type extension<block_start>self.id=exception_id<line_sep>self.access_type=access_type<line_sep>self.extension=extension<block_end><block_end>@property<def_stmt>extensions self<block_start>"""Access control category "extensions"
This category controls access to any functionality provided by an
extension.
Exceptions are of the type ExtensionException."""<line_sep><return>self._impl.getExtensions(self.critic)<block_end><block_end><def_stmt>fetch critic profile_id<block_start>"""Fetch an AccessControlProfile object with the given profile id"""<import_stmt>api.impl<assert_stmt>isinstance(critic api.critic.Critic)<line_sep><return>api.impl.accesscontrolprofile.fetch(critic int(profile_id))<block_end><def_stmt>fetchAll critic title=<none><block_start>"""Fetch AccessControlProfile objects for all primary profiles in the system
A profile is primary if it does not represent the additional restrictions imposed for
accesses authenticated with an access token.
If |title| is not None, fetch only profiles with a matching title."""<import_stmt>api.impl<assert_stmt>isinstance(critic api.critic.Critic)<if_stmt>title<is><not><none><block_start>title=str(title)<block_end><return>api.impl.accesscontrolprofile.fetchAll(critic title)<block_end>
|
"""This problem was asked by Google.
You are writing an AI for a 2D map game. You are somewhere in a 2D grid,
and there are coins strewn about over the map.
Given the position of all the coins and your current position,
find the closest coin to you in terms of Manhattan distance.
That is, you can move around up, down, left, and right, but not diagonally.
If there are multiple possible closest coins, return any of them.
For example, given the following map, where you are x, coins are o,
and empty spaces are . (top left is 0, 0):
---------------------
| . | . | x | . | o |
---------------------
| o | . | . | . | . |
---------------------
| o | . | . | . | o |
---------------------
| . | . | o | . | . |
---------------------
return (0, 4), since that coin is closest.
This map would be represented in our question as:
Our position: (0, 2)
Coins: [(0, 4), (1, 0), (2, 0), (3, 2)]
"""<line_sep>
|
<import_from_stmt>utils device_create<import_from_stmt>utils namecreater<import_from_stmt>datetime datetime<import_stmt>random<line_sep>db=device_create.get_creator("172.16.58.3" "bitkyTest")<line_sep>device=db.Device<line_sep>employee=db.Employee<line_sep>device.drop()<line_sep>employee.drop()<line_sep># Generate and insert the device collection
result=device.insert_many([{'GroupId':group_id 'DeviceId':device_id 'ChargeStatus':0 'WorkStatus':0 'ChargeStatusTime':datetime.utcnow() 'WorkStatusTime':datetime.utcnow() 'RemainChargeTime':500 'CardNumber':hex(random.randint(1 0xFFFFFFFF))[2:]}<for>group_id range(1 101)<for>device_id range(1 101)])<line_sep># Fetch the device collection back from the database
device_list=[device.find_one({'_id':device_id})<for>device_id result.inserted_ids]<line_sep># Insert the complete employee documents and update each device with its employee reference
<for_stmt>device_item device_list<block_start>employee_item=namecreater.random_employee_from_device(device_item)<line_sep>employee_item_result=employee.insert_one(employee_item)<line_sep>device.update_one(device_item {'$set':{'EmployeeObjectId':str(employee_item_result.inserted_id)}})<block_end>
|
<import_stmt>numpy<as>np<def_stmt>r_neighbors kdtree r<block_start>""" Get indices of all nearest neighbors with a distance < r for each point
Parameters
----------
kdtree: pyntcloud.structures.KDTree
The KDTree built on top of the points in point cloud
r: float
Maximum distance to consider a neighbor
Returns
-------
r_neighbors: (N, X) ndarray of lists
Where N = kdtree.data.shape[0]
len(X) varies for each point
"""<line_sep><return>np.array(kdtree.query_ball_tree(kdtree r))<block_end>
|
<import_from_stmt>pyemitter Emitter<class_stmt>ExceptionSource(object)<block_start>APSW='apsw'<line_sep>Peewee='peewee'<block_end><class_stmt>Manager(Emitter)<block_start><def_stmt>add self source exc_info name=<none><block_start><if_stmt><not>exc_info<or>len(exc_info)<ne>3<block_start><raise>ValueError('Invalid value provided for the "exc_info" parameter')<block_end># Retrieve error message
message=self._get_message(exc_info[1] name)<line_sep># Emit event
self.emit('exception' source message exc_info)<block_end><def_stmt>_get_message self exception name=<none><block_start><if_stmt>name<block_start>name_cap=name.capitalize()<block_end><else_stmt><block_start>name='<unknown>'<line_sep>name_cap='<unknown>'<block_end># Retrieve exception message
ex_message=self._clean_exception_message(exception exception.message)<line_sep># Map exception to a more helpful message
key='%s.%s'%(type(exception).__module__ type(exception).__name__)<if_stmt>key<eq>'exceptions.ImportError'<block_start><return>'Unable to import the %s library (%s)'%(name ex_message)<block_end><if_stmt>key<eq>'apsw.CantOpenError'<block_start><return>'Unable to open %s (%s)'%(name ex_message)<block_end><if_stmt>key<eq>'apsw.CorruptError'<block_start><return>'%s is corrupt (%s)'%(name_cap ex_message)<block_end><if_stmt>key<eq>'apsw.FullError'<block_start><return>'Drive containing the %s is full (%s)'%(name ex_message)<block_end><if_stmt>key<eq>'apsw.IOError'<block_start><return>'%s raised an input/output error (%s)'%(name_cap ex_message)<block_end><if_stmt>key<eq>'apsw.NotADBError'<block_start><return>'%s doesn\'t have a valid SQLite header (%s)'%(name_cap ex_message)<block_end><if_stmt>key<eq>'apsw.PermissionsError'<block_start><return>'Access denied to the %s (%s)'%(name ex_message)<block_end><if_stmt>key<eq>'apsw.ReadOnlyError'<block_start><return>'Unable to write to the %s (%s)'%(name ex_message)<block_end># Unknown exception
<return>'<%s> (%s)'%(key ex_message)<block_end>@classmethod<def_stmt>_clean_exception_message cls ex message<block_start><if_stmt><not>message<block_start><return>message<block_end># ImportError
<if_stmt>isinstance(ex ImportError)<and>':'<in>message<and>(message.startswith('/')<or>message.startswith('./'))# Strip path from message (if it looks like a path)
<block_start><return>message[message.index(':')+1:].strip().capitalize()<block_end># Strip exception type prefix from message
<return>message.lstrip(type(ex).__name__+':').strip()<block_end><block_end># Construct default manager
ExceptionWrapper=Manager()<line_sep>
|
"""
Firewall check for Idiot.
"""<import_stmt>biplist<import_stmt>idiot<import_from_stmt>idiot CheckPlugin<class_stmt>FirewallCheck(CheckPlugin)<block_start>name="Firewall"<def_stmt>run self<block_start><try_stmt><block_start>d=biplist.readPlist('/Library/Preferences/com.apple.alf.plist')<line_sep>enabled=(d['globalstate']<ge>1)<block_end><except_stmt><block_start><return>(<false> "failed to read firewall config plist")<block_end><return>(enabled "{}".format("enabled"<if>enabled<else>"disabled"))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print(FirewallCheck().run())<block_end>
|
"""Add Created and Updated date to Notebook
Revision ID: 5d3c326dd901
Revises: <PASSWORD>
Create Date: 2016-08-29 10:39:23.609605
"""<line_sep># revision identifiers, used by Alembic.
revision='5d3c326dd901'<line_sep>down_revision='<PASSWORD>'<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<def_stmt>upgrade ### commands auto generated by Alembic - please adjust! ###
<block_start>op.add_column('notebooks' sa.Column('created_date' sa.DateTime() nullable=<true>))<line_sep>op.add_column('notebooks' sa.Column('updated_date' sa.DateTime() nullable=<true>))<line_sep>### end Alembic commands ###
<block_end><def_stmt>downgrade ### commands auto generated by Alembic - please adjust! ###
<block_start>op.drop_column('notebooks' 'updated_date')<line_sep>op.drop_column('notebooks' 'created_date')<line_sep>### end Alembic commands ###
<block_end>
|
<import_from_stmt>.embed *<import_from_stmt>.embed_base *<import_from_stmt>.embed_core *<line_sep>__all__=(*embed.__all__ *embed_base.__all__ *embed_core.__all__ )<line_sep>
|
# -*-coding:utf-8-*-
<import_from_stmt>scrapy signals<import_from_stmt>scrapy.downloadermiddlewares.useragent UserAgentMiddleware<import_stmt>random<class_stmt>RandomUserAgentMiddleware(UserAgentMiddleware)<block_start><def_stmt>__init__ self user_agent='Scrapy' user_agent_list=<none><block_start>super(RandomUserAgentMiddleware self).__init__(user_agent=user_agent)<line_sep>self.user_agent_list=user_agent_list<block_end>@classmethod<def_stmt>from_crawler cls crawler<block_start>user_agent_list=crawler.settings.get('USER_AGENT_LIST' <none>)<if_stmt><not>user_agent_list<block_start>user_agent_file=crawler.settings.get('USER_AGENT_FILE' <none>)<if_stmt>user_agent_file<block_start><with_stmt>open(user_agent_file)<as>fr<block_start>user_agent_list=fr.readlines()<block_end><block_end><block_end>o=cls(crawler.settings['USER_AGENT'] user_agent_list)<line_sep>crawler.signals.connect(o.spider_opened signal=signals.spider_opened)<line_sep><return>o<block_end><def_stmt>process_request self request spider<block_start><if_stmt>isinstance(self.user_agent_list tuple)<and>len(self.user_agent_list)<g>0<block_start>user_agent=random.choice(self.user_agent_list)<block_end><else_stmt><block_start>user_agent=self.user_agent<block_end>request.headers.setdefault(b'User-Agent' user_agent)<block_end><block_end>
|
<import_from_future_stmt> absolute_import<import_from_stmt>django.db models<import_from_stmt>roll_engine.fsm BatchFSMixin<import_from_stmt>roll_engine.mixins BatchMixin<import_from_stmt>roll_engine.exceptions DeploymentError<import_from_stmt>.base FSMedModel InheritanceMetaclass<class_stmt>DeploymentBatch(BatchMixin BatchFSMixin FSMedModel)<block_start>__metaclass__=InheritanceMetaclass<line_sep>index=models.IntegerField(null=<true>)<line_sep>pause_time=models.IntegerField(default=0)<line_sep>FORT_INDEX=1<class_stmt>Meta<block_start>abstract=<true><block_end>@classmethod<def_stmt>validate_meta cls<block_start><pass><block_end><def_stmt>get_object self<block_start><return>self<block_end><def_stmt>is_fort_batch self<block_start><raise>DeploymentError('return boolean to indicate whether a fort batch')<block_end><def_stmt>save self *args **kwargs<block_start><if_stmt>self.pk<is><none><block_start><if_stmt>self.deployment<is><not><none><block_start>self.pause_time=self.deployment.config.pause_time<block_end><block_end>super(DeploymentBatch self).save(*args **kwargs)<block_end><def_stmt>is_reach_up_server_threshold self<block_start><return><false><block_end><block_end>
|
<import_stmt>torch.nn<as>nn<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>torchfusion.initializers *<import_from_stmt>torch.nn.modules.conv _ConvNd _ConvTransposeMixin _single _pair _triple<import_from_stmt>torch.nn.modules.batchnorm _BatchNorm<class_stmt>MultiSequential(nn.Sequential)<block_start><def_stmt>__init__ self *args<block_start>super(MultiSequential self).__init__(*args)<block_end><def_stmt>forward self *input<block_start><for_stmt>module self._modules.values()<block_start>input=module(*input)<block_end><return>input<block_end><block_end><class_stmt>Conv1d(nn.Conv1d)<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 padding=0 dilation=1 groups=1 bias=<true> weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(Conv1d self).__init__(in_channels out_channels kernel_size stride padding dilation groups bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>Conv2d(nn.Conv2d)<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 padding=0 dilation=1 groups=1 bias=<true> weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(Conv2d self).__init__(in_channels out_channels kernel_size stride padding dilation groups bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>Conv3d(nn.Conv3d)<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 padding=0 dilation=1 groups=1 bias=<true> weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(Conv3d self).__init__(in_channels out_channels kernel_size stride padding dilation groups bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>DepthwiseConv1d(nn.Conv1d)<block_start><def_stmt>__init__ self in_channels kernel_size stride=1 padding=0 dilation=1 groups=1 bias=<true> multiplier=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(DepthwiseConv1d self).__init__(in_channels in_channels<times>multiplier kernel_size stride padding dilation in_channels bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>DepthwiseConv2d(nn.Conv2d)<block_start><def_stmt>__init__ self in_channels kernel_size stride=1 padding=0 dilation=1 groups=1 bias=<true> multiplier=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(DepthwiseConv2d self).__init__(in_channels in_channels<times>multiplier kernel_size stride pa2ding dilation in_channels bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>DepthwiseConv3d(nn.Conv3d)<block_start><def_stmt>__init__ self in_channels kernel_size stride=1 padding=0 dilation=1 groups=1 bias=<true> multiplier=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(DepthwiseConv3d self).__init__(in_channels in_channels<times>multiplier kernel_size stride 
pa2ding dilation in_channels bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>ConvTranspose1d(nn.ConvTranspose1d)<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 padding=0 output_padding=0 groups=1 bias=<true> dilation=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(ConvTranspose1d self).__init__(in_channels out_channels kernel_size stride padding output_padding groups bias dilation)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>ConvTranspose2d(nn.ConvTranspose2d)<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 padding=0 output_padding=0 groups=1 bias=<true> dilation=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(ConvTranspose2d self).__init__(in_channels out_channels kernel_size stride padding output_padding groups bias dilation)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>ConvTranspose3d(nn.ConvTranspose3d)<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 padding=0 output_padding=0 groups=1 bias=<true> dilation=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(ConvTranspose3d self).__init__(in_channels out_channels kernel_size stride padding output_padding groups bias dilation)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>DepthwiseConvTranspose1d(nn.ConvTranspose1d)<block_start><def_stmt>__init__ self in_channels kernel_size stride=1 padding=0 output_padding=0 groups=1 bias=<true> dilation=1 multiplier=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(DepthwiseConvTranspose1d self).__init__(in_channels in_channels<times>multiplier kernel_size stride padding output_padding in_channels bias dilation)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>DepthwiseConvTranspose2d(nn.ConvTranspose2d)<block_start><def_stmt>__init__ self in_channels kernel_size stride=1 padding=0 output_padding=0 groups=1 bias=<true> dilation=1 multiplier=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(DepthwiseConvTranspose2d self).__init__(in_channels in_channels<times>multiplier kernel_size stride padding output_padding in_channels bias dilation)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>DepthwiseConvTranspose3d(nn.ConvTranspose3d)<block_start><def_stmt>__init__ self in_channels kernel_size stride=1 padding=0 output_padding=0 groups=1 bias=<true> dilation=1 multiplier=1 weight_init=Kaiming_Normal() bias_init=Zeros()<block_start>super(DepthwiseConvTranspose3d self).__init__(in_channels in_channels<times>multiplier kernel_size stride padding 
output_padding in_channels bias dilation)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>Linear(nn.Linear)<block_start><def_stmt>__init__ self in_features out_features bias=<true> weight_init=Xavier_Normal() bias_init=Zeros()<block_start>"""
:param in_features:
:param out_features:
:param bias:
:param weight_init:
:param bias_init:
"""<line_sep>super(Linear self).__init__(in_features out_features bias)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias<and>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>Flatten(nn.Module)<block_start><def_stmt>__init__ self batch_first=<true><block_start>"""
:param batch_first:
"""<line_sep>super(Flatten self).__init__()<line_sep>self.batch_first=batch_first<block_end><def_stmt>forward self inputs<block_start><if_stmt>self.batch_first<block_start>size=torch.prod(torch.LongTensor(list(inputs.size())[1:])).item()<line_sep><return>inputs.view(-1 size)<block_end><else_stmt><block_start>size=torch.prod(torch.LongTensor(list(inputs.size())[:len(inputs.size())-1])).item()<line_sep><return>inputs.view(size -1)<block_end><block_end><block_end><class_stmt>Reshape(nn.Module)<block_start><def_stmt>__init__ self output_shape batch_first=<true><block_start>"""
:param output_shape:
:param batch_first:
"""<line_sep>super(Reshape self).__init__()<line_sep>self.output_shape=output_shape<line_sep>self.batch_first=batch_first<block_end><def_stmt>forward self inputs<block_start><if_stmt>isinstance(self.output_shape int)<block_start>size=[self.output_shape]<block_end><else_stmt><block_start>size=list(self.output_shape)<block_end><if_stmt>self.batch_first<block_start>input_total_size=torch.prod(torch.LongTensor(list(inputs.size())[1:])).item()<block_end><else_stmt><block_start>input_total_size=torch.prod(torch.LongTensor(list(inputs.size())[:len(inputs.size())-1])).item()<block_end>target_total_size=torch.prod(torch.LongTensor(size)).item()<if_stmt>input_total_size<ne>target_total_size<block_start><raise>ValueError(" Reshape must preserve total dimension, input size: {} and output size: {}".format(input.size()[1:] self.output_shape))<block_end>size=list(size)<if_stmt>self.batch_first<block_start>size=tuple([-1]+size)<block_end><else_stmt><block_start>size=tuple(size+[-1])<block_end>outputs=inputs.view(size)<line_sep><return>outputs<block_end><block_end><class_stmt>_GlobalPoolNd(nn.Module)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(_GlobalPoolNd self).__init__()<line_sep>self.flatten=flatten<block_end><def_stmt>pool self input<block_start>"""
:param input:
:return:
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>forward self input<block_start>"""
:param input:
:return:
"""<line_sep>input=self.pool(input)<line_sep>size_0=input.size(1)<line_sep><return>input.view(-1 size_0)<if>self.flatten<else>input<block_end><block_end><class_stmt>GlobalAvgPool1d(_GlobalPoolNd)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(GlobalAvgPool1d self).__init__(flatten)<block_end><def_stmt>pool self input<block_start><return>F.adaptive_avg_pool1d(input 1)<block_end><block_end><class_stmt>GlobalAvgPool2d(_GlobalPoolNd)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(GlobalAvgPool2d self).__init__(flatten)<block_end><def_stmt>pool self input<block_start><return>F.adaptive_avg_pool2d(input 1)<block_end><block_end><class_stmt>GlobalAvgPool3d(_GlobalPoolNd)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(GlobalAvgPool3d self).__init__(flatten)<block_end><def_stmt>pool self input<block_start><return>F.adaptive_avg_pool3d(input 1)<block_end><block_end><class_stmt>GlobalMaxPool1d(_GlobalPoolNd)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(GlobalMaxPool1d self).__init__(flatten)<block_end><def_stmt>pool self input<block_start><return>F.adaptive_max_pool1d(input 1)<block_end><block_end><class_stmt>GlobalMaxPool2d(_GlobalPoolNd)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(GlobalMaxPool2d self).__init__(flatten)<block_end><def_stmt>pool self input<block_start><return>F.adaptive_max_pool2d(input 1)<block_end><block_end><class_stmt>GlobalMaxPool3d(_GlobalPoolNd)<block_start><def_stmt>__init__ self flatten=<true><block_start>"""
:param flatten:
"""<line_sep>super(GlobalMaxPool3d self).__init__(flatten)<block_end><def_stmt>pool self input<block_start><return>F.adaptive_max_pool3d(input 1)<block_end><block_end><class_stmt>RNNBase(nn.RNNBase)<block_start><def_stmt>__init__ self mode input_size hidden_size num_layers=1 bias=<true> batch_first=<false> dropout=0 bidirectional=<false> weight_init=<none><block_start>"""
:param mode:
:param input_size:
:param hidden_size:
:param num_layers:
:param bias:
:param batch_first:
:param dropout:
:param bidirectional:
:param weight_init:
"""<line_sep>super(RNNBase self).__init__(mode input_size hidden_size num_layers bias batch_first dropout bidirectional)<if_stmt>weight_init<is><not><none><block_start><for_stmt>weight super(RNNBase self).parameters()<block_start>weight_init(weight)<block_end><block_end><block_end><block_end><class_stmt>RNN(RNNBase)<block_start><def_stmt>__init__ self *args **kwargs<block_start>"""
:param args:
:param kwargs:
"""<if_stmt>'nonlinearity'<in>kwargs<block_start><if_stmt>kwargs['nonlinearity']<eq>'tanh'<block_start>mode='RNN_TANH'<block_end><elif_stmt>kwargs['nonlinearity']<eq>'relu'<block_start>mode='RNN_RELU'<block_end><else_stmt><block_start><raise>ValueError("Unknown nonlinearity '{}'".format(kwargs['nonlinearity']))<block_end><del_stmt>kwargs['nonlinearity']<block_end><else_stmt><block_start>mode='RNN_TANH'<block_end>super(RNN self).__init__(mode *args **kwargs)<block_end><block_end><class_stmt>GRU(RNNBase)<block_start><def_stmt>__init__ self *args **kwargs<block_start>"""
:param args:
:param kwargs:
"""<line_sep>super(GRU self).__init__('GRU' *args **kwargs)<block_end><block_end><class_stmt>LSTM(RNNBase)<block_start><def_stmt>__init__ self *args **kwargs<block_start>"""
:param args:
:param kwargs:
"""<line_sep>super(LSTM self).__init__('LSTM' *args **kwargs)<block_end><block_end><class_stmt>Swish(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Swish self).__init__()<block_end><def_stmt>forward self inputs<block_start><return>inputs<times>torch.sigmoid(inputs)<block_end><block_end><class_stmt>GroupNorm(nn.GroupNorm)<block_start><def_stmt>__init__ self *args weight_init=<none> bias_init=<none><block_start>"""
:param args:
:param weight_init:
:param bias_init:
"""<line_sep>super(GroupNorm self).__init__(*args)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>LayerNorm(nn.LayerNorm)<block_start><def_stmt>__init__ self *args weight_init=<none> bias_init=<none><block_start>"""
:param args:
:param weight_init:
:param bias_init:
"""<line_sep>super(LayerNorm self).__init__(*args)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>Embedding(nn.Embedding)<block_start><def_stmt>__init__ self num_embeddings embedding_dim padding_idx=<none> max_norm=<none> norm_type=2 scale_grad_by_freq=<false> sparse=<false> _weight=<none> weight_init=<none><block_start>"""
:param num_embeddings:
:param embedding_dim:
:param padding_idx:
:param max_norm:
:param norm_type:
:param scale_grad_by_freq:
:param sparse:
:param _weight:
:param weight_init:
"""<line_sep>super(Embedding self).__init__(num_embeddings embedding_dim padding_idx max_norm norm_type scale_grad_by_freq sparse _weight)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><block_end><block_end><class_stmt>BatchNorm(_BatchNorm)<block_start><def_stmt>__init__ self num_features eps=1e-5 momentum=0.1 affine=<true> track_running_stats=<true> weight_init=<none> bias_init=<none><block_start>"""
:param num_features:
:param eps:
:param momentum:
:param affine:
:param track_running_stats:
:param weight_init:
:param bias_init:
"""<line_sep>super(BatchNorm self).__init__(num_features eps momentum affine track_running_stats)<if_stmt>weight_init<is><not><none><block_start>weight_init(self.weight.data)<block_end><if_stmt>bias_init<is><not><none><block_start>bias_init(self.bias.data)<block_end><block_end><block_end><class_stmt>BatchNorm1d(BatchNorm)<block_start><def_stmt>_check_input_dim self input<block_start><if_stmt>input.dim()<ne>2<and>input.dim()<ne>3<block_start><raise>ValueError('expected 2D or 3D input (got {}D input)'.format(input.dim()))<block_end><block_end><block_end><class_stmt>BatchNorm2d(BatchNorm)<block_start><def_stmt>_check_input_dim self input<block_start><if_stmt>input.dim()<ne>4<block_start><raise>ValueError('expected 4D input (got {}D input)'.format(input.dim()))<block_end><block_end><block_end><class_stmt>BatchNorm3d(BatchNorm)<block_start><def_stmt>_check_input_dim self input<block_start><if_stmt>input.dim()<ne>5<block_start><raise>ValueError('expected 5D input (got {}D input)')<block_end><block_end><block_end>
|
#coding:utf8
<class_stmt>Config<block_start>caption_data_path='caption.pth'# preprocessed human caption data
img_path='/home/cy/caption_data/'<line_sep># img_path='/mnt/ht/aichallenger/raw/ai_challenger_caption_train_20170902/caption_train_images_20170902/'
img_feature_path='results.pth'# features of all images, a 200k x 2048 matrix
scale_size=300<line_sep>img_size=224<line_sep>batch_size=8<line_sep>shuffle=<true><line_sep>num_workers=4<line_sep>rnn_hidden=256<line_sep>embedding_dim=256<line_sep>num_layers=2<line_sep>share_embedding_weights=<false><line_sep>prefix='checkpoints/caption'# model checkpoint filename prefix
env='caption'<line_sep>plot_every=10<line_sep>debug_file='/tmp/debugc'<line_sep>model_ckpt=<none># path of a model checkpoint to resume from
lr=1e-3<line_sep>use_gpu=<true><line_sep>epoch=1<line_sep>test_img='img/example.jpeg'<block_end>
|
<import_from_stmt>typing Any Dict<import_stmt>pytest<import_from_stmt>nuplan.common.actor_state.state_representation Point2D<import_from_stmt>nuplan.common.maps.abstract_map SemanticMapLayer<import_from_stmt>nuplan.common.maps.abstract_map_objects Intersection<import_from_stmt>nuplan.common.maps.nuplan_map.map_factory NuPlanMapFactory<import_from_stmt>nuplan.common.maps.test_utils add_map_objects_to_scene<import_from_stmt>nuplan.common.utils.testing.nuplan_test NUPLAN_TEST_PLUGIN nuplan_test<import_from_stmt>nuplan.database.tests.nuplan_db_test_utils get_test_maps_db<line_sep>maps_db=get_test_maps_db()<line_sep>map_factory=NuPlanMapFactory(maps_db)<line_sep>@nuplan_test(path='json/intersections/on_intersection.json')<def_stmt>test_get_intersections scene:Dict[str Any]<arrow><none><block_start>"""
Test getting intersections at a point.
"""<line_sep>nuplan_map=map_factory.build_map_from_name(scene["map"]["area"])<for_stmt>marker,expected_id zip(scene["markers"] scene["xtr"]["expected_nearest_id"])<block_start>pose=marker["pose"]<line_sep>intersection:Intersection=nuplan_map.get_one_map_object(Point2D(pose[0] pose[1]) SemanticMapLayer.INTERSECTION)<assert_stmt>intersection<is><not><none><assert_stmt>expected_id<eq>intersection.id<assert_stmt>intersection.contains_point(Point2D(pose[0] pose[1]))<line_sep>add_map_objects_to_scene(scene [intersection])<block_end><block_end>@nuplan_test(path='json/intersections/nearby.json')<def_stmt>test_get_nearby_intersection scene:Dict[str Any]<arrow><none><block_start>"""
Test getting nearby intersections.
"""<line_sep>nuplan_map=map_factory.build_map_from_name(scene["map"]["area"])<for_stmt>marker,expected_distance,expected_id zip(scene["markers"] scene["xtr"]["expected_nearest_distance"] scene["xtr"]["expected_nearest_id"])<block_start>pose=marker["pose"]<line_sep>intersection_id,distance=nuplan_map.get_distance_to_nearest_map_object(Point2D(pose[0] pose[1]) SemanticMapLayer.INTERSECTION)<assert_stmt>intersection_id<is><not><none><assert_stmt>expected_distance<eq>distance<assert_stmt>expected_id<eq>intersection_id<line_sep>intersection:Intersection=nuplan_map.get_map_object(intersection_id SemanticMapLayer.INTERSECTION)<line_sep>add_map_objects_to_scene(scene [intersection])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><raise>SystemExit(pytest.main([__file__] plugins=[NUPLAN_TEST_PLUGIN]))<block_end>
|
"""Exceptions raised by the Firehose service."""<import_from_stmt>moto.core.exceptions JsonRESTError<class_stmt>ConcurrentModificationException(JsonRESTError)<block_start>"""Existing config has a version ID that does not match given ID."""<line_sep>code=400<def_stmt>__init__ self message<block_start>super().__init__("ConcurrentModificationException" message)<block_end><block_end><class_stmt>InvalidArgumentException(JsonRESTError)<block_start>"""The specified input parameter has a value that is not valid."""<line_sep>code=400<def_stmt>__init__ self message<block_start>super().__init__("InvalidArgumentException" message)<block_end><block_end><class_stmt>LimitExceededException(JsonRESTError)<block_start>"""You have already reached the limit for a requested resource."""<line_sep>code=400<def_stmt>__init__ self message<block_start>super().__init__("LimitExceededException" message)<block_end><block_end><class_stmt>ResourceInUseException(JsonRESTError)<block_start>"""The resource is already in use and not available for this operation."""<line_sep>code=400<def_stmt>__init__ self message<block_start>super().__init__("ResourceInUseException" message)<block_end><block_end><class_stmt>ResourceNotFoundException(JsonRESTError)<block_start>"""The specified resource could not be found."""<line_sep>code=400<def_stmt>__init__ self message<block_start>super().__init__("ResourceNotFoundException" message)<block_end><block_end><class_stmt>ValidationException(JsonRESTError)<block_start>"""The tag key or tag value is not valid."""<line_sep>code=400<def_stmt>__init__ self message<block_start>super().__init__("ValidationException" message)<block_end><block_end>
|
"""Utility functions for file io and file path reading."""<line_sep># Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Lint as: python3
<import_stmt>os<import_stmt>shutil<import_stmt>pickle<import_stmt>json<def_stmt>get_folder_name path num=1<block_start>"""
Get the name of the folder n levels above the given path.
Example: a/b/c/d.txt, num=1 -> c, num=2 -> b, ...
Args:
path: a file path.
num: the number of upper directories.
Returns: the folder name for that level.
"""<for_stmt>_ range(num)<block_start>path=os.path.dirname(path)<block_end><return>os.path.basename(path)<block_end><def_stmt>copy_file_to_folder file_path dst_dir<block_start>save_path=os.path.join(dst_dir os.path.basename(file_path))<line_sep>shutil.copy(file_path save_path)<block_end><def_stmt>pickle_dump obj path<block_start><with_stmt>open(path 'wb')<as>f<block_start>pickle.dump(obj f)<line_sep>f.close()<block_end><block_end><def_stmt>pickle_load path<block_start><with_stmt>open(path 'rb')<as>f<block_start>data=pickle.load(f)<line_sep>f.close()<block_end><return>data<block_end><def_stmt>json_dump data_json json_save_path<block_start><with_stmt>open(json_save_path 'w')<as>f<block_start>json.dump(data_json f)<line_sep>f.close()<block_end><block_end><def_stmt>json_load json_path<block_start><with_stmt>open(json_path 'r')<as>f<block_start>data=json.load(f)<line_sep>f.close()<block_end><return>data<block_end><def_stmt>write_str_lines save_path lines<block_start>lines=[l+'\n'<for>l lines]<with_stmt>open(save_path 'w' encoding='utf-8')<as>f<block_start>f.writelines(lines)<block_end><block_end>
|
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>skgstat<as>skg<import_stmt>scipy<line_sep># produce a random dataset
np.random.seed(42)<line_sep>rcoords=np.random.gamma(40 10 size=(500 2))<line_sep>np.random.seed(42)<line_sep>rvals=np.random.normal(10 4 500)<def_stmt>test_invalid_dist_func # instantiate metric space
<block_start>ms=skg.MetricSpace(rcoords dist_metric='euclidean')<with_stmt>pytest.raises(AttributeError)<as>e<block_start>skg.Variogram(ms rvals dist_func='cityblock')<assert_stmt>'Distance metric'<in>e.value<block_end><block_end><def_stmt>test_sparse_matrix_no_warning # make a really sparse matrix
<block_start>sparse=skg.MetricSpace(rcoords max_dist=5)<line_sep># call triangular_distance_matrix without warning
V=skg.Variogram(sparse rvals)<line_sep>V.triangular_distance_matrix<block_end><def_stmt>test_dense_matrix_warning <block_start>dense=skg.MetricSpace(rcoords)<line_sep># check the warning
<with_stmt>pytest.raises(RuntimeWarning)<as>w<block_start>V=skg.Variogram(dense rvals)<line_sep>V.triangular_distance_matrix<assert_stmt>'Only available'<in>w.value<block_end><block_end><def_stmt>test_unknown_metric <block_start><with_stmt>pytest.raises(ValueError)<as>e<block_start>skg.MetricSpace(rcoords dist_metric='foobar')<assert_stmt>'Unknown Distance Metric:'<in>e.value<block_end><block_end><def_stmt>test_tree_non_euklidean <block_start><with_stmt>pytest.raises(ValueError)<as>e<block_start>ms=skg.MetricSpace(rcoords 'cityblock')<line_sep>ms.tree<assert_stmt>'can only be constructed'<in>e.value<block_end><block_end><def_stmt>test_metric_pair_metrix <block_start>c1=np.random.gamma(100 4 (300 2))<line_sep>c2=np.random.gamma(50 5 (100 2))<line_sep>ms1=skg.MetricSpace(c1 dist_metric='cityblock')<line_sep>ms2=skg.MetricSpace(c2 dist_metric='euclidean')<with_stmt>pytest.raises(ValueError)<as>e<block_start>skg.MetricSpacePair(ms1 ms2)<assert_stmt>'same distance metric'<in>e.value<block_end><block_end><def_stmt>test_metric_pair_max_dist <block_start>c1=np.random.gamma(100 4 (300 2))<line_sep>c2=np.random.gamma(50 5 (100 2))<line_sep>ms1=skg.MetricSpace(c1 max_dist=50)<line_sep>ms2=skg.MetricSpace(c2 max_dist=400)<with_stmt>pytest.raises(ValueError)<as>e<block_start>skg.MetricSpacePair(ms1 ms2)<assert_stmt>'same max_dist'<in>e.value<block_end><block_end><def_stmt>test_raster_metric # Generate a gridded dataset
<block_start>shape=(100 100)<line_sep>np.random.seed(42)<line_sep>vals=np.random.normal(0 1 size=shape)<line_sep># Coordinates
x=np.arange(0 shape[0])<line_sep>y=np.arange(0 shape[1])<line_sep>xx,yy=np.meshgrid(x y)<line_sep># Flatten everything because we don't care about the 2D at this point
coords=np.dstack((xx.flatten() yy.flatten())).squeeze()<line_sep>vals=vals.flatten()<line_sep># Run the computation
rems=skg.RasterEquidistantMetricSpace(coords shape=shape extent=(x[0] x[-1] y[0] y[-1]) samples=10 runs=10 rnd=42 verbose=<true>)<line_sep># Minimal check of the output
<assert_stmt>rems.max_dist<eq>pytest.approx(140 rel=0.01)<assert_stmt>rems.res<eq>pytest.approx(1 rel=0.0001)<assert_stmt>isinstance(rems.dists scipy.sparse.csr.csr_matrix)<assert_stmt>rems.dists.shape<eq>(10000 10000)<line_sep># Check the random state provides the same final center
<assert_stmt>all(rems._centers[-1]<eq>np.array([62 52]))<line_sep># Check the interface with a Variogram object works
V=skg.Variogram(rems vals)<assert_stmt>V.bin_count<is><not><none><line_sep># Check the variogram is always the same with the random state given
<assert_stmt>V.experimental[0]<eq>pytest.approx(0.89 0.01)<line_sep># Check that the routines are robust to very few data points in the grid (e.g., from nodata values)
coords_sub=coords[0::1000]<line_sep>vals_sub=vals[0::1000]<line_sep>rems_sub=skg.RasterEquidistantMetricSpace(coords_sub shape=shape extent=(x[0] x[-1] y[0] y[-1]) samples=100 runs=10 rnd=42)<line_sep>V=skg.Variogram(rems_sub vals_sub)<line_sep># Check with a single isolated point possibly being used as center
coords_sub=np.concatenate(([coords[0]] coords[-10:]))<line_sep>vals_sub=np.concatenate(([vals[0]] vals[-10:]))<line_sep>rems_sub=skg.RasterEquidistantMetricSpace(coords_sub shape=shape extent=(x[0] x[-1] y[0] y[-1]) samples=100 runs=11 rnd=42)<line_sep>V=skg.Variogram(rems_sub vals_sub)<block_end>
|
<import_from_stmt>..base RNGDataFlow<import_from_stmt>...utils logger fs<import_stmt>os<import_stmt>numpy<as>np<def_stmt>load_data_from_npzs fnames<block_start><if_stmt><not>isinstance(fnames list)<block_start>fnames=[fnames]<block_end>Xs=[]<line_sep>Ys=[]<for_stmt>fname fnames<block_start>d=np.load(fname)<line_sep>logger.info('Loading from {}'.format(fname))<line_sep>X,Y=(d['X'] d['Y'])<line_sep>Xs.append(X)<line_sep>Ys.append(Y)<block_end><return>np.stack(Xs) np.stack(Ys)<block_end><class_stmt>Camvid(RNGDataFlow)<block_start>name='camvid'<line_sep>non_void_nclasses=11<line_sep>_void_labels=[11]<line_sep># optional arguments
data_shape=(360 480 3)<line_sep>mean=[0.39068785 0.40521392 0.41434407]<line_sep>std=[0.29652068 0.30514979 0.30080369]<line_sep>_cmap={0:(128 128 128) # sky
1:(128 0 0) # building
2:(192 192 128) # column_pole
3:(128 64 128) # road
4:(0 0 192) # sidewalk
5:(128 128 0) # Tree
6:(192 128 128) # SignSymbol
7:(64 64 128) # Fence
8:(64 0 128) # Car
9:(64 64 0) # Pedestrian
10:(0 128 192) # Bicyclist
11:(0 0 0)}<line_sep># Void
_mask_labels={0:'sky' 1:'building' 2:'column_pole' 3:'road' 4:'sidewalk' 5:'tree' 6:'sign' 7:'fence' 8:'car' 9:'pedestrian' 10:'byciclist' 11:'void'}<line_sep># frequency and weight of each class (including void)
class_freq=np.array([0.16845114 0.23258652 0.00982927 0.31658215 0.0448627 0.09724055 0.01172954 0.01126809 0.05865686 0.00639231 0.00291665 0.03948423])<line_sep>class_weight=sorted(class_freq)[len(class_freq)<floordiv>2]/class_freq<line_sep>#class_weight = np.array([ 0.49470329, 0.35828961, 8.47807568, 0.26322815,
# 1.8575192 , 0.85698135, 7.10457224, 7.39551774,
# 1.42069214, 13.03649617, 28.57158304, 2.11054735])
<def_stmt>__init__ self which_set shuffle=<true> pixel_z_normalize=<true> data_dir=<none> is_label_one_hot=<false> slide_all=<false> slide_window_size=224 void_overlap=<false><block_start>"""
which_set : one of train, val, test, trainval
shuffle:
data_dir: <data_dir> should contain train.npz, val.npz, test.npz
"""<line_sep>self.shuffle=shuffle<line_sep>self.pixel_z_normalize=pixel_z_normalize<line_sep>self.is_label_one_hot=is_label_one_hot<line_sep>self.void_overlap=void_overlap<if_stmt>data_dir<is><none><block_start>data_dir=fs.get_dataset_path('camvid')<block_end><assert_stmt>os.path.exists(data_dir)<for_stmt>set_name ['train' 'val' 'test']<block_start><assert_stmt>os.path.exists(os.path.join(data_dir '{}.npz'.format(set_name)))<block_end><assert_stmt>which_set<in>['train' 'val' 'test' 'trainval'] which_set<if_stmt>which_set<eq>'train'<block_start>load_fns=['train']<block_end><elif_stmt>which_set<eq>'val'<block_start>load_fns=['val']<block_end><elif_stmt>which_set<eq>'test'<block_start>load_fns=['test']<block_end><else_stmt>#if which_set == 'trainval':
<block_start>load_fns=['train' 'val']<block_end># These npz are assumed to have NHWC format for image, and NHW for label
load_fns=map(<lambda>fn:os.path.join(data_dir '{}.npz'.format(fn)) load_fns)<line_sep>self.X,self.Y=load_data_from_npzs(load_fns)<assert_stmt>self.X.dtype<eq>'uint8'<line_sep>self.slide_window_size=slide_window_size<line_sep>self.slide_all=slide_all<line_sep>self.slide_all_size=<none><block_end><def_stmt>get_data self<block_start>idxs=np.arange(len(self.X))<if_stmt>self.shuffle<block_start>self.rng.shuffle(idxs)<block_end><for_stmt>k idxs<block_start>X=np.asarray(self.X[k] dtype=np.float32)/255.0<line_sep>Y=self.Y[k]<line_sep>H,W=(X.shape[0] X.shape[1])<line_sep>void=Camvid._void_labels[0]<if_stmt>self.is_label_one_hot<block_start>K=Camvid.non_void_nclasses<line_sep>Y_tmp=np.zeros((H W K) dtype=np.float32)<line_sep>mask=(Y.reshape([-1])<l>K)<line_sep>Y_tmp.reshape([-1 K])[np.arange(H<times>W)[mask] Y.reshape([-1])[mask]]=1.0<line_sep>Y=Y_tmp<line_sep>void=np.zeros(K)<block_end><if_stmt>self.pixel_z_normalize<block_start>X=(X-Camvid.mean)/Camvid.std<block_end><if_stmt><not>self.slide_all# do not slide all windows
<block_start><yield>[X Y]<block_end><else_stmt># slide all windows
<block_start>side=self.slide_window_size<line_sep>n_h=H<floordiv>side+int(H%side<ne>0)<line_sep>n_w=W<floordiv>side+int(W%side<ne>0)<for_stmt>hi range(n_h)<block_start>h_overlap=0<line_sep>row=hi<times>side<line_sep>row_end=row+side<if_stmt>row_end<g>H<block_start><if_stmt>self.void_overlap<block_start>h_overlap=row-(H-side)<block_end>row=H-side<line_sep>row_end=H<block_end><for_stmt>wi range(n_w)<block_start>w_overlap=0<line_sep>col=wi<times>side<line_sep>col_end=col+side<if_stmt>col_end<g>W<block_start><if_stmt>self.void_overlap<block_start>w_overlap=col-(W-side)<block_end>col=W-side<line_sep>col_end=W<block_end>Xrc=X[row:row_end col:col_end]<line_sep>Yrc=Y[row:row_end col:col_end].copy()<if_stmt>h_overlap<g>0<block_start>Yrc[:h_overlap :]=void<block_end><if_stmt>w_overlap<g>0<block_start>Yrc[: :w_overlap]=void<block_end><yield>[Xrc Yrc]<block_end><block_end><block_end><block_end><block_end><def_stmt>size self<block_start><if_stmt><not>self.slide_all<block_start><return>len(self.X)<block_end><if_stmt>self.slide_all_size<is><none><block_start>H,W=self.X.shape[1] self.X.shape[2]<line_sep>side=self.slide_window_size<line_sep>n_h=H<floordiv>side+int(H%side<ne>0)<line_sep>n_w=W<floordiv>side+int(W%side<ne>0)<line_sep>self.slide_all_size=n_h<times>n_w<times>len(self.X)<block_end><return>self.slide_all_size<block_end><def_stmt>stitch_sliding_images self l_imgs<block_start>"""
The elements of l_imgs should be per-pixel probability distributions over the labels.
"""<line_sep>side=self.slide_window_size<line_sep>H,W=(Camvid.data_shape[0] Camvid.data_shape[1])<line_sep>n_h=H<floordiv>side+int(H%side<ne>0)<line_sep>n_w=W<floordiv>side+int(W%side<ne>0)<assert_stmt>n_h<times>n_w<eq>len(l_imgs) len(l_imgs)<line_sep>n_ch=len(l_imgs[0].reshape([-1]))/side<power>2<assert_stmt>n_ch<g>1 n_ch<line_sep>image=np.zeros((H W n_ch))<line_sep>i=-1<for_stmt>hi range(n_h)<block_start>row=hi<times>side<line_sep>row_end=row+side<if_stmt>row_end<g>H<block_start>row_end=H<line_sep>row=H-side<block_end><for_stmt>wi range(n_w)<block_start>col=wi<times>side<line_sep>col_end=col+side<if_stmt>col_end<g>W<block_start>col_end=W<line_sep>col=W-side<block_end>i<augadd>1<line_sep>r_=row_end-row<line_sep>c_=col_end-col<line_sep>window=l_imgs[i].reshape([side side n_ch])<line_sep>image[row:row_end col:col_end]<augadd>window<block_end><block_end><return>image<block_end><block_end>
|
<import_from_stmt>sqlalchemy func<import_from_stmt>sqlalchemy.dialects.postgresql JSONB<import_from_stmt>actor_libs.database.orm BaseModel db ModelMixin<line_sep>__all__=['PublishLog' 'TimerPublish']<class_stmt>PublishLog(ModelMixin db.Model)<block_start>"""
controlType: 1:Publish, 2:Read, 3:Write, 4:Execute
publishStatus: 0:Failed, 1:Published, 2:Arrived
"""<line_sep>__tablename__='publish_logs'<line_sep>__table_args__=(db.Index('publish_logs_msgTime_idx' "msgTime") )<line_sep>topic=db.Column(db.String(1000))# mqtt topic
streamID=db.Column(db.String(1000))# stream id
payload=db.Column(JSONB)# publish payload
publishStatus=db.Column(db.SmallInteger)<line_sep>taskID=db.Column(db.String(64))<line_sep>msgTime=db.Column(db.DateTime server_default=func.now() primary_key=<true>)<line_sep>deviceID=db.Column(db.String primary_key=<true>)# device uid
tenantID=db.Column(db.String primary_key=<true>)<block_end># tenant uid
<class_stmt>TimerPublish(BaseModel)<block_start>__tablename__='timer_publish'<line_sep>taskName=db.Column(db.String)# task name
taskStatus=db.Column(db.SmallInteger server_default='2')# task status: 2 running, 3 succeeded
timerType=db.Column(db.SmallInteger)# timer type: 1 fixed time, 2 interval
topic=db.Column(db.String(1000))# mqtt topic
payload=db.Column(JSONB)# published message payload
intervalTime=db.Column(JSONB)# interval time {'weekday': 'hour': 'minute'}
crontabTime=db.Column(db.DateTime)# scheduled publish time
deviceIntID=db.Column(db.Integer db.ForeignKey('devices.id' onupdate="CASCADE" ondelete="CASCADE"))<line_sep># device id
userIntID=db.Column(db.Integer db.ForeignKey('users.id' onupdate="CASCADE" ondelete="CASCADE"))<block_end># user
|
<import_stmt>os<import_stmt>versioneer<import_from_stmt>setuptools setup<try_stmt><block_start>descr=open(os.path.join(os.path.dirname(__file__) 'README.md')).read()<block_end><except_stmt>OSError<block_start>descr=''<block_end># In some cases, the numpy include path is not present by default.
# Let's try to obtain it.
<try_stmt><block_start><import_stmt>numpy<block_end><except_stmt>ImportError<block_start>ext_include_dirs=[]<block_end><else_stmt><block_start>ext_include_dirs=[numpy.get_include() ]<block_end>setup_parameters=dict(name="trackpy" version=versioneer.get_version() cmdclass=versioneer.get_cmdclass() description="particle-tracking toolkit" author="Trackpy Contributors" author_email="<EMAIL>" url="https://github.com/soft-matter/trackpy" install_requires=['numpy>=1.14' 'scipy>=1.1' 'pandas>=0.22' 'pyyaml' 'matplotlib'] python_requires=">=3.6" classifiers=["Programming Language :: Python :: 3" "Programming Language :: Python :: 3.6" "Programming Language :: Python :: 3.7" "Programming Language :: Python :: 3.8" "Programming Language :: Python :: 3.9" ] packages=['trackpy' 'trackpy.refine' 'trackpy.linking' 'trackpy.locate_functions'] long_description=descr long_description_content_type='text/markdown')<line_sep>setup(**setup_parameters)<line_sep>
|
<import_stmt>operator<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>...core ProxyTypeError<import_from_stmt>...containers Tuple List<import_from_stmt>...identifier parameter<import_from_stmt>..bool_ Bool<import_from_stmt>..string Str<import_from_stmt>..number Float Int Number _binop_result<import_from_stmt>...core.tests.utils operator_test<class_stmt>TestPromote(object)<block_start><def_stmt>test_number_unpromotable self<block_start><with_stmt>pytest.raises(ProxyTypeError)<block_start>Number._promote(2.2)<block_end><with_stmt>pytest.raises(ProxyTypeError)<block_start>Number._promote(0)<block_end><block_end><def_stmt>test_primitives self<block_start><assert_stmt>isinstance(Int._promote(0) Int)<assert_stmt>isinstance(Float._promote(2) Float)<assert_stmt>isinstance(Float._promote(2.2) Float)<block_end><def_stmt>test_proxytypes self<block_start><assert_stmt>isinstance(Int._promote(Int(0)) Int)<assert_stmt>isinstance(Float._promote(Float(2.2)) Float)<block_end><def_stmt>test_wrong_primitives self<block_start><with_stmt>pytest.raises(ProxyTypeError)<block_start>Int._promote(2.2)<block_end><block_end><def_stmt>test_wrong_proxytypes self<block_start><with_stmt>pytest.raises(ProxyTypeError match=r"You need to convert it explicitly, like `Int\(x\)`")<block_start>Int._promote(Float(2.2))<block_end><with_stmt>pytest.raises(ProxyTypeError match=r"You need to convert it explicitly, like `Float\(x\)`" )<block_start>Float._promote(Int(0))<block_end><block_end><block_end><class_stmt>TestConstruct(object)<block_start><def_stmt>test_explicit_cast_passthrough self<block_start>i=Int(Int(1))<assert_stmt>i.graft[i.graft["returns"]]<eq>1<assert_stmt>i.params<eq>()<line_sep>x=parameter("x" Int)<line_sep>i=Int(x)<assert_stmt>i.params<eq>(x )<block_end><def_stmt>test_explicit_cast_to_int self<block_start>i=Int(Float(1.0))<assert_stmt>isinstance(i Int)<assert_stmt>i.graft[i.graft["returns"]][0]<eq>"wf.Int.cast"<assert_stmt>i.params<eq>()<line_sep>x=parameter("x" Float)<line_sep>i=Int(x)<assert_stmt>i.params<eq>(x )<line_sep>i=Int(Bool(<true>))<assert_stmt>isinstance(i Int)<assert_stmt>i.graft[i.graft["returns"]][0]<eq>"wf.Int.cast"<assert_stmt>i.params<eq>()<line_sep>x=parameter("x" Bool)<line_sep>i=Int(x)<assert_stmt>i.params<eq>(x )<line_sep>i=Int(Str("1"))<assert_stmt>isinstance(i Int)<assert_stmt>i.graft[i.graft["returns"]][0]<eq>"wf.Int.cast"<assert_stmt>i.params<eq>()<line_sep>x=parameter("x" Str)<line_sep>i=Int(x)<assert_stmt>i.params<eq>(x )<block_end><def_stmt>test_explicit_cast_to_float self<block_start>f=Float(Int(1))<assert_stmt>isinstance(f Float)<assert_stmt>f.graft[f.graft["returns"]][0]<eq>"wf.Float.cast"<assert_stmt>f.params<eq>()<line_sep>x=parameter("x" Int)<line_sep>f=Float(x)<assert_stmt>f.params<eq>(x )<line_sep>f=Float(Bool(<true>))<assert_stmt>isinstance(f Float)<assert_stmt>f.graft[f.graft["returns"]][0]<eq>"wf.Float.cast"<assert_stmt>f.params<eq>()<line_sep>x=parameter("x" Bool)<line_sep>f=Float(x)<assert_stmt>f.params<eq>(x )<line_sep>f=Float(Str("1"))<assert_stmt>isinstance(f Float)<assert_stmt>f.graft[f.graft["returns"]][0]<eq>"wf.Float.cast"<assert_stmt>f.params<eq>()<line_sep>x=parameter("x" Str)<line_sep>f=Float(x)<assert_stmt>f.params<eq>(x )<block_end><block_end><class_stmt>TestNumPyScalars(object)<block_start>@pytest.mark.parametrize("val" [np.uint8(1) np.uint16(1) np.uint32(1) np.uint64(1) np.int8(1) np.int16(1) np.int32(1) np.int64(1) ] )<def_stmt>test_int self val<block_start>i=Int(val)<assert_stmt>isinstance(i.graft[i.graft["returns"]] 
int)<assert_stmt>i.params<eq>()<block_end>@pytest.mark.parametrize("val" [np.float16(1) np.float32(1) np.float64(1)])<def_stmt>test_float self val<block_start>i=Float(val)<assert_stmt>isinstance(i.graft[i.graft["returns"]] float)<assert_stmt>i.params<eq>()<block_end><def_stmt>test_failure self<block_start><with_stmt>pytest.raises(TypeError)<block_start>Float(np.int32(1))<block_end><with_stmt>pytest.raises(TypeError)<block_start>Int(np.float64(1))<block_end><with_stmt>pytest.raises(TypeError)<block_start>Int(np.datetime64("2020-01-01"))<block_end><block_end><block_end>@pytest.mark.parametrize("a, b, expected" [(Int(0) Int(0) Int) (Float(0.0) Float(0.0) Float) (Int(0) Float(0.0) Float) (Float(0.0) Int(0) Float) ] )<def_stmt>test_binop_result a b expected<block_start><assert_stmt>_binop_result(a b)<eq>expected<block_end><class_stmt>TestAllOperators(object)<block_start>int_obj=Int(0)<line_sep>float_obj=Float(0.0)<line_sep>all_values_to_try=[Int(1) Float(2.2) Bool(<true>) List[Int]([1 2])]<line_sep># ^ we use pre-promoted Proxytypes, not py types, since the `operator_test`
# helper checks if `type(value) is in accepted_types`
@pytest.mark.parametrize("operator, accepted_types, return_type" [["__abs__" () Int] ["__add__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__div__" (Int Float Bool) (Int Float)] ["__divmod__" (Int Float Bool) {Float:Tuple[Float Float] Int:Tuple[Int Int] Bool:Tuple[Int Int] } ] ["__eq__" (Int Float Bool) Bool] ["__floordiv__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__ge__" (Int Float Bool) Bool] ["__gt__" (Int Float Bool) Bool] ["__invert__" () Int] ["__le__" (Int Float Bool) Bool] ["__lt__" (Int Float Bool) Bool] ["__mod__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__mul__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__ne__" (Int Float Bool) Bool] ["__neg__" () Int] ["__pos__" () Int] ["__pow__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__radd__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__rdiv__" (Int Float Bool) (Int Float)] ["__rdivmod__" (Int Float Bool) {Float:Tuple[Float Float] Int:Tuple[Int Int] Bool:Tuple[Int Int] } ] ["__rfloordiv__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__rmod__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__rmul__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__rpow__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__rsub__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__rtruediv__" (Int Float Bool) (Int Float)] ["__sub__" (Int Float Bool) {Float:Float Int:Int Bool:Int}] ["__truediv__" (Int Float Bool) (Int Float)] # Int-specific methods
["__and__" [Int Bool] Int] ["__lshift__" [Int Bool] Int] ["__or__" [Int Bool] Int] ["__rand__" [Int Bool] Int] ["__rlshift__" [Int Bool] Int] ["__ror__" [Int Bool] Int] ["__rrshift__" [Int Bool] Int] ["__rshift__" [Int Bool] Int] ["__rxor__" [Int Bool] Int] ["__xor__" [Int Bool] Int] ] )<def_stmt>test_all_operators_int self operator accepted_types return_type<block_start>operator_test(self.int_obj self.all_values_to_try operator accepted_types return_type)<block_end>@pytest.mark.parametrize("operator, accepted_types, return_type" [["__abs__" () Float] ["__add__" (Int Float Bool) Float] ["__div__" (Int Float Bool) Float] ["__divmod__" (Int Float Bool) Tuple[Float Float]] ["__eq__" (Int Float Bool) Bool] ["__floordiv__" (Int Float Bool) Float] ["__ge__" (Int Float Bool) Bool] ["__gt__" (Int Float Bool) Bool] ["__invert__" () Float] ["__le__" (Int Float Bool) Bool] ["__lt__" (Int Float Bool) Bool] ["__mod__" (Int Float Bool) Float] ["__mul__" (Int Float Bool) Float] ["__ne__" (Int Float Bool) Bool] ["__neg__" () Float] ["__pos__" () Float] ["__pow__" (Int Float Bool) Float] ["__radd__" (Int Float Bool) Float] ["__rdiv__" (Int Float Bool) Float] ["__rdivmod__" (Int Float Bool) Tuple[Float Float]] ["__rfloordiv__" (Int Float Bool) Float] ["__rmod__" (Int Float Bool) Float] ["__rmul__" (Int Float Bool) Float] ["__rpow__" (Int Float Bool) Float] ["__rsub__" (Int Float Bool) Float] ["__rtruediv__" (Int Float Bool) Float] ["__sub__" (Int Float Bool) Float] ["__truediv__" (Int Float Bool) Float] ] )<def_stmt>test_all_operators_float self operator accepted_types return_type<block_start>operator_test(self.float_obj self.all_values_to_try operator accepted_types return_type )<block_end>@pytest.mark.parametrize("obj" [Int(0) Float(2.2)])@pytest.mark.parametrize("op, exception" [(operator.truth TypeError) (operator.index TypeError) (hex TypeError)] )<def_stmt>test_unsupported_unary_methods self obj op exception<block_start><with_stmt>pytest.raises(exception)<block_start>op(obj)<block_end><block_end><block_end>
|
<import_stmt>pytest<import_from_stmt>.preprocessors CaptionPreprocessor<import_from_stmt>.word_vectors Glove Fasttext<class_stmt>WordVectorTestBase(object)<block_start>_WORD_VECTOR=<none><line_sep>@pytest.fixture<def_stmt>word_vector self mocker<block_start>mocker.patch.object(self._WORD_VECTOR '_PRETRAINED_PATH' self._WORD_VECTOR._PRETRAINED_PATH+'.sample')<line_sep>vocab_words=['.' 'znotexistz' 'a' 'i']<line_sep>initializer='zeros'<line_sep>word_vector=self._WORD_VECTOR(vocab_words=vocab_words initializer=initializer)<line_sep><return>word_vector<block_end><def_stmt>test___init__ self word_vector<block_start>EOS_TOKEN=CaptionPreprocessor.EOS_TOKEN<line_sep>word_vector_of=word_vector._word_vector_of<assert_stmt>len(word_vector_of)<eq>3# Not including znotexistz
<assert_stmt>'.'<not><in>word_vector_of<assert_stmt>'znotexistz'<not><in>word_vector_of<assert_stmt>EOS_TOKEN<in>word_vector_of<assert_stmt>'a'<in>word_vector_of<assert_stmt>'i'<in>word_vector_of<block_end><def_stmt>test_vectorize_words self word_vector<block_start>EOS_TOKEN=CaptionPreprocessor.EOS_TOKEN<line_sep>vectors=word_vector.vectorize_words(['qnotexistq' 'znotexistz' EOS_TOKEN 'a'])<assert_stmt><not>vectors[:2].any()# Assert all zeros
<assert_stmt>vectors[2:].all()<block_end><block_end># Assert all non-zeros
<class_stmt>TestGlove(WordVectorTestBase)<block_start>_WORD_VECTOR=Glove<block_end><class_stmt>TestFasttext(WordVectorTestBase)<block_start>_WORD_VECTOR=Fasttext<block_end>
|
<import_stmt>boto3<import_stmt>pytest<import_stmt>sure# noqa # pylint: disable=unused-import
<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>moto mock_s3<import_from_stmt>uuid uuid4<line_sep>DEFAULT_REGION_NAME="us-east-1"<line_sep>@mock_s3<def_stmt>test_get_bucket_replication_for_unexisting_bucket <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<with_stmt>pytest.raises(ClientError)<as>exc<block_start>s3.get_bucket_replication(Bucket=bucket_name)<block_end>err=exc.value.response["Error"]<line_sep>err["Code"].should.equal("NoSuchBucket")<line_sep>err["Message"].should.equal("The specified bucket does not exist")<line_sep>err["BucketName"].should.equal(bucket_name)<block_end>@mock_s3<def_stmt>test_get_bucket_replication_bucket_without_replication <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<line_sep>s3.create_bucket(Bucket=bucket_name)<with_stmt>pytest.raises(ClientError)<as>exc<block_start>s3.get_bucket_replication(Bucket=bucket_name)<block_end>err=exc.value.response["Error"]<line_sep>err["Code"].should.equal("ReplicationConfigurationNotFoundError")<line_sep>err["Message"].should.equal("The replication configuration was not found")<line_sep>err["BucketName"].should.equal(bucket_name)<block_end>@mock_s3<def_stmt>test_delete_bucket_replication_unknown_bucket <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<with_stmt>pytest.raises(ClientError)<as>exc<block_start>s3.delete_bucket_replication(Bucket=bucket_name)<block_end>err=exc.value.response["Error"]<line_sep>err["Code"].should.equal("NoSuchBucket")<line_sep>err["Message"].should.equal("The specified bucket does not exist")<line_sep>err["BucketName"].should.equal(bucket_name)<block_end>@mock_s3<def_stmt>test_delete_bucket_replication_bucket_without_replication <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<line_sep>s3.create_bucket(Bucket=bucket_name)<line_sep># No-op
s3.delete_bucket_replication(Bucket=bucket_name)<block_end>@mock_s3<def_stmt>test_create_replication_without_versioning <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<line_sep>s3.create_bucket(Bucket=bucket_name)<with_stmt>pytest.raises(ClientError)<as>exc<block_start>s3.put_bucket_replication(Bucket=bucket_name ReplicationConfiguration={"Role":"myrole" "Rules":[{"Destination":{"Bucket":"secondbucket"} "Status":"Enabled"}] } )<block_end>err=exc.value.response["Error"]<line_sep>err["Code"].should.equal("InvalidRequest")<line_sep>err["Message"].should.equal("Versioning must be 'Enabled' on the bucket to apply a replication configuration")<line_sep>err["BucketName"].should.equal(bucket_name)<block_end>@mock_s3<def_stmt>test_create_and_retrieve_replication_with_single_rules <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<line_sep>s3.create_bucket(Bucket=bucket_name)<line_sep>s3.put_bucket_versioning(Bucket=bucket_name VersioningConfiguration={"Status":"Enabled"})<line_sep>s3.put_bucket_replication(Bucket=bucket_name ReplicationConfiguration={"Role":"myrole" "Rules":[{"ID":"firstrule" "Priority":2 "Destination":{"Bucket":"secondbucket"} "Status":"Enabled" }] } )<line_sep>config=s3.get_bucket_replication(Bucket=bucket_name)["ReplicationConfiguration"]<line_sep>config.should.equal({"Role":"myrole" "Rules":[{"DeleteMarkerReplication":{"Status":"Disabled"} "Destination":{"Bucket":"secondbucket"} "Filter":{"Prefix":""} "ID":"firstrule" "Priority":2 "Status":"Enabled" }] })<line_sep>s3.delete_bucket_replication(Bucket=bucket_name)<line_sep># Can't retrieve replication that has been deleted
<with_stmt>pytest.raises(ClientError)<as>exc<block_start>s3.get_bucket_replication(Bucket=bucket_name)<block_end>err=exc.value.response["Error"]<line_sep>err["Code"].should.equal("ReplicationConfigurationNotFoundError")<line_sep>err["Message"].should.equal("The replication configuration was not found")<line_sep>err["BucketName"].should.equal(bucket_name)<block_end>@mock_s3<def_stmt>test_create_and_retrieve_replication_with_multiple_rules <block_start>bucket_name=str(uuid4())<line_sep>s3=boto3.client("s3" region_name=DEFAULT_REGION_NAME)<line_sep>s3.create_bucket(Bucket=bucket_name)<line_sep>s3.put_bucket_versioning(Bucket=bucket_name VersioningConfiguration={"Status":"Enabled"})<line_sep>s3.put_bucket_replication(Bucket=bucket_name ReplicationConfiguration={"Role":"myrole" "Rules":[{"Destination":{"Bucket":"secondbucket"} "Status":"Enabled"} {"ID":"secondrule" "Priority":2 "Destination":{"Bucket":"thirdbucket"} "Status":"Disabled" } ] } )<line_sep>config=s3.get_bucket_replication(Bucket=bucket_name)["ReplicationConfiguration"]<line_sep>config.should.have.key("Role").equal("myrole")<line_sep>rules=config["Rules"]<line_sep>rules.should.have.length_of(2)<line_sep>first_rule=rules[0]<line_sep>first_rule.should.have.key("ID")<line_sep>first_rule.should.have.key("Priority").equal(1)<line_sep>first_rule.should.have.key("Status").equal("Enabled")<line_sep>first_rule.should.have.key("Destination").equal({"Bucket":"secondbucket"})<line_sep>second=rules[1]<line_sep>second.should.have.key("ID").equal("secondrule")<line_sep>second.should.have.key("Priority").equal(2)<line_sep>second.should.have.key("Status").equal("Disabled")<line_sep>second.should.have.key("Destination").equal({"Bucket":"thirdbucket"})<block_end>
|
<import_from_stmt>django.test TestCase<import_from_stmt>django.utils timezone<import_from_stmt>..models BlogEntry Feed<class_stmt>BlogModelTest(TestCase)<block_start><def_stmt>test_blog_entry self<block_start>now=timezone.now()<line_sep>b=BlogEntry.objects.create(title='Test Entry' summary='Test Summary' pub_date=now url='http://www.revsys.com' feed=Feed.objects.create(name='psf blog' website_url='psf.example.org' feed_url='feed.psf.example.org' ))<line_sep>self.assertEqual(str(b) b.title)<line_sep>self.assertEqual(b.get_absolute_url() b.url)<block_end><block_end>
|
# Test values must be in the form [(text_input, expected_output), (text_input, expected_output), ...]
test_values=[("president" {"n":{"president" "presidentship" "presidencies" "presidency" "presidentships" "presidents" } "r":{"presidentially"} "a":{"presidential"} "v":{"presiding" "presides" "preside" "presided"} } ) ("elect" {"n":{"elector" "elects" "electors" "elective" "electorates" "elect" "electives" "elections" "electorate" "eligibility" "election" "eligibilities" } "r":set() "a":{"elect" "electoral" "elective" "eligible"} "v":{"elect" "elects" "electing" "elected"} } ) ("running" {"n":{"runninesses" "runnings" "runs" "running" "runniness" "runners" "runner" "run" } "a":{"running" "runny"} "v":{"running" "ran" "runs" "run"} "r":set() } ) ("run" {"n":{"runninesses" "runnings" "runs" "running" "runniness" "runners" "runner" "run" } "a":{"running" "runny"} "v":{"running" "ran" "runs" "run"} "r":set() } ) ("operations" {"n":{"operators" "operations" "operation" "operative" "operator" "operatives" } "a":{"operant" "operative"} "v":{"operated" "operating" "operate" "operates"} "r":{"operatively"} } ) ("operate" {"n":{"operators" "operations" "operation" "operative" "operator" "operatives" } "a":{"operant" "operative"} "v":{"operated" "operating" "operate" "operates"} "r":{"operatively"} } ) ("invest" {"n":{"investitures" "investors" "investiture" "investor" "investments" "investings" "investment" "investing" } "a":set() "v":{"invested" "invests" "invest" "investing"} "r":set() } ) ("investments" {"n":{"investitures" "investors" "investiture" "investor" "investments" "investings" "investment" "investing" } "a":set() "v":{"invested" "invests" "invest" "investing"} "r":set() } ) ("conjugation" {"n":{"conjugate" "conjugation" "conjugates" "conjugations"} "a":{"conjugate"} "v":{"conjugating" "conjugated" "conjugate" "conjugates"} "r":set() } ) ("do" {"n":{"does" "doer" "doers" "do"} "a":set() "v":{"doing" "don't" "does" "didn't" "do" "doesn't" "done" "did" } "r":set() } ) ("word" {"n":{"words" "word" "wordings" "wording"} "a":set() "v":{"words" "word" "worded" "wording"} "r":set() } ) ("love" {"a":{"lovable" "loveable"} "n":{"love" "lover" "lovers" "loves"} "r":set() "v":{"love" "loved" "loves" "loving"} } ) ("word" {"n":{"words" "word" "wordings" "wording"} "a":set() "v":{"words" "word" "worded" "wording"} "r":set() } ) ("verb" {"n":{"verbs" "verb"} "a":{"verbal"} "v":{"verbifying" "verbified" "verbify" "verbifies"} "r":{"verbally"} } ) ("genetic" {"n":{"geneticist" "genetics" "geneticists" "genes" "gene"} "a":{"genic" "genetic" "genetical"} "v":set() "r":{"genetically"} } ) ("politician" {"r":{"politically"} "a":{"political"} "n":{"politician" "politicians" "politics"} "v":set() } ) ("death" {"n":{"death" "dying" "deaths" "die" "dyings" "dice"} "a":{"dying" "deathly"} "v":{"died" "die" "dying" "dies"} "r":{"deathly"} } ) ("attitude" {"n":{"attitudes" "attitude"} "a":set() "v":{"attitudinise" "attitudinized" "attitudinize" "attitudinizes" "attitudinizing" } "r":set() } ) ("cheek" {"n":{"cheek" "cheekinesses" "cheeks" "cheekiness"} "a":{"cheeky"} "v":{"cheek" "cheeks" "cheeked" "cheeking"} "r":{"cheekily"} } ) ("world" {"n":{"worldliness" "world" "worldlinesses" "worlds"} "a":{"worldly" "world"} "v":set() "r":set() } ) ("lake" {"n":{"lake" "lakes"} "a":set() "v":set() "r":set()}) ("guitar" {"n":{"guitarist" "guitarists" "guitar" "guitars"} "a":set() "v":set() "r":set() } ) ("presence" {"n":{"presenter" "present" "presents" "presentness" "presenters" "presentnesses" "presentments" "presentations" "presences" "presence" "presentment" "presentation" } "a":{"present"} "v":{"present" "presents" 
"presenting" "presented"} "r":{"presently"} } ) ("enthusiasm" {"n":{"enthusiasm" "enthusiasms"} "a":{"enthusiastic"} "v":set() "r":{"enthusiastically"} } ) ("organization" {"n":{"organizers" "organization" "organizations" "organizer"} "a":set() "v":{"organize" "organized" "organizing" "organizes"} "r":set() } ) ("player" {"n":{"plays" "playlet" "playings" "players" "playing" "playlets" "play" "player" } "a":set() "v":{"plays" "play" "playing" "played"} "r":set() } ) ("transportation" {"n":{"transporters" "transportation" "transportations" "transporter" "transport" "transports" } "a":set() "v":{"transport" "transporting" "transports" "transported"} "r":set() } ) ("television" {"n":{"televisions" "television"} "a":set() "v":{"televising" "televise" "televises" "televised"} "r":set() } ) ("cousin" {"n":{"cousins" "cousin"} "a":{"cousinly"} "v":set() "r":set()} ) ("ability" {"n":{"abilities" "ability"} "a":{"able"} "v":set() "r":{"ably"}} ) ("chapter" {"n":{"chapters" "chapter"} "a":set() "v":set() "r":set()}) ("appearance" {"n":{"appearances" "apparitions" "appearance" "apparencies" "apparentness" "apparentnesses" "apparition" "apparency" } "a":{"apparent"} "v":{"appears" "appeared" "appear" "appearing"} "r":{"apparently"} } ) ("drawing" {"n":{"drawings" "drawers" "draws" "drawer" "drawees" "drawee" "draw" "drawing" } "a":set() "v":{"draws" "drew" "drawn" "draw" "drawing"} "r":set() } ) ("university" {"n":{"university" "universities"} "a":set() "v":set() "r":set()} ) ("performance" {"n":{"performings" "performing" "performances" "performance" "performer" "performers" } "a":set() "v":{"performs" "performing" "performed" "perform"} "r":set() } ) ("revenue" {"n":{"revenue" "revenues"} "a":set() "v":set() "r":set()}) # Some Verbs
("cling" {"n":{"cling" "clings"} "a":set() "v":{"clung" "cling" "clinging" "clings"} "r":set() } ) ("decrease" {"n":{"decrease" "decreases"} "a":set() "v":{"decrease" "decreases" "decreased" "decreasing"} "r":set() } ) ("wonder" {"n":{"wonder" "wonderment" "wonderments" "wonders" "wonderers" "wonderer" } "a":{"wondrous"} "v":{"wondering" "wonder" "wonders" "wondered"} "r":{"wondrous" "wondrously"} } ) ("rest" {"n":{"rest" "rests" "resters" "rester"} "a":set() "v":{"rest" "rests" "resting" "rested"} "r":set() } ) ("mutter" {"n":{"mutterer" "mutterers" "muttering" "mutter" "mutterings" "mutters" } "a":set() "v":{"muttering" "muttered" "mutters" "mutter"} "r":set() } ) ("implement" {"n":{"implementations" "implement" "implements" "implementation"} "a":{"implemental"} "v":{"implemented" "implement" "implements" "implementing"} "r":set() } ) ("evolve" {"n":{"evolution" "evolutions"} "a":{"evolutionary"} "v":{"evolved" "evolve" "evolves" "evolving"} "r":{"evolutionarily"} } ) ("allocate" {"n":{"allocations" "allocators" "allocation" "allocator"} "a":{"allocable" "allocatable"} "v":{"allocating" "allocates" "allocated" "allocate"} "r":set() } ) ("flood" {"n":{"flood" "flooding" "floodings" "floods"} "a":set() "v":{"flooding" "flooded" "flood" "floods"} "r":set() } ) # Should there be `flooded` in 'a' here?
("bow" {"n":{"bows" "bow"} "a":set() "v":{"bows" "bowing" "bowed" "bow"} "r":set() } ) ("advocate" {"n":{"advocates" "advocator" "advocacy" "advocacies" "advocators" "advocate" } "a":set() "v":{"advocates" "advocating" "advocated" "advocate"} "r":set() } ) ("divert" {"n":{"diversions" "diversionists" "diversionist" "diversion"} "a":{"diversionary"} "v":{"diverted" "diverts" "divert" "diverting"} "r":set() } ) # Some adjectives
("sweet" {"n":{"sweetnesses" "sweets" "sweetness" "sweet"} "a":{"sweet"} "v":set() "r":{"sweet" "sweetly"} } ) ("glossy" {"n":{"glossiness" "glossy" "glossies" "glossinesses"} "a":{"glossy"} "v":set() "r":{"glossily"} } ) ("relevant" {"n":{"relevancies" "relevance" "relevancy" "relevances"} "a":{"relevant"} "v":set() "r":{"relevantly"} } ) ("aloof" {"n":{"aloofnesses" "aloofness"} "a":{"aloof"} "v":set() "r":{"aloof"}} ) ("therapeutic" {"n":{"therapists" "therapies" "therapy" "therapist" "therapeutic" "therapeutics" } "a":{"therapeutical" "therapeutic"} "v":set() "r":{"therapeutically"} } ) ("obviously" {"n":{"obviousnesses" "obviousness"} "a":{"obvious"} "v":set() "r":{"obviously"} } ) ("jumpy" {"n":{"jumpings" "jumpiness" "jumpinesses" "jump" "jumping" "jumps"} "a":{"jumpy"} "v":{"jump" "jumping" "jumped" "jumps"} "r":set() } ) ("venomous" {"n":{"venom" "venoms"} "a":{"venomous"} "v":set() "r":{"venomously"}} ) ("laughable" {"n":{"laugher" "laughs" "laughers" "laugh"} "a":{"laughable"} "v":{"laughing" "laughs" "laughed" "laugh"} "r":{"laughably"} } ) ("demonic" {"n":{"demons" "demon" "demonizations" "demonization"} "a":{"demonic"} "v":{"demonized" "demonizing" "demonizes" "demonize"} "r":set() } ) ("knotty" {"n":{"knot" "knottiness" "knots" "knottinesses"} "a":{"knotty"} "v":{"knotted" "knotting" "knots" "knot"} "r":set() } ) # Is `knottinesses` a valid plural?
("little" {"n":{"little" "littlenesses" "littles" "littleness"} "a":{"little"} "v":set() "r":{"little"} } ) # Is `littlenesses` a valid plural?
("puzzling" {"n":{"puzzle" "puzzlers" "puzzler" "puzzlement" "puzzlements" "puzzles" } "a":{"puzzling"} "v":{"puzzle" "puzzled" "puzzles" "puzzling"} "r":set() } ) ("overrated" {"n":{"overratings" "overrating"} "a":set() "v":{"overrated" "overrating" "overrate" "overrates"} "r":set() } ) ("walk" {"n":{"walking" "walks" "walkings" "walker" "walk" "walkers"} "a":{"walking"} "v":{"walked" "walking" "walk" "walks"} "r":set() } ) ("walking" {"n":{"walking" "walks" "walkings" "walker" "walk" "walkers"} "a":{"walking"} "v":{"walked" "walking" "walk" "walks"} "r":set() } ) ("be" {"n":{"beings" "being"} "a":set() "v":{"wasn't" "being" "be" "are" "was" "am" "isn't" "is" "aren't" "been" "weren't" "were" "am not" } "r":set() } ) ("am" {"n":{"beings" "being"} "a":set() "v":{"wasn't" "being" "be" "are" "was" "am" "isn't" "is" "aren't" "been" "weren't" "were" "am not" } "r":set() } ) ("run" {"n":{"runnings" "run" "runninesses" "runner" "runniness" "running" "runs" "runners" } "a":{"running" "runny"} "v":{"running" "ran" "run" "runs"} "r":set() } ) ("ran" {"n":{"runnings" "run" "runninesses" "runner" "runniness" "running" "runs" "runners" } "a":{"running" "runny"} "v":{"running" "ran" "run" "runs"} "r":set() } ) ("blanket" {"n":{"blanket" "blankets"} "a":{"blanket"} "v":{"blankets" "blanketed" "blanketing" "blanket"} "r":set() } ) ]<line_sep>
|
"""
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""<import_from_stmt>atom.api Typed ForwardTyped Long Str Enum Bool observe set_default <import_from_stmt>datetime datetime<import_from_stmt>enaml.core.declarative d_<import_from_stmt>.text_view TextView ProxyTextView<class_stmt>ProxyChronometer(ProxyTextView)<block_start>""" The abstract definition of a proxy Chronometer object.
"""<line_sep>#: A reference to the Label declaration.
declaration=ForwardTyped(<lambda>:Chronometer)<def_stmt>set_base self base<block_start><raise>NotImplementedError<block_end><def_stmt>set_format self format<block_start><raise>NotImplementedError<block_end><def_stmt>set_direction self direction<block_start><raise>NotImplementedError<block_end><def_stmt>set_running self running<block_start><raise>NotImplementedError<block_end><def_stmt>set_mode self mode<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>Chronometer(TextView)<block_start>""" A simple control for displaying an elapsed-time counter.
"""<line_sep>#: Set the time that the count-up timer is in reference to.
base=d_(Typed(datetime factory=datetime.now))<line_sep>#: Tick counter
ticks=d_(Long() writable=<false>)<line_sep>#: Sets the format string used for display.
format=d_(Str())<line_sep>#: Counting direction
direction=d_(Enum('up' 'down'))<line_sep>#: Defines the behavior when restarting
#: If mode is 'resume' it will continue counting;
#: otherwise it will reset the count.
mode=d_(Enum('resume' 'reset' 'manual'))<line_sep>#: Start / stop the counter
running=d_(Bool())<line_sep>#: A reference to the ProxyChronometer object.
proxy=Typed(ProxyChronometer)<line_sep>@observe('base' 'direction' 'format' 'running' 'mode')<def_stmt>_update_proxy self change<block_start>""" An observer which sends the state change to the proxy.
"""<line_sep># The superclass implementation is sufficient.
super(Chronometer self)._update_proxy(change)<block_end><block_end>
|
"""Get data into JVM for prediction and out again as Spark Dataframe"""<import_stmt>logging<line_sep>logger=logging.getLogger('nlu')<import_stmt>pyspark<import_from_stmt>pyspark.sql.functions monotonically_increasing_id<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>pyspark.sql.types StringType StructType StructField<class_stmt>DataConversionUtils()# Modin aswell but optional, so we dont import the type yet
<block_start>supported_types=[pyspark.sql.DataFrame pd.DataFrame pd.Series np.ndarray]<line_sep>@staticmethod<def_stmt>except_text_col_not_found cols<block_start>print(f'Could not find column named "text" in input Pandas Dataframe. Please ensure one column named such exists. Columns in DF are : {cols} ')<block_end>@staticmethod<def_stmt>sdf_to_sdf data spark_sess raw_text_column='text'<block_start>"""No casting, Spark to Spark. Just add index col"""<line_sep>output_datatype='spark'<line_sep>data=data.withColumn('origin_index' monotonically_increasing_id().alias('origin_index'))<line_sep>stranger_features=[]<if_stmt>raw_text_column<in>data.columns# store all stranger features
<block_start><if_stmt>len(data.columns)<g>1<block_start>stranger_features=list(set(data.columns)-{raw_text_column})<block_end><else_stmt><block_start>DataConversionUtils.except_text_col_not_found(data.columns)<block_end><block_end><return>data stranger_features output_datatype<block_end>@staticmethod<def_stmt>pdf_to_sdf data spark_sess raw_text_column='text'<block_start>"""Casting pandas to spark and add index col"""<line_sep>output_datatype='pandas'<line_sep>stranger_features=[]<line_sep>sdf=<none><line_sep># set first col as text column if there is none
<if_stmt>raw_text_column<not><in>data.columns<block_start>data.rename(columns={data.columns[0]:'text'} inplace=<true>)<block_end>data['origin_index']=data.index<if_stmt>raw_text_column<in>data.columns<block_start><if_stmt>len(data.columns)<g>1# make Nans to None, or spark will crash
<block_start>data=data.where(pd.notnull(data) <none>)<line_sep>data=data.dropna(axis=1 how='all')<line_sep>stranger_features=list(set(data.columns)-{raw_text_column})<block_end>sdf=spark_sess.createDataFrame(data)<block_end><else_stmt><block_start>DataConversionUtils.except_text_col_not_found(data.columns)<block_end><return>sdf stranger_features output_datatype<block_end>@staticmethod<def_stmt>pds_to_sdf data spark_sess raw_text_column='text'<block_start>"""Casting pandas series to spark and add index col. # for df['text'] column/series passing, casting follows pseries->pdf->spark->pd """<line_sep>output_datatype='pandas_series'<line_sep>sdf=<none><line_sep>schema=StructType([StructField(raw_text_column StringType() <true>)])<line_sep>data=pd.DataFrame(data).dropna(axis=1 how='all')<line_sep># If a series from a column is passed, its column name will be reused.
<if_stmt>raw_text_column<not><in>data.columns<and>len(data.columns)<eq>1<block_start>data[raw_text_column]=data[data.columns[0]]<block_end><else_stmt><block_start>logger.info(f'INFO: NLU will assume {data.columns[0]} as label column since default text column could not be find')<line_sep>data[raw_text_column]=data[data.columns[0]]<block_end>data['origin_index']=data.index<if_stmt>raw_text_column<in>data.columns<block_start>sdf=spark_sess.createDataFrame(pd.DataFrame(data[raw_text_column]) schema=schema)<block_end><else_stmt><block_start>DataConversionUtils.except_text_col_not_found(data.columns)<block_end><if_stmt>'origin_index'<not><in>sdf.columns<block_start>sdf=sdf.withColumn('origin_index' monotonically_increasing_id().alias('origin_index'))<block_end><return>sdf [] output_datatype<block_end>@staticmethod<def_stmt>np_to_sdf data spark_sess raw_text_column='text'<block_start>"""Casting numpy array to spark and add index col. This is a bit inefficient. Casting follow np->pd->spark->pd. We could cut out the first pd step """<line_sep>output_datatype='numpy_array'<if_stmt>len(data.shape)<ne>1<block_start>ValueError(f"Exception : Input numpy array must be 1 Dimensional for prediction.. Input data shape is{data.shape}")<block_end>sdf=spark_sess.createDataFrame(pd.DataFrame({raw_text_column:data 'origin_index':list(range(len(data)))}))<line_sep><return>sdf [] output_datatype<block_end>@staticmethod<def_stmt>str_to_sdf data spark_sess raw_text_column='text'<block_start>"""Casting str to spark and add index col. This is a bit inefficient. Casting follow # inefficient, str->pd->spark->pd , we can could first pd"""<line_sep>output_datatype='string'<line_sep>sdf=spark_sess.createDataFrame(pd.DataFrame({raw_text_column:data 'origin_index':[0]} index=[0]))<line_sep><return>sdf [] output_datatype<block_end>@staticmethod<def_stmt>str_list_to_sdf data spark_sess raw_text_column='text'<block_start>"""Casting str list to spark and add index col. This is a bit inefficient. Casting follow # # inefficient, list->pd->spark->pd , we can could first pd"""<line_sep>output_datatype='string_list'<if_stmt>all(type(elem)<eq>str<for>elem data)<block_start>sdf=spark_sess.createDataFrame(pd.DataFrame({raw_text_column:pd.Series(data) 'origin_index':list(range(len(data)))}))<block_end><else_stmt><block_start>ValueError("Exception: Not all elements in input list are of type string.")<block_end><return>sdf [] output_datatype<block_end>@staticmethod<def_stmt>fallback_modin_to_sdf data spark_sess raw_text_column='text'<block_start>"""Casting potential Modin data to spark and add index col. # Modin tests, This could crash if Modin not installed """<line_sep>sdf=<none><line_sep>output_datatype=''<try_stmt><block_start><import_stmt>modin.pandas<as>mpd<if_stmt>isinstance(data mpd.DataFrame)<block_start>data=pd.DataFrame(data.to_dict())# create pandas to support type inference
output_datatype='modin'<line_sep>data['origin_index']=data.index<block_end><if_stmt>raw_text_column<in>data.columns<block_start><if_stmt>len(data.columns)<g>1<block_start>data=data.where(pd.notnull(data) <none>)# make Nans to None, or spark will crash
data=data.dropna(axis=1 how='all')<line_sep>stranger_features=list(set(data.columns)-{raw_text_column})<block_end>sdf=spark_sess.createDataFrame(data)<block_end><else_stmt><block_start>DataConversionUtils.except_text_col_not_found(data.columns)<block_end><if_stmt>isinstance(data mpd.Series)<block_start>output_datatype='modin_series'<line_sep>data=pd.Series(data.to_dict())# create pandas to support type inference
data=pd.DataFrame(data).dropna(axis=1 how='all')<line_sep>data['origin_index']=data.index<line_sep>index_provided=<true><if_stmt>raw_text_column<in>data.columns<block_start>sdf=spark_sess.createDataFrame(data[['text']])<block_end><else_stmt><block_start>DataConversionUtils.except_text_col_not_found(data.columns)<block_end><block_end><block_end><except_stmt><block_start>print("If you use Modin, make sure you have installed 'pip install modin[ray]' or 'pip install modin[dask]' backend for Modin ")<block_end><return>sdf [] output_datatype<block_end>@staticmethod<def_stmt>to_spark_df data spark_sess raw_text_column='text'<block_start>"""Convert supported datatypes to SparkDF and extract extra data for prediction later on."""<try_stmt><block_start><if_stmt>isinstance(data pyspark.sql.dataframe.DataFrame)<block_start><return>DataConversionUtils.sdf_to_sdf(data spark_sess raw_text_column)<block_end><elif_stmt>isinstance(data pd.DataFrame)<block_start><return>DataConversionUtils.pdf_to_sdf(data spark_sess raw_text_column)<block_end><elif_stmt>isinstance(data pd.Series)<block_start><return>DataConversionUtils.pds_to_sdf(data spark_sess raw_text_column)<block_end><elif_stmt>isinstance(data np.ndarray)<block_start><return>DataConversionUtils.np_to_sdf(data spark_sess raw_text_column)<block_end><elif_stmt>isinstance(data str)<block_start><return>DataConversionUtils.str_to_sdf(data spark_sess raw_text_column)<block_end><elif_stmt>isinstance(data list)<block_start><return>DataConversionUtils.str_list_to_sdf(data spark_sess raw_text_column)<block_end><else_stmt><block_start><return>DataConversionUtils.fallback_modin_to_sdf(data spark_sess raw_text_column)<block_end><block_end><except_stmt><block_start>ValueError("Data could not be converted to Spark Dataframe for internal conversion.")<block_end><block_end><block_end>
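# Added usage sketch (assumes an active SparkSession named `spark`); column names follow
# the conventions above: a 'text' column is required and other columns become stranger features.
import pandas as pd

pdf = pd.DataFrame({'text': ['hello world', 'nlu makes this easy'], 'label': [0, 1]})
sdf, stranger_features, datatype = DataConversionUtils.to_spark_df(pdf, spark)
# datatype == 'pandas'; sdf gains an 'origin_index' column and
# stranger_features lists the extra, non-text columns.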
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
BatchNorm example for input data with shape [N, C, H, W]
'''<import_stmt>numpy<as>np<import_stmt>paddle<import_from_stmt>paddle.nn BatchNorm2D<line_sep># Set the random seed so that every run produces the same result
np.random.seed(100)<line_sep># Create the input data
data=np.random.rand(2 3 3 3).astype('float32')<line_sep># Use BatchNorm2D to compute the normalized output
# The input has shape [N, C, H, W]; num_features equals C
bn=BatchNorm2D(num_features=3)<line_sep>x=paddle.to_tensor(data)<line_sep>y=bn(x)<line_sep>print('input of BatchNorm2D Layer: \n {}'.format(x.numpy()))<line_sep>print('output of BatchNorm2D Layer: \n {}'.format(y.numpy()))<line_sep># Take the data of channel 0 and
# use numpy to compute its mean, standard deviation and normalized output
a=data[: 0 : :]<line_sep>a_mean=a.mean()<line_sep>a_std=a.std()<line_sep>b=(a-a_mean)/a_std<line_sep>print('channel 0 of input data: \n {}'.format(a))<line_sep>print('mean {}, std {}, \n output: \n {}'.format(a_mean a_std b))<line_sep>
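# Added verification sketch (not part of the original example): BatchNorm2D divides by
# sqrt(var + epsilon), so the numpy result above only matches the layer output approximately.
y_c0 = y.numpy()[:, 0, :, :]
print('max abs difference for channel 0: {}'.format(np.abs(y_c0 - b).max()))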
|
<import_stmt>functools<import_stmt>os<import_stmt>traceback<import_from_stmt>. graph misc system<def_stmt>rule func<block_start>"""Create a new rule. Calling it will be spawning a task.
This function should be used as a decorator. The passed function
must be a generator which follows the protocol.
"""<line_sep><return>functools.partial(graph.spawn_task func)<block_end><def_stmt>task func<block_start><return>graph.spawn_task(func)<block_end><def_stmt>publish inputs=<none> message=<none> outputs=<none> check=<none> force=<false> result=<none> phony=<false><block_start>"""Inform the system about the task."""<if_stmt>inputs<is><none><block_start>inputs=set()<block_end><elif_stmt>isinstance(inputs str)<block_start><raise>TypeError('Inputs is a string\n\n'<concat>'Rules must publish inputs in the form of an iterable. Wrap the '<concat>'string in a list to resolve this issue.')<block_end><else_stmt><block_start>inputs=set(map(os.path.abspath inputs))<for_stmt>input inputs<block_start><if_stmt><not>os.path.isfile(input)<and><not>graph.has_file(input)<block_start><raise>FileNotFoundError(input)<block_end><block_end><block_end><if_stmt><not>isinstance(message str)<block_start><if_stmt>message<is><none><block_start><raise>ValueError('Publication did not include a message\n\n'<concat>'Every rule must publish a message, even phony ones.')<block_end><else_stmt><block_start><raise>TypeError('Supplied message is not a string\n\n'<concat>'Every rule must publish a message in form of a string. No '<concat>'implicit conversion is done.')<block_end><block_end><if_stmt><not>outputs<block_start><raise>ValueError('Rule did not declare any outputs\n\n'<concat>'Every rule, including phony ones, must have at least 1 output.')<block_end><elif_stmt>isinstance(outputs str)<block_start><raise>TypeError('Outputs is a string\n\n'<concat>'Rules must publish outputs in the form of an iterable. Wrap the '<concat>'string in a list to resolve this issue.')<block_end><else_stmt><block_start>outputs=set(map(os.path.abspath outputs))<for_stmt>output outputs<block_start><if_stmt>graph.has_file(output)<block_start><raise>ValueError('output collision')<block_end># elif not phony and not misc.is_inside(output, system.build('.')):
# raise ValueError('output outside of build directory')
<block_end><block_end><if_stmt><not>isinstance(result dict)<block_start><if_stmt>result<is><none><block_start>result={}<block_end><else_stmt><block_start><raise>TypeError('result must be of type dict')<block_end><block_end><elif_stmt>'outputs'<in>result<block_start><raise>ValueError('outputs is reserved')<block_end><elif_stmt>'inputs'<in>result<block_start><raise>ValueError('inputs is reserved')<block_end>result['outputs']=outputs<line_sep>result['inputs']=inputs<if_stmt>len(outputs)<eq>1<and>'output'<not><in>result<block_start>[output]=outputs<line_sep>result['output']=output<block_end>in_files=set()<for_stmt>input inputs<block_start>file=graph.get_file(input)<if_stmt>file.producer<is><none><block_start>file.stat_if_necessary()<if_stmt><not>file.exists<block_start><raise>FileNotFoundError(file.path)<block_end><block_end>in_files.add(file)<block_end>out_files=set()<for_stmt>output outputs<block_start>file=graph.new_file(output)<line_sep>out_files.add(file)<block_end><if_stmt><not>isinstance(phony bool)<block_start><raise>TypeError('phony must be a boolean')<block_end>stack=traceback.extract_stack()[:-3]<line_sep><return>in_files message out_files check force result phony stack<block_end><def_stmt>deposit inputs=() warnings=<none><block_start>"""Inform the system of additional inputs after execution."""<if_stmt>isinstance(inputs str)<block_start><raise>TypeError('inputs must not be string')<block_end><else_stmt><block_start>deposits={os.path.abspath(path)<for>path inputs}<for_stmt>path deposits<block_start><if_stmt><not>os.path.isfile(path)<block_start><raise>FileNotFoundError(path)<block_end><elif_stmt>misc.is_inside(path system.build('.'))<block_start><raise>ValueError('deposit inside build directory')<block_end><block_end><block_end><if_stmt>warnings<is><not><none><block_start>warnings=warnings.strip()<block_end><return>deposits warnings<block_end>
|
# encoding: utf-8
"""
line/watchdog.py
Created by <NAME> on 2017-07-01.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""<import_from_stmt>exabgp.reactor.api.command.command Command<import_from_stmt>exabgp.reactor.api.command.limit match_neighbors<import_from_stmt>exabgp.reactor.api.command.limit extract_neighbors<import_from_stmt>exabgp.protocol.ip NoNextHop<import_from_stmt>exabgp.bgp.message OUT<import_from_stmt>exabgp.bgp.message.update.attribute NextHop<import_from_stmt>exabgp.configuration.static ParseStaticRoute<def_stmt>register_announce <block_start><pass><block_end>@Command.register('text' 'announce route')<def_stmt>announce_route self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_route(command)<if_stmt><not>changes<block_start>self.log_failure('command could not parse route in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start><if_stmt><not>ParseStaticRoute.check(change)<block_start>self.log_message('invalid route for %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><continue><block_end>change.nlri.action=OUT.ANNOUNCE<line_sep>reactor.configuration.inject_change(peers change)<line_sep>self.log_message('route added to %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'withdraw route')<def_stmt>withdraw_route self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_route(command)<if_stmt><not>changes<block_start>self.log_failure('command could not parse route in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes# Change the action to withdraw before checking the route
<block_start>change.nlri.action=OUT.WITHDRAW<line_sep># NextHop is a mandatory field (but we do not require it)
<if_stmt>change.nlri.nexthop<is>NoNextHop<block_start>change.nlri.nexthop=NextHop('0.0.0.0')<block_end><if_stmt><not>ParseStaticRoute.check(change)<block_start>self.log_message('invalid route for %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><continue><block_end><if_stmt>reactor.configuration.inject_change(peers change)<block_start>self.log_message('route removed from %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end><else_stmt><block_start>self.log_failure('route not found on %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'announce vpls')<def_stmt>announce_vpls self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_vpls(command)<if_stmt><not>changes<block_start>self.log_failure('command could not parse vpls in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start>change.nlri.action=OUT.ANNOUNCE<line_sep>reactor.configuration.inject_change(peers change)<line_sep>self.log_message('vpls added to %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the vpls')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the vpls')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'withdraw vpls')<def_stmt>withdraw_vpls self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_vpls(command)<if_stmt><not>changes<block_start>self.log_failure('command could not parse vpls in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start>change.nlri.action=OUT.WITHDRAW<if_stmt>reactor.configuration.inject_change(peers change)<block_start>self.log_message('vpls removed from %s : %s'%(', '.join(peers)<if>peers<else>'all peers' 
change.extensive()))<line_sep><yield><false><block_end><else_stmt><block_start>self.log_failure('vpls not found on %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the vpls')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the vpls')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'announce attribute')@Command.register('text' 'announce attributes')<def_stmt>announce_attributes self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_attributes(command peers)<if_stmt><not>changes<block_start>self.log_failure('command could not parse route in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start>change.nlri.action=OUT.ANNOUNCE<line_sep>reactor.configuration.inject_change(peers change)<line_sep>self.log_message('route added to %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'withdraw attributes')<def_stmt>withdraw_attribute self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_attributes(command peers)<if_stmt><not>changes<block_start>self.log_failure('command could not parse route in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start>change.nlri.action=OUT.WITHDRAW<if_stmt>reactor.configuration.inject_change(peers change)<block_start>self.log_message('route removed from %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end><else_stmt><block_start>self.log_failure('route not found on %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the 
route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the route')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'announce flow')<def_stmt>announce_flow self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_flow(command)<if_stmt><not>changes<block_start>self.log_failure('command could not parse flow in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start>change.nlri.action=OUT.ANNOUNCE<line_sep>reactor.configuration.inject_change(peers change)<line_sep>self.log_message('flow added to %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<line_sep><yield><false><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the flow')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the flow')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'withdraw flow')<def_stmt>withdraw_flow self reactor service line<block_start><def_stmt>callback <block_start><try_stmt><block_start>descriptions,command=extract_neighbors(line)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>changes=self.api_flow(command)<if_stmt><not>changes<block_start>self.log_failure('command could not parse flow in : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end><for_stmt>change changes<block_start>change.nlri.action=OUT.WITHDRAW<if_stmt>reactor.configuration.inject_change(peers change)<block_start>self.log_message('flow removed from %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<block_end><else_stmt><block_start>self.log_failure('flow not found on %s : %s'%(', '.join(peers)<if>peers<else>'all peers' change.extensive()))<block_end><yield><false><block_end>reactor.processes.answer_done(service)<block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the flow')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the flow')<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><block_end><block_end>reactor.asynchronous.schedule(service line callback())<line_sep><return><true><block_end>@Command.register('text' 'announce eor')<def_stmt>announce_eor self reactor service command<block_start><def_stmt>callback self command 
peers<block_start>family=self.api_eor(command)<if_stmt><not>family<block_start>self.log_failure("Command could not parse eor : %s"%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>reactor.configuration.inject_eor(peers family)<line_sep>self.log_message("Sent to %s : %s"%(', '.join(peers<if>peers<else>[])<if>peers<is><not><none><else>'all peers' family.extensive()))<line_sep><yield><false><line_sep>reactor.processes.answer_done(service)<block_end><try_stmt><block_start>descriptions,command=extract_neighbors(command)<line_sep>peers=match_neighbors(reactor.established_peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end>reactor.asynchronous.schedule(service command callback(self command peers))<line_sep><return><true><block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the command')<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the command')<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end><block_end>@Command.register('text' 'announce route-refresh')<def_stmt>announce_refresh self reactor service command<block_start><def_stmt>callback self command peers<block_start>refreshes=self.api_refresh(command)<if_stmt><not>refreshes<block_start>self.log_failure("Command could not parse route-refresh command : %s"%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>reactor.configuration.inject_refresh(peers refreshes)<for_stmt>refresh refreshes<block_start>self.log_message("Sent to %s : %s"%(', '.join(peers<if>peers<else>[])<if>peers<is><not><none><else>'all peers' refresh.extensive()))<block_end><yield><false><line_sep>reactor.processes.answer_done(service)<block_end><try_stmt><block_start>descriptions,command=extract_neighbors(command)<line_sep>peers=match_neighbors(reactor.established_peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end>reactor.asynchronous.schedule(service command callback(self command peers))<line_sep><return><true><block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the command')<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the command')<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end><block_end>@Command.register('text' 'announce operational')<def_stmt>announce_operational self reactor service command<block_start><def_stmt>callback self command peers<block_start>operational=self.api_operational(command)<if_stmt><not>operational<block_start>self.log_failure("Command could not parse operational command : %s"%command)<line_sep>reactor.processes.answer_error(service)<line_sep><yield><true><line_sep><return><block_end>reactor.configuration.inject_operational(peers operational)<line_sep>self.log_message("operational message sent to %s : %s"%(', '.join(peers<if>peers<else>[])<if>peers<is><not><none><else>'all peers' 
operational.extensive()))<line_sep><yield><false><line_sep>reactor.processes.answer_done(service)<block_end><if_stmt>(command.split()+['be' 'safe'])[2].lower()<not><in>('asm' 'adm' 'rpcq' 'rpcp' 'apcq' 'apcp' 'lpcq' 'lpcp' )<block_start>reactor.processes.answer_done(service)<line_sep><return><false><block_end><try_stmt><block_start>descriptions,command=extract_neighbors(command)<line_sep>peers=match_neighbors(reactor.peers() descriptions)<if_stmt><not>peers<block_start>self.log_failure('no neighbor matching the command : %s'%command)<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end>reactor.asynchronous.schedule(service command callback(self command peers))<line_sep><return><true><block_end><except_stmt>ValueError<block_start>self.log_failure('issue parsing the command')<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end><except_stmt>IndexError<block_start>self.log_failure('issue parsing the command')<line_sep>reactor.processes.answer_error(service)<line_sep><return><false><block_end><block_end>
|
#coding=utf-8
<import_stmt>os<import_stmt>signal<import_stmt>logging<import_from_stmt>ProcessHandler.lib.utils close_on_exec<import_from_stmt>ProcessHandler.lib.sock create_sockets<import_from_stmt>ProcessHandler.lib.utils _setproctitle reopen_log_file<line_sep>MAXSIZE=(1<lshift>31)-1<class_stmt>Worker(object)<block_start>SIGNALS=[getattr(signal "SIG%s"%x)<for>x "HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]<line_sep>SOCK_BACKLOG=20<def_stmt>__init__ self cfg file_logger ppid sockets=<none><block_start>self.cfg=cfg<line_sep>self.file_logger=file_logger<or>logging.getLogger()<line_sep>self.ppid=ppid<line_sep>self.LISTENERS=sockets<line_sep>self.alive=<true><line_sep>self.booted=<false><line_sep>self.worker_name="worker: %s"%cfg.proc_name<line_sep>self.nr=0# actual handle request count
self.max_requests=int(self.cfg.max_requests<or>MAXSIZE)<line_sep>self.rd_fds=<none><block_end>@property<def_stmt>pid self<block_start><return>os.getpid()<block_end><def_stmt>__str__ self<block_start><return>"<Worker %s>"%self.pid<block_end><def_stmt>init_signals self<block_start>[signal.signal(s signal.SIG_DFL)<for>s self.SIGNALS]<line_sep># init new signaling
signal.signal(signal.SIGQUIT self.handle_quit)<line_sep>signal.signal(signal.SIGTERM self.handle_exit)<line_sep>signal.signal(signal.SIGINT self.handle_exit)<line_sep>signal.signal(signal.SIGWINCH self.handle_winch)<line_sep>signal.signal(signal.SIGUSR1 self.handle_usr1)<line_sep># Don't let SIGQUIT and SIGUSR1 disturb active requests
# by interrupting system calls
<if_stmt>hasattr(signal 'siginterrupt')# python >= 2.6
<block_start>signal.siginterrupt(signal.SIGQUIT <false>)<line_sep>signal.siginterrupt(signal.SIGUSR1 <false>)<block_end><block_end><def_stmt>setup self<block_start><if_stmt>self.cfg.bind<block_start>binds=[]<for_stmt>b self.cfg.bind.split(',')<block_start>addr=b.strip().split(':')<line_sep>binds.append((addr[0] int(addr[1])))<block_end>self.bind=binds<line_sep># self.bind = [tuple(b.strip().split(":")) for b in self.cfg.bind.split(',')] # bind address comma separate
<block_end><else_stmt><block_start>self.bind=<none><block_end>self.unix_socket=self.cfg.unix_socket<block_end><def_stmt>init_process self<block_start>self.setup()<line_sep>#set proc name
_setproctitle(self.worker_name)<line_sep>self.init_signals()<line_sep># bind ip and port if needed
<if_stmt><not>self.LISTENERS<and>(self.bind<or>self.unix_socket)<block_start>self.file_logger.info("Listern on %s, unixdomian:%s" self.cfg.bind<or>"" self.cfg.unix_socket<or>"")<line_sep>self.LISTENERS=create_sockets(self.bind self.unix_socket self.SOCK_BACKLOG)<block_end><if_stmt>self.LISTENERS<block_start><for_stmt>s self.LISTENERS<block_start>close_on_exec(s)<line_sep>s.setblocking(0)<block_end>self.rd_fds=list(self.LISTENERS)<block_end><else_stmt><block_start>self.rd_fds=<none><block_end># enter main loop
self.booted=<true><block_end><def_stmt>run self<block_start>self.init_process()<block_end><def_stmt>handle_request self sock=<none> client=<none> addr=<none><block_start><raise>NotImplementedError()<block_end><def_stmt>stop self<block_start>self.alive=<false><if_stmt>self.LISTENERS<block_start><for_stmt>l self.LISTENERS<block_start>l.close()<block_end><block_end><block_end><def_stmt>handle_quit self sig frame<block_start>self.stop()<block_end><def_stmt>handle_exit self sig frame<block_start>self.alive=<false><line_sep>os._exit(0)<block_end><def_stmt>handle_winch self sig frame<block_start><return><block_end><def_stmt>handle_usr1 self sig frame<block_start>reopen_log_file(self.file_logger)<block_end><def_stmt>handle_usr2 self sig frame<block_start><pass><line_sep>"""
fds = [l.fileno() for l in self.LISTENERS]
os.environ['OPEN_FD'] = ",".join([str(fd) for fd in fds])
"""<block_end>"""
def reload(self):
# config listeners
old_bind = self.bind
old_sock = self.unix_socket
old_port = self.port
if self.port != old_port or self.bind != old_bind or self.unix_socket != old_sock: # ugly
[sock.close() for sock in self.LISTENERS]
self.LISTENERS = create_sockets(self.bind, self.port, self.unix_socket, self.backlog or 20)
"""<block_end>
|
<import_stmt>os<import_from_stmt>examples acquire_token_by_client_credentials test_user_principal_name<import_from_stmt>office365.graph_client GraphClient<line_sep>client=GraphClient(acquire_token_by_client_credentials)<line_sep>target_drive=client.users[test_user_principal_name].drive<line_sep>local_path="../../tests/data/SharePoint User Guide.docx"<with_stmt>open(local_path 'rb')<as>f<block_start>file_content=f.read()<block_end>file_name=os.path.basename(local_path)<line_sep>target_file=target_drive.root.upload(file_name file_content).execute_query()<line_sep>print(f"File {target_file.web_url} has been uploaded")<line_sep>
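# Added follow-up sketch: fetch the uploaded file back. Assumes the library exposes
# DriveItem.download(file_object); adjust if your client version differs.
download_path = os.path.join("downloads", file_name)
os.makedirs(os.path.dirname(download_path), exist_ok=True)
with open(download_path, 'wb') as local_file:
    target_file.download(local_file).execute_query()
print(f"File has been downloaded to {download_path}")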
|
<import_from_stmt>veros.diagnostics.api create_default_diagnostics initialize diagnose output# noqa: F401
|
__________________________________________________________________________________________________<line_sep>56ms<class_stmt>Solution<block_start><def_stmt>intToRoman self num:int<arrow>str<block_start>res=''<line_sep><return>self.toPartRom(num<floordiv>1000 'M--')+self.toPartRom((num%1000)<floordiv>100 'CDM')+self.toPartRom((num%100)<floordiv>10 'XLC')+self.toPartRom(num%10 'IVX')<block_end><def_stmt>toPartRom self n:int subs:str<arrow>str<block_start><if_stmt>n<eq>0<block_start><return>''<block_end><if_stmt>n<le>3<block_start><return>subs[0]<times>n<block_end><if_stmt>n<eq>4<block_start><return>subs[0]+subs[1]<block_end><if_stmt>n<le>8<block_start><return>subs[1]+subs[0]<times>(n-5)<block_end><if_stmt>n<eq>9<block_start><return>subs[0]+subs[2]<block_end><block_end><block_end>__________________________________________________________________________________________________<line_sep>60ms<class_stmt>Solution<block_start><def_stmt>intToRoman self num:int<arrow>str<block_start>memo={1000:'M' 500:'D' 100:'C' 50:'L' 10:'X' 5:'V' 1:'I'}<line_sep>res=[]<for_stmt>it [1000 100 10 1]##
<block_start>a=num<floordiv>it<line_sep>num=num%it<if_stmt>a<eq>9<block_start>res.append(memo[it])<line_sep>res.append(memo[it<times>10])<block_end><elif_stmt>a<eq>4<block_start>res.append(memo[it])<line_sep>res.append(memo[it<times>5])<block_end><elif_stmt>a<eq>5<block_start>res.append(memo[it<times>5])<block_end><elif_stmt>a<l>4<block_start>res<augadd>[memo[it]]<times>a<block_end><else_stmt><block_start>res.append(memo[it<times>5])<line_sep>res<augadd>[memo[it]]<times>(a-5)<block_end><block_end><return>''.join(res)<block_end><block_end>__________________________________________________________________________________________________<line_sep>64ms<class_stmt>Solution<block_start><def_stmt>intToRoman self num:int<arrow>str#table = {"IV" : 4, "IX" : 9, "XL" : 40, "XC" : 90, "CD" : 400, "CM" : 900, "I" : 1, "V" : 5, "X" : 10, "L" : 50, "C" : 100, "D" : 500, "M" : 1000}
<block_start>table={1:"I" 5:"V" 10:"X" 50:"L" 100:"C" 500:"D" 1000:"M" 4:"IV" 9:"IX" 40:"XL" 90:"XC" 400:"CD" 900:"CM"}<line_sep>vals=[1000 900 500 400 100 90 50 40 10 9 5 4 1]<line_sep>solution=[]<for_stmt>v vals<block_start><while_stmt>num-v<ge>0<block_start>num<augsub>v<line_sep>solution.append(table[v])<block_end><block_end><return>''.join(solution)<block_end><block_end>__________________________________________________________________________________________________<line_sep>12352 kb<class_stmt>Solution<block_start><def_stmt>intToRoman self num:'int'<arrow>'str'<block_start>values=[1000 900 500 400 100 90 50 40 10 9 5 4 1]<line_sep>symbols=['M' 'CM' 'D' 'CD' 'C' 'XC' 'L' 'XL' 'X' 'IX' 'V' 'IV' 'I']<line_sep>result=''<for_stmt>symbol,value zip(symbols values)<block_start>result<augadd>symbol<times>(num<floordiv>value)<line_sep>num<augmod>value<block_end><return>result<block_end><block_end>__________________________________________________________________________________________________<line_sep>12392 kb<class_stmt>Solution<block_start><def_stmt>intToRoman self num:'int'<arrow>'str'<block_start>result=[]<line_sep>roman={0:'' 1:'I' 2:'II' 3:'III' 4:'IV' 5:'V' 6:'VI' 7:'VII' 8:'VIII' 9:'IX' 10:'X' 20:'XX' 30:'XXX' 40:'XL' 50:'L' 60:'LX' 70:'LXX' 80:'LXXX' 90:'XC' 100:'C' 200:'CC' 300:'CCC' 400:'CD' 500:'D' 600:'DC' 700:'DCC' 800:'DCCC' 900:'CM' 1000:'M' 2000:'MM' 3000:'MMM'}<line_sep>thousands=num<floordiv>1000<times>1000<line_sep>hundreds=(num-thousands)<floordiv>100<times>100<line_sep>tens=(num-thousands-hundreds)<floordiv>10<times>10<line_sep>ones=(num-thousands-hundreds-tens)<line_sep>print("thousands: {}".format(thousands) "hundreds: {}".format(hundreds) "tens: {}".format(tens) "ones: {}".format(ones) sep='\n')<line_sep>result<augadd>(roman[thousands]+roman[hundreds]+roman[tens]+roman[ones])<line_sep><return>''.join(result)<block_end><block_end>__________________________________________________________________________________________________<line_sep>
|
# Generated by Django 2.1.4 on 2019-01-28 07:15
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('api' '0001_initial') ]<line_sep>operations=[migrations.RenameModel(old_name='W12scan_domains' new_name='domains' ) migrations.RenameModel(old_name='W12scan_ips' new_name='ips' ) migrations.RenameModel(old_name='W12scan_properly' new_name='properly' ) ]<block_end>
|
<import_stmt>tensorflow<as>tf<import_from_stmt>detext.layers.embedding_layer create_embedding_layer<import_from_stmt>detext.utils.parsing_utils InputFtrType InternalFtrType<line_sep>DEFAULT_MIN_LEN=1<line_sep>DEFAULT_MAX_LEN=100<class_stmt>IdEmbedLayer(tf.keras.layers.Layer)<block_start>""" ID embedding layer"""<def_stmt>__init__ self num_id_fields embedding_layer_param embedding_hub_url_for_id_ftr<block_start>""" Initializes the layer
For more details on parameters, check args.py
"""<line_sep>super(IdEmbedLayer self).__init__()<line_sep>self._num_id_fields=num_id_fields<line_sep>self.min_len=DEFAULT_MIN_LEN<line_sep>self.max_len=DEFAULT_MAX_LEN<line_sep>self.num_cls_sep=0<if_stmt>num_id_fields<block_start>self.embedding=create_embedding_layer(embedding_layer_param embedding_hub_url_for_id_ftr)<line_sep>self.id_ftr_size=self.embedding.num_units()<block_end><block_end><def_stmt>call self inputs **kwargs<block_start>""" Applies ID embedding lookup and summation on document and user fields
:param inputs: Dict A mapping that contains the following key:
doc_id_fields: list(Tensor(dtype=string)) List of document fields. Each has shape=[batch_size, max_group_size]
user_id_fields: list(Tensor(dtype=string)) List of user fields. Each has shape=[batch_size]
:return: doc_ftrs, user_ftrs
"""<line_sep>doc_id_fields=inputs.get(InputFtrType.DOC_ID_COLUMN_NAMES <none>)<line_sep>user_id_fields=inputs.get(InputFtrType.USER_ID_COLUMN_NAMES <none>)<if_stmt>self._num_id_fields<eq>0<block_start><assert_stmt>doc_id_fields<is><none><and>user_id_fields<is><none> "Document ID fields and user ID fields must be None when there's no id field"<block_end>user_ftrs=self.apply_embed_on_user_id(user_id_fields)<if>user_id_fields<is><not><none><else><none><line_sep>doc_ftrs=self.apply_embed_on_doc_id(doc_id_fields)<if>doc_id_fields<is><not><none><else><none><line_sep><return>doc_ftrs user_ftrs<block_end><def_stmt>apply_embedding self inputs<block_start>"""Applies embedding on give inputs
:param inputs Tensor(dtype=string) Shape=[batch_size]
:return Tensor(dtype=float32) Shape=[batch_size, num_units_for_id_ftr]
"""<line_sep>embedding_result=self.embedding({InternalFtrType.SENTENCES:inputs InternalFtrType.NUM_CLS:self.num_cls_sep InternalFtrType.NUM_SEP:self.num_cls_sep InternalFtrType.MIN_LEN:self.min_len InternalFtrType.MAX_LEN:self.max_len })<line_sep>seq_length=embedding_result[InternalFtrType.LENGTH]<line_sep>max_seq_len=tf.math.reduce_max(seq_length)<line_sep>seq_mask=tf.expand_dims(tf.sequence_mask(seq_length max_seq_len dtype=tf.float32) axis=-1)<line_sep>seq_length=tf.expand_dims(tf.cast(seq_length dtype=tf.dtypes.float32) axis=-1)<line_sep>user_id_embeddings=embedding_result[InternalFtrType.EMBEDDED]<line_sep>sum_user_id_embedding=tf.reduce_sum(input_tensor=user_id_embeddings<times>seq_mask axis=1)<line_sep># [batch_size, num_units_for_id_ftr]
user_id_avg_embedding=tf.math.divide_no_nan(sum_user_id_embedding seq_length)# [batch_size, num_units_for_id_ftr]
<return>user_id_avg_embedding<block_end><def_stmt>apply_embed_on_user_id self user_id_fields<block_start>"""Applies embedding lookup and averaging for user id features
:return Tensor Shape=[batch_size, num_user_id_fields, num_units_for_id_ftr]
"""<line_sep>user_ftrs=[]<for_stmt>i,user_field enumerate(user_id_fields)<block_start>user_id_avg_embedding=self.apply_embedding(user_field)<line_sep>user_ftrs.append(user_id_avg_embedding)<block_end><return>tf.stack(user_ftrs axis=1)<block_end><def_stmt>apply_embed_on_doc_id self doc_id_fields<block_start>"""Applies embedding lookup and averaging for doc id features
:return Tensor Shape=[batch_size, max_group_size, num_doc_id_fields, num_units_for_id_ftr]
"""<line_sep>doc_ftrs=[]<for_stmt>i,doc_field enumerate(doc_id_fields)<block_start>doc_field_shape=tf.shape(doc_field)<line_sep>reshape_doc_field=tf.reshape(doc_field shape=[doc_field_shape[0]<times>doc_field_shape[1]])<line_sep>doc_id_avg_embedding=self.apply_embedding(reshape_doc_field)<line_sep>doc_id_avg_embedding=tf.reshape(doc_id_avg_embedding shape=[doc_field_shape[0] doc_field_shape[1] self.id_ftr_size])<line_sep>doc_ftrs.append(doc_id_avg_embedding)<block_end><return>tf.stack(doc_ftrs axis=2)<block_end><block_end>
|
<import_stmt>math<import_from_stmt>pyecharts options<as>opts<import_from_stmt>pyecharts.charts Polar<line_sep>data=[]<for_stmt>i range(361)<block_start>t=i/180<times>math.pi<line_sep>r=math.sin(2<times>t)<times>math.cos(2<times>t)<line_sep>data.append([r i])<block_end>c=(Polar().add_schema(angleaxis_opts=opts.AngleAxisOpts(start_angle=0 min_=0)).add("flower" data label_opts=opts.LabelOpts(is_show=<false>)).set_global_opts(title_opts=opts.TitleOpts(title="Polar-Flower")).render("polar_flower.html"))<line_sep>
|
"""
Contains :ref:`Gunicorn configuration settings <gunicorn:settings>` and
hook functions.
"""<line_sep># Disable keep-alive
keepalive=0<def_stmt>post_worker_init worker<block_start>worker.wsgi(<none> <none>)<block_end><def_stmt>worker_exit server worker<block_start><import_from_stmt>ichnaea.webapp.app worker_exit<line_sep>worker_exit(server worker)<block_end>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>StringIO<import_stmt>bayeslite.grammar<as>grammar<import_stmt>bayeslite.plex<as>Plex<import_from_stmt>bayeslite.util casefold<line_sep>'''
grep -o 'K_[A-Z][A-Z0-9_]*' < grammar.y | sort -u | awk '
{
sub("^K_", "", $1)
# All keywords are US-ASCII, so tolower is the same as casefold.
printf(" \"%s\": grammar.K_%s,\n", tolower($1), $1)
}'
'''<line_sep>keywords={"accuracy":grammar.K_ACCURACY "add":grammar.K_ADD "all":grammar.K_ALL "alter":grammar.K_ALTER "analyze":grammar.K_ANALYZE "and":grammar.K_AND "as":grammar.K_AS "asc":grammar.K_ASC "begin":grammar.K_BEGIN "between":grammar.K_BETWEEN "btable":grammar.K_BTABLE "by":grammar.K_BY "case":grammar.K_CASE "cast":grammar.K_CAST "checkpoint":grammar.K_CHECKPOINT "collate":grammar.K_COLLATE "column":grammar.K_COLUMN "columns":grammar.K_COLUMNS "commit":grammar.K_COMMIT "conf":grammar.K_CONF "confidence":grammar.K_CONFIDENCE "context":grammar.K_CONTEXT "correlation":grammar.K_CORRELATION "create":grammar.K_CREATE "default":grammar.K_DEFAULT "density":grammar.K_DENSITY "dependence":grammar.K_DEPENDENCE "desc":grammar.K_DESC "distinct":grammar.K_DISTINCT "drop":grammar.K_DROP "else":grammar.K_ELSE "end":grammar.K_END "escape":grammar.K_ESCAPE "estimate":grammar.K_ESTIMATE "existing":grammar.K_EXISTING "exists":grammar.K_EXISTS "explicit":grammar.K_EXPLICIT "for":grammar.K_FOR "from":grammar.K_FROM "generator":grammar.K_GENERATOR "given":grammar.K_GIVEN "glob":grammar.K_GLOB "group":grammar.K_GROUP "guess":grammar.K_GUESS "having":grammar.K_HAVING "hypothetical":grammar.K_HYPOTHETICAL "if":grammar.K_IF "ignore":grammar.K_IGNORE "in":grammar.K_IN "infer":grammar.K_INFER "information":grammar.K_INFORMATION "initialize":grammar.K_INITIALIZE "is":grammar.K_IS "isnull":grammar.K_ISNULL "iteration":grammar.K_ITERATION "iterations":grammar.K_ITERATIONS "latent":grammar.K_LATENT "like":grammar.K_LIKE "limit":grammar.K_LIMIT "match":grammar.K_MATCH "minute":grammar.K_MINUTE "minutes":grammar.K_MINUTES "model":grammar.K_MODEL "modeled":grammar.K_MODELED "modelled":grammar.K_MODELLED "models":grammar.K_MODELS "mutual":grammar.K_MUTUAL "not":grammar.K_NOT "notnull":grammar.K_NOTNULL "null":grammar.K_NULL "of":grammar.K_OF "offset":grammar.K_OFFSET "or":grammar.K_OR "order":grammar.K_ORDER "pairwise":grammar.K_PAIRWISE "population":grammar.K_POPULATION "predict":grammar.K_PREDICT "predictive":grammar.K_PREDICTIVE "probability":grammar.K_PROBABILITY "pvalue":grammar.K_PVALUE "regexp":grammar.K_REGEXP "regress":grammar.K_REGRESS "relevance":grammar.K_RELEVANCE "rename":grammar.K_RENAME "rollback":grammar.K_ROLLBACK "row":grammar.K_ROW "rows":grammar.K_ROWS "samples":grammar.K_SAMPLES "schema":grammar.K_SCHEMA "second":grammar.K_SECOND "seconds":grammar.K_SECONDS "select":grammar.K_SELECT "set":grammar.K_SET "similarity":grammar.K_SIMILARITY "simulate":grammar.K_SIMULATE "stattype":grammar.K_STATTYPE "stattypes":grammar.K_STATTYPES "table":grammar.K_TABLE "temp":grammar.K_TEMP "temporary":grammar.K_TEMPORARY "the":grammar.K_THE "then":grammar.K_THEN "to":grammar.K_TO "unset":grammar.K_UNSET "using":grammar.K_USING "value":grammar.K_VALUE "values":grammar.K_VALUES "variable":grammar.K_VARIABLE "variables":grammar.K_VARIABLES "when":grammar.K_WHEN "where":grammar.K_WHERE "with":grammar.K_WITH "within":grammar.K_WITHIN }<def_stmt>scan_name _scanner text<block_start><return>keywords.get(text)<or>keywords.get(casefold(text))<or>grammar.L_NAME<line_sep><block_end><def_stmt>scan_integer scanner text<block_start>scanner.produce(grammar.L_INTEGER int(text 10))<block_end><def_stmt>scan_float scanner text# XXX Consider a system-independent representation of floats which
# we can pass through to the SQL engine. (E.g., for the benefit
# of SQLite4 which will use decimal floating-point arithmetic
# instead of binary floating-point arithmetic.)
<block_start>scanner.produce(grammar.L_FLOAT float(text))<block_end><def_stmt>scan_numpar_next scanner text# Numbered parameters are 1-indexed.
<block_start>scanner.n_numpar<augadd>1<line_sep>scanner.produce(grammar.L_NUMPAR scanner.n_numpar)<block_end><def_stmt>scan_numpar scanner text<block_start><assert_stmt>text[0]<eq>'?'<if_stmt>20<l>len(text)# 2^64 < 10^20
<block_start>scan_bad(scanner text)<block_end><else_stmt><block_start>n=int(text[1:])<if_stmt>n<eq>0# Numbered parameters are 1-indexed.
<block_start>scanner.produce(-1 text)<block_end><else_stmt><block_start>scanner.n_numpar=max(n scanner.n_numpar)<line_sep>scanner.produce(grammar.L_NUMPAR n)<block_end><block_end><block_end><def_stmt>scan_nampar scanner text<block_start>text=casefold(text)<line_sep>n=<none><if_stmt>text<in>scanner.nampar_map<block_start>n=scanner.nampar_map[text]<block_end><else_stmt># Numbered parameters are 1-indexed.
<block_start>scanner.n_numpar<augadd>1<line_sep>n=scanner.n_numpar<line_sep>scanner.nampar_map[text]=n<block_end>scanner.produce(grammar.L_NAMPAR (n text))<block_end><def_stmt>scan_bad scanner text<block_start>scanner.produce(-1 text)<block_end># error
<def_stmt>scan_qname_start scanner text<block_start><assert_stmt>text<eq>'"'<line_sep>scan_quoted_start(scanner text "QNAME")<block_end><def_stmt>scan_qname_end scanner text<block_start>scan_quoted_end(scanner text grammar.L_NAME)<block_end><def_stmt>scan_string_start scanner text<block_start><assert_stmt>text<eq>"'"<line_sep>scan_quoted_start(scanner text "STRING")<block_end><def_stmt>scan_string_end scanner text<block_start>scan_quoted_end(scanner text grammar.L_STRING)<block_end><def_stmt>scan_quoted_start scanner text state<block_start><assert_stmt>scanner.stringio<is><none><assert_stmt>scanner.stringquote<is><none><line_sep>scanner.stringio=StringIO.StringIO()<line_sep>scanner.begin(state)<block_end><def_stmt>scan_quoted_text scanner text<block_start><assert_stmt>scanner.stringio<is><not><none><line_sep>scanner.stringio.write(text)<block_end><def_stmt>scan_quoted_quote scanner text<block_start><assert_stmt>scanner.stringio<is><not><none><assert_stmt>text[0]<eq>text[1]<line_sep>scanner.stringio.write(text[0])<block_end><def_stmt>scan_quoted_end scanner text token<block_start><assert_stmt>scanner.stringio<is><not><none><line_sep>string=scanner.stringio.getvalue()<line_sep>scanner.stringio.close()<line_sep>scanner.stringio=<none><line_sep>scanner.produce(token string)<line_sep>scanner.begin("")<block_end><class_stmt>BQLScanner(Plex.Scanner)<block_start>line_comment=Plex.Str("--")+Plex.Rep(Plex.AnyBut("\n"))<line_sep>whitespace=Plex.Any("\f\n\r\t ")<line_sep># XXX Support non-US-ASCII Unicode text.
letter=Plex.Range("azAZ")<line_sep>digit=Plex.Range("09")<line_sep>digits=Plex.Rep(digit)<line_sep>digits1=Plex.Rep1(digit)<line_sep>hexit=digit|Plex.Range("afAF")<line_sep>hexits1=Plex.Rep1(hexit)<line_sep>integer_dec=digits1<line_sep>integer_hex=Plex.Str("0x" "0X")+hexits1<line_sep>dot=Plex.Str('.')<line_sep>intfrac=digits1+dot+digits<line_sep>fraconly=dot+digits1<line_sep>optsign=Plex.Opt(Plex.Any('+-'))<line_sep>expmark=Plex.Any('eE')<line_sep>exponent=expmark+optsign+digits1<line_sep>optexp=Plex.Opt(exponent)<line_sep>float_dec=((intfrac|fraconly)+optexp)|(digits1+exponent)<line_sep>name_special=Plex.Any("_$")<line_sep>name=(letter|name_special)+Plex.Rep(letter|digit|name_special)<line_sep>lexicon=Plex.Lexicon([(whitespace Plex.IGNORE) (line_comment Plex.IGNORE) (Plex.Str(";") grammar.T_SEMI) (Plex.Str("{") grammar.T_LCURLY) (Plex.Str("}") grammar.T_RCURLY) (Plex.Str("(") grammar.T_LROUND) (Plex.Str(")") grammar.T_RROUND) (Plex.Str("+") grammar.T_PLUS) (Plex.Str("-") grammar.T_MINUS) (Plex.Str("*") grammar.T_STAR) (Plex.Str("/") grammar.T_SLASH) (Plex.Str("%") grammar.T_PERCENT) (Plex.Str("=") grammar.T_EQ) (Plex.Str("==") grammar.T_EQ) (Plex.Str("<") grammar.T_LT) (Plex.Str("<>") grammar.T_NEQ) (Plex.Str("<=") grammar.T_LEQ) (Plex.Str(">") grammar.T_GT) (Plex.Str(">=") grammar.T_GEQ) (Plex.Str("<<") grammar.T_LSHIFT) (Plex.Str(">>") grammar.T_RSHIFT) (Plex.Str("!=") grammar.T_NEQ) (Plex.Str("|") grammar.T_BITIOR) (Plex.Str("||") grammar.T_CONCAT) (Plex.Str(",") grammar.T_COMMA) (Plex.Str("&") grammar.T_BITAND) (Plex.Str("~") grammar.T_BITNOT) (Plex.Str(".") grammar.T_DOT) (Plex.Str("?") scan_numpar_next) (Plex.Str("?")+integer_dec scan_numpar) (Plex.Str(":")+name scan_nampar) (Plex.Str("@")+name scan_nampar) (Plex.Str("$")+name scan_nampar) (Plex.Str("'") scan_string_start) (Plex.Str('"') scan_qname_start) (name scan_name) (integer_dec scan_integer) (integer_hex scan_integer) (float_dec scan_float) (integer_dec+name scan_bad) (integer_hex+name scan_bad) (float_dec+name scan_bad) (Plex.AnyChar scan_bad) Plex.State("STRING" [(Plex.Str("'") scan_string_end) (Plex.Str("''") scan_quoted_quote) (Plex.Rep1(Plex.AnyBut("'")) scan_quoted_text) ]) Plex.State("QNAME" [(Plex.Str('"') scan_qname_end) (Plex.Str('""') scan_quoted_quote) (Plex.Rep1(Plex.AnyBut('"')) scan_quoted_text) ]) ])<def_stmt>__init__ self f context<block_start>Plex.Scanner.__init__(self self.lexicon f context)<line_sep>self.stringio=<none><line_sep>self.stringquote=<none><line_sep>self.n_numpar=0<line_sep>self.nampar_map={}<block_end><def_stmt>produce self token value=<none><block_start><if_stmt>token<is><none># EOF
<block_start>token=0<block_end>Plex.Scanner.produce(self token value)<block_end><block_end>
|
<import_from_stmt>setuptools setup<line_sep>setup(name='swapy' version='0.2.2' description='Easy and modular web development' author='<NAME>' author_email='<EMAIL>' url='https://github.com/danieldaeschle/swapy' packages=['swapy'] install_requires=['werkzeug' 'jinja2'] license='MIT')<line_sep>
|
<import_stmt>sys<import_stmt>cascadetoml<import_stmt>pathlib<import_stmt>typer<import_from_stmt>jinja2 Template<def_stmt>main input_template:pathlib.Path output_path:pathlib.Path<block_start>flashes=cascadetoml.filter_toml(pathlib.Path("../../data/nvm.toml") [])<line_sep>template=Template(input_template.read_text())<line_sep>settings={"nvms":[]}<for_stmt>flash flashes["nvm"]<block_start><if_stmt>"sku"<not><in>flash<or>flash["sku"]<eq>flash["manufacturer"]<block_start><continue><block_end>settings["nvms"].append(dict(flash))<block_end>output_path.write_text(template.render(settings))<block_end><if_stmt>__name__<eq>"__main__"<block_start>typer.run(main)<block_end>
|
<import_stmt>avalon.api<import_stmt>avalon.nuke<line_sep>avalon.api.install(avalon.nuke)<line_sep>
|
<import_from_stmt>django.db models<import_from_stmt>django.db.models Q<class_stmt>EntryManager(models.Manager)# Includes ONLY the PUBLISHED entries by NON-NOVICE authors
<block_start><def_stmt>get_queryset self<block_start><return>super().get_queryset().exclude(Q(is_draft=<true>)|Q(author__is_novice=<true>))<block_end><block_end><class_stmt>EntryManagerAll(models.Manager)# Includes ALL entries (entries by novices, drafts)
<block_start><pass><block_end><class_stmt>EntryManagerOnlyPublished(models.Manager)# Includes ONLY the PUBLISHED entries (entries by NOVICE users still visible)
<block_start><def_stmt>get_queryset self<block_start><return>super().get_queryset().exclude(is_draft=<true>)<block_end><block_end>
|
<def_stmt>foo bar<block_start><return>bar+1<block_end>
|
"""Initializers.
Functions to initialize posterior distribution variables.
* :func:`.xavier` - Xavier initializer
* :func:`.scale_xavier` - Xavier initializer scaled for scale parameters
* :func:`.pos_xavier` - positive-only initializer
----------
"""<import_stmt>numpy<as>np<import_from_stmt>probflow.utils.settings get_backend get_datatype<def_stmt>xavier shape<block_start>"""Xavier initializer"""<line_sep>scale=np.sqrt(2/sum(shape))<if_stmt>get_backend()<eq>"pytorch"# TODO: use truncated normal for torch
<block_start><import_stmt>torch<line_sep><return>torch.randn(shape dtype=get_datatype())<times>scale<block_end><else_stmt><block_start><import_stmt>tensorflow<as>tf<line_sep><return>tf.random.truncated_normal(shape mean=0.0 stddev=scale dtype=get_datatype())<block_end><block_end><def_stmt>scale_xavier shape<block_start>"""Xavier initializer for scale variables"""<line_sep>vals=xavier(shape)<if_stmt>get_backend()<eq>"pytorch"<block_start><import_stmt>torch<line_sep>numel=torch.prod(torch.Tensor(shape))<line_sep><return>vals+2-2<times>torch.log(numel)/np.log(10.0)<block_end><else_stmt><block_start><import_stmt>tensorflow<as>tf<line_sep>numel=float(tf.reduce_prod(shape))<line_sep><return>vals+2-2<times>tf.math.log(numel)/tf.math.log(10.0)<block_end><block_end><def_stmt>pos_xavier shape<block_start>"""Xavier initializer for positive variables"""<line_sep>vals=xavier(shape)<if_stmt>get_backend()<eq>"pytorch"<block_start><import_stmt>torch<line_sep>numel=torch.prod(torch.Tensor(shape))<line_sep><return>vals+torch.log(numel)/np.log(10.0)<block_end><else_stmt><block_start><import_stmt>tensorflow<as>tf<line_sep>numel=float(tf.reduce_prod(shape))<line_sep><return>vals+tf.math.log(numel)/tf.math.log(10.0)<block_end><block_end><def_stmt>full_of val<block_start>"""Get initializer which returns tensor full of single value"""<import_stmt>probflow.utils.ops<as>O<def_stmt>init shape<block_start><return>val<times>O.ones(shape)<block_end><return>init<block_end>
|
<import_from_stmt>django.db.models *<class_stmt>Page(Model)<block_start>name=CharField(max_length=50)<line_sep>text=TextField()<def_stmt>__unicode__ self<block_start><return>str(self.text)<block_end><block_end>
|
<import_stmt>cocotb<import_from_stmt>cocotb.triggers Timer<line_sep>@cocotb.test()<async_keyword><def_stmt>test_in_vect_packed dut<block_start>test_value=0x5<line_sep>dut.in_vect_packed.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_vect_packed.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_vect_unpacked dut<block_start>test_value=[0x1 0x0 0x1]<line_sep>dut.in_vect_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_vect_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_arr dut<block_start>test_value=0x5<line_sep>dut.in_arr.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_arr.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_2d_vect_packed_packed dut<block_start>test_value=(0x5<lshift>6)|(0x5<lshift>3)|0x5<line_sep>dut.in_2d_vect_packed_packed.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_2d_vect_packed_packed.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_2d_vect_packed_unpacked dut<block_start>test_value=[0x5 0x5 0x5]<line_sep>dut.in_2d_vect_packed_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_2d_vect_packed_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_2d_vect_unpacked_unpacked dut<block_start>test_value=3<times>[[0x1 0x0 0x1]]<line_sep>dut.in_2d_vect_unpacked_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_2d_vect_unpacked_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_arr_packed dut<block_start>test_value=365<line_sep>dut.in_arr_packed.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_arr_packed.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_arr_unpacked dut<block_start>test_value=[0x5 0x5 0x5]<line_sep>dut.in_arr_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_arr_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_2d_arr dut<block_start>test_value=365<line_sep>dut.in_2d_arr.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_2d_arr.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_vect_packed_packed_packed dut<block_start>test_value=95869805<line_sep>dut.in_vect_packed_packed_packed.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_vect_packed_packed_packed.value<eq>test_value<block_end># Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605)
@cocotb.test(expect_error=IndexError<if>cocotb.LANGUAGE<eq>"verilog"<and>cocotb.SIM_NAME.lower().startswith("modelsim")<else>())<async_keyword><def_stmt>test_in_vect_packed_packed_unpacked dut<block_start>test_value=[365 365 365]<line_sep>dut.in_vect_packed_packed_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_vect_packed_packed_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_vect_packed_unpacked_unpacked dut<block_start>test_value=3<times>[3<times>[5]]<line_sep>dut.in_vect_packed_unpacked_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_vect_packed_unpacked_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_vect_unpacked_unpacked_unpacked dut<block_start>test_value=3<times>[3<times>[[1 0 1]]]<line_sep>dut.in_vect_unpacked_unpacked_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_vect_unpacked_unpacked_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_arr_packed_packed dut<block_start>test_value=(365<lshift>18)|(365<lshift>9)|(365)<line_sep>dut.in_arr_packed_packed.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_arr_packed_packed.value<eq>test_value<block_end># Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605)
@cocotb.test(expect_error=IndexError<if>cocotb.LANGUAGE<eq>"verilog"<and>cocotb.SIM_NAME.lower().startswith("modelsim")<else>())<async_keyword><def_stmt>test_in_arr_packed_unpacked dut<block_start>test_value=[365 365 365]<line_sep>dut.in_arr_packed_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_arr_packed_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_arr_unpacked_unpacked dut<block_start>test_value=3<times>[3<times>[5]]<line_sep>dut.in_arr_unpacked_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_arr_unpacked_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_2d_arr_packed dut<block_start>test_value=(365<lshift>18)|(365<lshift>9)|(365)<line_sep>dut.in_2d_arr_packed.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_2d_arr_packed.value<eq>test_value<block_end># Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605)
@cocotb.test(expect_error=IndexError<if>cocotb.LANGUAGE<eq>"verilog"<and>cocotb.SIM_NAME.lower().startswith("modelsim")<else>())<async_keyword><def_stmt>test_in_2d_arr_unpacked dut<block_start>test_value=[365 365 365]<line_sep>dut.in_2d_arr_unpacked.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_2d_arr_unpacked.value<eq>test_value<block_end>@cocotb.test()<async_keyword><def_stmt>test_in_3d_arr dut<block_start>test_value=(365<lshift>18)|(365<lshift>9)|(365)<line_sep>dut.in_3d_arr.value=test_value<line_sep><await>Timer(1 "ns")<assert_stmt>dut.out_3d_arr.value<eq>test_value<block_end>
|
<import_stmt>related<line_sep>@related.immutable<class_stmt>ImageOptions(object)<block_start>registry=related.URLField()<line_sep>email=related.StringField()<block_end><def_stmt>test_image_options <block_start>options=ImageOptions(registry="https://imgur.com/gallery/GAhlfKS" email="<EMAIL>")<assert_stmt>options.registry<assert_stmt>options.email<block_end>
|
<import_from_stmt>. backbone<import_from_stmt>.losses *<import_from_stmt>.single_stage_model *<import_from_stmt>.supervised *<import_from_stmt>.partial_completion_mask *<import_from_stmt>.partial_completion_content *<import_from_stmt>.partial_completion_content_cgan *<line_sep>
|
<import_stmt>unittest<import_from_stmt>pathlib Path<import_stmt>unittest<import_stmt>cryptol<import_from_stmt>cryptol.single_connection *<import_from_stmt>cryptol.bitvector BV<class_stmt>TestEvenMansour(unittest.TestCase)<block_start><def_stmt>test_EvenMansour self<block_start>connect(verify=<false>)<line_sep>load_file(str(Path('tests' 'cryptol' 'test-files' 'examples' 'contrib' 'EvenMansour.cry')))<line_sep>F_10_4=cry_eval('F:[10][4]')<line_sep>self.assertTrue(call('is_a_permutation' F_10_4))<line_sep>Finv_10_4=cry_eval("F':[10][4]")<line_sep>digits=[BV(size=4 value=i)<for>i range(0 10)]<line_sep># ^ the same as: c.eval('[0..9]:[_][4]')
self.assertTrue(call('is_inverse_permutation' digits F_10_4 Finv_10_4))<line_sep>self.assertTrue(check('E_and_D_are_inverses'))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
<import_from_stmt>django.core urlresolvers<import_from_stmt>regressiontests.comment_tests.custom_comments.models CustomComment<import_from_stmt>regressiontests.comment_tests.custom_comments.forms CustomCommentForm<def_stmt>get_model <block_start><return>CustomComment<block_end><def_stmt>get_form <block_start><return>CustomCommentForm<block_end><def_stmt>get_form_target <block_start><return>urlresolvers.reverse("regressiontests.comment_tests.custom_comments.views.custom_submit_comment")<block_end><def_stmt>get_flag_url c<block_start><return>urlresolvers.reverse("regressiontests.comment_tests.custom_comments.views.custom_flag_comment" args=(c.id ))<block_end><def_stmt>get_delete_url c<block_start><return>urlresolvers.reverse("regressiontests.comment_tests.custom_comments.views.custom_delete_comment" args=(c.id ))<block_end><def_stmt>get_approve_url c<block_start><return>urlresolvers.reverse("regressiontests.comment_tests.custom_comments.views.custom_approve_comment" args=(c.id ))<block_end>
|
<import_from_stmt>mayo.session.base SessionBase<class_stmt>Test(SessionBase)<block_start>mode='test'<def_stmt>__init__ self config<block_start>super().__init__(config)<line_sep>self.load_checkpoint(self.config.system.checkpoint.load)<block_end><def_stmt>test self<block_start>todo=list(zip(self.task.names self.task.predictions))<line_sep>results=self.run(todo batch=<true>)<for_stmt>names,predictions results<block_start>self.task.test(names predictions)<block_end><block_end><block_end>
|
""" Unit tests for FIFOBuffer """<import_stmt>nose.tools<import_stmt>numpy<import_stmt>dcase_util<import_from_stmt>dcase_util.containers MetaDataContainer<import_from_stmt>dcase_util.data DataBuffer<def_stmt>test_DataBuffer <block_start>buf=DataBuffer(size=2)<line_sep>nose.tools.eq_(buf.count 0)<line_sep>nose.tools.eq_(buf.full <false>)<line_sep>buf.set(key='key1' data=[1 2 3] meta='metadata1')<line_sep>nose.tools.eq_(buf.count 1)<line_sep>nose.tools.eq_(buf.full <false>)<line_sep>buf.set(key='key2' data=[2 3 4] meta='metadata2')<line_sep>nose.tools.eq_(buf.count 2)<line_sep>nose.tools.eq_(buf.full <true>)<line_sep>item_data,item_meta=buf.get(key='key1')<line_sep>nose.tools.eq_(item_data [1 2 3])<line_sep>nose.tools.eq_(item_meta 'metadata1')<line_sep>item_data,item_meta=buf.get(key='key2')<line_sep>nose.tools.eq_(item_data [2 3 4])<line_sep>nose.tools.eq_(item_meta 'metadata2')<line_sep>buf.set(key='key3' data=[3 4 5] meta='metadata3')<line_sep>item_data,item_meta=buf.get(key='key3')<line_sep>nose.tools.eq_(item_data [3 4 5])<line_sep>nose.tools.eq_(item_meta 'metadata3')<line_sep>nose.tools.eq_(buf.get(key='key4') (<none> <none>))<line_sep>nose.tools.eq_(buf.count 2)<line_sep>buf.clear()<line_sep>nose.tools.eq_(buf.count 0)<block_end><def_stmt>test_log <block_start><with_stmt>dcase_util.utils.DisableLogger()<block_start>DataBuffer(size=2 filename='event_roller.cpickle').log()<block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_stmt>datetime<import_stmt>decimal<import_stmt>re<import_stmt>textwrap<import_from_stmt>. _unittest<as>unittest<import_from_stmt>datatest.differences BaseDifference Missing Extra Invalid Deviation _make_difference NOVALUE <line_sep># FOR TESTING: A minimal subclass of BaseDifference.
# BaseDifference itself should not be instantiated
# directly.
<class_stmt>MinimalDifference(BaseDifference)<block_start><def_stmt>__init__ self *args<block_start>self._args=args<block_end>@property<def_stmt>args self<block_start><return>self._args<block_end><block_end><class_stmt>TestBaseDifference(unittest.TestCase)<block_start><def_stmt>test_instantiation self<block_start>"""BaseDifference should not be instantiated directly.
It should only serve as a superclass for more specific
differences.
"""<line_sep># Subclass should instantiate normally:
subclass_instance=MinimalDifference('A')<line_sep># Base class should raise error.
regex="Can't instantiate abstract class BaseDifference"<with_stmt>self.assertRaisesRegex(TypeError regex)<block_start>base_instance=BaseDifference()<block_end><block_end><def_stmt>test_args self<block_start>"""Args should be tuple of arguments."""<line_sep>diff=MinimalDifference('A')<line_sep>self.assertEqual(diff.args ('A' ))<block_end><def_stmt>test_repr self<block_start>diff=MinimalDifference('A')<line_sep>self.assertEqual(repr(diff) "MinimalDifference('A')")<line_sep>diff=MinimalDifference('A' 'B')<line_sep>self.assertEqual(repr(diff) "MinimalDifference('A', 'B')")<line_sep>diff=MinimalDifference('A' <none>)<line_sep>self.assertEqual(repr(diff) "MinimalDifference('A', None)")<def_stmt>myfunc x<block_start><return><true><block_end>diff=MinimalDifference('A' myfunc)<line_sep>self.assertEqual(repr(diff) "MinimalDifference('A', myfunc)")<class_stmt>MyClass(object)<block_start><pass><block_end>diff=MinimalDifference('A' MyClass)<line_sep>self.assertEqual(repr(diff) "MinimalDifference('A', MyClass)")<block_end><def_stmt>test_numbers_equal self<block_start>first=MinimalDifference(1)<line_sep>second=MinimalDifference(1.0)<line_sep>self.assertEqual(first second)<line_sep>first=MinimalDifference(1)<line_sep>second=MinimalDifference(2)<line_sep>self.assertNotEqual(first second)<block_end><def_stmt>test_string_equal self<block_start>first=MinimalDifference('A')<line_sep>second=MinimalDifference('A')<line_sep>self.assertEqual(first second)<block_end><def_stmt>test_nan_equal self<block_start>"""NaN values should test as equal when part of a difference."""<line_sep>first=MinimalDifference(float('nan'))<line_sep>second=MinimalDifference(float('nan'))<line_sep>self.assertEqual(first second)<line_sep># NaNs nested in a tuple should also test as equal.
first=MinimalDifference(('abc' float('nan')))<line_sep>second=MinimalDifference(('abc' float('nan')))<line_sep>self.assertEqual(first second)<line_sep># Complex numbers, too.
first=MinimalDifference(float('nan'))<line_sep>second=MinimalDifference(complex(float('nan')))<line_sep>self.assertEqual(first second)<block_end><def_stmt>test_comparing_different_types self<block_start>diff=MinimalDifference('X')<line_sep>self.assertNotEqual(diff Exception('X'))<line_sep>self.assertNotEqual(diff <none>)<line_sep>self.assertNotEqual(diff <true>)<line_sep>self.assertNotEqual(diff <false>)<block_end><block_end><class_stmt>TestSubclassRelationship(unittest.TestCase)<block_start><def_stmt>test_subclass self<block_start>self.assertTrue(issubclass(Extra BaseDifference))<line_sep>self.assertTrue(issubclass(Missing BaseDifference))<line_sep>self.assertTrue(issubclass(Invalid BaseDifference))<line_sep>self.assertTrue(issubclass(Deviation BaseDifference))<block_end><block_end><class_stmt>TestInvalid(unittest.TestCase)<block_start><def_stmt>test_repr self<block_start>diff=Invalid('foo')<line_sep>self.assertEqual(repr(diff) "Invalid('foo')")<line_sep>diff=Invalid('foo' 'bar')<line_sep>self.assertEqual(repr(diff) "Invalid('foo', expected='bar')")<line_sep>diff=Invalid('foo' <none>)<line_sep>self.assertEqual(repr(diff) "Invalid('foo', expected=None)")<block_end><def_stmt>test_repr_with_callables self<block_start><def_stmt>myfunc x<block_start><return><true><block_end><class_stmt>MyClass(object)<block_start><pass><block_end>diff=Invalid('foo' myfunc)<line_sep>self.assertEqual(repr(diff) "Invalid('foo', expected=myfunc)")<line_sep>diff=Invalid('foo' MyClass)<line_sep>self.assertEqual(repr(diff) "Invalid('foo', expected=MyClass)")<line_sep>diff=Invalid(myfunc 'bar')<line_sep>self.assertEqual(repr(diff) "Invalid(myfunc, expected='bar')")<line_sep>diff=Invalid(MyClass 'bar')<line_sep>self.assertEqual(repr(diff) "Invalid(MyClass, expected='bar')")<block_end><def_stmt>test_same_values self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>Invalid('foo' 'foo')<block_end><block_end><def_stmt>test_equality_error self<block_start><class_stmt>BadObj(object)<block_start><def_stmt>__eq__ self other<block_start><if_stmt>isinstance(other BadObj)<block_start><return><true><block_end><raise>TypeError('Sudden but inevitable betrayal!')<block_end><block_end>diff=Invalid(BadObj() float)# <- Checks for equality on init.
self.assertEqual(diff.invalid BadObj())<line_sep>self.assertEqual(diff.expected float)<block_end><block_end><class_stmt>TestDeviation(unittest.TestCase)<block_start><def_stmt>test_instantiation self<block_start>Deviation(1 100)# Pass without error.
<with_stmt>self.assertRaises(ValueError)<block_start>Deviation(0 100)<block_end><block_end># Deviation should not be zero.
<def_stmt>test_nonquantitative self<block_start><with_stmt>self.assertRaises(TypeError)<block_start>Deviation(set([3]) set([1 2]))<block_end><block_end><def_stmt>test_repr self<block_start>diff=Deviation(1 100)# Simple positive.
self.assertEqual(repr(diff) "Deviation(+1, 100)")<line_sep>diff=Deviation(-1 100)# Simple negative.
self.assertEqual(repr(diff) "Deviation(-1, 100)")<line_sep>diff=Deviation(float('nan') 100)# None reference.
self.assertEqual(repr(diff) "Deviation(float('nan'), 100)")<block_end><def_stmt>test_repr_with_datetime self<block_start>diff=Deviation(datetime.timedelta(hours=-1) datetime.datetime(1989 2 24 hour=11 minute=30) )<line_sep>expected='Deviation(timedelta(seconds=-3600), datetime(1989, 2, 24, 11, 30))'<line_sep>self.assertEqual(repr(diff) expected)<block_end><def_stmt>test_repr_with_date self<block_start>diff=Deviation(datetime.timedelta(days=1) datetime.date(1989 2 24) )<line_sep>expected='Deviation(timedelta(days=+1), date(1989, 2, 24))'<line_sep>self.assertEqual(repr(diff) expected)<block_end><def_stmt>test_zero_and_empty_value_handling self<block_start>"""Empty values receive special handling."""<line_sep># Expected 0 (pass without error).
Deviation(+5 0)<line_sep>Deviation(-5 0)<line_sep>Deviation(float('nan') 0)<with_stmt>self.assertRaises(ValueError)<block_start>Deviation(0 0)<block_end># Expected numeric value (pass without error).
Deviation(+1 5)<line_sep>Deviation(-1 5)<line_sep>Deviation(float('nan') 5)<line_sep># Expected non-zero, with empty or zero deviation.
<with_stmt>self.assertRaises(ValueError)<block_start>Deviation(0 5)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>Deviation(<none> 5)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>Deviation('' 5)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>Deviation(5 <none>)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>Deviation(5 '')<block_end># NaN handling.
Deviation(float('nan') 0)<line_sep>Deviation(0 float('nan'))<block_end><def_stmt>test_repr_eval self<block_start>diff=Deviation(+1 100)<line_sep>self.assertEqual(diff eval(repr(diff)))<line_sep>diff=Deviation(-1 100)<line_sep>self.assertEqual(diff eval(repr(diff)))<line_sep>diff=Deviation(float('nan') 100)<line_sep>self.assertEqual(diff eval(repr(diff)))<block_end><block_end><class_stmt>TestImmutability(unittest.TestCase)<block_start>"""Differences should act like an immutable objects."""<def_stmt>test_missing self<block_start>diff=Missing('foo')<with_stmt>self.assertRaises(AttributeError)<block_start>diff.attr=('bar' )<block_end><with_stmt>self.assertRaises(AttributeError)<block_start>diff.new_attribute='baz'<block_end><block_end><def_stmt>test_extra self<block_start>diff=Extra('foo')<with_stmt>self.assertRaises(AttributeError)<block_start>diff.attr=('bar' )<block_end><with_stmt>self.assertRaises(AttributeError)<block_start>diff.new_attribute='baz'<block_end><block_end><def_stmt>test_invalid self<block_start>diff=Invalid('foo')<with_stmt>self.assertRaises(AttributeError)<block_start>diff.expected='bar'<block_end><with_stmt>self.assertRaises(AttributeError)<block_start>diff.new_attribute='baz'<block_end><block_end><def_stmt>test_deviation self<block_start>diff=Deviation(+1 100)<with_stmt>self.assertRaises(AttributeError)<block_start>diff.expected=101<block_end><with_stmt>self.assertRaises(AttributeError)<block_start>diff.new_attribute=202<block_end><block_end><block_end><class_stmt>TestHashability(unittest.TestCase)<block_start>"""Built-in differences should be hashable (in the same way that
tuples are).
"""<def_stmt>test_hashable self<block_start>"""Differences with hashable *args should be hashable."""<line_sep># Following should all pass without error.
hash(Missing('foo'))<line_sep>hash(Extra('bar'))<line_sep>hash(Invalid('baz'))<line_sep>hash(Invalid('baz' 'qux'))<line_sep>hash(Deviation(-1 10))<block_end><def_stmt>test_unhashable_contents self<block_start>"""The hash behavior of differences should act like tuples do.
When a difference's contents are unhashable, the difference
itself becomes unhashable too.
"""<with_stmt>self.assertRaises(TypeError)<block_start>hash(Missing(['foo']))<block_end><with_stmt>self.assertRaises(TypeError)<block_start>hash(Extra(['bar']))<block_end><with_stmt>self.assertRaises(TypeError)<block_start>hash(Invalid(['baz']))<block_end><with_stmt>self.assertRaises(TypeError)<block_start>hash(Invalid('baz' ['qux']))<block_end><block_end><block_end><class_stmt>TestMakeDifference(unittest.TestCase)<block_start><def_stmt>test_numeric_vs_numeric self<block_start>diff=_make_difference(5 6)<line_sep>self.assertEqual(diff Deviation(-1 6))<block_end><def_stmt>test_decimal_vs_float self<block_start>diff=_make_difference(decimal.Decimal('5') 6.0)<line_sep>self.assertEqual(diff Invalid(decimal.Decimal('5') expected=6.0))<block_end><def_stmt>test_datetime_vs_datetime self<block_start>diff=_make_difference(datetime.datetime(1989 2 24 hour=10 minute=30) datetime.datetime(1989 2 24 hour=11 minute=30) )<line_sep>self.assertEqual(diff Deviation(datetime.timedelta(hours=-1) datetime.datetime(1989 2 24 hour=11 minute=30) ) )<block_end><def_stmt>test_numeric_vs_none self<block_start>diff=_make_difference(5 <none>)<line_sep>self.assertEqual(diff Invalid(5 <none>))<line_sep>diff=_make_difference(0 <none>)<line_sep>self.assertEqual(diff Invalid(0 <none>))<block_end><def_stmt>test_none_vs_numeric self<block_start>diff=_make_difference(<none> 6)<line_sep>self.assertEqual(diff Invalid(<none> 6))<line_sep>diff=_make_difference(<none> 0)<line_sep>self.assertEqual(diff Invalid(<none> 0))<block_end><def_stmt>test_object_vs_object self<block_start>"""Non-numeric comparisons return Invalid type."""<line_sep>diff=_make_difference('a' 'b')<line_sep>self.assertEqual(diff Invalid('a' 'b'))<line_sep>diff=_make_difference(5 'b')<line_sep>self.assertEqual(diff Invalid(5 'b'))<line_sep>diff=_make_difference('a' 6)<line_sep>self.assertEqual(diff Invalid('a' 6))<line_sep>diff=_make_difference(float('nan') 6)<line_sep>self.assertEqual(diff Deviation(float('nan') 6))<line_sep>diff=_make_difference(5 float('nan'))<line_sep>self.assertEqual(diff Deviation(float('nan') float('nan')))<line_sep>fn=<lambda>x:<true><line_sep>diff=_make_difference('a' fn)<line_sep>self.assertEqual(diff Invalid('a' fn))<line_sep>regex=re.compile('^test$')<line_sep>diff=_make_difference('a' regex)<line_sep>self.assertEqual(diff Invalid('a' re.compile('^test$')))<block_end><def_stmt>test_boolean_comparisons self<block_start>"""Boolean differences should not be treated quantitatively."""<line_sep>diff=_make_difference(<false> <true>)<line_sep>self.assertIs(diff.invalid <false>)<line_sep>self.assertIs(diff.expected <true>)<line_sep>diff=_make_difference(<true> <false>)<line_sep>self.assertIs(diff.invalid <true>)<line_sep>self.assertIs(diff.expected <false>)<line_sep>diff=_make_difference(0 <true>)<line_sep>self.assertEqual(diff.invalid 0)<line_sep>self.assertIsNot(diff.invalid <false>)<line_sep>self.assertIs(diff.expected <true>)<line_sep>diff=_make_difference(1 <false>)<line_sep>self.assertEqual(diff.invalid 1)<line_sep>self.assertIsNot(diff.invalid <true>)<line_sep>self.assertIs(diff.expected <false>)<line_sep>diff=_make_difference(<false> 1)<line_sep>self.assertIs(diff.invalid <false>)<line_sep>self.assertEqual(diff.expected 1)<line_sep>self.assertIsNot(diff.expected <true>)<line_sep>diff=_make_difference(<true> 0)<line_sep>self.assertIs(diff.invalid <true>)<line_sep>self.assertEqual(diff.expected 0)<line_sep>self.assertIsNot(diff.expected <false>)<block_end><def_stmt>test_novalue_comparisons self<block_start>diff=_make_difference('a' 
NOVALUE)<line_sep>self.assertEqual(diff Extra('a'))<line_sep>diff=_make_difference(5 NOVALUE)<line_sep>self.assertEqual(diff Extra(5))<line_sep>diff=_make_difference(0 NOVALUE)<line_sep>self.assertEqual(diff Extra(0))<line_sep>diff=_make_difference(NOVALUE 'a')<line_sep>self.assertEqual(diff Missing('a'))<line_sep>diff=_make_difference(NOVALUE 5)<line_sep>self.assertEqual(diff Missing(5))<line_sep>diff=_make_difference(NOVALUE 0)<line_sep>self.assertEqual(diff Missing(0))<block_end><def_stmt>test_show_expected self<block_start>"""If requirement is common it should be omitted from Invalid
difference (but not from Deviation differences).
"""<line_sep>diff=_make_difference('a' 6 show_expected=<true>)<line_sep>self.assertEqual(diff Invalid('a' expected=6))<line_sep>diff=_make_difference('a' 6 show_expected=<false>)<line_sep>self.assertEqual(diff Invalid('a'))<line_sep># Show expected should not effect Missing, Extra, or Deviation:
diff=_make_difference(NOVALUE 6 show_expected=<true>)<line_sep>self.assertEqual(diff Missing(6))<line_sep>diff=_make_difference(NOVALUE 6 show_expected=<false>)<line_sep>self.assertEqual(diff Missing(6))<line_sep>diff=_make_difference(6 NOVALUE show_expected=<true>)<line_sep>self.assertEqual(diff Extra(6))<line_sep>diff=_make_difference(6 NOVALUE show_expected=<false>)<line_sep>self.assertEqual(diff Extra(6))<line_sep>diff=_make_difference(1 2 show_expected=<true>)<line_sep>self.assertEqual(diff Deviation(-1 2))<line_sep>diff=_make_difference(1 2 show_expected=<false>)<line_sep>self.assertEqual(diff Deviation(-1 2))<block_end><def_stmt>test_same self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>diff=_make_difference('a' 'a')<block_end><with_stmt>self.assertRaises(ValueError)<block_start>diff=_make_difference(<none> <none>)<block_end># NaN should work though.
_make_difference(float('nan') float('nan'))<block_end><block_end>
|