content (string, lengths 0 – 1.55M)
---|
# "magictoken" is used for markers as beginning and ending of example text.
<import_stmt>unittest<import_from_stmt>numba.tests.support captured_stdout override_config<class_stmt>DocsLLVMPassTimings(unittest.TestCase)<block_start><def_stmt>test_pass_timings self<block_start><with_stmt>override_config('LLVM_PASS_TIMINGS' <true>)<block_start><with_stmt>captured_stdout()<as>stdout# magictoken.ex_llvm_pass_timings.begin
<block_start><import_stmt>numba<line_sep>@numba.njit<def_stmt>foo n<block_start>c=0<for_stmt>i range(n)<block_start><for_stmt>j range(i)<block_start>c<augadd>j<block_end><block_end><return>c<block_end>foo(10)<line_sep>md=foo.get_metadata(foo.signatures[0])<line_sep>print(md['llvm_pass_timings'])<line_sep># magictoken.ex_llvm_pass_timings.end
<block_end>self.assertIn("Finalize object" stdout.getvalue())<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
<import_stmt>unittest<import_from_stmt>pyowm.alertapi30.enums AlertChannelsEnum OperatorsEnum WeatherParametersEnum<class_stmt>TestAlertChannelsEnum(unittest.TestCase)<block_start><def_stmt>test_items self<block_start>alert_channels_enum=AlertChannelsEnum()<line_sep>self.assertEqual(alert_channels_enum.items() [AlertChannelsEnum.OWM_API_POLLING])<block_end><block_end><class_stmt>TestOperatorsEnum(unittest.TestCase)<block_start><def_stmt>test_items self<block_start>operators_enum=OperatorsEnum()<line_sep>self.assertEqual(sorted(operators_enum.items()) sorted([operators_enum.GREATER_THAN operators_enum.GREATER_THAN_EQUAL operators_enum.LESS_THAN operators_enum.LESS_THAN_EQUAL operators_enum.EQUAL operators_enum.NOT_EQUAL]))<block_end><block_end><class_stmt>TestWeatherParametersEnum(unittest.TestCase)<block_start><def_stmt>test_item self<block_start>weather_parameters_enum=WeatherParametersEnum()<line_sep>self.assertEqual(sorted(weather_parameters_enum.items()) sorted([weather_parameters_enum.CLOUDS weather_parameters_enum.HUMIDITY weather_parameters_enum.PRESSURE weather_parameters_enum.WIND_DIRECTION weather_parameters_enum.WIND_SPEED weather_parameters_enum.TEMPERATURE]))<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
Seasonal Adjustment
==========================================
Robustly estimate and remove trend and periodicity in a noisy timeseries.

Functions
---------
fit_slope -- estimate slope of a timeseries
fit_seasons -- estimate periodicity and seasonal offsets for a timeseries
adjust_trend -- de-trend a timeseries
adjust_seasons -- de-trend and de-seasonalize a timeseries
periodogram -- compute a periodogram of the data
periodogram_peaks -- return a list of intervals containing high-scoring periods

Author
------
<NAME> (<EMAIL>)
"""
from .version import __version__, VERSION
from .trend import fit_trend
from .seasonal import fit_seasons, adjust_seasons, rsquared_cv
from .periodogram import periodogram, periodogram_peaks
|
<import_stmt>json<import_stmt>os<import_stmt>unittest<import_from_stmt>unittest TestCase<import_from_stmt>unittest.mock Mock patch<import_from_stmt>api_batch_create.main lambda_handler<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>test_shared.mock_objects InputTestData TestContext<class_stmt>TestCase(TestCase)<block_start><def_stmt>mock_sagemaker_api_call self operation_name kwarg<block_start><if_stmt>operation_name<eq>"DescribeLabelingJob"<block_start>parsed_response={"Error":{"Code":"500" "Message":"Error Uploading"}}<line_sep><raise>ClientError(parsed_response operation_name)<block_end><block_end>@patch("shared.db.get_batch_metadata")@patch("botocore.client.BaseClient._make_api_call" new=mock_sagemaker_api_call)@patch.dict(os.environ {"DEFAULT_WORKTEAM_ARN":"TEST"})<def_stmt>test_lambda_handler_happyCase self get_batch_metadata_mock# Setup
<block_start>event=Mock()<line_sep>event.get.return_value=json.dumps(InputTestData.create_batch_request)<line_sep>get_batch_metadata_mock.return_value=<none><line_sep># Act
val=lambda_handler(event TestContext())<line_sep># Assert
self.assertEqual(val["statusCode"] 200 "Unexpected status code returned")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_from_stmt>scvelo.preprocessing *# noqa
|
# Copyright (C) 2019 FireEye, Inc. All Rights Reserved.
import os
from io import StringIO

import numpy

import stringsifter.rank_strings as rank_strings

test_strings = ('testing text\n'
                'nagain\n'
                'wheredoesitgo\n'
                'testing text\n'
                'nagain\n'
                'wheredoesitgo\n'
                'testing text\n'
                'nagain\n'
                'wheredoesitgo\n'
                'testing text\n')


def _get_rank_strings_stdoutput(capsys, kwargs):
    rank_strings.main(**kwargs)
    stdout = capsys.readouterr().out
    return stdout.split('\n')[:-1]


def _get_kwargs(input_strings=test_strings, cutoff=None, cutoff_score=numpy.nan,
                scores=False, batch=False):
    return {'input_strings': StringIO(input_strings),
            'cutoff': cutoff,
            'cutoff_score': cutoff_score,
            'scores': scores,
            'batch': batch}


def test_string_length(featurizer):
    test_set = [['', 0], ['foo', 3], ['everybody', 9]]
    for s, true_len in test_set:
        feat_len = featurizer.string_length(s)
        assert feat_len == true_len


def test_default(capsys):
    """
    test default processing flow: # strings in == # strings out
    """
    output_lines = _get_rank_strings_stdoutput(capsys, _get_kwargs())
    assert len(output_lines) == 10


def test_scores(capsys):
    scores_value = True
    output_lines = _get_rank_strings_stdoutput(capsys, _get_kwargs(scores=scores_value))
    split_output_lines = [output_line.split(",") for output_line in output_lines]
    previous_score = numpy.inf
    for output_score, output_string in split_output_lines:
        assert type(output_string) is str
        float_output_score = float(output_score)
        assert type(float_output_score) is float
        assert previous_score >= float_output_score
        previous_score = float_output_score


def test_cutoff(capsys):
    cutoff_value = 5
    output_lines = _get_rank_strings_stdoutput(capsys, _get_kwargs(cutoff=cutoff_value))
    assert len(output_lines) == cutoff_value


def test_cutoff_score(capsys):
    scores_value = True
    cutoff_score_value = 0.0
    output_lines = _get_rank_strings_stdoutput(
        capsys, _get_kwargs(scores=scores_value, cutoff_score=cutoff_score_value))
    split_output_lines = [output_line.split(",") for output_line in output_lines]
    for output_score, output_string in split_output_lines:
        assert float(output_score) >= cutoff_score_value


def test_batch():
    batch_value = 'tests/fixtures/'
    batch_files = [batch_value + batch_file for batch_file in os.listdir(batch_value)]
    output_lines = rank_strings.main(**_get_kwargs(batch=batch_value))
    for batch_file in batch_files:
        ranking_file = batch_file + '.ranked_strings'
        assert os.path.isfile(ranking_file) is True
        os.remove(ranking_file)
|
<import_from_stmt>... utils component_index<def_stmt>get_script_completions cfml_view<block_start>completions=component_index.build_file_completions(cfml_view.view_metadata)[utils.get_setting("cfml_cfc_completions")]<line_sep>completions=[make_completion(completion cfml_view.file_path)<for>completion completions["functions"]]<if_stmt>len(completions)<g>0<block_start><return>cfml_view.CompletionList(completions 0 <false>)<block_end><return><none><block_end><def_stmt>get_dot_completions cfml_view<block_start><if_stmt>len(cfml_view.dot_context)<eq>0<block_start><return><none><block_end><for_stmt>symbol cfml_view.dot_context<block_start><if_stmt><not>symbol.is_function<block_start><if_stmt>symbol.name<eq>"this"<block_start>completions=component_index.build_file_completions(cfml_view.view_metadata)[utils.get_setting("cfml_cfc_completions")]<line_sep>completions=[make_completion(completion cfml_view.file_path)<for>completion completions["functions"]]<line_sep><return>cfml_view.CompletionList(completions 0 <false>)<block_end><if_stmt>len(cfml_view.dot_context)<eq>1<and>symbol.name<eq>"arguments"<block_start>current_function_body=utils.get_current_function_body(cfml_view.view cfml_view.position component_method=<false>)<if_stmt>current_function_body<block_start>function=cfml_view.get_function(current_function_body.begin()-1)<line_sep>meta=cfml_view.get_string_metadata(cfml_view.view.substr(function[2])+"{}")<if_stmt>"functions"<in>meta<and>function[0]<in>meta["functions"]<block_start>args=meta["functions"][function[0]]["meta"]["parameters"]<line_sep>completions=[(arg["name"]+"\targuments" arg["name"])<for>arg args]<line_sep><return>cfml_view.CompletionList(completions 0 <false>)<block_end><block_end><block_end><if_stmt>(symbol.name<eq>"super"<and>cfml_view.project_name<and>cfml_view.view_metadata["extends"])<block_start>comp=component_index.component_index.get_completions_by_dot_path(cfml_view.project_name cfml_view.view_metadata["extends"])<if_stmt><not>comp<and>cfml_view.file_path<block_start>extends_file_path=component_index.component_index.resolve_path(cfml_view.project_name cfml_view.file_path cfml_view.view_metadata["extends"] )<line_sep>comp=component_index.component_index.get_completions_by_file_path(cfml_view.project_name extends_file_path)<block_end><if_stmt>comp<block_start>completions=[(completion.key+"\t"+completion.hint completion.content)<for>completion comp["functions"]]<line_sep><return>cfml_view.CompletionList(completions 0 <false>)<block_end><block_end><block_end><block_end><return><none><block_end><def_stmt>make_completion comp file_path<block_start>hint="this"<if_stmt>len(comp.file_path)<g>0<and>comp.file_path<ne>file_path<block_start>hint=comp.hint<block_end><return>(comp.key+"\t"+hint comp.content)<block_end> |
<import_stmt>click<import_stmt>csv<import_from_stmt>osp.citations.models Text Citation Text_Index<import_from_stmt>peewee fn<line_sep>@click.group()<def_stmt>cli <block_start><pass><block_end>@cli.command()@click.argument('out_file' type=click.File('w'))@click.option('--min_count' default=100)<def_stmt>fuzz out_file min_count<block_start>"""
Write a CSV with title and fuzz.
"""<line_sep>cols=['text_id' 'count' 'fuzz' 'surname' 'title' ]<line_sep>writer=csv.DictWriter(out_file cols)<line_sep>writer.writeheader()<line_sep>count=fn.count(Citation.id)<line_sep>query=(Text.select(Text count).join(Citation).where(Text.display<eq><true>).having(count<g>min_count).group_by(Text.id).naive())<line_sep>texts=list(query)<line_sep># Sort on fuzz, high -> low.
<for_stmt>t sorted(texts key=<lambda>t:t.fuzz reverse=<true>)<block_start>writer.writerow(dict(text_id=t.id count=t.count fuzz=t.fuzz surname=t.surname title=t.title ))<block_end><block_end>@cli.command()@click.argument('out_file' type=click.File('w'))@click.option('--depth' default=1000)<def_stmt>ranks out_file depth<block_start>"""
Write the top N text ranks.
"""<line_sep>cols=['count' 'title' 'author' ]<line_sep>writer=csv.DictWriter(out_file cols)<line_sep>writer.writeheader()<line_sep>ranks=Text_Index.rank_texts()<line_sep>ranks=sorted(ranks key=<lambda>x:x['rank'])<for_stmt>r ranks[:depth]<block_start>text=r['text']<line_sep>writer.writerow(dict(count=text.count title=text.title author=text.authors[0] ))<block_end><block_end> |
# implement samba_tool gpo commands
#
# Copyright <NAME> 2010
# Copyright <NAME> 2011-2012 <<EMAIL>>
#
# based on C implementation by <NAME> and <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
<import_stmt>os<import_stmt>samba.getopt<as>options<import_stmt>ldb<import_from_stmt>samba.auth system_session<import_from_stmt>samba.netcmd Command CommandError Option SuperCommand <import_from_stmt>samba.samdb SamDB<import_from_stmt>samba dsdb<import_from_stmt>samba.dcerpc security<import_from_stmt>samba.ndr ndr_unpack<import_stmt>samba.security<import_stmt>samba.auth<import_from_stmt>samba.auth AUTH_SESSION_INFO_DEFAULT_GROUPS AUTH_SESSION_INFO_AUTHENTICATED AUTH_SESSION_INFO_SIMPLE_PRIVILEGES<import_from_stmt>samba.netcmd.common netcmd_finddc<import_from_stmt>samba policy<import_from_stmt>samba smb<import_stmt>uuid<import_from_stmt>samba.ntacls dsacl2fsacl<import_from_stmt>samba.dcerpc nbt<import_from_stmt>samba.net Net<def_stmt>samdb_connect ctx<block_start>'''make a ldap connection to the server'''<try_stmt><block_start>ctx.samdb=SamDB(url=ctx.url session_info=system_session() credentials=ctx.creds lp=ctx.lp)<block_end><except_stmt>Exception e<block_start><raise>CommandError("LDAP connection to %s failed "%ctx.url e)<block_end><block_end><def_stmt>attr_default msg attrname default<block_start>'''get an attribute from a ldap msg with a default'''<if_stmt>attrname<in>msg<block_start><return>msg[attrname][0]<block_end><return>default<block_end><def_stmt>gpo_flags_string value<block_start>'''return gpo flags string'''<line_sep>flags=policy.get_gpo_flags(value)<if_stmt><not>flags<block_start>ret='NONE'<block_end><else_stmt><block_start>ret=' '.join(flags)<block_end><return>ret<block_end><def_stmt>gplink_options_string value<block_start>'''return gplink options string'''<line_sep>options=policy.get_gplink_options(value)<if_stmt><not>options<block_start>ret='NONE'<block_end><else_stmt><block_start>ret=' '.join(options)<block_end><return>ret<block_end><def_stmt>parse_gplink gplink<block_start>'''parse a gPLink into an array of dn and options'''<line_sep>ret=[]<line_sep>a=gplink.split(']')<for_stmt>g a<block_start><if_stmt><not>g<block_start><continue><block_end>d=g.split(';')<if_stmt>len(d)<ne>2<or><not>d[0].startswith("[LDAP://")<block_start><raise>RuntimeError("Badly formed gPLink '%s'"%g)<block_end>ret.append({'dn':d[0][8:] 'options':int(d[1])})<block_end><return>ret<block_end><def_stmt>encode_gplink gplist<block_start>'''Encode an array of dn and options into gPLink string'''<line_sep>ret=''<for_stmt>g gplist<block_start>ret<augadd>"[LDAP://%s;%d]"%(g['dn'] g['options'])<block_end><return>ret<block_end><def_stmt>dc_url lp creds url=<none> dc=<none><block_start>'''If URL is not specified, return URL for writable DC.
If dc is provided, use that to construct ldap URL'''<if_stmt>url<is><none><block_start><if_stmt>dc<is><none><block_start><try_stmt><block_start>dc=netcmd_finddc(lp creds)<block_end><except_stmt>Exception e<block_start><raise>RuntimeError("Could not find a DC for domain" e)<block_end><block_end>url='ldap://'+dc<block_end><return>url<block_end><def_stmt>get_gpo_dn samdb gpo<block_start>'''Construct the DN for gpo'''<line_sep>dn=samdb.get_default_basedn()<line_sep>dn.add_child(ldb.Dn(samdb "CN=Policies,CN=System"))<line_sep>dn.add_child(ldb.Dn(samdb "CN=%s"%gpo))<line_sep><return>dn<block_end><def_stmt>get_gpo_info samdb gpo=<none> displayname=<none> dn=<none> sd_flags=security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL|security.SECINFO_SACL<block_start>'''Get GPO information using gpo, displayname or dn'''<line_sep>policies_dn=samdb.get_default_basedn()<line_sep>policies_dn.add_child(ldb.Dn(samdb "CN=Policies,CN=System"))<line_sep>base_dn=policies_dn<line_sep>search_expr="(objectClass=groupPolicyContainer)"<line_sep>search_scope=ldb.SCOPE_ONELEVEL<if_stmt>gpo<is><not><none><block_start>search_expr="(&(objectClass=groupPolicyContainer)(name=%s))"%ldb.binary_encode(gpo)<block_end><if_stmt>displayname<is><not><none><block_start>search_expr="(&(objectClass=groupPolicyContainer)(displayname=%s))"%ldb.binary_encode(displayname)<block_end><if_stmt>dn<is><not><none><block_start>base_dn=dn<line_sep>search_scope=ldb.SCOPE_BASE<block_end><try_stmt><block_start>msg=samdb.search(base=base_dn scope=search_scope expression=search_expr attrs=['nTSecurityDescriptor' 'versionNumber' 'flags' 'name' 'displayName' 'gPCFileSysPath'] controls=['sd_flags:1:%d'%sd_flags])<block_end><except_stmt>Exception e<block_start><if_stmt>gpo<is><not><none><block_start>mesg="Cannot get information for GPO %s"%gpo<block_end><else_stmt><block_start>mesg="Cannot get information for GPOs"<block_end><raise>CommandError(mesg e)<block_end><return>msg<block_end><def_stmt>get_gpo_containers samdb gpo<block_start>'''lists dn of containers for a GPO'''<line_sep>search_expr="(&(objectClass=*)(gPLink=*%s*))"%gpo<try_stmt><block_start>msg=samdb.search(expression=search_expr attrs=['gPLink'])<block_end><except_stmt>Exception e<block_start><raise>CommandError("Could not find container(s) with GPO %s"%gpo e)<block_end><return>msg<block_end><def_stmt>del_gpo_link samdb container_dn gpo<block_start>'''delete GPO link for the container'''<line_sep># Check if valid Container DN and get existing GPlinks
<try_stmt><block_start>msg=samdb.search(base=container_dn scope=ldb.SCOPE_BASE expression="(objectClass=*)" attrs=['gPLink'])[0]<block_end><except_stmt>Exception e<block_start><raise>CommandError("Container '%s' does not exist"%container_dn e)<block_end>found=<false><line_sep>gpo_dn=str(get_gpo_dn(samdb gpo))<if_stmt>'gPLink'<in>msg<block_start>gplist=parse_gplink(msg['gPLink'][0])<for_stmt>g gplist<block_start><if_stmt>g['dn'].lower()<eq>gpo_dn.lower()<block_start>gplist.remove(g)<line_sep>found=<true><line_sep><break><block_end><block_end><block_end><else_stmt><block_start><raise>CommandError("No GPO(s) linked to this container")<block_end><if_stmt><not>found<block_start><raise>CommandError("GPO '%s' not linked to this container"%gpo)<block_end>m=ldb.Message()<line_sep>m.dn=container_dn<if_stmt>gplist<block_start>gplink_str=encode_gplink(gplist)<line_sep>m['r0']=ldb.MessageElement(gplink_str ldb.FLAG_MOD_REPLACE 'gPLink')<block_end><else_stmt><block_start>m['d0']=ldb.MessageElement(msg['gPLink'][0] ldb.FLAG_MOD_DELETE 'gPLink')<block_end><try_stmt><block_start>samdb.modify(m)<block_end><except_stmt>Exception e<block_start><raise>CommandError("Error removing GPO from container" e)<block_end><block_end><def_stmt>parse_unc unc<block_start>'''Parse UNC string into a hostname, a service, and a filepath'''<if_stmt>unc.startswith('\\\\')<and>unc.startswith('//')<block_start><raise>ValueError("UNC doesn't start with \\\\ or //")<block_end>tmp=unc[2:].split('/' 2)<if_stmt>len(tmp)<eq>3<block_start><return>tmp<block_end>tmp=unc[2:].split('\\' 2)<if_stmt>len(tmp)<eq>3<block_start><return>tmp<block_end><raise>ValueError("Invalid UNC string: %s"%unc)<block_end><def_stmt>copy_directory_remote_to_local conn remotedir localdir<block_start><if_stmt><not>os.path.isdir(localdir)<block_start>os.mkdir(localdir)<block_end>r_dirs=[remotedir]<line_sep>l_dirs=[localdir]<while_stmt>r_dirs<block_start>r_dir=r_dirs.pop()<line_sep>l_dir=l_dirs.pop()<line_sep>dirlist=conn.list(r_dir)<for_stmt>e dirlist<block_start>r_name=r_dir+'\\'+e['name']<line_sep>l_name=os.path.join(l_dir e['name'])<if_stmt>e['attrib']&smb.FILE_ATTRIBUTE_DIRECTORY<block_start>r_dirs.append(r_name)<line_sep>l_dirs.append(l_name)<line_sep>os.mkdir(l_name)<block_end><else_stmt><block_start>data=conn.loadfile(r_name)<line_sep>file(l_name 'w').write(data)<block_end><block_end><block_end><block_end><def_stmt>copy_directory_local_to_remote conn localdir remotedir<block_start><if_stmt><not>conn.chkpath(remotedir)<block_start>conn.mkdir(remotedir)<block_end>l_dirs=[localdir]<line_sep>r_dirs=[remotedir]<while_stmt>l_dirs<block_start>l_dir=l_dirs.pop()<line_sep>r_dir=r_dirs.pop()<line_sep>dirlist=os.listdir(l_dir)<for_stmt>e dirlist<block_start>l_name=os.path.join(l_dir e)<line_sep>r_name=r_dir+'\\'+e<if_stmt>os.path.isdir(l_name)<block_start>l_dirs.append(l_name)<line_sep>r_dirs.append(r_name)<line_sep>conn.mkdir(r_name)<block_end><else_stmt><block_start>data=file(l_name 'r').read()<line_sep>conn.savefile(r_name data)<block_end><block_end><block_end><block_end><def_stmt>create_directory_hier conn remotedir<block_start>elems=remotedir.replace('/' '\\').split('\\')<line_sep>path=""<for_stmt>e elems<block_start>path=path+'\\'+e<if_stmt><not>conn.chkpath(path)<block_start>conn.mkdir(path)<block_end><block_end><block_end><class_stmt>cmd_listall(Command)<block_start>"""List all GPOs."""<line_sep>synopsis="%prog [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions 
}<line_sep>takes_options=[Option("-H" "--URL" help="LDB URL for database or target server" type=str metavar="URL" dest="H")]<def_stmt>run self H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<line_sep>msg=get_gpo_info(self.samdb <none>)<for_stmt>m msg<block_start>self.outf.write("GPO : %s\n"%m['name'][0])<line_sep>self.outf.write("display name : %s\n"%m['displayName'][0])<line_sep>self.outf.write("path : %s\n"%m['gPCFileSysPath'][0])<line_sep>self.outf.write("dn : %s\n"%m.dn)<line_sep>self.outf.write("version : %s\n"%attr_default(m 'versionNumber' '0'))<line_sep>self.outf.write("flags : %s\n"%gpo_flags_string(int(attr_default(m 'flags' 0))))<line_sep>self.outf.write("\n")<block_end><block_end><block_end><class_stmt>cmd_list(Command)<block_start>"""List GPOs for an account."""<line_sep>synopsis="%prog <username> [options]"<line_sep>takes_args=['username']<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_options=[Option("-H" "--URL" help="LDB URL for database or target server" type=str metavar="URL" dest="H")]<def_stmt>run self username H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<try_stmt><block_start>msg=self.samdb.search(expression='(&(|(samAccountName=%s)(samAccountName=%s$))(objectClass=User))'%(ldb.binary_encode(username) ldb.binary_encode(username)))<line_sep>user_dn=msg[0].dn<block_end><except_stmt>Exception<block_start><raise>CommandError("Failed to find account %s"%username)<block_end># check if its a computer account
<try_stmt><block_start>msg=self.samdb.search(base=user_dn scope=ldb.SCOPE_BASE attrs=['objectClass'])[0]<line_sep>is_computer='computer'<in>msg['objectClass']<block_end><except_stmt>Exception<block_start><raise>CommandError("Failed to find objectClass for user %s"%username)<block_end>session_info_flags=(AUTH_SESSION_INFO_DEFAULT_GROUPS|AUTH_SESSION_INFO_AUTHENTICATED)<line_sep># When connecting to a remote server, don't look up the local privilege DB
<if_stmt>self.url<is><not><none><and>self.url.startswith('ldap')<block_start>session_info_flags<augor>AUTH_SESSION_INFO_SIMPLE_PRIVILEGES<block_end>session=samba.auth.user_session(self.samdb lp_ctx=self.lp dn=user_dn session_info_flags=session_info_flags)<line_sep>token=session.security_token<line_sep>gpos=[]<line_sep>inherit=<true><line_sep>dn=ldb.Dn(self.samdb str(user_dn)).parent()<while_stmt><true><block_start>msg=self.samdb.search(base=dn scope=ldb.SCOPE_BASE attrs=['gPLink' 'gPOptions'])[0]<if_stmt>'gPLink'<in>msg<block_start>glist=parse_gplink(msg['gPLink'][0])<for_stmt>g glist<block_start><if_stmt><not>inherit<and><not>(g['options']&dsdb.GPLINK_OPT_ENFORCE)<block_start><continue><block_end><if_stmt>g['options']&dsdb.GPLINK_OPT_DISABLE<block_start><continue><block_end><try_stmt><block_start>sd_flags=security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL<line_sep>gmsg=self.samdb.search(base=g['dn'] scope=ldb.SCOPE_BASE attrs=['name' 'displayName' 'flags' 'nTSecurityDescriptor'] controls=['sd_flags:1:%d'%sd_flags])<line_sep>secdesc_ndr=gmsg[0]['nTSecurityDescriptor'][0]<line_sep>secdesc=ndr_unpack(security.descriptor secdesc_ndr)<block_end><except_stmt>Exception<block_start>self.outf.write("Failed to fetch gpo object with nTSecurityDescriptor %s\n"%g['dn'])<line_sep><continue><block_end><try_stmt><block_start>samba.security.access_check(secdesc token security.SEC_STD_READ_CONTROL|security.SEC_ADS_LIST|security.SEC_ADS_READ_PROP)<block_end><except_stmt>RuntimeError<block_start>self.outf.write("Failed access check on %s\n"%msg.dn)<line_sep><continue><block_end># check the flags on the GPO
flags=int(attr_default(gmsg[0] 'flags' 0))<if_stmt>is_computer<and>(flags&dsdb.GPO_FLAG_MACHINE_DISABLE)<block_start><continue><block_end><if_stmt><not>is_computer<and>(flags&dsdb.GPO_FLAG_USER_DISABLE)<block_start><continue><block_end>gpos.append((gmsg[0]['displayName'][0] gmsg[0]['name'][0]))<block_end><block_end># check if this blocks inheritance
gpoptions=int(attr_default(msg 'gPOptions' 0))<if_stmt>gpoptions&dsdb.GPO_BLOCK_INHERITANCE<block_start>inherit=<false><block_end><if_stmt>dn<eq>self.samdb.get_default_basedn()<block_start><break><block_end>dn=dn.parent()<block_end><if_stmt>is_computer<block_start>msg_str='computer'<block_end><else_stmt><block_start>msg_str='user'<block_end>self.outf.write("GPOs for %s %s\n"%(msg_str username))<for_stmt>g gpos<block_start>self.outf.write(" %s %s\n"%(g[0] g[1]))<block_end><block_end><block_end><class_stmt>cmd_show(Command)<block_start>"""Show information for a GPO."""<line_sep>synopsis="%prog <gpo> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['gpo']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str)]<def_stmt>run self gpo H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<try_stmt><block_start>msg=get_gpo_info(self.samdb gpo)[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("GPO '%s' does not exist"%gpo)<block_end><try_stmt><block_start>secdesc_ndr=msg['nTSecurityDescriptor'][0]<line_sep>secdesc=ndr_unpack(security.descriptor secdesc_ndr)<line_sep>secdesc_sddl=secdesc.as_sddl()<block_end><except_stmt>Exception<block_start>secdesc_sddl="<hidden>"<block_end>self.outf.write("GPO : %s\n"%msg['name'][0])<line_sep>self.outf.write("display name : %s\n"%msg['displayName'][0])<line_sep>self.outf.write("path : %s\n"%msg['gPCFileSysPath'][0])<line_sep>self.outf.write("dn : %s\n"%msg.dn)<line_sep>self.outf.write("version : %s\n"%attr_default(msg 'versionNumber' '0'))<line_sep>self.outf.write("flags : %s\n"%gpo_flags_string(int(attr_default(msg 'flags' 0))))<line_sep>self.outf.write("ACL : %s\n"%secdesc_sddl)<line_sep>self.outf.write("\n")<block_end><block_end><class_stmt>cmd_getlink(Command)<block_start>"""List GPO Links for a container."""<line_sep>synopsis="%prog <container_dn> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['container_dn']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str)]<def_stmt>run self container_dn H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<try_stmt><block_start>msg=self.samdb.search(base=container_dn scope=ldb.SCOPE_BASE expression="(objectClass=*)" attrs=['gPLink'])[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("Container '%s' does not exist"%container_dn)<block_end><if_stmt>msg['gPLink']<block_start>self.outf.write("GPO(s) linked to DN %s\n"%container_dn)<line_sep>gplist=parse_gplink(msg['gPLink'][0])<for_stmt>g gplist<block_start>msg=get_gpo_info(self.samdb dn=g['dn'])<line_sep>self.outf.write(" GPO : %s\n"%msg[0]['name'][0])<line_sep>self.outf.write(" Name : %s\n"%msg[0]['displayName'][0])<line_sep>self.outf.write(" Options : %s\n"%gplink_options_string(g['options']))<line_sep>self.outf.write("\n")<block_end><block_end><else_stmt><block_start>self.outf.write("No GPO(s) linked to 
DN=%s\n"%container_dn)<block_end><block_end><block_end><class_stmt>cmd_setlink(Command)<block_start>"""Add or update a GPO link to a container."""<line_sep>synopsis="%prog <container_dn> <gpo> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['container_dn' 'gpo']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str) Option("--disable" dest="disabled" default=<false> action='store_true' help="Disable policy") Option("--enforce" dest="enforced" default=<false> action='store_true' help="Enforce policy")]<def_stmt>run self container_dn gpo H=<none> disabled=<false> enforced=<false> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<line_sep>gplink_options=0<if_stmt>disabled<block_start>gplink_options<augor>dsdb.GPLINK_OPT_DISABLE<block_end><if_stmt>enforced<block_start>gplink_options<augor>dsdb.GPLINK_OPT_ENFORCE<block_end># Check if valid GPO DN
<try_stmt><block_start>msg=get_gpo_info(self.samdb gpo=gpo)[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("GPO '%s' does not exist"%gpo)<block_end>gpo_dn=str(get_gpo_dn(self.samdb gpo))<line_sep># Check if valid Container DN
<try_stmt><block_start>msg=self.samdb.search(base=container_dn scope=ldb.SCOPE_BASE expression="(objectClass=*)" attrs=['gPLink'])[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("Container '%s' does not exist"%container_dn)<block_end># Update existing GPlinks or Add new one
existing_gplink=<false><if_stmt>'gPLink'<in>msg<block_start>gplist=parse_gplink(msg['gPLink'][0])<line_sep>existing_gplink=<true><line_sep>found=<false><for_stmt>g gplist<block_start><if_stmt>g['dn'].lower()<eq>gpo_dn.lower()<block_start>g['options']=gplink_options<line_sep>found=<true><line_sep><break><block_end><block_end><if_stmt>found<block_start><raise>CommandError("GPO '%s' already linked to this container"%gpo)<block_end><else_stmt><block_start>gplist.insert(0 {'dn':gpo_dn 'options':gplink_options})<block_end><block_end><else_stmt><block_start>gplist=[]<line_sep>gplist.append({'dn':gpo_dn 'options':gplink_options})<block_end>gplink_str=encode_gplink(gplist)<line_sep>m=ldb.Message()<line_sep>m.dn=ldb.Dn(self.samdb container_dn)<if_stmt>existing_gplink<block_start>m['new_value']=ldb.MessageElement(gplink_str ldb.FLAG_MOD_REPLACE 'gPLink')<block_end><else_stmt><block_start>m['new_value']=ldb.MessageElement(gplink_str ldb.FLAG_MOD_ADD 'gPLink')<block_end><try_stmt><block_start>self.samdb.modify(m)<block_end><except_stmt>Exception e<block_start><raise>CommandError("Error adding GPO Link" e)<block_end>self.outf.write("Added/Updated GPO link\n")<line_sep>cmd_getlink().run(container_dn H sambaopts credopts versionopts)<block_end><block_end><class_stmt>cmd_dellink(Command)<block_start>"""Delete GPO link from a container."""<line_sep>synopsis="%prog <container_dn> <gpo> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['container' 'gpo']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str) ]<def_stmt>run self container gpo H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<line_sep># Check if valid GPO
<try_stmt><block_start>get_gpo_info(self.samdb gpo=gpo)[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("GPO '%s' does not exist"%gpo)<block_end>container_dn=ldb.Dn(self.samdb container)<line_sep>del_gpo_link(self.samdb container_dn gpo)<line_sep>self.outf.write("Deleted GPO link.\n")<line_sep>cmd_getlink().run(container_dn H sambaopts credopts versionopts)<block_end><block_end><class_stmt>cmd_listcontainers(Command)<block_start>"""List all linked containers for a GPO."""<line_sep>synopsis="%prog <gpo> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['gpo']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str)]<def_stmt>run self gpo H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<line_sep>msg=get_gpo_containers(self.samdb gpo)<if_stmt>len(msg)<block_start>self.outf.write("Container(s) using GPO %s\n"%gpo)<for_stmt>m msg<block_start>self.outf.write(" DN: %s\n"%m['dn'])<block_end><block_end><else_stmt><block_start>self.outf.write("No Containers using GPO %s\n"%gpo)<block_end><block_end><block_end><class_stmt>cmd_getinheritance(Command)<block_start>"""Get inheritance flag for a container."""<line_sep>synopsis="%prog <container_dn> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['container_dn']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str)]<def_stmt>run self container_dn H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<try_stmt><block_start>msg=self.samdb.search(base=container_dn scope=ldb.SCOPE_BASE expression="(objectClass=*)" attrs=['gPOptions'])[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("Container '%s' does not exist"%container_dn)<block_end>inheritance=0<if_stmt>'gPOptions'<in>msg<block_start>inheritance=int(msg['gPOptions'][0])<block_end><if_stmt>inheritance<eq>dsdb.GPO_BLOCK_INHERITANCE<block_start>self.outf.write("Container has GPO_BLOCK_INHERITANCE\n")<block_end><else_stmt><block_start>self.outf.write("Container has GPO_INHERIT\n")<block_end><block_end><block_end><class_stmt>cmd_setinheritance(Command)<block_start>"""Set inheritance flag on a container."""<line_sep>synopsis="%prog <container_dn> <block|inherit> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['container_dn' 'inherit_state']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str)]<def_stmt>run self container_dn inherit_state H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start><if_stmt>inherit_state.lower()<eq>'block'<block_start>inheritance=dsdb.GPO_BLOCK_INHERITANCE<block_end><elif_stmt>inherit_state.lower()<eq>'inherit'<block_start>inheritance=dsdb.GPO_INHERIT<block_end><else_stmt><block_start><raise>CommandError("Unknown inheritance state 
(%s)"%inherit_state)<block_end>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep>samdb_connect(self)<try_stmt><block_start>msg=self.samdb.search(base=container_dn scope=ldb.SCOPE_BASE expression="(objectClass=*)" attrs=['gPOptions'])[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("Container '%s' does not exist"%container_dn)<block_end>m=ldb.Message()<line_sep>m.dn=ldb.Dn(self.samdb container_dn)<if_stmt>'gPOptions'<in>msg<block_start>m['new_value']=ldb.MessageElement(str(inheritance) ldb.FLAG_MOD_REPLACE 'gPOptions')<block_end><else_stmt><block_start>m['new_value']=ldb.MessageElement(str(inheritance) ldb.FLAG_MOD_ADD 'gPOptions')<block_end><try_stmt><block_start>self.samdb.modify(m)<block_end><except_stmt>Exception e<block_start><raise>CommandError("Error setting inheritance state %s"%inherit_state e)<block_end><block_end><block_end><class_stmt>cmd_fetch(Command)<block_start>"""Download a GPO."""<line_sep>synopsis="%prog <gpo> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['gpo']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str) Option("--tmpdir" help="Temporary directory for copying policy files" type=str)]<def_stmt>run self gpo H=<none> tmpdir=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep># We need to know writable DC to setup SMB connection
<if_stmt>H<and>H.startswith('ldap://')<block_start>dc_hostname=H[7:]<line_sep>self.url=H<block_end><else_stmt><block_start>dc_hostname=netcmd_finddc(self.lp self.creds)<line_sep>self.url=dc_url(self.lp self.creds dc=dc_hostname)<block_end>samdb_connect(self)<try_stmt><block_start>msg=get_gpo_info(self.samdb gpo)[0]<block_end><except_stmt>Exception<block_start><raise>CommandError("GPO '%s' does not exist"%gpo)<block_end># verify UNC path
unc=msg['gPCFileSysPath'][0]<try_stmt><block_start>[dom_name service sharepath]=parse_unc(unc)<block_end><except_stmt>ValueError<block_start><raise>CommandError("Invalid GPO path (%s)"%unc)<block_end># SMB connect to DC
<try_stmt><block_start>conn=smb.SMB(dc_hostname service lp=self.lp creds=self.creds)<block_end><except_stmt>Exception<block_start><raise>CommandError("Error connecting to '%s' using SMB"%dc_hostname)<block_end># Copy GPT
<if_stmt>tmpdir<is><none><block_start>tmpdir="/tmp"<block_end><if_stmt><not>os.path.isdir(tmpdir)<block_start><raise>CommandError("Temoprary directory '%s' does not exist"%tmpdir)<block_end>localdir=os.path.join(tmpdir "policy")<if_stmt><not>os.path.isdir(localdir)<block_start>os.mkdir(localdir)<block_end>gpodir=os.path.join(localdir gpo)<if_stmt>os.path.isdir(gpodir)<block_start><raise>CommandError("GPO directory '%s' already exists, refusing to overwrite"%gpodir)<block_end><try_stmt><block_start>os.mkdir(gpodir)<line_sep>copy_directory_remote_to_local(conn sharepath gpodir)<block_end><except_stmt>Exception e# FIXME: Catch more specific exception
<block_start><raise>CommandError("Error copying GPO from DC" e)<block_end>self.outf.write('GPO copied to %s\n'%gpodir)<block_end><block_end><class_stmt>cmd_create(Command)<block_start>"""Create an empty GPO."""<line_sep>synopsis="%prog <displayname> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['displayname']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str) Option("--tmpdir" help="Temporary directory for copying policy files" type=str)]<def_stmt>run self displayname H=<none> tmpdir=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>net=Net(creds=self.creds lp=self.lp)<line_sep># We need to know writable DC to setup SMB connection
<if_stmt>H<and>H.startswith('ldap://')<block_start>dc_hostname=H[7:]<line_sep>self.url=H<line_sep>flags=(nbt.NBT_SERVER_LDAP|nbt.NBT_SERVER_DS|nbt.NBT_SERVER_WRITABLE)<line_sep>cldap_ret=net.finddc(address=dc_hostname flags=flags)<block_end><else_stmt><block_start>flags=(nbt.NBT_SERVER_LDAP|nbt.NBT_SERVER_DS|nbt.NBT_SERVER_WRITABLE)<line_sep>cldap_ret=net.finddc(domain=self.lp.get('realm') flags=flags)<line_sep>dc_hostname=cldap_ret.pdc_dns_name<line_sep>self.url=dc_url(self.lp self.creds dc=dc_hostname)<block_end>samdb_connect(self)<line_sep>msg=get_gpo_info(self.samdb displayname=displayname)<if_stmt>msg.count<g>0<block_start><raise>CommandError("A GPO already existing with name '%s'"%displayname)<block_end># Create new GUID
guid=str(uuid.uuid4())<line_sep>gpo="{%s}"%guid.upper()<line_sep>realm=cldap_ret.dns_domain<line_sep>unc_path="\\\\%s\\sysvol\\%s\\Policies\\%s"%(realm realm gpo)<line_sep># Create GPT
<if_stmt>tmpdir<is><none><block_start>tmpdir="/tmp"<block_end><if_stmt><not>os.path.isdir(tmpdir)<block_start><raise>CommandError("Temporary directory '%s' does not exist"%tmpdir)<block_end>localdir=os.path.join(tmpdir "policy")<if_stmt><not>os.path.isdir(localdir)<block_start>os.mkdir(localdir)<block_end>gpodir=os.path.join(localdir gpo)<if_stmt>os.path.isdir(gpodir)<block_start><raise>CommandError("GPO directory '%s' already exists, refusing to overwrite"%gpodir)<block_end><try_stmt><block_start>os.mkdir(gpodir)<line_sep>os.mkdir(os.path.join(gpodir "Machine"))<line_sep>os.mkdir(os.path.join(gpodir "User"))<line_sep>gpt_contents="[General]\r\nVersion=0\r\n"<line_sep>file(os.path.join(gpodir "GPT.INI") "w").write(gpt_contents)<block_end><except_stmt>Exception e<block_start><raise>CommandError("Error Creating GPO files" e)<block_end># Connect to DC over SMB
[dom_name service sharepath]=parse_unc(unc_path)<try_stmt><block_start>conn=smb.SMB(dc_hostname service lp=self.lp creds=self.creds)<block_end><except_stmt>Exception e<block_start><raise>CommandError("Error connecting to '%s' using SMB"%dc_hostname e)<block_end>self.samdb.transaction_start()<try_stmt># Add cn=<guid>
<block_start>gpo_dn=get_gpo_dn(self.samdb gpo)<line_sep>m=ldb.Message()<line_sep>m.dn=gpo_dn<line_sep>m['a01']=ldb.MessageElement("groupPolicyContainer" ldb.FLAG_MOD_ADD "objectClass")<line_sep>self.samdb.add(m)<line_sep># Add cn=User,cn=<guid>
m=ldb.Message()<line_sep>m.dn=ldb.Dn(self.samdb "CN=User,%s"%str(gpo_dn))<line_sep>m['a01']=ldb.MessageElement("container" ldb.FLAG_MOD_ADD "objectClass")<line_sep>self.samdb.add(m)<line_sep># Add cn=Machine,cn=<guid>
m=ldb.Message()<line_sep>m.dn=ldb.Dn(self.samdb "CN=Machine,%s"%str(gpo_dn))<line_sep>m['a01']=ldb.MessageElement("container" ldb.FLAG_MOD_ADD "objectClass")<line_sep>self.samdb.add(m)<line_sep># Get new security descriptor
ds_sd_flags=(security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL)<line_sep>msg=get_gpo_info(self.samdb gpo=gpo sd_flags=ds_sd_flags)[0]<line_sep>ds_sd_ndr=msg['nTSecurityDescriptor'][0]<line_sep>ds_sd=ndr_unpack(security.descriptor ds_sd_ndr).as_sddl()<line_sep># Create a file system security descriptor
domain_sid=security.dom_sid(self.samdb.get_domain_sid())<line_sep>sddl=dsacl2fsacl(ds_sd domain_sid)<line_sep>fs_sd=security.descriptor.from_sddl(sddl domain_sid)<line_sep># Copy GPO directory
create_directory_hier(conn sharepath)<line_sep># Set ACL
sio=(security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL|security.SECINFO_PROTECTED_DACL)<line_sep>conn.set_acl(sharepath fs_sd sio)<line_sep># Copy GPO files over SMB
copy_directory_local_to_remote(conn gpodir sharepath)<line_sep>m=ldb.Message()<line_sep>m.dn=gpo_dn<line_sep>m['a02']=ldb.MessageElement(displayname ldb.FLAG_MOD_REPLACE "displayName")<line_sep>m['a03']=ldb.MessageElement(unc_path ldb.FLAG_MOD_REPLACE "gPCFileSysPath")<line_sep>m['a05']=ldb.MessageElement("0" ldb.FLAG_MOD_REPLACE "versionNumber")<line_sep>m['a07']=ldb.MessageElement("2" ldb.FLAG_MOD_REPLACE "gpcFunctionalityVersion")<line_sep>m['a04']=ldb.MessageElement("0" ldb.FLAG_MOD_REPLACE "flags")<line_sep>controls=["permissive_modify:0"]<line_sep>self.samdb.modify(m controls=controls)<block_end><except_stmt>Exception<block_start>self.samdb.transaction_cancel()<line_sep><raise><block_end><else_stmt><block_start>self.samdb.transaction_commit()<block_end>self.outf.write("GPO '%s' created as %s\n"%(displayname gpo))<block_end><block_end><class_stmt>cmd_del(Command)<block_start>"""Delete a GPO."""<line_sep>synopsis="%prog <gpo> [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_args=['gpo']<line_sep>takes_options=[Option("-H" help="LDB URL for database or target server" type=str) ]<def_stmt>run self gpo H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep># We need to know writable DC to setup SMB connection
<if_stmt>H<and>H.startswith('ldap://')<block_start>dc_hostname=H[7:]<line_sep>self.url=H<block_end><else_stmt><block_start>dc_hostname=netcmd_finddc(self.lp self.creds)<line_sep>self.url=dc_url(self.lp self.creds dc=dc_hostname)<block_end>samdb_connect(self)<line_sep># Check if valid GPO
<try_stmt><block_start>msg=get_gpo_info(self.samdb gpo=gpo)[0]<line_sep>unc_path=msg['gPCFileSysPath'][0]<block_end><except_stmt>Exception<block_start><raise>CommandError("GPO '%s' does not exist"%gpo)<block_end># Connect to DC over SMB
[dom_name service sharepath]=parse_unc(unc_path)<try_stmt><block_start>conn=smb.SMB(dc_hostname service lp=self.lp creds=self.creds)<block_end><except_stmt>Exception e<block_start><raise>CommandError("Error connecting to '%s' using SMB"%dc_hostname e)<block_end>self.samdb.transaction_start()<try_stmt># Check for existing links
<block_start>msg=get_gpo_containers(self.samdb gpo)<if_stmt>len(msg)<block_start>self.outf.write("GPO %s is linked to containers\n"%gpo)<for_stmt>m msg<block_start>del_gpo_link(self.samdb m['dn'] gpo)<line_sep>self.outf.write(" Removed link from %s.\n"%m['dn'])<block_end><block_end># Remove LDAP entries
gpo_dn=get_gpo_dn(self.samdb gpo)<line_sep>self.samdb.delete(ldb.Dn(self.samdb "CN=User,%s"%str(gpo_dn)))<line_sep>self.samdb.delete(ldb.Dn(self.samdb "CN=Machine,%s"%str(gpo_dn)))<line_sep>self.samdb.delete(gpo_dn)<line_sep># Remove GPO files
conn.deltree(sharepath)<block_end><except_stmt>Exception<block_start>self.samdb.transaction_cancel()<line_sep><raise><block_end><else_stmt><block_start>self.samdb.transaction_commit()<block_end>self.outf.write("GPO %s deleted.\n"%gpo)<block_end><block_end><class_stmt>cmd_aclcheck(Command)<block_start>"""Check all GPOs have matching LDAP and DS ACLs."""<line_sep>synopsis="%prog [options]"<line_sep>takes_optiongroups={"sambaopts":options.SambaOptions "versionopts":options.VersionOptions "credopts":options.CredentialsOptions }<line_sep>takes_options=[Option("-H" "--URL" help="LDB URL for database or target server" type=str metavar="URL" dest="H")]<def_stmt>run self H=<none> sambaopts=<none> credopts=<none> versionopts=<none><block_start>self.lp=sambaopts.get_loadparm()<line_sep>self.creds=credopts.get_credentials(self.lp fallback_machine=<true>)<line_sep>self.url=dc_url(self.lp self.creds H)<line_sep># We need to know writable DC to setup SMB connection
<if_stmt>H<and>H.startswith('ldap://')<block_start>dc_hostname=H[7:]<line_sep>self.url=H<block_end><else_stmt><block_start>dc_hostname=netcmd_finddc(self.lp self.creds)<line_sep>self.url=dc_url(self.lp self.creds dc=dc_hostname)<block_end>samdb_connect(self)<line_sep>msg=get_gpo_info(self.samdb <none>)<for_stmt>m msg# verify UNC path
<block_start>unc=m['gPCFileSysPath'][0]<try_stmt><block_start>[dom_name service sharepath]=parse_unc(unc)<block_end><except_stmt>ValueError<block_start><raise>CommandError("Invalid GPO path (%s)"%unc)<block_end># SMB connect to DC
<try_stmt><block_start>conn=smb.SMB(dc_hostname service lp=self.lp creds=self.creds)<block_end><except_stmt>Exception<block_start><raise>CommandError("Error connecting to '%s' using SMB"%dc_hostname)<block_end>fs_sd=conn.get_acl(sharepath security.SECINFO_OWNER|security.SECINFO_GROUP|security.SECINFO_DACL security.SEC_FLAG_MAXIMUM_ALLOWED)<line_sep>ds_sd_ndr=m['nTSecurityDescriptor'][0]<line_sep>ds_sd=ndr_unpack(security.descriptor ds_sd_ndr).as_sddl()<line_sep># Create a file system security descriptor
domain_sid=security.dom_sid(self.samdb.get_domain_sid())<line_sep>expected_fs_sddl=dsacl2fsacl(ds_sd domain_sid)<if_stmt>(fs_sd.as_sddl(domain_sid)<ne>expected_fs_sddl)<block_start><raise>CommandError("Invalid GPO ACL %s on path (%s), should be %s"%(fs_sd.as_sddl(domain_sid) sharepath expected_fs_sddl))<block_end><block_end><block_end><block_end><class_stmt>cmd_gpo(SuperCommand)<block_start>"""Group Policy Object (GPO) management."""<line_sep>subcommands={}<line_sep>subcommands["listall"]=cmd_listall()<line_sep>subcommands["list"]=cmd_list()<line_sep>subcommands["show"]=cmd_show()<line_sep>subcommands["getlink"]=cmd_getlink()<line_sep>subcommands["setlink"]=cmd_setlink()<line_sep>subcommands["dellink"]=cmd_dellink()<line_sep>subcommands["listcontainers"]=cmd_listcontainers()<line_sep>subcommands["getinheritance"]=cmd_getinheritance()<line_sep>subcommands["setinheritance"]=cmd_setinheritance()<line_sep>subcommands["fetch"]=cmd_fetch()<line_sep>subcommands["create"]=cmd_create()<line_sep>subcommands["del"]=cmd_del()<line_sep>subcommands["aclcheck"]=cmd_aclcheck()<block_end> |
# Inspired by https://docs.aiohttp.org/en/stable/web_quickstart.html
# and https://docs.aiohttp.org/en/stable/web_quickstart.html#resources-and-routes

from aiohttp import web

app = web.Application()

## ================================= ##
## Ways to specify routes / handlers ##
## ================================= ##

## Using coroutines

if True:
    # `app.add_routes` with list
    async def foo(request):  # $ requestHandler
        return web.Response(text="foo")  # $ HttpResponse

    async def foo2(request):  # $ requestHandler
        return web.Response(text="foo2")  # $ HttpResponse

    async def foo3(request):  # $ requestHandler
        return web.Response(text="foo3")  # $ HttpResponse

    app.add_routes([
        web.get("/foo", foo),  # $ routeSetup="/foo"
        web.route("*", "/foo2", foo2),  # $ routeSetup="/foo2"
        web.get(path="/foo3", handler=foo3),  # $ routeSetup="/foo3"
    ])

    # using decorator
    routes = web.RouteTableDef()

    @routes.get("/bar")  # $ routeSetup="/bar"
    async def bar(request):  # $ requestHandler
        return web.Response(text="bar")  # $ HttpResponse

    @routes.route("*", "/bar2")  # $ routeSetup="/bar2"
    async def bar2(request):  # $ requestHandler
        return web.Response(text="bar2")  # $ HttpResponse

    @routes.get(path="/bar3")  # $ routeSetup="/bar3"
    async def bar3(request):  # $ requestHandler
        return web.Response(text="bar3")  # $ HttpResponse

    app.add_routes(routes)

    # `app.router.add_get` / `app.router.add_route`
    async def baz(request):  # $ requestHandler
        return web.Response(text="baz")  # $ HttpResponse

    app.router.add_get("/baz", baz)  # $ routeSetup="/baz"

    async def baz2(request):  # $ requestHandler
        return web.Response(text="baz2")  # $ HttpResponse

    app.router.add_route("*", "/baz2", baz2)  # $ routeSetup="/baz2"

    async def baz3(request):  # $ requestHandler
        return web.Response(text="baz3")  # $ HttpResponse

    app.router.add_get(path="/baz3", handler=baz3)  # $ routeSetup="/baz3"

## Using classes / views

if True:
    # see https://docs.aiohttp.org/en/stable/web_quickstart.html#organizing-handlers-in-classes
    class MyCustomHandlerClass:
        async def foo_handler(self, request):  # $ MISSING: requestHandler
            return web.Response(text="MyCustomHandlerClass.foo")  # $ HttpResponse

    my_custom_handler = MyCustomHandlerClass()
    app.router.add_get("/MyCustomHandlerClass/foo", my_custom_handler.foo_handler)  # $ routeSetup="/MyCustomHandlerClass/foo"

    # Using `web.View`
    # ---------------

    # `app.add_routes` with list
    class MyWebView1(web.View):
        async def get(self):  # $ requestHandler
            return web.Response(text="MyWebView1.get")  # $ HttpResponse

    app.add_routes([
        web.view("/MyWebView1", MyWebView1)  # $ routeSetup="/MyWebView1"
    ])

    # using decorator
    routes = web.RouteTableDef()

    @routes.view("/MyWebView2")  # $ routeSetup="/MyWebView2"
    class MyWebView2(web.View):
        async def get(self):  # $ requestHandler
            return web.Response(text="MyWebView2.get")  # $ HttpResponse

    app.add_routes(routes)

    # `app.router.add_view`
    class MyWebView3(web.View):
        async def get(self):  # $ requestHandler
            return web.Response(text="MyWebView3.get")  # $ HttpResponse

    app.router.add_view("/MyWebView3", MyWebView3)  # $ routeSetup="/MyWebView3"

    # no route-setup
    class MyWebViewNoRoute(web.View):
        async def get(self):  # $ requestHandler
            return web.Response(text="MyWebViewNoRoute.get")  # $ HttpResponse

    if len(__name__) < 0:
        # avoid running, but fool analysis to not consider dead code

        # no explicit-view subclass (but route-setup)
        class MyWebViewNoSubclassButRoute(somelib.someclass):
            async def get(self):  # $ requestHandler
                return web.Response(text="MyWebViewNoSubclassButRoute.get")  # $ HttpResponse

        app.router.add_view("/MyWebViewNoSubclassButRoute", MyWebViewNoSubclassButRoute)  # $ routeSetup="/MyWebViewNoSubclassButRoute"

# Apparently there is no enforcement that `add_view` is only for views, and vice-versa
# for `add_get` only being for async functions.
if True:
    async def no_rules(request):  # $ requestHandler
        return web.Response(text="no_rules")  # $ HttpResponse

    app.router.add_view("/no_rules", no_rules)  # $ routeSetup="/no_rules"

    class NoRulesView(web.View):
        async def get(self):  # $ requestHandler
            return web.Response(text="NoRulesView.get")  # $ HttpResponse

    app.router.add_get("/NoRulesView", NoRulesView)  # $ routeSetup="/NoRulesView"

## =================== ##
## "Routed parameters" ##
## =================== ##

if True:
    # see https://docs.aiohttp.org/en/stable/web_quickstart.html#variable-resources
    async def matching(request: web.Request):  # $ requestHandler
        name = request.match_info['name']
        number = request.match_info['number']
        return web.Response(text="matching name={} number={}".format(name, number))  # $ HttpResponse

    app.router.add_get(r"/matching/{name}/{number:\d+}", matching)  # $ routeSetup="/matching/{name}/{number:\d+}"

## ======= ##
## subapps ##
## ======= ##

if True:
    subapp = web.Application()

    async def subapp_handler(request):  # $ requestHandler
        return web.Response(text="subapp_handler")  # $ HttpResponse

    subapp.router.add_get("/subapp_handler", subapp_handler)  # $ routeSetup="/subapp_handler"

    app.add_subapp("/my_subapp", subapp)

    # similar behavior is possible with `app.add_domain`, but since I don't think we'll have special handling
    # for any kind of subapps, I have not created a test for this.

## ================================ ##
## Constructing UrlDispatcher first ##
## ================================ ##

if True:
    async def manual_dispatcher_instance(request):  # $ requestHandler
        return web.Response(text="manual_dispatcher_instance")  # $ HttpResponse

    url_dispatcher = web.UrlDispatcher()
    url_dispatcher.add_get("/manual_dispatcher_instance", manual_dispatcher_instance)  # $ routeSetup="/manual_dispatcher_instance"

    subapp2 = web.Application(router=url_dispatcher)
    app.add_subapp("/manual_dispatcher_instance_app", subapp2)

## =========== ##
## Run the app ##
## =========== ##

if __name__ == "__main__":
    print("For auto-reloading server you can use:")
    print(f"aiohttp-devtools runserver {__file__}")
    print("after doing `pip install aiohttp-devtools`")
    print()
    web.run_app(app)
|
<import_stmt>gym<import_stmt>torch<import_stmt>torch.multiprocessing<as>mp<import_stmt>numpy<as>np<import_from_stmt>model QNet<import_from_stmt>memory Memory<import_from_stmt>config env_name async_update_step update_target max_episode device log_interval goal_score<class_stmt>Worker(mp.Process)<block_start><def_stmt>__init__ self online_net target_net optimizer global_ep global_ep_r res_queue name<block_start>super(Worker self).__init__()<line_sep>self.env=gym.make(env_name)<line_sep>self.env.seed(500)<line_sep>self.name='w%i'%name<line_sep>self.global_ep,self.global_ep_r,self.res_queue=global_ep global_ep_r res_queue<line_sep>self.online_net,self.target_net,self.optimizer=online_net target_net optimizer<block_end><def_stmt>record self score epsilon loss<block_start><with_stmt>self.global_ep.get_lock()<block_start>self.global_ep.value<augadd>1<block_end><with_stmt>self.global_ep_r.get_lock()<block_start><if_stmt>self.global_ep_r.value<eq>0.<block_start>self.global_ep_r.value=score<block_end><else_stmt><block_start>self.global_ep_r.value=0.99<times>self.global_ep_r.value+0.01<times>score<block_end><block_end><if_stmt>self.global_ep.value%log_interval<eq>0<block_start>print('{} , {} episode | score: {:.2f}, | epsilon: {:.2f}'.format(self.name self.global_ep.value self.global_ep_r.value epsilon))<block_end>self.res_queue.put([self.global_ep.value self.global_ep_r.value loss])<block_end><def_stmt>update_target_model self<block_start>self.target_net.load_state_dict(self.online_net.state_dict())<block_end><def_stmt>get_action self state epsilon<block_start><if_stmt>np.random.rand()<le>epsilon<block_start><return>self.env.action_space.sample()<block_end><else_stmt><block_start><return>self.target_net.get_action(state)<block_end><block_end><def_stmt>run self<block_start>epsilon=1.0<line_sep>steps=0<while_stmt>self.global_ep.value<l>max_episode<block_start><if_stmt>self.global_ep_r.value<g>goal_score<block_start><break><block_end>done=<false><line_sep>score=0<line_sep>state=self.env.reset()<line_sep>state=torch.Tensor(state).to(device)<line_sep>state=state.unsqueeze(0)<line_sep>memory=Memory(async_update_step)<while_stmt><not>done<block_start>steps<augadd>1<line_sep>action=self.get_action(state epsilon)<line_sep>next_state,reward,done,_=self.env.step(action)<line_sep>next_state=torch.Tensor(next_state)<line_sep>next_state=next_state.unsqueeze(0)<line_sep>mask=0<if>done<else>1<line_sep>reward=reward<if><not>done<or>score<eq>499<else>-1<line_sep>action_one_hot=np.zeros(2)<line_sep>action_one_hot[action]=1<line_sep>memory.push(state next_state action_one_hot reward mask)<line_sep>score<augadd>reward<line_sep>state=next_state<line_sep>epsilon<augsub>0.00001<line_sep>epsilon=max(epsilon 0.1)<if_stmt>len(memory)<eq>async_update_step<or>done<block_start>batch=memory.sample()<line_sep>loss=QNet.train_model(self.online_net self.target_net self.optimizer batch)<line_sep>memory=Memory(async_update_step)<if_stmt>done<block_start>self.record(score epsilon loss)<line_sep><break><block_end><block_end><if_stmt>steps%update_target<eq>0<block_start>self.update_target_model()<block_end><block_end>score=score<if>score<eq>500.0<else>score+1<block_end>self.res_queue.put(<none>)<block_end><block_end> |
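# Illustrative spawning sketch (not part of this file; the shared networks and optimizer
# are assumptions about the surrounding training script, the rest mirrors Worker.__init__):
#   global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.0), mp.Queue()
#   workers = [Worker(online_net, target_net, optimizer, global_ep, global_ep_r, res_queue, rank)
#              for rank in range(mp.cpu_count())]
#   [w.start() for w in workers]
#   [w.join() for w in workers]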
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keys which only appear in SSD.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<line_sep># Pretrained classifer model
BACKBONE="backbone"<line_sep>FEATURE_SIZES="feature_sizes"<line_sep>STEPS="steps"<line_sep>SCALES="scales"<line_sep>ASPECT_RATIOS="aspect_ratios"<line_sep>NUM_DEFAULTS_PER_CELL="num_defaults_per_cell"<line_sep>LOC_CONF_OUT_CHANNELS="loc_conf_out_channels"<line_sep>NUM_DEFAULTS="num_default_boxes"<line_sep># Overlap threshold for NMS
NMS_THRESHOLD="nms_threshold"<line_sep>NMS_MAX_DETECTIONS="nms_max_detections"<line_sep># data pipeline
NUM_CROPPING_ITERATIONS="num_cropping_iterations"<line_sep>RANDOM_FLIP_PROBABILITY="random_flip_probability"<line_sep>DATA_NORMALIZATION_MEAN="data_normalization_mean"<line_sep>DATA_NORMALIZATION_STD="data_normalization_std"<line_sep> |
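# Illustrative fragment of a benchmark config keyed by the constants above
# (the values shown are placeholders, not the reference SSD settings):
#   ssd_params = {
#       BACKBONE: "resnet34",
#       NMS_THRESHOLD: 0.5,
#       NMS_MAX_DETECTIONS: 200,
#       RANDOM_FLIP_PROBABILITY: 0.5,
#   }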
<import_from_stmt>.torch_op_attr *<import_from_stmt>.nndct2torch_op_map *<import_from_stmt>.op_register *<import_from_stmt>.torch_const *<import_from_stmt>.tensor_utils *<import_from_stmt>.schema *<line_sep> |
"""Generated test for checking pynos based actions
"""<import_stmt>xml.etree.ElementTree<as>ET<import_from_stmt>st2tests.base BaseActionTestCase<import_from_stmt>interface_vrrp_vip interface_vrrp_vip<line_sep>__all__=['TestInterfaceVrrpVip']<class_stmt>MockCallback(object)# pylint:disable=too-few-public-methods
<block_start>"""Class to hold mock callback and result
"""<line_sep>returned_data=<none><def_stmt>callback self call **kwargs# pylint:disable=unused-argument
<block_start>"""Mock callback method
"""<line_sep>xml_result=ET.tostring(call)<line_sep>self.returned_data=xml_result<block_end><block_end><class_stmt>TestInterfaceVrrpVip(BaseActionTestCase)<block_start>"""Test holder class
"""<line_sep>action_cls=interface_vrrp_vip<def_stmt>test_action self<block_start>"""Generated test to check action
"""<line_sep>action=self.get_action_instance()<line_sep>mock_callback=MockCallback()<line_sep>kwargs={'username':'' 'rbridge_id':'224' 'ip':'' 'vrid':'10' 'vip':'10.9.2.1' 'int_type':'gigabitethernet' 'password':'' 'port':'22' 'name':'10/0/1' 'test':<true> 'callback':mock_callback.callback}<line_sep>action.run(**kwargs)<line_sep>expected_xml=('<config><interface xmlns="urn:brocade.com:mgmt:brocade-interface"'<concat>'><gigabitethernet><name>10/0/1</name><vrrp xmlns="urn:brocade.com'<concat>':mgmt:brocade-vrrp"><vrid>10</vrid><version>3</version><virtual-i'<concat>'p><virtual-ipaddr>10.9.2.1</virtual-ipaddr></virtual-ip></vrrp></'<concat>'gigabitethernet></interface></config>')<line_sep>self.assertTrue(expected_xml mock_callback.returned_data)<block_end><block_end> |
# Copyright 2020 Amazon Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>copy<import_stmt>itertools<import_stmt>unittest<import_stmt>byteps.mxnet<as>bps<import_stmt>mxnet<as>mx<import_stmt>mxnet.ndarray<as>nd<import_stmt>numpy<as>np<import_from_stmt>gluoncv.model_zoo get_model<import_from_stmt>mxnet autograd gluon<import_from_stmt>numba jit<import_from_stmt>parameterized parameterized<import_from_stmt>tqdm tqdm<import_from_stmt>meta_test MetaTest<import_from_stmt>utils bernoulli fake_data<line_sep>@jit(nopython=<true>)<def_stmt>round_next_pow2 v<block_start>v<augsub>np.uint32(1)<line_sep>v<augor>v<rshift>np.uint32(1)<line_sep>v<augor>v<rshift>np.uint32(2)<line_sep>v<augor>v<rshift>np.uint32(4)<line_sep>v<augor>v<rshift>np.uint32(8)<line_sep>v<augor>v<rshift>np.uint32(16)<line_sep>v<augadd>np.uint32(1)<line_sep><return>v<block_end><def_stmt>dithering x k state partition='linear' norm="max"<block_start>y=x.flatten()<if_stmt>norm<eq>"max"<block_start>scale=np.max(np.abs(y))<block_end><elif_stmt>norm<eq>"l2"<block_start>scale=np.linalg.norm(y.astype(np.float64) ord=2)<block_end><else_stmt><block_start><raise>ValueError("Unsupported normalization")<block_end>y<augdiv>scale<line_sep>sign=np.sign(y)<line_sep>y=np.abs(y)<line_sep># stochastic rounding
<if_stmt>partition<eq>'linear'<block_start>y<augmul>k<line_sep>low=np.floor(y)<line_sep>p=y-low# whether to ceil
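# Worked example of this linear stochastic rounding (illustrative): with k=4 and a
# normalized magnitude of 0.30, y*k = 1.2, so low = 1 and p = 0.2; the value rounds
# up to 2 with probability 0.2 and down to 1 otherwise, i.e. to 0.5 or 0.25 after /k.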
y=low+bernoulli(p state)<line_sep>y<augdiv>k<block_end><elif_stmt>partition<eq>"natural"<block_start>y<augmul>2<power>(k-1)<line_sep>low=round_next_pow2((np.ceil(y).astype(np.uint32)))<rshift>1<line_sep>length=copy.deepcopy(low)<line_sep>length[length<eq>0]=1<line_sep>p=(y-low)/length<line_sep>y=low+length<times>bernoulli(p state)<line_sep>y=y.astype(np.float32)<line_sep>y<augdiv>2<power>(k-1)<block_end><else_stmt><block_start><raise>ValueError("Unsupported partition")<block_end>y<augmul>sign<line_sep>y<augmul>scale<line_sep><return>y.reshape(x.shape)<block_end><class_stmt>DitheringTestCase(unittest.TestCase metaclass=MetaTest)<block_start>@parameterized.expand(itertools.product([2 4 8] ["linear" "natural"] ["max" "l2"] np.random.randint(0 2020 size=3).tolist()))<def_stmt>test_dithering self k ptype ntype seed<block_start>ctx=mx.gpu(0)<line_sep>net=get_model("resnet18_v2")<line_sep>net.initialize(mx.init.Xavier() ctx=ctx)<line_sep>net.summary(nd.ones((1 3 224 224) ctx=ctx))<line_sep># hyper-params
batch_size=32<line_sep>optimizer_params={'momentum':0 'wd':0 'learning_rate':0.01}<line_sep>compression_params={"compressor":"dithering" "k":k "partition":ptype "normalize":ntype "seed":seed}<line_sep>print(compression_params)<line_sep>trainer=bps.DistributedTrainer(net.collect_params() "sgd" optimizer_params compression_params=compression_params)<line_sep>loss_fn=gluon.loss.SoftmaxCrossEntropyLoss()<line_sep>train_data=fake_data(batch_size=batch_size)<line_sep>params={}<line_sep>rngs={}<line_sep>rngs_s={}<for_stmt>i,param enumerate(trainer._params)<block_start><if_stmt>param.grad_req<ne>'null'<block_start>params[i]=param._data[0].asnumpy()<line_sep>rngs[i]=np.array([seed seed] dtype=np.uint64)<line_sep>rngs_s[i]=np.array([seed seed] dtype=np.uint64)<block_end><block_end><for_stmt>it,batch tqdm(enumerate(train_data))<block_start>data=batch[0].as_in_context(ctx)<line_sep>label=batch[1].as_in_context(ctx)<with_stmt>autograd.record()<block_start>output=net(data)<line_sep>loss=loss_fn(output label)<block_end>loss.backward()<line_sep>gs={}<line_sep>xs={}<for_stmt>i,param enumerate(trainer._params)<block_start><if_stmt>param.grad_req<ne>'null'<block_start>gs[i]=param._grad[0].asnumpy()<line_sep>xs[i]=param._data[0].asnumpy()<block_end><block_end>trainer.step(batch_size)<for_stmt>i,param enumerate(trainer._params)<block_start><if_stmt>param.grad_req<ne>"null"<block_start>g=gs[i]/(batch_size<times>bps.size())<line_sep>c=dithering(g k rngs[i] ptype ntype)<line_sep>cs=dithering(c k rngs_s[i] ptype ntype)<line_sep>c=cs<line_sep>params[i]<augsub>optimizer_params["learning_rate"]<times>c<line_sep>np_g=c.flatten()<line_sep>mx_g=param._grad[0].asnumpy().flatten()<if_stmt><not>np.allclose(np_g mx_g atol=np.finfo(np.float32).eps)<block_start>diff=np.abs(np_g-mx_g)<line_sep>print("np" np_g)<line_sep>print("mx" mx_g)<line_sep>print("diff" diff)<line_sep>print("max diff" np.max(diff))<line_sep>idx=np.nonzero(diff<g>1e-5)<line_sep>print("idx" idx np_g[idx] mx_g[idx])<line_sep>input()<block_end><block_end><block_end><block_end>cnt=0<line_sep>tot=0<for_stmt>i,param enumerate(trainer._params)<block_start><if_stmt>param.grad_req<ne>"null"<block_start>x=param._data[0].asnumpy()<line_sep>tot<augadd>len(x.flatten())<if_stmt><not>np.allclose(params[i] x atol=np.finfo(np.float32).eps)<block_start>diff=np.abs(x.flatten()-params[i].flatten())<line_sep>idx=np.where(diff<g>np.finfo(np.float32).eps)<line_sep>cnt<augadd>len(idx[0])<block_end><block_end><block_end><assert_stmt>cnt<eq>0 "false/tot=%d/%d=%f"%(cnt tot cnt/tot)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
"""components/intro
initializes the leading text. As of right now, both classes handle control and
view; this should be separated, with the logic for dynamic text moving to
services.
"""<import_from_stmt>typing List<import_from_stmt>dash.development.base_component ComponentMeta<import_from_stmt>dash_core_components Markdown<import_from_stmt>chime_dash.app.components.base Component<class_stmt>Intro(Component)<block_start>"""
"""<line_sep>localization_file="intro.yml"<def_stmt>get_html self<arrow>List[ComponentMeta]# pylint: disable=W0613
<block_start>"""Initializes the header dash html
"""<line_sep><return>[Markdown(id="intro" dangerously_allow_html=<true> dedent=<true>)]<block_end><def_stmt>build self model pars<block_start>result=<none><if_stmt>model<and>pars<block_start>intro=self.content<line_sep>infected_population_warning_str=(intro["infected-population-warning"]<if>model.infected<g>pars.population<else>"")<line_sep>mitigation_rt_str=(intro["mitigation-rt-less-than-1"]<if>model.r_t<l>1<else>intro["mitigation-rt-more-than-equal-1"])<line_sep>result=intro["description-total-infection"].format(total_infections=model.infected current_hosp=pars.current_hospitalized hosp_rate=pars.hospitalized.rate S=pars.population market_share=pars.market_share)+"\n\n"+infected_population_warning_str+"\n\n"+intro["description-doubling-time"].format(doubling_time=pars.doubling_time recovery_days=pars.infectious_days r_naught=model.r_naught daily_growth=model.daily_growth_rate<times>100.0)+"\n\n"+mitigation_rt_str.format(relative_contact_rate=pars.relative_contact_rate doubling_time_t=model.doubling_time_t r_t=model.r_t daily_growth_t=model.daily_growth_rate_t<times>100.0)<block_end><return>[result]<block_end><block_end> |
<import_from_stmt>bgmi.lib.models Filter<import_from_stmt>bgmi.website.model Episode<def_stmt>test_include <block_start>e=Filter(include="2,3,5").apply_on_episodes([Episode(name="1" title="1" download="1" episode=1) Episode(name="1" title="1" download="2" episode=1) Episode(name="2" title="2" download="3" episode=2) Episode(name="2" title="2" download="4" episode=2) Episode(name="3" title="3" download="5" episode=3) Episode(name="5" title="5" download="6" episode=5) ])<assert_stmt>len(e)<eq>4 e<assert_stmt>{x.download<for>x e}<eq>set("3456")<block_end><def_stmt>test_exclude <block_start>e=Filter(exclude="2,3,5").apply_on_episodes([Episode(title="1" download="1" episode=1) Episode(title="1" download="2" episode=2) Episode(title="2" download="3" episode=1) Episode(title="2" download="4" episode=2) Episode(title="3" download="5" episode=3) Episode(title="5" download="6" episode=5) ])<assert_stmt>len(e)<eq>2 e<assert_stmt>{x.download<for>x e}<eq>{"1" "2"}<block_end> |
<class_stmt>ResolveEventArgs(EventArgs)<block_start>"""
Provides data for loader resolution events, such as the System.AppDomain.TypeResolve, System.AppDomain.ResourceResolve, System.AppDomain.ReflectionOnlyAssemblyResolve, and System.AppDomain.AssemblyResolve events.
ResolveEventArgs(name: str)
ResolveEventArgs(name: str,requestingAssembly: Assembly)
"""<line_sep>@staticmethod<def_stmt>__new__ self name requestingAssembly=<none><block_start>"""
__new__(cls: type,name: str)
__new__(cls: type,name: str,requestingAssembly: Assembly)
"""<line_sep><pass><block_end>Name=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the name of the item to resolve.
Get: Name(self: ResolveEventArgs) -> str
"""<line_sep>RequestingAssembly=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the assembly whose dependency is being resolved.
Get: RequestingAssembly(self: ResolveEventArgs) -> Assembly
"""<block_end> |
# Copyright 2021 Toyota Research Institute. All rights reserved.
# pylint: disable=unused-argument
<import_from_stmt>fvcore.transforms.transform BlendTransform<import_from_stmt>detectron2.data.transforms RandomBrightness<as>_RandomBrightness<import_from_stmt>detectron2.data.transforms RandomContrast<as>_RandomContrast<import_from_stmt>detectron2.data.transforms RandomSaturation<as>_RandomSaturation<def_stmt>apply_no_op_intrinsics blend_tfm intrinsics<block_start><return>intrinsics<block_end><def_stmt>apply_no_op_depth blend_tfm depth<block_start><return>depth<block_end><def_stmt>apply_no_op_box3d blend_tfm box3d<block_start><return>box3d<block_end># (dennis.park) Augment ResizeTransform to handle intrinsics, depth
BlendTransform.register_type("intrinsics" apply_no_op_intrinsics)<line_sep>BlendTransform.register_type("depth" apply_no_op_depth)<line_sep>BlendTransform.register_type("box3d" apply_no_op_box3d)<class_stmt>RandomContrast(_RandomContrast)<block_start><def_stmt>get_transform self image<block_start>tfm=super().get_transform(image)<line_sep><return>BlendTransform(tfm.src_image tfm.src_weight tfm.dst_weight)<block_end><block_end><class_stmt>RandomBrightness(_RandomBrightness)<block_start><def_stmt>get_transform self image<block_start>tfm=super().get_transform(image)<line_sep><return>BlendTransform(tfm.src_image tfm.src_weight tfm.dst_weight)<block_end><block_end><class_stmt>RandomSaturation(_RandomSaturation)<block_start><def_stmt>get_transform self image<block_start>tfm=super().get_transform(image)<line_sep><return>BlendTransform(tfm.src_image tfm.src_weight tfm.dst_weight)<block_end><block_end> |
SCHEDULE_NONE=<none><line_sep>SCHEDULE_HOURLY='0 * * * *'<line_sep>SCHEDULE_DAILY='0 0 * * *'<line_sep>SCHEDULE_WEEKLY='0 0 * * 0'<line_sep>SCHEDULE_MONTHLY='0 0 1 * *'<line_sep>SCHEDULE_YEARLY='0 0 1 1 *'<line_sep> |
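# Field order of the crontab expressions above (standard five-field cron):
#   minute  hour  day-of-month  month  day-of-week
# e.g. SCHEDULE_WEEKLY ('0 0 * * 0') runs at midnight every Sunday (0 = Sunday).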
__all__=['perm_cache']<import_stmt>json<import_stmt>pickle<import_from_stmt>functools update_wrapper<import_from_stmt>os.path exists<def_stmt>perm_cache cache_type='pkl' cache_file=<none><block_start><class_stmt>PermCache(object)<block_start>_cache_type=cache_type<line_sep>_cache_file=cache_file<def_stmt>__init__ self func<block_start><if_stmt>self._cache_type<not><in>['pkl' 'json']<block_start><raise>ValueError("Invalid cache type: %s"%self._cache_type)<block_end>self._cache_type=self._cache_type<line_sep>self.func=func<if_stmt>self._cache_file<is><none><block_start>self._cache_file=(func.__code__.co_filename.replace('.py' '.'+self.func.__name__))<line_sep>self._cache_file<augadd>'.cache'<block_end><if_stmt>self._cache_file.endswith('.py')<block_start>self._cache_file=self._cache_file.replace('.py' '.'+self._cache_type)<block_end><else_stmt><block_start>self._cache_file<augadd>'.'+self._cache_type<block_end><if_stmt>exists(self._cache_file)<block_start><if_stmt>self._cache_type<eq>'pkl'<block_start><with_stmt>open(self._cache_file 'rb')<as>f<block_start>self.cache=pickle.load(f)<block_end><block_end><elif_stmt>self._cache_type<eq>'json'<block_start><with_stmt>open(self._cache_file 'r')<as>f<block_start>self.cache=json.load(f)<block_end><block_end><block_end><else_stmt><block_start>self.cache={}<block_end>self.__cache_info=dict.fromkeys(['added' 'read' 'total'] 0)<line_sep>update_wrapper(self func)<line_sep><return><block_end><def_stmt>__call__ self *args **kwargs<block_start>key=' '.join(args)+' '.join(['%s=%s'%(k v)<for>k,v kwargs.items()])<line_sep>self.__cache_info['total']<augadd>1<try_stmt><block_start>res=self.cache[key]<line_sep>self.__cache_info['read']<augadd>1<block_end><except_stmt>KeyError<block_start>res=self.func(*args **kwargs)<line_sep>self.cache[key]=res<line_sep>self.__cache_info['added']<augadd>1<block_end><return>res<block_end><def_stmt>cache_info self<block_start><return>self.__cache_info.copy()<block_end><def_stmt>stash_cache self<block_start><if_stmt>self._cache_type<eq>'pkl'<block_start><with_stmt>open(self._cache_file 'wb')<as>f<block_start>pickle.dump(self.cache f)<block_end><block_end><elif_stmt>self._cache_type<eq>'json'<block_start><with_stmt>open(self._cache_file 'w')<as>f<block_start>json.dump(self.cache f indent=2)<block_end><block_end><return><block_end><block_end><return>PermCache<block_end> |
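# Illustrative usage of the decorator above (a sketch; the decorated function and its
# argument are made up -- note the cache key is built by joining the string arguments):
#   @perm_cache(cache_type='json')
#   def lookup(word):
#       return expensive_remote_call(word)   # hypothetical expensive function
#   lookup("hello")        # computed, then stored in the in-memory cache
#   lookup("hello")        # served from the cache
#   lookup.stash_cache()   # persist the cache next to the defining module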
<import_stmt>sys<import_stmt>random<import_stmt>data<import_stmt>torch<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib.patches<as>patches<line_sep>plt.rc('text' usetex=<true>)<line_sep>plt.rc('font' family='serif' serif='Times')<line_sep>q=float(sys.argv[1])<line_sep># guessing seeds for nice looking datasets
torch.manual_seed(int(2<times>q)+10)<line_sep>random.seed(int(2<times>q)+16)<line_sep>cm=plt.cm.coolwarm<line_sep>params=[(0.05 q) (0.1 q) (0.2 q) (0.3 q) (0.4 q) (0.5 q) ]<line_sep>n=0<line_sep>plt.figure(figsize=(4 11.5) dpi=200)<for_stmt>coord,noise params<block_start>dataset=data.ToyTask(10 coord noise)<line_sep>a,b,c=next(iter(dataset))<line_sep>ax_true=plt.subplot(len(params) 2 n+1 aspect='equal')<line_sep>ax_data=plt.subplot(len(params) 2 n+2 aspect='equal')<for_stmt>i,(weight box) enumerate(zip(a b))<block_start>x=box[0]<line_sep>y=box[1]<line_sep>w=box[2]-box[0]<line_sep>h=box[3]-box[1]<line_sep>config={'alpha':0.3 'linewidth':0 }<line_sep>ax_true.add_patch(patches.Rectangle((x y) w h **config color=cm(1-float(i<l>c))))<line_sep>ax_data.add_patch(patches.Rectangle((x y) w h **config color=cm(1-weight)))<line_sep>ax_true.axes.get_xaxis().set_visible(<false>)<line_sep>ax_data.axes.get_xaxis().set_visible(<false>)<line_sep>ax_true.axes.get_yaxis().set_major_locator(plt.NullLocator())<line_sep>ax_data.axes.get_yaxis().set_visible(<false>)<line_sep>ax_true.set_title('Ground truth: {}'.format(c))<line_sep>ax_data.set_title('Data')<line_sep>ax_true.set_ylabel('$l = {}$'.format(coord))<block_end>n<augadd>2<line_sep>plt.suptitle('\Large$q = {}$'.format(noise))<block_end>plt.subplots_adjust(left=0.1 right=0.9 top=0.96 bottom=0.0 hspace=0)<line_sep>plt.savefig('dataset-{}.pdf'.format(int(round(10<times>q))))<line_sep> |
# -*- coding: utf-8 -*-
<import_from_stmt>django.core.cache caches<import_from_stmt>django.core.cache.backends.base InvalidCacheBackendError<try_stmt><block_start>cache=caches['pages']<block_end><except_stmt>InvalidCacheBackendError<block_start>cache=caches['default']<block_end> |
<import_from_stmt>social_core.backends.beats BeatsOAuth2<line_sep> |
#
# lorawan packet: mhdr(1) mac_payload(1..N) mic(4)
#
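# Example round trip with the class below (illustrative; `nwk_skey` and `raw` are
# assumed inputs -- a session key and the received packet as a list of bytes):
#   phy = PhyPayload(nwk_skey)
#   phy.read(list(raw))              # splits mhdr / mac_payload / mic
#   if phy.valid_mic():
#       payload = phy.get_payload()  # decrypted FRMPayload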
<import_from_stmt>MalformedPacketException MalformedPacketException<import_from_stmt>MHDR MHDR<import_from_stmt>Direction Direction<import_from_stmt>MacPayload MacPayload<class_stmt>PhyPayload<block_start><def_stmt>__init__ self key<block_start>self.key=key<block_end><def_stmt>read self packet<block_start><if_stmt>len(packet)<l>12<block_start><raise>MalformedPacketException("Invalid lorawan packet")<line_sep><block_end>self.mhdr=MHDR(packet[0])<line_sep>self.set_direction()<line_sep>self.mac_payload=MacPayload()<line_sep>self.mac_payload.read(self.get_mhdr().get_mtype() packet[1:-4])<line_sep>self.mic=packet[-4:]<block_end><def_stmt>create self mhdr args<block_start>self.mhdr=MHDR(mhdr)<line_sep>self.set_direction()<line_sep>self.mac_payload=MacPayload()<line_sep>self.mac_payload.create(self.get_mhdr().get_mtype() self.key args)<line_sep>self.mic=<none><block_end><def_stmt>length self<block_start><return>len(self.to_raw())<block_end><def_stmt>to_raw self<block_start>phy_payload=[self.get_mhdr().to_raw()]<line_sep>phy_payload<augadd>self.mac_payload.to_raw()<line_sep>phy_payload<augadd>self.get_mic()<line_sep><return>phy_payload<block_end><def_stmt>get_mhdr self<block_start><return>self.mhdr<line_sep><block_end><def_stmt>set_mhdr self mhdr<block_start>self.mhdr=mhdr<block_end><def_stmt>get_direction self<block_start><return>self.direction.get()<block_end><def_stmt>set_direction self<block_start>self.direction=Direction(self.get_mhdr())<block_end><def_stmt>get_mac_payload self<block_start><return>self.mac_payload<block_end><def_stmt>set_mac_payload self mac_payload<block_start>self.mac_payload=mac_payload<block_end><def_stmt>get_mic self<block_start><if_stmt>self.mic<eq><none><block_start>self.set_mic(self.compute_mic())<block_end><return>self.mic<block_end><def_stmt>set_mic self mic<block_start>self.mic=mic<block_end><def_stmt>compute_mic self<block_start><return>self.mac_payload.frm_payload.compute_mic(self.key self.get_direction() self.get_mhdr())<block_end><def_stmt>valid_mic self<block_start><return>self.get_mic()<eq>self.mac_payload.frm_payload.compute_mic(self.key self.get_direction() self.get_mhdr())<block_end><def_stmt>get_payload self<block_start><return>self.mac_payload.frm_payload.decrypt_payload(self.key self.get_direction())<block_end><block_end> |
<import_stmt>unittest<class_stmt>TestTowerMain(unittest.TestCase)<block_start><def_stmt>test_imports self<block_start><try_stmt><block_start><import_from_stmt>dreamcoder.domains.tower.main Flatten TowerCNN tower_options dreamOfTowers visualizePrimitives main <block_end><except_stmt>Exception<block_start>self.fail('Unable to import tower module')<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# Generated by Django 3.0.6 on 2020-05-07 21:23
<import_from_stmt>django.db migrations models<import_stmt>core.models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("core" "0005_service_ignored_ips") ]<line_sep>operations=[migrations.AddField(model_name="service" name="hide_referrer_regex" field=models.TextField(blank=<true> default="" validators=[core.models._validate_regex]) ) ]<block_end> |
#
# (C) Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>os<import_stmt>gc<import_stmt>logging<import_stmt>pickle<import_stmt>diskcache<import_from_stmt>joblib._parallel_backends ParallelBackendBase PoolManagerMixin<import_from_stmt>joblib.parallel register_parallel_backend<import_from_stmt>numpy ndarray<import_from_stmt>concurrent.futures ThreadPoolExecutor<import_from_stmt>lithops.multiprocessing Pool<import_from_stmt>lithops.storage Storage<line_sep>logger=logging.getLogger(__name__)<def_stmt>register_lithops <block_start>""" Register Lithops Backend to be called with parallel_backend("lithops"). """<line_sep>register_parallel_backend("lithops" LithopsBackend)<block_end><class_stmt>LithopsBackend(ParallelBackendBase PoolManagerMixin)<block_start>"""A ParallelBackend which will use a multiprocessing.Pool.
Will introduce some communication and memory overhead when exchanging
input and output data with the worker Python processes.
However, does not suffer from the Python Global Interpreter Lock.
"""<def_stmt>__init__ self nesting_level=<none> inner_max_num_threads=<none> **pool_kwargs<block_start>super().__init__(nesting_level inner_max_num_threads **{})<line_sep>self.__pool_kwargs=pool_kwargs<block_end># Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS="__JOBLIB_SPAWNED_PARALLEL__"<line_sep>supports_timeout=<true><line_sep>supports_sharedmem=<false><def_stmt>effective_n_jobs self n_jobs<block_start>"""Determine the number of jobs which are going to run in parallel.
This also checks if we are attempting to create a nested parallel
loop.
"""<line_sep># this must be 1 as we only want to create 1 LithopsExecutor()
<return>1<block_end><def_stmt>configure self n_jobs=1 parallel=<none> prefer=<none> require=<none> **memmappingpool_args<block_start>"""Build a process or thread pool and return the number of workers"""<line_sep>n_jobs=self.effective_n_jobs(n_jobs)<line_sep>already_forked=int(os.environ.get(self.JOBLIB_SPAWNED_PROCESS 0))<if_stmt>already_forked<block_start><raise>ImportError('[joblib] Attempting to do parallel computing '<concat>'without protecting your import on a system that does '<concat>'not support forking. To use parallel-computing in a '<concat>'script, you must protect your main loop using "if '<concat>"__name__ == '__main__'"<concat>'". Please see the joblib documentation on Parallel '<concat>'for more information')<block_end># Set an environment variable to avoid infinite loops
os.environ[self.JOBLIB_SPAWNED_PROCESS]='1'<line_sep># Make sure to free as much memory as possible before forking
gc.collect()<line_sep>self._pool=Pool()<line_sep>self.parallel=parallel<line_sep><return>n_jobs<block_end><def_stmt>terminate self<block_start>"""Shutdown the process or thread pool"""<line_sep>super().terminate()<if_stmt>self.JOBLIB_SPAWNED_PROCESS<in>os.environ<block_start><del_stmt>os.environ[self.JOBLIB_SPAWNED_PROCESS]<block_end><block_end><def_stmt>compute_batch_size self<block_start><return>int(1e6)<block_end><def_stmt>apply_async self func callback=<none><block_start>"""Schedule a func to be run"""<line_sep># return self._get_pool().map_async(handle_call, func.items, callback=callback) # bypass
mem_opt_calls=find_shared_objects(func.items)<line_sep><return>self._get_pool().starmap_async(handle_call mem_opt_calls)<block_end><block_end><def_stmt>find_shared_objects calls# find and annotate repeated arguments
<block_start>record={}<for_stmt>i,call enumerate(calls)<block_start><for_stmt>j,arg enumerate(call[1])<block_start><if_stmt>id(arg)<in>record<block_start>record[id(arg)].append((i j))<block_end><else_stmt><block_start>record[id(arg)]=[arg (i j)]<block_end><block_end><for_stmt>k,v call[2].items()<block_start><if_stmt>id(v)<in>record<block_start>record[id(v)].append((i k))<block_end><else_stmt><block_start>record[id(v)]=[v (i k)]<block_end><block_end><block_end># If we found multiple occurrences of one object, then
# store it in shared memory, pass a proxy as a value
calls=[list(item)<for>item calls]<line_sep>storage=Storage()<line_sep>thread_pool=ThreadPoolExecutor(max_workers=len(record))<def_stmt>put_arg_obj positions<block_start>obj=positions.pop(0)<if_stmt>len(positions)<g>1<and>consider_sharing(obj)<block_start>logger.debug('Proxying {}'.format(type(obj)))<line_sep>obj_bin=pickle.dumps(obj)<line_sep>cloud_object=storage.put_cloudobject(obj_bin)<for_stmt>pos positions<block_start>call_n,idx_or_key=pos<line_sep>call=calls[call_n]<if_stmt>isinstance(idx_or_key str)<block_start>call[2][idx_or_key]=cloud_object<block_end><else_stmt><block_start>args_as_list=list(call[1])<line_sep>args_as_list[idx_or_key]=cloud_object<line_sep>call[1]=tuple(args_as_list)<block_end><try_stmt><block_start>call[3].append(idx_or_key)<block_end><except_stmt>IndexError<block_start>call.append([idx_or_key])<block_end><block_end><block_end><block_end>fut=[]<for_stmt>positions record.values()<block_start>f=thread_pool.submit(put_arg_obj positions)<line_sep>fut.append(f)<block_end>[f.result()<for>f fut]<line_sep><return>[tuple(item)<for>item calls]<block_end><def_stmt>handle_call func args kwargs proxy_positions=[]<block_start><if_stmt>len(proxy_positions)<g>0<block_start>args,kwargs=replace_with_values(args kwargs proxy_positions)<block_end><return>func(*args **kwargs)<block_end><def_stmt>replace_with_values args kwargs proxy_positions<block_start>args_as_list=list(args)<line_sep>thread_pool=ThreadPoolExecutor(max_workers=len(proxy_positions))<line_sep>cache=diskcache.Cache('/tmp/lithops/cache')<def_stmt>get_arg_obj idx_or_key<block_start><if_stmt>isinstance(idx_or_key str)<block_start>obj_id=kwargs[idx_or_key]<block_end><else_stmt><block_start>obj_id=args_as_list[idx_or_key]<block_end><if_stmt>obj_id<in>cache<block_start>logger.debug('Get {} (arg {}) from cache'.format(obj_id idx_or_key))<line_sep>obj=cache[obj_id]<block_end><else_stmt><block_start>logger.debug('Get {} (arg {}) from storage'.format(obj_id idx_or_key))<line_sep>storage=Storage()<line_sep>obj_bin=storage.get_cloudobject(obj_id)<line_sep>obj=pickle.loads(obj_bin)<line_sep>cache[obj_id]=obj<block_end><if_stmt>isinstance(idx_or_key str)<block_start>kwargs[idx_or_key]=obj<block_end><else_stmt><block_start>args_as_list[idx_or_key]=obj<block_end><block_end>fut=[]<for_stmt>idx_or_key proxy_positions<block_start>f=thread_pool.submit(get_arg_obj idx_or_key)<line_sep>fut.append(f)<block_end>[f.result()<for>f fut]<line_sep><return>args_as_list kwargs<block_end><def_stmt>consider_sharing obj<block_start><if_stmt>isinstance(obj (ndarray list))# TODO: some heuristic
<block_start><return><true><block_end><return><false><block_end> |
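# Minimal usage sketch for this backend (illustrative; assumes a working Lithops
# configuration and uses joblib's standard public API):
#   from joblib import Parallel, delayed, parallel_backend
#   register_lithops()
#   with parallel_backend("lithops"):
#       squares = Parallel()(delayed(pow)(i, 2) for i in range(8))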
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities to compute J-factor maps."""<import_stmt>astropy.units<as>u<line_sep>__all__=["JFactory"]<class_stmt>JFactory<block_start>"""Compute J-Factor maps.
All J-Factors are computed for annihilation. The assumed dark matter
profiles will be centered on the center of the map.
Parameters
----------
geom : `~gammapy.maps.WcsGeom`
Reference geometry
profile : `~gammapy.astro.darkmatter.profiles.DMProfile`
Dark matter profile
distance : `~astropy.units.Quantity`
Distance to convert angular scale of the map
"""<def_stmt>__init__ self geom profile distance<block_start>self.geom=geom<line_sep>self.profile=profile<line_sep>self.distance=distance<block_end><def_stmt>compute_differential_jfactor self<block_start>r"""Compute differential J-Factor.
.. math::
\frac{\mathrm d J}{\mathrm d \Omega} =
\int_{\mathrm{LoS}} \mathrm d r \rho(r)
"""<line_sep># TODO: Needs to be implemented more efficiently
separation=self.geom.separation(self.geom.center_skydir)<line_sep>rmin=separation.rad<times>self.distance<line_sep>rmax=self.distance<line_sep>val=[self.profile.integral(_ rmax)<for>_ rmin.flatten()]<line_sep>jfact=u.Quantity(val).to("GeV2 cm-5").reshape(rmin.shape)<line_sep><return>jfact/u.steradian<block_end><def_stmt>compute_jfactor self<block_start>r"""Compute astrophysical J-Factor.
.. math::
J(\Delta\Omega) =
\int_{\Delta\Omega} \mathrm d \Omega^{\prime}
\frac{\mathrm d J}{\mathrm d \Omega^{\prime}}
"""<line_sep>diff_jfact=self.compute_differential_jfactor()<line_sep><return>diff_jfact<times>self.geom.to_image().solid_angle()<block_end><block_end> |
<import_stmt>tensorflow<as>tf<line_sep>config=tf.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><import_stmt>numpy<as>np<import_stmt>scipy<import_stmt>os<import_stmt>scipy.io<import_stmt>sys<try_stmt><block_start><import_stmt>cPickle<block_end><except_stmt><block_start><import_stmt>_pickle<as>cPickle<block_end># Syspath for the folder with the utils files
#sys.path.insert(0, "/media/data/srebuffi")
<import_stmt>utils_resnet<import_stmt>utils_icarl<import_stmt>utils_data<line_sep>######### Modifiable Settings ##########
batch_size=128# Batch size
nb_val=50# Validation samples per class
nb_cl=10# Classes per group
nb_groups=10# Number of groups
nb_proto=20# Number of prototypes per class: total protoset memory / total number of classes
epochs=60# Total number of epochs
lr_old=2.# Initial learning rate
lr_strat=[20 30 40 50]# Epochs where learning rate gets decreased
lr_factor=5.# Learning rate decrease factor
gpu='0'# Used GPU
wght_decay=0.00001# Weight Decay
########################################
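# With the settings above the exemplar memory stays roughly fixed at
# nb_proto * nb_groups * nb_cl = 20 * 10 * 10 = 2000 images in total.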
######### Paths ##########
# Working station
devkit_path='/home/srebuffi'<line_sep>train_path='/data/datasets/imagenets72'<line_sep>save_path='/data/srebuffi/backup/'<line_sep>###########################
#####################################################################################################
### Initialization of some variables ###
class_means=np.zeros((512 nb_groups<times>nb_cl 2 nb_groups))<line_sep>loss_batch=[]<line_sep>files_protoset=[]<for_stmt>_ range(nb_groups<times>nb_cl)<block_start>files_protoset.append([])<block_end>### Preparing the files for the training/validation ###
# Random mixing
print("Mixing the classes and putting them in batches of classes...")<line_sep>np.random.seed(1993)<line_sep>order=np.arange(nb_groups<times>nb_cl)<line_sep>mixing=np.arange(nb_groups<times>nb_cl)<line_sep>np.random.shuffle(mixing)<line_sep># Loading the labels
labels_dic,label_names,validation_ground_truth=utils_data.parse_devkit_meta(devkit_path)<line_sep># Or you can just do like this
# define_class = ['apple', 'banana', 'cat', 'dog', 'elephant', 'frog']
# labels_dic = {k: v for v, k in enumerate(define_class)}
# Preparing the files per group of classes
print("Creating a validation set ...")<line_sep>files_train,files_valid=utils_data.prepare_files(train_path mixing order labels_dic nb_groups nb_cl nb_val)<line_sep># Pickle order and files lists and mixing
<with_stmt>open(str(nb_cl)+'mixing.pickle' 'wb')<as>fp<block_start>cPickle.dump(mixing fp)<block_end><with_stmt>open(str(nb_cl)+'settings_resnet.pickle' 'wb')<as>fp<block_start>cPickle.dump(order fp)<line_sep>cPickle.dump(files_valid fp)<line_sep>cPickle.dump(files_train fp)<block_end>### Start of the main algorithm ###
<for_stmt>itera range(nb_groups)# Files to load : training samples + protoset
<block_start>print('Batch of classes number {0} arrives ...'.format(itera+1))<line_sep># Adding the stored exemplars to the training set
<if_stmt>itera<eq>0<block_start>files_from_cl=files_train[itera]<block_end><else_stmt><block_start>files_from_cl=files_train[itera][:]<for_stmt>i range(itera<times>nb_cl)<block_start>nb_protos_cl=int(np.ceil(nb_proto<times>nb_groups<times>1./itera))# Reducing number of exemplars of the previous classes
tmp_var=files_protoset[i]<line_sep>files_from_cl<augadd>tmp_var[0:min(len(tmp_var) nb_protos_cl)]<block_end><block_end>## Import the data reader ##
image_train,label_train=utils_data.read_data(train_path labels_dic mixing files_from_cl=files_from_cl)<line_sep>image_batch,label_batch_0=tf.train.batch([image_train label_train] batch_size=batch_size num_threads=8)<line_sep>label_batch=tf.one_hot(label_batch_0 nb_groups<times>nb_cl)<line_sep>## Define the objective for the neural network ##
<if_stmt>itera<eq>0# No distillation
<block_start>variables_graph,variables_graph2,scores,scores_stored=utils_icarl.prepare_networks(gpu image_batch nb_cl nb_groups)<line_sep># Define the objective for the neural network: 1 vs all cross_entropy
<with_stmt>tf.device('/gpu:0')<block_start>scores=tf.concat(scores 0)<line_sep>l2_reg=wght_decay<times>tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES scope='ResNet18'))<line_sep>loss_class=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch logits=scores))<line_sep>loss=loss_class+l2_reg<line_sep>learning_rate=tf.placeholder(tf.float32 shape=[])<line_sep>opt=tf.train.MomentumOptimizer(learning_rate 0.9)<line_sep>train_step=opt.minimize(loss var_list=variables_graph)<block_end><block_end><if_stmt>itera<g>0# Distillation
<block_start>variables_graph,variables_graph2,scores,scores_stored=utils_icarl.prepare_networks(gpu image_batch nb_cl nb_groups)<line_sep># Copying the network to use its predictions as ground truth labels
op_assign=[(variables_graph2[i]).assign(variables_graph[i])<for>i range(len(variables_graph))]<line_sep># Define the objective for the neural network : 1 vs all cross_entropy + distillation
<with_stmt>tf.device('/gpu:0')<block_start>scores=tf.concat(scores 0)<line_sep>scores_stored=tf.concat(scores_stored 0)<line_sep>old_cl=(order[range(itera<times>nb_cl)]).astype(np.int32)<line_sep>new_cl=(order[range(itera<times>nb_cl nb_groups<times>nb_cl)]).astype(np.int32)<line_sep>label_old_classes=tf.sigmoid(tf.stack([scores_stored[: i]<for>i old_cl] axis=1))<line_sep>label_new_classes=tf.stack([label_batch[: i]<for>i new_cl] axis=1)<line_sep>pred_old_classes=tf.stack([scores[: i]<for>i old_cl] axis=1)<line_sep>pred_new_classes=tf.stack([scores[: i]<for>i new_cl] axis=1)<line_sep>l2_reg=wght_decay<times>tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES scope='ResNet18'))<line_sep>loss_class=tf.reduce_mean(tf.concat([tf.nn.sigmoid_cross_entropy_with_logits(labels=label_old_classes logits=pred_old_classes) tf.nn.sigmoid_cross_entropy_with_logits(labels=label_new_classes logits=pred_new_classes)] 1))<line_sep>loss=loss_class+l2_reg<line_sep>learning_rate=tf.placeholder(tf.float32 shape=[])<line_sep>opt=tf.train.MomentumOptimizer(learning_rate 0.9)<line_sep>train_step=opt.minimize(loss var_list=variables_graph)<block_end><block_end>## Run the learning phase ##
<with_stmt>tf.Session(config=config)<as>sess# Launch the data reader
<block_start>coord=tf.train.Coordinator()<line_sep>threads=tf.train.start_queue_runners(coord=coord)<line_sep>sess.run(tf.global_variables_initializer())<line_sep>lr=lr_old<line_sep># Run the loading of the weights for the learning network and the copy network
<if_stmt>itera<g>0<block_start>void0=sess.run([(variables_graph[i]).assign(save_weights[i])<for>i range(len(variables_graph))])<line_sep>void1=sess.run(op_assign)<block_end><for_stmt>epoch range(epochs)<block_start>print("Batch of classes {} out of {} batches".format(itera+1 nb_groups))<line_sep>print('Epoch %i'%epoch)<for_stmt>i range(int(np.ceil(len(files_from_cl)/batch_size)))<block_start>loss_class_val,_,sc,lab=sess.run([loss_class train_step scores label_batch_0] feed_dict={learning_rate:lr})<line_sep>loss_batch.append(loss_class_val)<line_sep># Plot the training error every 10 batches
<if_stmt>len(loss_batch)<eq>10<block_start>print(np.mean(loss_batch))<line_sep>loss_batch=[]<block_end># Plot the training top 1 accuracy every 80 batches
<if_stmt>(i+1)%80<eq>0<block_start>stat=[]<line_sep>stat<augadd>([ll<in>best<for>ll,best zip(lab np.argsort(sc axis=1)[: -1:])])<line_sep>stat=np.average(stat)<line_sep>print('Training accuracy %f'%stat)<block_end><block_end># Decrease the learning rate by a factor of 5 every 10 epochs, after the first 20 epochs at the initial learning rate
<if_stmt>epoch<in>lr_strat<block_start>lr<augdiv>lr_factor<block_end><block_end>coord.request_stop()<line_sep>coord.join(threads)<line_sep># copy weights to store network
save_weights=sess.run([variables_graph[i]<for>i range(len(variables_graph))])<line_sep>utils_resnet.save_model(save_path+'model-iteration'+str(nb_cl)+'-%i.pickle'%itera scope='ResNet18' sess=sess)<block_end># Reset the graph
tf.reset_default_graph()<line_sep>## Exemplars management part ##
nb_protos_cl=int(np.ceil(nb_proto<times>nb_groups<times>1./(itera+1)))# Reducing number of exemplars for the previous classes
files_from_cl=files_train[itera]<line_sep>inits,scores,label_batch,loss_class,file_string_batch,op_feature_map=utils_icarl.reading_data_and_preparing_network(files_from_cl gpu itera batch_size train_path labels_dic mixing nb_groups nb_cl save_path)<with_stmt>tf.Session(config=config)<as>sess<block_start>coord=tf.train.Coordinator()<line_sep>threads=tf.train.start_queue_runners(coord=coord)<line_sep>void3=sess.run(inits)<line_sep># Load the training samples of the current batch of classes in the feature space to apply the herding algorithm
Dtot,processed_files,label_dico=utils_icarl.load_class_in_feature_space(files_from_cl batch_size scores label_batch loss_class file_string_batch op_feature_map sess)<line_sep>processed_files=np.array([x.decode()<for>x processed_files])<line_sep># Herding procedure : ranking of the potential exemplars
print('Exemplars selection starting ...')<for_stmt>iter_dico range(nb_cl)<block_start>ind_cl=np.where(label_dico<eq>order[iter_dico+itera<times>nb_cl])[0]<line_sep>D=Dtot[: ind_cl]<line_sep>files_iter=processed_files[ind_cl]<line_sep>mu=np.mean(D axis=1)<line_sep>w_t=mu<line_sep>step_t=0<while_stmt><not>(len(files_protoset[itera<times>nb_cl+iter_dico])<eq>nb_protos_cl)<and>step_t<l>1.1<times>nb_protos_cl<block_start>tmp_t=np.dot(w_t D)<line_sep>ind_max=np.argmax(tmp_t)<line_sep>w_t=w_t+mu-D[: ind_max]<line_sep>step_t<augadd>1<if_stmt>files_iter[ind_max]<not><in>files_protoset[itera<times>nb_cl+iter_dico]<block_start>files_protoset[itera<times>nb_cl+iter_dico].append(files_iter[ind_max])<block_end><block_end><block_end>coord.request_stop()<line_sep>coord.join(threads)<block_end># Reset the graph
tf.reset_default_graph()<line_sep># Class means for iCaRL and NCM
print('Computing theoretical class means for NCM and mean-of-exemplars for iCaRL ...')<for_stmt>iteration2 range(itera+1)<block_start>files_from_cl=files_train[iteration2]<line_sep>inits,scores,label_batch,loss_class,file_string_batch,op_feature_map=utils_icarl.reading_data_and_preparing_network(files_from_cl gpu itera batch_size train_path labels_dic mixing nb_groups nb_cl save_path)<with_stmt>tf.Session(config=config)<as>sess<block_start>coord=tf.train.Coordinator()<line_sep>threads=tf.train.start_queue_runners(coord=coord)<line_sep>void2=sess.run(inits)<line_sep>Dtot,processed_files,label_dico=utils_icarl.load_class_in_feature_space(files_from_cl batch_size scores label_batch loss_class file_string_batch op_feature_map sess)<line_sep>processed_files=np.array([x.decode()<for>x processed_files])<for_stmt>iter_dico range(nb_cl)<block_start>ind_cl=np.where(label_dico<eq>order[iter_dico+iteration2<times>nb_cl])[0]<line_sep>D=Dtot[: ind_cl]<line_sep>files_iter=processed_files[ind_cl]<line_sep>current_cl=order[range(iteration2<times>nb_cl (iteration2+1)<times>nb_cl)]<line_sep># Normal NCM mean
class_means[: order[iteration2<times>nb_cl+iter_dico] 1 itera]=np.mean(D axis=1)<line_sep>class_means[: order[iteration2<times>nb_cl+iter_dico] 1 itera]<augdiv>np.linalg.norm(class_means[: order[iteration2<times>nb_cl+iter_dico] 1 itera])<line_sep># iCaRL approximated mean (mean-of-exemplars)
# use only the first exemplars of the old classes: nb_protos_cl controls the number of exemplars per class
ind_herding=np.array([np.where(files_iter<eq>files_protoset[iteration2<times>nb_cl+iter_dico][i])[0][0]<for>i range(min(nb_protos_cl len(files_protoset[iteration2<times>nb_cl+iter_dico])))])<line_sep>D_tmp=D[: ind_herding]<line_sep>class_means[: order[iteration2<times>nb_cl+iter_dico] 0 itera]=np.mean(D_tmp axis=1)<line_sep>class_means[: order[iteration2<times>nb_cl+iter_dico] 0 itera]<augdiv>np.linalg.norm(class_means[: order[iteration2<times>nb_cl+iter_dico] 0 itera])<block_end>coord.request_stop()<line_sep>coord.join(threads)<block_end># Reset the graph
tf.reset_default_graph()<block_end># Pickle class means and protoset
<with_stmt>open(str(nb_cl)+'class_means.pickle' 'wb')<as>fp<block_start>cPickle.dump(class_means fp)<block_end><with_stmt>open(str(nb_cl)+'files_protoset.pickle' 'wb')<as>fp<block_start>cPickle.dump(files_protoset fp)<block_end><block_end> |
# -*- coding: utf-8 -*-
#
# dependencies documentation build configuration file, created by Quark
extensions=['sphinx.ext.autodoc' 'sphinx.ext.napoleon']<line_sep>templates_path=['_templates']<line_sep>source_suffix='.rst'<line_sep>master_doc='index'<line_sep>project=u'dependencies'<line_sep>copyright=u'2015, dependencies authors'<line_sep>author=u'dependencies authors'<line_sep>version='0.0.1'<line_sep>release='0.0.1'<line_sep>language=<none><line_sep>exclude_patterns=['_build']<line_sep>pygments_style='sphinx'<line_sep>todo_include_todos=<false><line_sep>html_theme='alabaster'<line_sep>html_static_path=['_static']<line_sep>htmlhelp_basename='dependenciesdoc'<line_sep>latex_elements={}<line_sep>latex_documents=[(master_doc 'dependencies.tex' u'dependencies Documentation' u'dependencies authors' 'manual') ]<line_sep>man_pages=[(master_doc 'dependencies' u'dependencies Documentation' [author] 1)]<line_sep>texinfo_documents=[(master_doc 'dependencies' u'dependencies Documentation' author 'dependencies' 'One line description of dependencies.' 'Miscellaneous') ]<line_sep> |
# Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
<import_stmt>random<import_stmt>numpy<as>np<import_from_stmt>ai_economist.foundation.base.registrar Registry<class_stmt>BaseAgent<block_start>"""Base class for Agent classes.
Instances of Agent classes are created for each agent in the environment. Agent
instances are stateful, capturing location, inventory, endogenous variables,
and any additional state fields created by environment components during
construction (see BaseComponent.get_additional_state_fields in base_component.py).
They also provide a simple API for getting/setting actions for each of their
registered action subspaces (which depend on the components used to build
the environment).
Args:
idx (int or str): Index that uniquely identifies the agent object amongst the
other agent objects registered in its environment.
multi_action_mode (bool): Whether to allow the agent to take one action for
each of its registered action subspaces each timestep (if True),
or to limit the agent to take only one action each timestep (if False).
"""<line_sep>name=""<def_stmt>__init__ self idx=<none> multi_action_mode=<none><block_start><assert_stmt>self.name<if_stmt>idx<is><none><block_start>idx=0<block_end><if_stmt>multi_action_mode<is><none><block_start>multi_action_mode=<false><block_end><if_stmt>isinstance(idx str)<block_start>self._idx=idx<block_end><else_stmt><block_start>self._idx=int(idx)<block_end>self.multi_action_mode=bool(multi_action_mode)<line_sep>self.single_action_map=({})<line_sep># Used to convert single-action-mode actions to the general format
self.action=dict()<line_sep>self.action_dim=dict()<line_sep>self._action_names=[]<line_sep>self._multi_action_dict={}<line_sep>self._unique_actions=0<line_sep>self._total_actions=0<line_sep>self.state=dict(loc=[0 0] inventory={} escrow={} endogenous={})<line_sep>self._registered_inventory=<false><line_sep>self._registered_endogenous=<false><line_sep>self._registered_components=<false><line_sep>self._noop_action_dict=dict()<line_sep># Special flag to allow logic for multi-action-mode agents
# that are not given any actions.
self._passive_multi_action_agent=<false><line_sep># If this gets set to true, we can make masks faster
self._one_component_single_action=<false><line_sep>self._premask=<none><block_end>@property<def_stmt>idx self<block_start>"""Index used to identify this agent. Must be unique within the environment."""<line_sep><return>self._idx<block_end><def_stmt>register_inventory self resources<block_start>"""Used during environment construction to populate inventory/escrow fields."""<assert_stmt><not>self._registered_inventory<for_stmt>entity_name resources<block_start>self.inventory[entity_name]=0<line_sep>self.escrow[entity_name]=0<block_end>self._registered_inventory=<true><block_end><def_stmt>register_endogenous self endogenous<block_start>"""Used during environment construction to populate endogenous state fields."""<assert_stmt><not>self._registered_endogenous<for_stmt>entity_name endogenous<block_start>self.endogenous[entity_name]=0<block_end>self._registered_endogenous=<true><block_end><def_stmt>_incorporate_component self action_name n<block_start>extra_n=(1<if>self.multi_action_mode<else>0)<line_sep># Each sub-action has a NO-OP in multi action mode)
self.action[action_name]=0<line_sep>self.action_dim[action_name]=n+extra_n<line_sep>self._action_names.append(action_name)<line_sep>self._multi_action_dict[action_name]=<false><line_sep>self._unique_actions<augadd>1<if_stmt>self.multi_action_mode<block_start>self._total_actions<augadd>n+extra_n<block_end><else_stmt><block_start><for_stmt>action_n range(1 n+1)<block_start>self._total_actions<augadd>1<line_sep>self.single_action_map[int(self._total_actions)]=[action_name action_n ]<block_end><block_end><block_end><def_stmt>register_components self components<block_start>"""Used during environment construction to set up state/action spaces."""<assert_stmt><not>self._registered_components<for_stmt>component components<block_start>n=component.get_n_actions(self.name)<if_stmt>n<is><none><block_start><continue><block_end># Most components will have a single action-per-agent, so n is an int
<if_stmt>isinstance(n int)<block_start><if_stmt>n<eq>0<block_start><continue><block_end>self._incorporate_component(component.name n)<block_end># They can also internally handle multiple actions-per-agent,
# so n is an tuple or list
<elif_stmt>isinstance(n (tuple list))<block_start><for_stmt>action_sub_name,n_ n<block_start><if_stmt>n_<eq>0<block_start><continue><block_end><if_stmt>"."<in>action_sub_name<block_start><raise>NameError("Sub-action {} of component {} "<concat>"is illegally named.".format(action_sub_name component.name))<block_end>self._incorporate_component("{}.{}".format(component.name action_sub_name) n_)<block_end><block_end># If that's not what we got something is funky.
<else_stmt><block_start><raise>TypeError("Received unexpected type ({}) from {}.get_n_actions('{}')".format(type(n) component.name self.name))<block_end><for_stmt>k,v component.get_additional_state_fields(self.name).items()<block_start>self.state[k]=v<block_end><block_end># Currently no actions are available to this agent. Give it a placeholder.
<if_stmt>len(self.action)<eq>0<and>self.multi_action_mode<block_start>self._incorporate_component("PassiveAgentPlaceholder" 0)<line_sep>self._passive_multi_action_agent=<true><block_end><elif_stmt>len(self.action)<eq>1<and><not>self.multi_action_mode<block_start>self._one_component_single_action=<true><line_sep>self._premask=np.ones(1+self._total_actions dtype=np.float32)<block_end>self._registered_components=<true><line_sep>self._noop_action_dict={k:v<times>0<for>k,v self.action.items()}<line_sep>verbose=<false><if_stmt>verbose<block_start>print(self.name self.idx "constructed action map:")<for_stmt>k,v self.single_action_map.items()<block_start>print("single action map:" k v)<block_end><for_stmt>k,v self.action.items()<block_start>print("action:" k v)<block_end><for_stmt>k,v self.action_dim.items()<block_start>print("action_dim:" k v)<block_end><block_end><block_end>@property<def_stmt>action_spaces self<block_start>"""
if self.multi_action_mode == True:
Returns an integer array with length equal to the number of action
subspaces that the agent registered. The i'th element of the array
indicates the number of actions associated with the i'th action subspace.
In multi_action_mode, each subspace includes a NO-OP.
Note: self._action_names describes which action subspace each element of
the array refers to.
Example:
>> self.multi_action_mode
True
>> self.action_spaces
[2, 5]
>> self._action_names
["Build", "Gather"]
# [1 Build action + Build NO-OP, 4 Gather actions + Gather NO-OP]
if self.multi_action_mode == False:
Returns a single integer equal to the total number of actions that the
agent can take.
Example:
>> self.multi_action_mode
False
>> self.action_spaces
6
>> self._action_names
["Build", "Gather"]
# 1 NO-OP + 1 Build action + 4 Gather actions.
"""<if_stmt>self.multi_action_mode<block_start>action_dims=[]<for_stmt>m self._action_names<block_start>action_dims.append(np.array(self.action_dim[m]).reshape(-1))<block_end><return>np.concatenate(action_dims).astype(np.int32)<block_end>n_actions=1# (NO-OP)
<for_stmt>m self._action_names<block_start>n_actions<augadd>self.action_dim[m]<block_end><return>n_actions<block_end>@property<def_stmt>loc self<block_start>"""2D list of [row, col] representing agent's location in the environment."""<line_sep><return>self.state["loc"]<block_end>@property<def_stmt>endogenous self<block_start>"""Dictionary representing endogenous quantities (i.e. "Labor").
Example:
>> self.endogenous
{"Labor": 30.25}
"""<line_sep><return>self.state["endogenous"]<block_end>@property<def_stmt>inventory self<block_start>"""Dictionary representing quantities of resources in agent's inventory.
Example:
>> self.inventory
{"Wood": 3, "Stone": 20, "Coin": 1002.83}
"""<line_sep><return>self.state["inventory"]<block_end>@property<def_stmt>escrow self<block_start>"""Dictionary representing quantities of resources in agent's escrow.
https://en.wikipedia.org/wiki/Escrow
Escrow is used to manage any portion of the agent's inventory that is
reserved for a particular purpose. Typically, something enters escrow as part
of a contractual arrangement to disburse that something when another
condition is met. An example is found in the ContinuousDoubleAuction
Component class (see ../components/continuous_double_auction.py). When an
agent creates an order to sell a unit of Wood, for example, the component
moves one unit of Wood from the agent's inventory to its escrow. If another
agent buys the Wood, it is moved from escrow to the other agent's inventory. By
placing the Wood in escrow, it prevents the first agent from using it for
something else (i.e. building a house).
Notes:
The inventory and escrow share the same keys. An agent's endowment refers
to the total quantity it has in its inventory and escrow.
Escrow is provided to simplify inventory management but its intended
semantics are not enforced directly. It is up to Component classes to
enforce these semantics.
Example:
>> self.escrow
{"Wood": 0, "Stone": 1, "Coin": 3}
"""<line_sep><return>self.state["escrow"]<block_end><def_stmt>inventory_to_escrow self resource amount<block_start>"""Move some amount of a resource from agent inventory to agent escrow.
Amount transferred is capped to the amount of resource in agent inventory.
Args:
resource (str): The name of the resource to move (i.e. "Wood", "Coin").
amount (float): The amount to be moved from inventory to escrow. Must be
positive.
Returns:
Amount of resource actually transferred. Will be less than amount argument
if amount argument exceeded the amount of resource in the inventory.
Calculated as:
transferred = np.minimum(self.state["inventory"][resource], amount)
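Example (illustrative values; assumes the inventory currently holds 3 units of Wood):
>> self.inventory_to_escrow("Wood", 5)
3.0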
"""<assert_stmt>amount<ge>0<line_sep>transferred=float(np.minimum(self.state["inventory"][resource] amount))<line_sep>self.state["inventory"][resource]<augsub>transferred<line_sep>self.state["escrow"][resource]<augadd>transferred<line_sep><return>float(transferred)<block_end><def_stmt>escrow_to_inventory self resource amount<block_start>"""Move some amount of a resource from agent escrow to agent inventory.
Amount transferred is capped to the amount of resource in agent escrow.
Args:
resource (str): The name of the resource to move (i.e. "Wood", "Coin").
amount (float): The amount to be moved from escrow to inventory. Must be
positive.
Returns:
Amount of resource actually transferred. Will be less than amount argument
if amount argument exceeded the amount of resource in escrow.
Calculated as:
transferred = np.minimum(self.state["escrow"][resource], amount)
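Example (illustrative values; assumes the escrow currently holds 2 units of Wood):
>> self.escrow_to_inventory("Wood", 5)
2.0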
"""<assert_stmt>amount<ge>0<line_sep>transferred=float(np.minimum(self.state["escrow"][resource] amount))<line_sep>self.state["escrow"][resource]<augsub>transferred<line_sep>self.state["inventory"][resource]<augadd>transferred<line_sep><return>float(transferred)<block_end><def_stmt>total_endowment self resource<block_start>"""Get the combined inventory+escrow endowment of resource.
Args:
resource (str): Name of the resource
Returns:
The amount of resource in the agent's inventory and escrow.
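Example (illustrative values; assumes 3 Wood in the inventory and 1 Wood in escrow):
>> self.total_endowment("Wood")
4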
"""<line_sep><return>self.inventory[resource]+self.escrow[resource]<block_end><def_stmt>reset_actions self component=<none><block_start>"""Reset all actions to the NO-OP action (the 0'th action index).
If component is specified, only reset action(s) for that component.
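Example ("Build" is an assumed component name, shown for illustration only):
>> self.reset_actions("Build")  # resets only the Build action(s)
>> self.reset_actions()  # resets every registered action to NO-OP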
"""<if_stmt><not>component<block_start>self.action.update(self._noop_action_dict)<block_end><else_stmt><block_start><for_stmt>k,v self.action.items()<block_start><if_stmt>"."<in>component<block_start><if_stmt>k.lower()<eq>component.lower()<block_start>self.action[k]=v<times>0<block_end><block_end><else_stmt><block_start>base_component=k.split(".")[0]<if_stmt>base_component.lower()<eq>component.lower()<block_start>self.action[k]=v<times>0<block_end><block_end><block_end><block_end><block_end><def_stmt>has_component self component_name<block_start>"""Returns True if the agent has component_name as a registered subaction."""<line_sep><return>bool(component_name<in>self.action)<block_end><def_stmt>get_random_action self<block_start>"""
Select a component at random and randomly choose one of its actions (other
than NO-OP).
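Example (illustrative; assumes "Gather" is one of the registered components):
>> self.get_random_action()
{"Gather": 2}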
"""<line_sep>random_component=random.choice(self._action_names)<line_sep>component_action=random.choice(list(range(1 self.action_dim[random_component])))<line_sep><return>{random_component:component_action}<block_end><def_stmt>get_component_action self component_name sub_action_name=<none><block_start>"""
Return the action(s) taken for component_name component, or None if the
agent does not use that component.
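Example (illustrative; assumes a single registered "Build" subaction whose buffer currently holds 1):
>> self.get_component_action("Build")
1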
"""<if_stmt>sub_action_name<is><not><none><block_start><return>self.action.get(component_name+"."+sub_action_name <none>)<block_end>matching_names=[m<for>m self._action_names<if>m.split(".")[0]<eq>component_name]<if_stmt>len(matching_names)<eq>0<block_start><return><none><block_end><if_stmt>len(matching_names)<eq>1<block_start><return>self.action.get(matching_names[0] <none>)<block_end><return>[self.action.get(m <none>)<for>m matching_names]<block_end><def_stmt>set_component_action self component_name action<block_start>"""Set the action(s) taken for component_name component."""<if_stmt>component_name<not><in>self.action<block_start><raise>KeyError("Agent {} of type {} does not have {} registered as a subaction".format(self.idx self.name component_name))<block_end><if_stmt>self._multi_action_dict[component_name]<block_start>self.action[component_name]=np.array(action dtype=np.int32)<block_end><else_stmt><block_start>self.action[component_name]=int(action)<block_end><block_end><def_stmt>populate_random_actions self<block_start>"""Fill the action buffer with random actions. This is for testing."""<for_stmt>component,d self.action_dim.items()<block_start><if_stmt>isinstance(d int)<block_start>self.set_component_action(component np.random.randint(0 d))<block_end><else_stmt><block_start>d_array=np.array(d)<line_sep>self.set_component_action(component np.floor(np.random.rand(*d_array.shape)<times>d_array))<block_end><block_end><block_end><def_stmt>parse_actions self actions<block_start>"""Parse the actions array to fill each component's action buffers."""<if_stmt>self.multi_action_mode<block_start><assert_stmt>len(actions)<eq>self._unique_actions<if_stmt>len(actions)<eq>1<block_start>self.set_component_action(self._action_names[0] actions[0])<block_end><else_stmt><block_start><for_stmt>action_name,action zip(self._action_names actions)<block_start>self.set_component_action(action_name int(action))<block_end><block_end><block_end># Single action mode
<else_stmt># Action was supplied as an index of a specific subaction.
# No need to do any lookup.
<block_start><if_stmt>isinstance(actions dict)<block_start><if_stmt>len(actions)<eq>0<block_start><return><block_end><assert_stmt>len(actions)<eq>1<line_sep>action_name=list(actions.keys())[0]<line_sep>action=list(actions.values())[0]<if_stmt>action<eq>0<block_start><return><block_end>self.set_component_action(action_name action)<block_end># Action was supplied as an index into the full set of combined actions
<else_stmt><block_start>action=int(actions)<line_sep># Universal NO-OP
<if_stmt>action<eq>0<block_start><return><block_end>action_name,action=self.single_action_map.get(action)<line_sep>self.set_component_action(action_name action)<block_end><block_end><block_end><def_stmt>flatten_masks self mask_dict<block_start>"""Convert a dictionary of component action masks into a single mask vector."""<if_stmt>self._one_component_single_action<block_start>self._premask[1:]=mask_dict[self._action_names[0]]<line_sep><return>self._premask<block_end>no_op_mask=[1]<if_stmt>self._passive_multi_action_agent<block_start><return>np.array(no_op_mask).astype(np.float32)<block_end>list_of_masks=[]<if_stmt><not>self.multi_action_mode<block_start>list_of_masks.append(no_op_mask)<block_end><for_stmt>m self._action_names<block_start><if_stmt>m<not><in>mask_dict<block_start><raise>KeyError("No mask provided for {} (agent {})".format(m self.idx))<block_end><if_stmt>self.multi_action_mode<block_start>list_of_masks.append(no_op_mask)<block_end>list_of_masks.append(mask_dict[m])<block_end><return>np.concatenate(list_of_masks).astype(np.float32)<block_end><block_end>agent_registry=Registry(BaseAgent)<line_sep>"""The registry for Agent classes.
This creates a registry object for Agent classes. This registry requires that all
added classes are subclasses of BaseAgent. To make an Agent class available through
the registry, decorate the class definition with @agent_registry.add.
Example:
from ai_economist.foundation.base.base_agent import BaseAgent, agent_registry
@agent_registry.add
class ExampleAgent(BaseAgent):
name = "Example"
pass
assert agent_registry.has("Example")
AgentClass = agent_registry.get("Example")
agent = AgentClass(...)
assert isinstance(agent, ExampleAgent)
Notes:
The foundation package exposes the agent registry as: foundation.agents
An Agent class that is defined and registered following the above example will
only be visible in foundation.agents if defined/registered in a file that is
imported in ../agents/__init__.py.
"""<line_sep> |
<import_from_stmt>unittest TestCase<import_from_stmt>apps.compatibility *<import_stmt>json<import_from_stmt>io BytesIO<line_sep>WALLET_SOFTWARE=b'{"label": "blah", "blockheight": 0, "descriptor": "wsh(sortedmulti(1,[fb7c1f11/48h/1h/0h/2h]tpubDExnGppazLhZPNadP8Q5Vgee2QcvbyAf9GvGaEY7ALVJREaG2vdTqv1MHRoDtPaYP3y1DGVx7wrKKhsLhs26GY263uE6Wi3qNbi71AHZ6p7/0/*,[33a2bf0c/48h/1h/0h/2h]tpubDF4cAhFDn6XSPhQtFECSkQm35oEzVyHHAiPa4Qy83fBtPw9nFJAodN6xF6nY7y2xKMGc5nbDFZfAac88oaurVzrCUxyhmc9J8W5tg3N5NkS/0/*))#vk844svv", "devices": [{"type": "specter", "label": "ability"}, {"type": "coldcard", "label": "hox"}]}'<line_sep>COLDCARD_FILE="""
# Coldcard Multisig setup file (created on Specter Desktop)
#
Name: blah
Policy: 1 of 2
Derivation: m/48'/1'/0'/2'
Format: P2WSH
FB7C1F11: tpubDExnGppazLhZPNadP8Q5Vgee2QcvbyAf9GvGaEY7ALVJREaG2vdTqv1MHRoDtPaYP3y1DGVx7wrKKhsLhs26GY263uE6Wi3qNbi71AHZ6p7
33A2BF0C: tpubDF4cAhFDn6XSPhQtFECSkQm35oEzVyHHAiPa4Qy83fBtPw9nFJAodN6xF6nY7y2xKMGc5nbDFZfAac88oaurVzrCUxyhmc9J8W5tg3N5NkS
"""<line_sep>EXPECTED=('blah' 'wsh(sortedmulti(1,[fb7c1f11/48h/1h/0h/2h]tpubDExnGppazLhZPNadP8Q5Vgee2QcvbyAf9GvGaEY7ALVJREaG2vdTqv1MHRoDtPaYP3y1DGVx7wrKKhsLhs26GY263uE6Wi3qNbi71AHZ6p7/{0,1}/*,[33a2bf0c/48h/1h/0h/2h]tpubDF4cAhFDn6XSPhQtFECSkQm35oEzVyHHAiPa4Qy83fBtPw9nFJAodN6xF6nY7y2xKMGc5nbDFZfAac88oaurVzrCUxyhmc9J8W5tg3N5NkS/{0,1}/*))')<class_stmt>CompatibilityTest(TestCase)<block_start><def_stmt>test_import self<block_start>self.assertEqual(EXPECTED parse_software_wallet_json(json.load(BytesIO(WALLET_SOFTWARE))))<line_sep>self.assertEqual(EXPECTED parse_cc_wallet_txt(BytesIO(COLDCARD_FILE.encode())))<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>plenum.common.constants KeyValueStorageType<import_from_stmt>storage.helper initKeyValueStorage<import_from_stmt>storage.kv_store KeyValueStorage<line_sep>db_no=0<line_sep>@pytest.yield_fixture(params=[KeyValueStorageType.Rocksdb KeyValueStorageType.Leveldb KeyValueStorageType.BinaryFile])<def_stmt>storage request tdir<arrow>KeyValueStorage<block_start><global>db_no<line_sep>db=initKeyValueStorage(request.param tdir 'metrics_db_{}'.format(db_no))<line_sep>db_no<augadd>1<line_sep><yield>db<line_sep>db.close()<block_end> |
<import_from_future_stmt> absolute_import division print_function<import_stmt>os<def_stmt>run <block_start><import_stmt>libtbx.load_env<line_sep>src_dir=libtbx.env.under_dist(module_name="scitbx" path="lbfgs" test=os.path.isdir)<import_stmt>fable.read<line_sep>all_fprocs=fable.read.process(file_names=[os.path.join(src_dir f)<for>f ["sdrive.f" "lbfgs.f"]])<line_sep>namespace="scitbx::lbfgs_fem"<line_sep>functions_public=set(["lbfgs" "blockdata_lb2"])<line_sep>functions_detail=set(["lb1" "daxpy" "ddot" "mcstep" "mcsrch"])<line_sep>functions_program=set(["one_pass"])<import_stmt>fable.cout<line_sep>functions_hpp=fable.cout.process(all_fprocs=all_fprocs namespace=namespace fem_do_safe=<false> suppress_program=<true> suppress_common=<false> suppress_functions=functions_detail.union(functions_program) suppress_function_definitions=functions_public)<line_sep>functions_cpp=fable.cout.process(all_fprocs=all_fprocs namespace=namespace fem_do_safe=<false> suppress_program=<true> suppress_common=<true> suppress_functions=functions_program)<line_sep>functions_cpp[0]="#include <scitbx/lbfgs_fem.hpp>"<line_sep>sdrive_cpp=fable.cout.process(all_fprocs=all_fprocs namespace=namespace fem_do_safe=<false> suppress_common=<true> suppress_functions=functions_detail.union(functions_public))<line_sep>sdrive_cpp[0]=functions_cpp[0]<line_sep>#
<def_stmt>make_target_dir path<block_start>result=libtbx.env.under_build(path=path)<if_stmt>(<not>os.path.isdir(result))<block_start>os.makedirs(result)<assert_stmt>os.path.isdir(result)<block_end><return>result<block_end>target_dir=make_target_dir(path="include/scitbx")<with_stmt>open(os.path.join(target_dir "lbfgs_fem.hpp") "w")<as>fh<block_start>fh.write("\n".join(functions_hpp))<block_end>target_dir=make_target_dir(path="scitbx/lbfgs")<with_stmt>open(os.path.join(target_dir "lbfgs_fem.cpp") "w")<as>fh<block_start>fh.write("\n".join(functions_cpp))<block_end><with_stmt>open(os.path.join(target_dir "sdrive_fem.cpp") "w")<as>fh<block_start>fh.write("\n".join(sdrive_cpp))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>run()<block_end> |
# Copyright 2009-2011 <NAME>.
# This program is distributed under the LGPL2.1 license.
'''
Defines the `Textual` class.
See its documentation for more details.
'''<import_from_future_stmt> division<import_stmt>wx<import_from_stmt>python_toolbox freezing<import_from_stmt>python_toolbox wx_tools<import_from_stmt>python_toolbox.wx_tools.widgets.cute_panel CutePanel<def_stmt>ratio_to_round_degrees ratio<block_start><return>int(ratio<times>360)<block_end><def_stmt>degrees_to_ratio degrees<block_start><return>degrees/360<block_end><class_stmt>Textual(CutePanel)<block_start>'''Display (and allow modifying) the hue as a number 0-359.'''<def_stmt>__init__ self hue_selection_dialog<block_start>wx.Panel.__init__(self parent=hue_selection_dialog size=(75 100))<line_sep>self.set_good_background_color()<line_sep>self.SetHelpText('Set the hue in angles (0°-359°).')<line_sep>self.hue_selection_dialog=hue_selection_dialog<line_sep>self.hue=hue_selection_dialog.hue<line_sep>self.main_v_sizer=wx.BoxSizer(wx.VERTICAL)<line_sep>self.hue_static_text=wx.StaticText(self label='&Hue:')<line_sep>self.main_v_sizer.Add(self.hue_static_text 0 wx.ALIGN_LEFT|wx.BOTTOM border=5)<line_sep>self.h_sizer=wx.BoxSizer(wx.HORIZONTAL)<line_sep>self.main_v_sizer.Add(self.h_sizer 0)<line_sep>self.spin_ctrl=wx.SpinCtrl(self min=0 max=359 initial=ratio_to_round_degrees(self.hue) size=(70 -1) style=wx.SP_WRAP)<if_stmt>wx_tools.is_mac<block_start>self.spin_ctrl.SetValue(ratio_to_round_degrees(self.hue))<block_end>self.h_sizer.Add(self.spin_ctrl 0)<line_sep>self.degree_static_text=wx.StaticText(self label=unichr(176))<line_sep>self.h_sizer.Add(self.degree_static_text 0)<line_sep>self.SetSizerAndFit(self.main_v_sizer)<line_sep>self.Bind(wx.EVT_SPINCTRL self._on_spin source=self.spin_ctrl)<line_sep>self.Bind(wx.EVT_TEXT self._on_text source=self.spin_ctrl)<block_end>value_freezer=freezing.FreezerProperty()<def_stmt>update self<block_start>'''Update to show the new hue.'''<if_stmt><not>self.value_freezer.frozen<and>self.hue<ne>self.hue_selection_dialog.hue<block_start>self.hue=self.hue_selection_dialog.hue<line_sep>self.spin_ctrl.SetValue(ratio_to_round_degrees(self.hue))<block_end><block_end><def_stmt>_on_spin self event<block_start>self.hue_selection_dialog.setter(degrees_to_ratio(self.spin_ctrl.Value))<block_end><def_stmt>_on_text self event<block_start><with_stmt>self.value_freezer<block_start>self.hue_selection_dialog.setter(degrees_to_ratio(self.spin_ctrl.Value))<block_end><block_end><def_stmt>set_focus_on_spin_ctrl_and_select_all self<block_start>'''
The "select all" part works only on Windows and generic `wx.SpinCtrl`
implementations.
'''<line_sep>self.spin_ctrl.SetFocus()<line_sep>self.spin_ctrl.SetSelection(-1 -1)<block_end><block_end> |
<import_from_stmt>.. DataStreamProcessor<class_stmt>conditional(DataStreamProcessor)<block_start><def_stmt>__init__ self predicate flow<block_start>super().__init__()<line_sep>self.predicate=predicate<line_sep>self.flow=flow<block_end><def_stmt>_process self<block_start>ds=self.source._process()<if_stmt>self.predicate(ds.dp)<block_start><if_stmt>callable(self.flow)<block_start>flow=self.flow(ds.dp)<block_end><else_stmt><block_start>flow=self.flow<block_end><return>flow.datastream(ds)<block_end><else_stmt><block_start><return>ds<block_end><block_end><block_end> |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>caffe2.python core<import_from_stmt>hypothesis given<import_stmt>caffe2.python.hypothesis_test_util<as>hu<import_stmt>hypothesis.strategies<as>st<import_stmt>numpy<as>np<def_stmt>_string_lists alphabet=<none><block_start><return>st.lists(elements=st.text(alphabet=alphabet average_size=3) min_size=0 max_size=3)<block_end><class_stmt>TestStringOps(hu.HypothesisTestCase)<block_start>@given(strings=_string_lists())<def_stmt>test_string_prefix self strings<block_start>length=3<line_sep># although we are utf-8 encoding below to avoid python exceptions,
# StringPrefix op deals with byte-length prefixes, which may produce
# an invalid utf-8 string. The goal here is just to avoid python
# complaining about the unicode -> str conversion.
strings=np.array([a.encode('utf-8')<for>a strings] dtype=np.object)<def_stmt>string_prefix_ref strings<block_start><return>(np.array([a[:length]<for>a strings] dtype=object) )<block_end>op=core.CreateOperator('StringPrefix' ['strings'] ['stripped'] length=length)<line_sep>self.assertReferenceChecks(hu.cpu_do op [strings] string_prefix_ref)<block_end>@given(strings=_string_lists())<def_stmt>test_string_suffix self strings<block_start>length=3<line_sep>strings=np.array([a.encode('utf-8')<for>a strings] dtype=np.object)<def_stmt>string_suffix_ref strings<block_start><return>(np.array([a[-length:]<for>a strings] dtype=object) )<block_end>op=core.CreateOperator('StringSuffix' ['strings'] ['stripped'] length=length)<line_sep>self.assertReferenceChecks(hu.cpu_do op [strings] string_suffix_ref)<block_end>@given(strings=st.text(alphabet=['a' 'b'] average_size=3))<def_stmt>test_string_starts_with self strings<block_start>prefix='a'<line_sep>strings=np.array([str(a)<for>a strings] dtype=np.object)<def_stmt>string_starts_with_ref strings<block_start><return>(np.array([a.startswith(prefix)<for>a strings] dtype=bool) )<block_end>op=core.CreateOperator('StringStartsWith' ['strings'] ['bools'] prefix=prefix)<line_sep>self.assertReferenceChecks(hu.cpu_do op [strings] string_starts_with_ref)<block_end>@given(strings=st.text(alphabet=['a' 'b'] average_size=3))<def_stmt>test_string_ends_with self strings<block_start>suffix='a'<line_sep>strings=np.array([str(a)<for>a strings] dtype=np.object)<def_stmt>string_ends_with_ref strings<block_start><return>(np.array([a.endswith(suffix)<for>a strings] dtype=bool) )<block_end>op=core.CreateOperator('StringEndsWith' ['strings'] ['bools'] suffix=suffix)<line_sep>self.assertReferenceChecks(hu.cpu_do op [strings] string_ends_with_ref)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>unittest<line_sep>unittest.main()<block_end> |
<import_stmt>cartography.intel.pagerduty.schedules<import_stmt>tests.data.pagerduty.schedules<line_sep>TEST_UPDATE_TAG=123456789<def_stmt>test_load_schedule_data neo4j_session<block_start>schedule_data=tests.data.pagerduty.schedules.LIST_SCHEDULES_DATA<line_sep>cartography.intel.pagerduty.schedules.load_schedule_data(neo4j_session schedule_data TEST_UPDATE_TAG )<line_sep>expected_nodes={"PI7DH85" }<line_sep>nodes=neo4j_session.run("""
MATCH (n:PagerDutySchedule) RETURN n.id;
""" )<line_sep>actual_nodes={n['n.id']<for>n nodes}<assert_stmt>actual_nodes<eq>expected_nodes<line_sep>expected_layers={"PI7DH85-Night Shift" }<line_sep>layers=neo4j_session.run("""
MATCH (:PagerDutySchedule{id:"PI7DH85"})-[:HAS_LAYER]->(n:PagerDutyScheduleLayer)
RETURN n.id;
""" )<line_sep>actual_layers={n['n.id']<for>n layers}<assert_stmt>actual_layers<eq>expected_layers<block_end> |
# -*- coding: utf8 -*-
<import_from_stmt>utils.config BASE_DIR<import_from_stmt>Crypto.Cipher AES<import_from_stmt>Crypto.Util.Padding unpad pad<import_from_stmt>hashlib md5<import_stmt>base64<import_stmt>execjs<import_stmt>json<def_stmt>auth_sign appId timestamp key='<KEY>'<block_start><return>md5(''.join([appId key str(timestamp)]).encode('utf8')).hexdigest()<block_end><def_stmt>encrypt plaintext accesstoken<block_start>key=accesstoken[16:].encode('utf8')<line_sep>iv='16-Bytes--String'.encode('utf8')<line_sep>data=json.dumps(plaintext ensure_ascii=<false> separators=(',' ':' )).encode('utf8')<line_sep>data=pad(data 16)<line_sep>cipher=AES.new(key AES.MODE_CBC iv)<line_sep>buf=cipher.encrypt(data).hex().encode('utf8')<line_sep><return>base64.b64encode(buf).decode('utf8')<block_end><def_stmt>decrypt ciphertext accesstoken<block_start>key=accesstoken[16:].encode('utf8')<line_sep>iv='16-Bytes--String'.encode('utf8')<line_sep>data=bytes.fromhex(base64.b64decode(ciphertext).decode('utf8'))<line_sep>cipher=AES.new(key AES.MODE_CBC iv)<line_sep>buf=cipher.decrypt(data)<line_sep>buf=unpad(buf 16)<line_sep><return>json.loads(buf)<block_end><def_stmt>cryptojs_encrypt plaintext<block_start>key='null'<line_sep>iv='16-Bytes--String'<with_stmt>open(BASE_DIR+'/utils/crypto-js.js' 'r' encoding='utf8')<as>fp<block_start>script=fp.read()<block_end>ctx=execjs.compile(script)<line_sep>result=ctx.call('encrypt' key iv plaintext)<line_sep><return>result<block_end><def_stmt>cryptojs_decrypt ciphertext<block_start>key='null'<line_sep>iv='16-Bytes--String'<with_stmt>open(BASE_DIR+'/utils/crypto-js.js' 'r' encoding='utf8')<as>fp<block_start>script=fp.read()<block_end>ctx=execjs.compile(script)<line_sep>result=ctx.call('decrypt' key iv ciphertext)<line_sep><return>result<block_end><if_stmt>__name__<eq>'__main__'<block_start><pass><block_end> |
<import_from_stmt>machina.models.base BaseModel<import_from_stmt>machina.models.deterministic_state_model DeterministicSModel<line_sep> |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
<import_stmt>sys<import_stmt>os<import_stmt>subprocess<import_stmt>difflib<import_stmt>logging<def_stmt>get_systemds_root <block_start><try_stmt><block_start><return>os.environ['SYSTEMDS_ROOT']<block_end><except_stmt>KeyError<as>error<block_start><raise>KeyError(f"SYSTEMDS_ROOT is not set.\nError\n{error}")<block_end><block_end><def_stmt>get_sklearn_root <block_start><return>f'{get_systemds_root()}/scripts/staging/sklearn'<block_end><def_stmt>invoke_systemds path<block_start>root=get_systemds_root()<try_stmt><block_start>script_path=os.path.relpath(path os.getcwd())<line_sep>result=subprocess.run([root+"/bin/systemds" script_path '-nvargs input_X=tests/input_X.csv input_Y=tests/input_Y.csv'] check=<true> stdout=subprocess.PIPE stderr=subprocess.PIPE timeout=10000)<line_sep>logging.debug('*'<times>100)<line_sep>logging.debug('\n'+result.stdout.decode('utf-8'))<line_sep>logging.debug('\n'+result.stderr.decode('utf-8'))<line_sep>logging.debug('*'<times>100)<line_sep># It looks like python does not notice systemds errors
# Is 0 returned in error cases?
# Check if there is any error and raise manually.
<if_stmt>len(result.stderr)<ne>0<or>'error'<in>str(result.stdout).lower()<block_start><raise>subprocess.CalledProcessError(returncode=result.returncode cmd=result.args stderr=result.stderr output=result.stdout)<block_end><block_end><except_stmt>subprocess.CalledProcessError<as>systemds_error<block_start>logging.error("Failed to run systemds!")<line_sep>logging.error("Error code: "+str(systemds_error.returncode))<line_sep>logging.error("Stdout:")<line_sep>logging.error(systemds_error.output.decode("utf-8"))<line_sep>logging.error("Stderr:")<line_sep>logging.error(systemds_error.stderr.decode("utf-8"))<line_sep><return><false><block_end>logging.info("Successfully executed script.")<line_sep><return><true><block_end><def_stmt>test_script path<block_start>logging.info('#'<times>30)<line_sep>logging.info('Running generated script on systemds.')<line_sep>result=invoke_systemds(path)<line_sep>logging.info('Finished test.')<line_sep><return>result<block_end># Compares two script using diff
<def_stmt>compare_script actual expected<block_start><try_stmt><block_start>f_expected=open(f'{get_sklearn_root()}/tests/expected/{expected}')<line_sep>f_actual=open(f'{get_sklearn_root()}/{actual}')<line_sep>diff=difflib.ndiff(f_actual.readlines() f_expected.readlines())<line_sep>changes=[l.strip()<for>l diff<if><not>l.startswith(' ')]<line_sep>logging.info('#'<times>30)<if_stmt>len(changes)<eq>0<block_start>logging.info('Actual script matches expected script.')<line_sep><return><true><block_end><else_stmt><block_start>logging.info('Actual script does not match expected script.')<line_sep>logging.info('Legend:')<line_sep>logging.info(' "+ " ... line unique to actual script')<line_sep>logging.info(' "- " ... line unique to expected script')<line_sep>logging.info(' "? " ... line not present in either script')<line_sep>logging.info('#'<times>30)<line_sep>logging.info('\n'+'\n'.join(changes))<line_sep>logging.info('#'<times>30)<line_sep><return><false><block_end><block_end><except_stmt>Exception<as>e<block_start>logging.error('Failed to compare script.')<line_sep>logging.error(e)<line_sep><return><false><block_end><block_end>
<import_from_stmt>elftools.elf.elffile ELFFile<def_stmt>get_executable_arch path<block_start>"""
Returns the architecture of an executable binary
Parameters
----------
path : str
path to the generated Go binary
Returns
-------
str
Architecture type of the generated binaries
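Examples
--------
Illustrative only: "./hello" is an assumed path to a Go binary built for
linux/amd64, for which pyelftools typically reports 'x64'
>>> get_executable_arch("./hello")
'x64'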
"""<with_stmt>open(str(path) "rb")<as>f<block_start>e=ELFFile(f)<line_sep><return>e.get_machine_arch()<block_end><block_end> |
<import_from_future_stmt> unicode_literals<import_stmt>pytest<import_stmt>json<import_stmt>os<import_from_stmt>solc get_solc_version<import_from_stmt>solc.wrapper solc_wrapper <def_stmt>is_benign err<block_start><return><not>err<or>err<in>('Warning: This is a pre-release compiler version, please do not use it in production.\n' )<block_end><def_stmt>test_help <block_start>output,err,_,_=solc_wrapper(help=<true> success_return_code=1)<assert_stmt>output<assert_stmt>'Solidity'<in>output<assert_stmt>is_benign(err)<block_end><def_stmt>test_version <block_start>output,err,_,_=solc_wrapper(version=<true>)<assert_stmt>output<assert_stmt>'Version'<in>output<assert_stmt>is_benign(err)<block_end><def_stmt>test_providing_stdin FOO_SOURCE<block_start>output,err,_,_=solc_wrapper(stdin=FOO_SOURCE bin=<true>)<assert_stmt>output<assert_stmt>'Foo'<in>output<assert_stmt>is_benign(err)<block_end><def_stmt>test_providing_single_source_file contracts_dir FOO_SOURCE<block_start>source_file_path=os.path.join(contracts_dir 'Foo.sol')<with_stmt>open(source_file_path 'w')<as>source_file<block_start>source_file.write(FOO_SOURCE)<block_end>output,err,_,_=solc_wrapper(source_files=[source_file_path] bin=<true>)<assert_stmt>output<assert_stmt>'Foo'<in>output<assert_stmt>is_benign(err)<block_end><def_stmt>test_providing_multiple_source_files contracts_dir FOO_SOURCE BAR_SOURCE<block_start>source_file_a_path=os.path.join(contracts_dir 'Foo.sol')<line_sep>source_file_b_path=os.path.join(contracts_dir 'Bar.sol')<with_stmt>open(source_file_a_path 'w')<as>source_file<block_start>source_file.write(FOO_SOURCE)<block_end><with_stmt>open(source_file_b_path 'w')<as>source_file<block_start>source_file.write(BAR_SOURCE)<block_end>output,err,_,_=solc_wrapper(source_files=[source_file_a_path source_file_b_path] bin=<true>)<assert_stmt>output<assert_stmt>'Foo'<in>output<assert_stmt>'Bar'<in>output<assert_stmt>is_benign(err)<block_end>@pytest.mark.requires_standard_json<def_stmt>test_providing_standard_json_input FOO_SOURCE BAR_SOURCE<block_start>stdin=json.dumps({"language":"Solidity" "sources":{"Foo.sol":{"content":FOO_SOURCE} "Bar.sol":{"content":BAR_SOURCE}} "settings":{"outputSelection":{"*":{"*":["abi" "evm.bytecode.link_references" "evm.bytecode.object" "devdoc" "metadata" "userdoc"]}}}})<line_sep>output,err,_,_=solc_wrapper(stdin=stdin standard_json=<true>)<line_sep>output=json.loads(output)<assert_stmt>output<assert_stmt>'Foo.sol'<in>output['contracts']<assert_stmt>'Bar.sol'<in>output['contracts']<assert_stmt>is_benign(err)<block_end> |
<import_from_stmt>.gcn GCNRegressor GCNRegressorBypass<import_from_stmt>.gat GATRegressor GATRegressorBypass<import_from_stmt>.mpnn MPNNRegressor MPNNRegressorBypass<import_from_stmt>.attentivefp AttentiveFPRegressor AttentiveFPRegressorBypass<line_sep> |
"""
logan.importer
~~~~~~~~~~~~~~
:copyright: (c) 2012 <NAME>.
:license: Apache License 2.0, see LICENSE for more details.
"""<import_from_future_stmt> absolute_import unicode_literals<try_stmt><block_start>unicode<block_end><except_stmt>NameError<block_start>basestring=unicode=str<block_end># Python 3
<try_stmt><block_start>execfile<block_end><except_stmt>NameError# Python3
<block_start><def_stmt>execfile afile globalz=<none> localz=<none><block_start><with_stmt>open(afile "r")<as>fh<block_start>exec(fh.read() globalz localz)<block_end><block_end><block_end><import_stmt>sys<try_stmt><block_start><import_from_stmt>django.utils.importlib import_module# django<=1.9
<block_end><except_stmt>ImportError<block_start><import_from_stmt>importlib import_module<block_end><import_from_stmt>.settings load_settings create_module<line_sep>installed=<false><def_stmt>install name config_path default_settings **kwargs<block_start>"""Install our custom module importer logic.
Args:
name (str): Module name to handle specially (e.g., "nautobot_config")
config_path (str): Absolute path to the module in question (e.g., "/opt/nautobot/nautobot_config.py")
default_settings (str): Settings module name to inherit settings from (e.g., "nautobot.core.settings")
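Example:
# Illustrative call, reusing the argument values documented above.
install("nautobot_config", "/opt/nautobot/nautobot_config.py", "nautobot.core.settings")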
"""<line_sep><global>installed<if_stmt>installed# TODO: reinstall
<block_start><return><block_end># Ensure that our custom importer for the config module takes precedence over standard Python import machinery
sys.meta_path.insert(0 LoganImporter(name config_path default_settings **kwargs))<line_sep>installed=<true><block_end><class_stmt>ConfigurationError(Exception)<block_start><pass><block_end><class_stmt>LoganImporter(object)<block_start>"""Implementation of importlib.abc.MetaPathFinder interface."""<def_stmt>__init__ self name config_path default_settings=<none> allow_extras=<true> callback=<none><block_start>"""Instantiate the custom meta path finder.
Args:
name (str): Module name to handle specially (e.g., "nautobot_config")
config_path (str): Absolute path to the module in question (e.g., "/opt/nautobot/nautobot_config.py")
default_settings (str): Settings module name to inherit settings from (e.g., "nautobot.core.settings")
allow_extras (bool): Whether to allow extension of settings variables via "EXTRA_<setting>" values
callback (func): Callback function to invoke after loading the module into settings
"""<line_sep>self.name=name<line_sep>self.config_path=config_path<line_sep>self.default_settings=default_settings<line_sep>self.allow_extras=allow_extras<line_sep>self.callback=callback<line_sep>self.validate()<block_end><def_stmt>__repr__ self<block_start><return>"<%s for '%s' (%s)>"%(type(self) self.name self.config_path)<block_end><def_stmt>validate self# TODO(dcramer): is there a better way to handle validation so it
# is lazy and actually happens in LoganLoader?
<block_start><try_stmt><block_start>execfile(self.config_path {"__file__":self.config_path})<block_end><except_stmt>Exception<as>e<block_start>exc_info=sys.exc_info()<line_sep><raise>ConfigurationError(unicode(e) exc_info[2])<block_end><block_end><def_stmt>find_module self fullname path=<none><block_start>"""Meta path finder API function implementation.
Ref: https://docs.python.org/3/library/importlib.html#importlib.abc.MetaPathFinder.find_module
TODO: find_module() API is deprecated, convert this to find_spec() instead.
"""<line_sep># Only find/load the module matching self.name - otherwise let the standard Python import machinery handle it
<if_stmt>fullname<ne>self.name<block_start><return><block_end><return>LoganLoader(name=self.name config_path=self.config_path default_settings=self.default_settings allow_extras=self.allow_extras callback=self.callback )<block_end><block_end><class_stmt>LoganLoader(object)<block_start>"""Implementation of importlib.abc.Loader interface."""<def_stmt>__init__ self name config_path default_settings=<none> allow_extras=<true> callback=<none><block_start>self.name=name<line_sep>self.config_path=config_path<line_sep>self.default_settings=default_settings<line_sep>self.allow_extras=allow_extras<line_sep>self.callback=callback<block_end><def_stmt>load_module self fullname<block_start>"""Loader API function implementation.
TODO: load_module() API is deprecated, convert this to create_module()/exec_module() instead.
"""<try_stmt><block_start><return>self._load_module(fullname)<block_end><except_stmt>Exception<as>e<block_start>exc_info=sys.exc_info()<line_sep><raise>ConfigurationError(unicode(e) exc_info[2])<block_end><block_end><def_stmt>_load_module self fullname# TODO: is this needed?
<block_start><if_stmt>fullname<in>sys.modules<block_start><return>sys.modules[fullname]<block_end># pragma: no cover
<if_stmt>self.default_settings<block_start>default_settings_mod=import_module(self.default_settings)<block_end><else_stmt><block_start>default_settings_mod=<none><block_end>settings_mod=create_module(self.name)<line_sep># Django doesn't play too nice without the config file living as a real file, so let's fake it.
settings_mod.__file__=self.config_path<line_sep># install the default settings for this app
load_settings(default_settings_mod allow_extras=self.allow_extras settings=settings_mod)<line_sep># install the custom settings for this app
load_settings(self.config_path allow_extras=self.allow_extras settings=settings_mod)<if_stmt>self.callback<block_start>self.callback(settings_mod)<block_end><return>settings_mod<block_end><block_end> |
# Import for easier re-export
<import_from_stmt>.poll *# noqa
<import_from_stmt>.settings *# noqa
|
<import_stmt>os<import_from_stmt>torch.backends cudnn<import_from_stmt>config Config<import_from_stmt>utils.logger setup_logger<import_from_stmt>datasets make_dataloader<import_from_stmt>model make_model<import_from_stmt>solver make_optimizer WarmupMultiStepLR<import_from_stmt>loss make_loss<import_from_stmt>processor do_train<if_stmt>__name__<eq>'__main__'<block_start>cfg=Config()<if_stmt><not>os.path.exists(cfg.LOG_DIR)<block_start>os.mkdir(cfg.LOG_DIR)<block_end>logger=setup_logger('{}'.format(cfg.PROJECT_NAME) cfg.LOG_DIR)<line_sep>logger.info("Running with config:\n{}".format(cfg.CFG_NAME))<line_sep>os.environ['CUDA_VISIBLE_DEVICES']=cfg.DEVICE_ID<line_sep>cudnn.benchmark=<true><line_sep># This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.
train_loader,val_loader,num_query,num_classes=make_dataloader(cfg)<line_sep>model=make_model(cfg num_class=num_classes)<line_sep>loss_func,center_criterion=make_loss(cfg num_classes=num_classes)<line_sep>optimizer,optimizer_center=make_optimizer(cfg model center_criterion)<line_sep>scheduler=WarmupMultiStepLR(optimizer cfg.STEPS cfg.GAMMA cfg.WARMUP_FACTOR cfg.WARMUP_EPOCHS cfg.WARMUP_METHOD)<line_sep>do_train(cfg model center_criterion train_loader val_loader optimizer optimizer_center scheduler # modify for using self trained model
loss_func num_query)<block_end> |
<import_stmt>pytest<import_from_stmt>guv.greenio socket<import_from_stmt>guv listen<line_sep>@pytest.fixture(scope='session')<def_stmt>pub_addr <block_start>"""A working public address that is considered always available
"""<line_sep><return>'gnu.org' 80<block_end>@pytest.fixture(scope='session')<def_stmt>fail_addr <block_start>"""An address that nothing is listening on
"""<line_sep><return>'192.0.0.0' 1000<block_end>@pytest.fixture(scope='function')<def_stmt>gsock <block_start><return>socket()<block_end>@pytest.fixture(scope='function')<def_stmt>server_sock <block_start>sock=listen(('' 0))<line_sep><return>sock<block_end> |
<import_stmt>datetime<import_stmt>threading<import_stmt>pymongo<import_from_stmt>apscheduler.jobstores.mongodb MongoDBJobStore<import_from_stmt>apscheduler.schedulers.tornado TornadoScheduler<import_from_stmt>qaenv mongo_ip mongo_port<import_from_stmt>QUANTAXIS.QAWebServer.basehandles QABaseHandler<import_from_stmt>tornado.ioloop IOLoop PeriodicCallback<import_from_stmt>tornado.web Application RequestHandler<line_sep>"""
Add reading of data from mongodb
"""<line_sep>scheduler=<none><line_sep>job_ids=[]<line_sep># 初始化
<def_stmt>init_scheduler database='qascheduler' collection='jobs'<block_start>jobstores={'default':MongoDBJobStore(database=database collection=collection client=pymongo.MongoClient(host=mongo_ip port=mongo_port))}<line_sep><global>scheduler<line_sep>scheduler=TornadoScheduler(jobstores=jobstores)<line_sep>scheduler.start()<line_sep>print('[QAScheduler Init]Scheduler has been started')<line_sep><return>scheduler<block_end># The scheduled task to be executed is defined here
<def_stmt>task1 options<block_start>print('{} [QASchedule][Task]-{}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') options))<line_sep># print(threading.enumerate())
<block_end><class_stmt>QASchedulerHandler(QABaseHandler)<block_start>"""
http://0.0.0.0:8010/scheduler/map?job_id=1&action=add
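http://0.0.0.0:8010/scheduler/map?job_id=1&action=remove
Use action=add to register the demo polling task and action=remove to delete it (URLs are illustrative).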
"""<def_stmt>get self<block_start><global>job_ids<line_sep>job_id=self.get_query_argument('job_id' <none>)<line_sep>action=self.get_query_argument('action' <none>)<if_stmt>job_id# add
<block_start><if_stmt>'add'<eq>action<block_start><if_stmt>job_id<not><in>job_ids<block_start>job_ids.append(job_id)<line_sep>scheduler.add_job(task1 'interval' seconds=3 id=job_id args=(job_id ))<line_sep>self.write('[TASK ADDED] - {}'.format(job_id))<block_end><else_stmt><block_start>self.write('[TASK EXISTS] - {}'.format(job_id))<block_end><block_end># remove
<elif_stmt>'remove'<eq>action<block_start><if_stmt>job_id<in>job_ids<block_start>scheduler.remove_job(job_id)<line_sep>job_ids.remove(job_id)<line_sep>self.write('[TASK REMOVED] - {}'.format(job_id))<block_end><else_stmt><block_start>self.write('[TASK NOT FOUND] - {}'.format(job_id))<block_end><block_end><block_end><else_stmt><block_start>self.write('[INVALID PARAMS] INVALID job_id or action')<block_end><block_end><block_end><def_stmt>format_joboutput job<block_start><return>{'id':job.id 'name':job.name 'args':job.args 'kwards':job.kwargs 'coalesce':job.coalesce 'nextruntime':str(job.next_run_time)}<block_end><class_stmt>QAScheduleQuery(QABaseHandler)<block_start><def_stmt>get self<block_start>action=self.get_argument('action' 'queryall')<line_sep>print(action)<if_stmt>action<eq>'queryall'<block_start>jobs=scheduler.get_jobs()<line_sep>print([format_joboutput(x)<for>x jobs])<line_sep>self.write({'res':[format_joboutput(x)<for>x jobs]})<block_end><block_end><block_end> |
""" Implementation of Merge Sort algorithm
"""<def_stmt>merge data<block_start>""" MergeSort is a Divide and Conquer algorithm. It divides input array
in two halves, calls itself for the two halves and then merges the
two sorted halves.
:param data: list of elements that need to be sorted
:type data: list
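Example (values are arbitrary; sorting happens in place, so the function returns None)::
>>> arr = [34, 56, 23]
>>> merge(arr)
>>> arr
[23, 34, 56]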
"""<if_stmt>len(data)<g>1<block_start>mid=len(data)<floordiv>2<line_sep>lefthalf=data[:mid]<line_sep>righthalf=data[mid:]<line_sep>merge(lefthalf)<line_sep>merge(righthalf)<line_sep>i=j=k=0<while_stmt>i<l>len(lefthalf)<and>j<l>len(righthalf)<block_start><if_stmt>lefthalf[i]<l>righthalf[j]<block_start>data[k]=lefthalf[i]<line_sep>i=i+1<block_end><else_stmt><block_start>data[k]=righthalf[j]<line_sep>j=j+1<block_end>k=k+1<block_end><while_stmt>i<l>len(lefthalf)<block_start>data[k]=lefthalf[i]<line_sep>i=i+1<line_sep>k=k+1<block_end><while_stmt>j<l>len(righthalf)<block_start>data[k]=righthalf[j]<line_sep>j=j+1<line_sep>k=k+1<block_end><block_end><block_end><def_stmt>main <block_start>""" operational function """<line_sep>arr=[34 56 23 67 3 68]<line_sep>print(f"unsorted array: {arr}")<line_sep>merge(arr)<line_sep>print(f" sorted array: {arr}")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_stmt>pytest<import_from_stmt>supriya.patterns CompositeEvent EventPattern GroupAllocateEvent GroupPattern NodeFreeEvent NoteEvent NullEvent ParallelPattern SequencePattern <import_from_stmt>supriya.patterns.testutils MockUUID<as>M<import_from_stmt>supriya.patterns.testutils run_pattern_test<line_sep>@pytest.mark.parametrize("stop_at, patterns, expected, is_infinite" [(<none> [EventPattern(frequency=SequencePattern([440 550 660])) EventPattern(frequency=SequencePattern([777 888 999])) ] [CompositeEvent([NoteEvent(M("A") delta=0.0 frequency=440) NoteEvent(M("B") delta=0.0 frequency=777) ] delta=1.0 ) CompositeEvent([NoteEvent(M("C") delta=0.0 frequency=550) NoteEvent(M("D") delta=0.0 frequency=888) ] delta=1.0 ) CompositeEvent([NoteEvent(M("E") delta=0.0 frequency=660) NoteEvent(M("F") delta=0.0 frequency=999) ] delta=1.0 ) ] <false> ) (<none> [EventPattern(x=SequencePattern([1 2 3]) delta=1.0) EventPattern(y=SequencePattern([1 2]) delta=1.5) ] [CompositeEvent([NoteEvent(M("A") delta=0.0 x=1) NoteEvent(M("B") delta=0.0 y=1) ] delta=1.0 ) NoteEvent(M("C") delta=0.5 x=2) NoteEvent(M("D") delta=0.5 y=2) NoteEvent(M("E") delta=1.0 x=3) ] <false> ) (1 [EventPattern(x=SequencePattern([1 2 3]) delta=1.0) EventPattern(y=SequencePattern([1 2]) delta=1.5) ] [CompositeEvent([NoteEvent(M("A") delta=0.0 x=1) NoteEvent(M("B") delta=0.0 y=1) ] delta=1.0 )] <false> ) (<none> [GroupPattern(EventPattern(x=SequencePattern([1 2 3]) delta=1.0)) GroupPattern(EventPattern(y=SequencePattern([1 2]) delta=1.5)) ] [CompositeEvent([CompositeEvent([GroupAllocateEvent(M("A"))]) NoteEvent(M("B") delta=0.0 target_node=M("A") x=1) CompositeEvent([GroupAllocateEvent(M("C"))]) NoteEvent(M("D") delta=0.0 target_node=M("C") y=1) ] delta=1.0 ) NoteEvent(M("E") delta=0.5 target_node=M("A") x=2) NoteEvent(M("F") delta=0.5 target_node=M("C") y=2) NoteEvent(M("G") delta=1.0 target_node=M("A") x=3) CompositeEvent([CompositeEvent([NullEvent(delta=0.25) NodeFreeEvent(M("A"))]) CompositeEvent([NullEvent(delta=0.25) NodeFreeEvent(M("C"))]) ]) ] <false> ) (1 [GroupPattern(EventPattern(x=SequencePattern([1 2 3]) delta=1.0)) GroupPattern(EventPattern(y=SequencePattern([1 2]) delta=1.5)) ] [CompositeEvent([CompositeEvent([GroupAllocateEvent(M("A"))]) NoteEvent(M("B") delta=0.0 target_node=M("A") x=1) CompositeEvent([GroupAllocateEvent(M("C"))]) NoteEvent(M("D") delta=0.0 target_node=M("C") y=1) ] delta=1.0 ) CompositeEvent([NullEvent(delta=0.25) NodeFreeEvent(M("A"))] delta=0.5) CompositeEvent([NullEvent(delta=0.25) NodeFreeEvent(M("C"))]) ] <false> ) ] )<def_stmt>test stop_at patterns expected is_infinite<block_start>pattern=ParallelPattern(patterns)<line_sep>run_pattern_test(pattern expected is_infinite stop_at)<block_end> |
<import_from_stmt>.indices Indices Aggregation<import_from_stmt>.base_expression ExpressionException Expression impute_type to_expr cast_expr unify_all unify_types_limited unify_types unify_exprs<import_from_stmt>.typed_expressions ArrayExpression ArrayStructExpression ArrayNumericExpression BooleanExpression CallExpression CollectionExpression DictExpression IntervalExpression LocusExpression NumericExpression Int32Expression Int64Expression Float32Expression Float64Expression SetExpression StringExpression StructExpression TupleExpression NDArrayExpression NDArrayNumericExpression SetStructExpression apply_expr construct_expr construct_variable construct_reference<import_from_stmt>.expression_typecheck expr_any expr_int32 expr_int64 expr_float32 expr_float64 expr_call expr_bool expr_str expr_locus expr_interval expr_array expr_ndarray expr_set expr_dict expr_tuple expr_struct expr_oneof expr_numeric coercer_from_dtype<import_from_stmt>.expression_utils analyze eval_timed eval eval_typed extract_refs_by_indices get_refs matrix_table_source table_source check_entry_indexed check_row_indexed<line_sep>__all__=['Indices' 'Aggregation' 'apply_expr' 'construct_expr' 'construct_variable' 'construct_reference' 'impute_type' 'to_expr' 'cast_expr' 'unify_all' 'unify_types_limited' 'unify_types' 'unify_exprs' 'Expression' 'ExpressionException' 'ArrayExpression' 'ArrayStructExpression' 'ArrayNumericExpression' 'BooleanExpression' 'CallExpression' 'CollectionExpression' 'DictExpression' 'IntervalExpression' 'LocusExpression' 'NumericExpression' 'Int32Expression' 'Int64Expression' 'Float32Expression' 'Float64Expression' 'SetExpression' 'SetStructExpression' 'StringExpression' 'StructExpression' 'TupleExpression' 'NDArrayExpression' 'NDArrayNumericExpression' 'analyze' 'check_entry_indexed' 'check_row_indexed' 'get_refs' 'extract_refs_by_indices' 'eval' 'eval_typed' 'eval_timed' 'expr_any' 'expr_int32' 'expr_int64' 'expr_float32' 'expr_float64' 'expr_bool' 'expr_str' 'expr_call' 'expr_locus' 'expr_struct' 'expr_numeric' 'expr_array' 'expr_ndarray' 'expr_set' 'expr_dict' 'expr_struct' 'expr_tuple' 'expr_interval' 'expr_oneof' 'coercer_from_dtype' 'matrix_table_source' 'table_source']<line_sep> |
<import_stmt>os<import_stmt>time<import_stmt>errno<import_stmt>idiokit<import_from_stmt>abusehelper.core events bot utils<def_stmt>read fd amount=4096<block_start><try_stmt><block_start>data=os.read(fd amount)<block_end><except_stmt>OSError<as>ose<block_start><if_stmt>ose.args[0]<ne>errno.EAGAIN<block_start><raise><block_end>data=""<block_end><return>data<block_end><def_stmt>try_seek fd offset<block_start><try_stmt><block_start><if_stmt>offset<is><none><block_start>os.lseek(fd 0 os.SEEK_END)<block_end><elif_stmt>offset<ge>0<block_start>os.lseek(fd offset os.SEEK_SET)<block_end><else_stmt><block_start>os.lseek(fd offset os.SEEK_END)<block_end><block_end><except_stmt>OSError<as>ose<block_start><if_stmt>ose.args[0]<ne>errno.ESPIPE<block_start><raise><block_end><block_end><block_end><def_stmt>follow_file filename<block_start><while_stmt><true><block_start><try_stmt><block_start>fd=os.open(filename os.O_RDONLY|os.O_NONBLOCK)<block_end><except_stmt>OSError<block_start><yield><none><line_sep><continue><block_end><try_stmt><block_start>inode=os.fstat(fd).st_ino<line_sep>first=<true><while_stmt><true><block_start><try_stmt><block_start>stat=os.stat(filename)<block_end><except_stmt>OSError<block_start>stat=<none><block_end><yield>first time.time() fd<if_stmt>stat<is><none><or>inode<ne>stat.st_ino<block_start><break><block_end>first=<false><block_end><block_end><finally_stmt><block_start>os.close(fd)<block_end><block_end><block_end><def_stmt>tail_file filename offset=<none><block_start>first=<true><line_sep>buffer=[]<for_stmt>result follow_file(filename)<block_start><if_stmt>first<and>result<is><not><none><block_start>_,_,fd=result<line_sep>try_seek(fd offset)<block_end>first=<false><if_stmt>result<is><none><block_start><yield><none><line_sep><continue><block_end>flush,mtime,fd=result<if_stmt>flush<and>buffer<block_start>buffer=[]<block_end><while_stmt><true><block_start>data=read(fd)<if_stmt><not>data<block_start><break><block_end>lines=data.split("\n")<if_stmt>len(lines)<le>1<block_start>buffer.extend(lines)<line_sep><continue><block_end>lines[0]="".join(buffer)+lines[0]<for_stmt>line lines[:-1]<block_start><if_stmt>line.endswith("\r")<block_start>line=line[:-1]<block_end><yield>mtime line<block_end><if_stmt><not>lines[-1]<block_start>buffer=[]<block_end><else_stmt><block_start>buffer=lines[-1:]<block_end><block_end><yield><none><block_end><block_end><class_stmt>TailBot(bot.FeedBot)<block_start>path=bot.Param("path to the followed file")<line_sep>offset=bot.IntParam("file offset" default=<none>)<line_sep>@idiokit.stream<def_stmt>feed self<block_start><for_stmt>result tail_file(self.path self.offset)<block_start><if_stmt>result<is><none><block_start><yield>idiokit.sleep(2.0)<line_sep><continue><block_end>mtime,line=result<line_sep>keys=self.parse(line mtime)<if_stmt>keys<is><none><block_start><continue><block_end>event=events.Event()<for_stmt>key,value keys.items()<block_start>event.add(key value)<block_end><yield>idiokit.send(event)<block_end><block_end><def_stmt>parse self line mtime<block_start>line=line.rstrip()<if_stmt><not>line<block_start><return><block_end>line=utils.force_decode(line)<line_sep><return>{"line":line}<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>TailBot.from_command_line().execute()<block_end> |
<import_from_stmt>. data<import_from_stmt>. pipeline<import_from_stmt>. visualization<import_from_stmt>. base<line_sep> |
<import_from_stmt>. losses<import_from_stmt>.trainer ForwardTrainer ConditionalForwardTrainer AdversarialTrainer ConditionalAdversarialTrainer SCANDALForwardTrainer<import_from_stmt>.alternate AlternatingTrainer<line_sep> |
<import_stmt>os<import_stmt>sys<import_from_stmt>pathlib Path<import_from_stmt>grapl_common.debugger.vsc_debugger wait_for_vsc_debugger<import_from_stmt>grapl_common.grapl_logger get_module_grapl_logger<line_sep>LOGGER=get_module_grapl_logger()<def_stmt>main <arrow><none><block_start>wait_for_vsc_debugger("grapl_e2e_tests")<line_sep>LOGGER.info("executing pytest")<import_from_stmt>grapl_tests_common setup_tests# import here to limit monkeypatch
# Change to the parent directory so pytest can find the tests
os.chdir(Path(__file__).resolve().parent)<line_sep>result=setup_tests.exec_pytest()<line_sep>LOGGER.info(f"tests completed with status code {result}")<line_sep>sys.exit(result)<block_end> |
<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_future_stmt> absolute_import<import_from_stmt>...command SubCommand<import_from_stmt>...console Cell<import_from_stmt>...wsgi WSGIApplication<import_from_stmt>... namespaces<import_from_stmt>... db<try_stmt><block_start><import_stmt>readline<block_end><except_stmt>ImportError<block_start><pass><block_end><class_stmt>Init(SubCommand)<block_start>"""initialize a site for first use"""<line_sep>help="""initialize a site for first use"""<def_stmt>add_arguments self parser<block_start>parser.add_argument("-l" "--location" dest="location" default=<none> metavar="PATH" help="location of the Moya server code" )<line_sep>parser.add_argument("-i" "--ini" dest="settings" default=<none> metavar="SETTINGSPATH" help="path to project settings" )<block_end><def_stmt>run self<block_start>args=self.args<line_sep>console=self.console<line_sep>application=WSGIApplication(self.location self.get_settings() validate_db=<true> disable_autoreload=<true> master_settings=self.master_settings )<line_sep>archive=application.archive<line_sep>self.console.div("syncing database")<line_sep>db.sync_all(archive self.console summary=<false>)<line_sep>commands=[command<for>command archive.get_elements_by_type(namespaces.default "command")<if>command._init]<line_sep>commands.sort(key=<lambda>c:c._priority reverse=<true>)<line_sep>fail=<none><for_stmt>command commands<block_start><if_stmt>fail<block_start><break><block_end><for_stmt>app_name archive.apps_by_lib[command.lib.long_name]<block_start>app=archive.apps[app_name]<line_sep>app_id=command.get_appid(app=app)<line_sep># console.div("running 'moya {}'".format(app_id))
console.div(command._synopsis)<line_sep># console.text(command._synopsis, italic=True)
result=self.moya_command.project_invoke(app_id application=application root_vars={"init":<true>})<if_stmt>result<ne>0<block_start>fail=result<line_sep><break><block_end><block_end><block_end>console.nl()<if_stmt><not>fail<block_start>msg="""Site is ready for use!\nRun "moya runserver" from the project directory."""<line_sep># console.text(msg, fg="green", bold=True)
console.table([[Cell(msg fg="green" bold=<true>)]])<block_end><else_stmt><block_start>msg="""A command failed to complete -- check above for any error messages."""<line_sep>console.table([[Cell(msg fg="red" bold=<true>)]])<block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
"""policies.py: Adaption of baselines.ppo2.policies.py for RUDDER for atari games
Author -- <NAME>
Contact -- <EMAIL>
"""<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>baselines.a2c.utils conv fc conv_to_fc batch_to_seq seq_to_batch<import_from_stmt>baselines.common.distributions make_pdtype<import_from_stmt>baselines.ppo2_rudder.reward_redistribution RewardRedistributionModel observation_network<import_from_stmt>TeLL.layers StopGradientLayer<def_stmt>nature_cnn unscaled_images<block_start>"""Convolutional parts of CNN from Nature paper
Taken from baselines.ppo2.policies.py
Parameters
-------
unscaled_images : tensorflow tensor
Frame of shape (batchsize, x, y, c)
Returns
-------
tensorflow tensor
Output features of last convolutional layer with flattened x/y/c dimensions
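Example
-------
Illustrative shape check, assuming the standard (batchsize, 84, 84, 4) Atari frame stack:
the three convolutions reduce it to 7x7x64 feature maps, so the flattened output has
shape (batchsize, 3136)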
"""<line_sep>scaled_images=tf.cast(unscaled_images tf.float32)/255.<line_sep>activ=tf.nn.relu<line_sep>h=activ(conv(scaled_images 'c1' nf=32 rf=8 stride=4 init_scale=np.sqrt(2)))<line_sep>h2=activ(conv(h 'c2' nf=64 rf=4 stride=2 init_scale=np.sqrt(2)))<line_sep>h3=activ(conv(h2 'c3' nf=64 rf=3 stride=1 init_scale=np.sqrt(2)))<line_sep>h3=conv_to_fc(h3)<line_sep><return>h3<block_end><def_stmt>lstm xs ms s scope nh<block_start>"""LSTM layer for policy network, using same weight and bias initialization as LSTM in reward redistribution model
Based on baselines.ppo2.policies.py; These initializations were taken directly from the redistribution model LSTM
and could be optimized;
"""<line_sep>nbatch,nin=[v.value<for>v xs[0].get_shape()]<line_sep>lstm_w_init=<lambda>scale:<lambda>*args **kwargs:tf.truncated_normal(*args **kwargs)<times>scale<line_sep>truncated_normal_init=<lambda>mean stddev:<lambda>*args **kwargs:tf.truncated_normal(mean=mean stddev=stddev *args **kwargs)<with_stmt>tf.variable_scope(scope reuse=tf.AUTO_REUSE)<block_start>wx_ig=tf.get_variable("wx_ig" initializer=lstm_w_init(0.1)([nin nh]))<line_sep>wx_og=tf.get_variable("wx_og" initializer=lstm_w_init(0.1)([nin nh]))<line_sep>wx_ci=tf.get_variable("wx_ci" initializer=lstm_w_init(0.0001)([nin nh]))<line_sep>wx_fg=tf.get_variable("wx_fg" initializer=lstm_w_init(0.1)([nin nh]))<line_sep>wh_ig=tf.get_variable("wh_ig" initializer=lstm_w_init(0.001)([nh nh]))<line_sep>wh_og=tf.get_variable("wh_og" initializer=lstm_w_init(0.001)([nh nh]))<line_sep>wh_ci=tf.get_variable("wh_ci" initializer=lstm_w_init(0.001)([nh nh]))<line_sep>wh_fg=tf.get_variable("wh_fg" initializer=lstm_w_init(0.001)([nh nh]))<line_sep>b_ig=tf.get_variable("b_ig" initializer=truncated_normal_init(mean=-5 stddev=0.1)([nh]))<line_sep>b_fg=tf.get_variable("b_fg" initializer=truncated_normal_init(mean=12 stddev=0.1)([nh]))<line_sep>b_og=tf.get_variable("b_og" initializer=truncated_normal_init(mean=-5 stddev=0.1)([nh]))<line_sep>b_ci=tf.get_variable("b_ci" initializer=truncated_normal_init(mean=0 stddev=0.1)([nh]))<line_sep>wx=tf.concat([wx_ig wx_fg wx_og wx_ci] axis=1)<line_sep>wh=tf.concat([wh_ig wh_fg wh_og wh_ci] axis=1)<line_sep>b=tf.concat([b_ig b_fg b_og b_ci] axis=0)<block_end>c,h=tf.split(axis=1 num_or_size_splits=2 value=s)<for_stmt>idx,(x m) enumerate(zip(xs ms))<block_start>c<augmul>(1-m)<line_sep>h<augmul>(1-m)<line_sep>z=tf.matmul(x wx)+tf.matmul(h wh)+b<line_sep>i,f,o,u=tf.split(axis=1 num_or_size_splits=4 value=z)<line_sep>i=tf.nn.sigmoid(i)<line_sep>f=tf.nn.sigmoid(f)<line_sep>o=tf.nn.sigmoid(o)<line_sep>u=tf.tanh(u)<line_sep>c=f<times>c+i<times>u<line_sep>h=o<times>tf.identity(c)<line_sep>xs[idx]=h<block_end>s=tf.concat(axis=1 values=[c h])<line_sep><return>xs s<block_end><class_stmt>LstmPolicy(object)<block_start><def_stmt>__init__ self tf_session ob_space ac_space nbatch reward_redistribution_config observation_network_config lstm_network_config training_config exploration_config nsteps nlstm=64 reuse=<false><block_start>"""LSTM policy network, as described in RUDDER paper
Based on baselines.ppo2.policies.py; LSTM layer sees features from its own trainable observation network and
the features from the reward redistribution observation network;
Parameters
-------
tf_session : tensorflow session
tensorflow session to compute the graph in
ob_space
Baselines ob_space object (see ppo2_rudder.py); must provide .shape attribute for (x, y, c) shapes;
ac_space
Baselines ac_space object (see ppo2_rudder.py); must provide .n attribute for number of possible actions;
nbatch : int
Batchsize
nsteps : int
Fixed number of timesteps to process at once
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
Junk (chunk) size for reward redistribution; junks overlap by one half each
cont_pred_w : float
Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
Stepsize for integrated gradients
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
Downscale learning rate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
exploration_config : dict
Dictionary containing config for exploration:
-----
sample_actions_from_softmax : bool
True: Apply softmax to policy network output and use it as probabilities to pick an action
False: Use the max. policy network output as action
temporal_safe_exploration : bool
Use RUDDER safe exploration
save_pi_threshold : float
Threshold value in range [0,1] for safe actions in RUDDER safe exploration
nlstm : int
Number of LSTM units (=memory cells)
reuse : bool
Reuse tensorflow variables?
"""<line_sep>#
# Shapes
#
nenv=nbatch<floordiv>nsteps<line_sep>nh,nw,nc=ob_space.shape<line_sep>ob_shape=(nbatch nh nw nc)<line_sep>seq_ob_shape=(nenv -1 nh nw 1)<line_sep>nact=ac_space.n<line_sep>#
# Placeholders for inputs
#
X=tf.placeholder(tf.uint8 ob_shape)#obs
M=tf.placeholder(tf.float32 [nbatch])#mask (done t-1)
S=tf.placeholder(tf.float32 [nenv nlstm<times>2])#states
#
# Prepare input
#
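# single_frames: last frame of the stacked observation, reshaped to (nenv, nsteps, h, w, 1);
# delta_frames: difference between the last and the second-to-last frame of the stack.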
single_frames=tf.cast(tf.reshape(X[<ellipsis> -1:] shape=seq_ob_shape) dtype=tf.float32)<line_sep>delta_frames=single_frames-tf.cast(tf.reshape(X[<ellipsis> -2:-1] shape=seq_ob_shape) dtype=tf.float32)<line_sep>#
# Get observation features from RR model
#
rr_model=RewardRedistributionModel(reward_redistribution_config=reward_redistribution_config observation_network_config=observation_network_config lstm_network_config=lstm_network_config training_config=training_config scopename="RR")<line_sep>self.rr_observation_model=rr_model<line_sep>rr_observation_layer=rr_model.get_visual_features(single_frame=single_frames delta_frame=delta_frames additional_inputs=[])<line_sep>#
# Build policy network
#
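# The reward redistribution features are wrapped in a StopGradientLayer below, so policy gradients
# do not propagate back into the reward redistribution observation network.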
<with_stmt>tf.variable_scope("model" reuse=reuse)<block_start>temperature=tf.get_variable(initializer=tf.constant(1 dtype=tf.float32) trainable=<false> name='temperature')<line_sep>additional_inputs=[StopGradientLayer(rr_observation_layer)]<line_sep>observation_layers,observation_features=observation_network(single_frame=single_frames delta_frame=delta_frames additional_inputs=additional_inputs observation_network_config=observation_network_config)<line_sep>self.observation_features_shape=observation_features.get_output_shape()<line_sep>xs=[tf.squeeze(v [1])<for>v tf.split(axis=1 num_or_size_splits=nsteps value=tf.reshape(observation_layers[-1].get_output() [nenv nsteps -1]))]<line_sep>ms=batch_to_seq(M nenv nsteps)<line_sep>h5,snew=lstm(xs ms S 'lstm1' nh=nlstm)<line_sep>h5=seq_to_batch(h5)<line_sep>h6=h5<line_sep>pi=fc(h6 'pi' nact)<line_sep>vf=fc(h6 'v' 1)<block_end>self.pdtype=make_pdtype(ac_space)<line_sep>self.pd=self.pdtype.pdfromflat(pi)<if_stmt>exploration_config['sample_actions_from_softmax']<block_start>a0=self.pd.sample_temp(temperature=temperature)<block_end><else_stmt><block_start>a0=tf.argmax(pi axis=-1)<block_end>v0=vf[: 0]<line_sep>neglogp0=self.pd.neglogp(a0)<line_sep>self.initial_state=np.zeros((nenv nlstm<times>2) dtype=np.float32)<def_stmt>step ob state mask<block_start>a,v,s,neglogp=tf_session.run([a0 v0 snew neglogp0] {X:ob S:state M:mask})<line_sep><return>a v s neglogp<block_end><def_stmt>value ob state mask<block_start><return>tf_session.run(v0 {X:ob S:state M:mask})<block_end><def_stmt>action ob state mask *_args **_kwargs<block_start>a,s,neglogp=tf_session.run([a0 snew neglogp0] {X:ob S:state M:mask})<line_sep><return>a s neglogp<block_end>#
# Placeholders for exploration
#
n_envs=pi.shape.as_list()[0]<line_sep>exploration_timesteps_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>prev_actions_pl=tf.placeholder(dtype=tf.int64 shape=(n_envs ))<line_sep>gamelengths_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>keep_prev_action_pl=tf.placeholder(dtype=tf.bool shape=(n_envs ))<line_sep>prev_action_count_pl=tf.placeholder(dtype=tf.int64 shape=(n_envs ))<line_sep>exploration_durations_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>#
# Setting up safe exploration
#
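# Exploration window: an env explores while exploration_timestep <= gamelength <= exploration_timestep + duration,
# and only if its exploration_timestep is not -1 (-1 disables exploration for that env).
# safe_pi: policy outputs rescaled to [0, 1] per env and thresholded; the per-env thresholds interpolate
# between 1 (first env) and save_pi_threshold (last env), and sampling from the resulting mask yields a
# random action the policy still rates as "safe".
# a_explore: keep the previous action if requested and still safe, otherwise sample a safe action;
# outside the exploration window fall back to the regular action a0.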
explore=tf.logical_and(tf.logical_and(tf.less_equal(exploration_timesteps_pl gamelengths_pl) tf.less_equal(gamelengths_pl exploration_timesteps_pl+exploration_durations_pl)) tf.not_equal(exploration_timesteps_pl tf.constant(-1 dtype=tf.float32)))<line_sep>safe_pi=pi-tf.reduce_min(pi axis=-1 keep_dims=<true>)<line_sep>safe_pi<augdiv>tf.reduce_max(safe_pi axis=-1 keep_dims=<true>)<line_sep>save_pi_thresholds=(1-(tf.expand_dims(tf.range(n_envs dtype=tf.float32) axis=1)/(n_envs+(n_envs<eq>1)-1))<times>(1-exploration_config['save_pi_threshold']))<line_sep>safe_pi=tf.cast(tf.greater_equal(safe_pi save_pi_thresholds) dtype=tf.float32)<line_sep>safe_pi<augdiv>tf.reduce_sum(safe_pi)<line_sep>rand_safe_a=tf.multinomial(safe_pi 1)[: 0]<line_sep>safe_pi_flat=tf.reshape(safe_pi (-1 ))<line_sep>prev_action_is_safe=tf.gather(safe_pi_flat prev_actions_pl+tf.range(safe_pi.shape.as_list()[0] dtype=tf.int64)<times>safe_pi.shape.as_list()[1])<line_sep>prev_action_is_safe=tf.greater(prev_action_is_safe tf.constant(0 dtype=tf.float32))<line_sep>a_explore=tf.where(tf.logical_and(tf.logical_and(keep_prev_action_pl tf.not_equal(gamelengths_pl exploration_timesteps_pl)) prev_action_is_safe) prev_actions_pl rand_safe_a)<line_sep>a_explore=tf.where(explore a_explore a0)<line_sep># Make sure the actor doesn't repeat an action too often (otherwise screensaver might start)
rand_a=tf.random_uniform(shape=a0.get_shape() minval=0 maxval=ac_space.n dtype=a0.dtype)<line_sep>a_explore=tf.where(tf.greater(prev_action_count_pl tf.constant(20 dtype=tf.int64)) rand_a a_explore)<if_stmt><not>exploration_config['temporal_safe_exploration']<block_start>a_explore=a0<block_end>neglogp_explore=self.pd.neglogp(a_explore)<def_stmt>action_exploration ob state mask *_args exploration_timesteps prev_actions gamelengths keep_prev_action prev_action_count exploration_durations **_kwargs<block_start>"""Get actions with exploration for long-term reward"""<line_sep>a,s,neglogp=tf_session.run([a_explore snew neglogp_explore] {X:ob S:state M:mask exploration_timesteps_pl:exploration_timesteps prev_actions_pl:prev_actions gamelengths_pl:gamelengths exploration_durations_pl:exploration_durations keep_prev_action_pl:keep_prev_action prev_action_count_pl:prev_action_count})<line_sep><return>a s neglogp<block_end>self.X=X<line_sep>self.M=M<line_sep>self.S=S<line_sep>self.pi=pi<line_sep>self.vf=vf<line_sep>self.step=step<line_sep>self.value=value<line_sep>self.action=action<line_sep>self.action_exploration=action_exploration<line_sep>self.seq_ob_shape=seq_ob_shape<line_sep>self.exploration_config=exploration_config<block_end><def_stmt>get_observation_features self frame delta<block_start>"""Get output features of observation network (to be fed into reward redistribution network)"""<with_stmt>tf.variable_scope("model" reuse=tf.AUTO_REUSE)<block_start>_,observation_features=observation_network(single_frame=frame[<ellipsis> -1:] delta_frame=delta additional_inputs=[] observation_network_config=self.exploration_config['observation_network_config'])<line_sep>observation_features=observation_features.get_output()<block_end><return>observation_features<block_end><block_end><class_stmt>LstmPolicyDense(object)<block_start><def_stmt>__init__ self tf_session ob_space ac_space nbatch reward_redistribution_config observation_network_config lstm_network_config training_config exploration_config nsteps nlstm=64 reuse=<false><block_start>"""LSTM policy network with additional dense layer after LSTM layer, as described in RUDDER paper
Based on baselines.ppo2.policies.py; LSTM layer sees features from its own trainable observation network and
the features from the reward redistribution observation network; The additional dense layer after the LSTM
layer contains 128 hidden units;
Parameters
-------
tf_session : tensorflow session
tensorflow session to compute the graph in
ob_space
Baselines ob_space object (see ppo2_rudder.py); must provide .shape attribute for (x, y, c) shapes;
ac_space
Baselines ac_space object (see ppo2_rudder.py); must provide .n attribute for number of possible actions;
nbatch : int
Batchsize
nsteps : int
Fixed number of timesteps to process at once
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
Junk (chunk) size for reward redistribution; junks overlap by one half each
cont_pred_w : float
Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
Stepsize for integrated gradients
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
Downscale learning rate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
exploration_config : dict
Dictionary containing config for exploration:
-----
sample_actions_from_softmax : bool
True: Apply softmax to policy network output and use it as probabilities to pick an action
False: Use the max. policy network output as action
temporal_safe_exploration : bool
Use RUDDER safe exploration
save_pi_threshold : float
Threshold value in range [0,1] for safe actions in RUDDER safe exploration
nlstm : int
Number of LSTM units (=memory cells)
reuse : bool
Reuse tensorflow variables?
"""<line_sep>#
# Shapes
#
nenv=nbatch<floordiv>nsteps<line_sep>nh,nw,nc=ob_space.shape<line_sep>ob_shape=(nbatch nh nw nc)<line_sep>seq_ob_shape=(nenv -1 nh nw 1)<line_sep>nact=ac_space.n<line_sep>#
# Placeholders
#
X=tf.placeholder(tf.uint8 ob_shape)#obs
M=tf.placeholder(tf.float32 [nbatch])#mask (done t-1)
S=tf.placeholder(tf.float32 [nenv nlstm<times>2])#states
#
# Prepare input
#
single_frames=tf.cast(tf.reshape(X[<ellipsis> -1:] shape=seq_ob_shape) dtype=tf.float32)<line_sep>delta_frames=single_frames-tf.cast(tf.reshape(X[<ellipsis> -2:-1] shape=seq_ob_shape) dtype=tf.float32)<line_sep>#
# Get observation features from RR model
#
rr_model=RewardRedistributionModel(reward_redistribution_config=reward_redistribution_config observation_network_config=observation_network_config lstm_network_config=lstm_network_config training_config=training_config scopename="RR")<line_sep>self.rr_observation_model=rr_model<line_sep>rr_observation_layer=rr_model.get_visual_features(single_frame=single_frames delta_frame=delta_frames additional_inputs=[])<line_sep>#
# Build policy network
#
<with_stmt>tf.variable_scope("model" reuse=reuse)<block_start>temperature=tf.get_variable(initializer=tf.constant(1 dtype=tf.float32) trainable=<false> name='temperature')<line_sep>additional_inputs=[StopGradientLayer(rr_observation_layer)]<line_sep>observation_layers,observation_features=observation_network(single_frame=single_frames delta_frame=delta_frames additional_inputs=additional_inputs observation_network_config=observation_network_config)<line_sep>self.observation_features_shape=observation_features.get_output_shape()<line_sep>xs=[tf.squeeze(v [1])<for>v tf.split(axis=1 num_or_size_splits=nsteps value=tf.reshape(observation_layers[-1].get_output() [nenv nsteps -1]))]<line_sep>ms=batch_to_seq(M nenv nsteps)<line_sep>h5,snew=lstm(xs ms S 'lstm1' nh=nlstm)<line_sep>h5=seq_to_batch(h5)<line_sep>h6=fc(h5 'fc1' nh=128 init_scale=np.sqrt(2))<line_sep>pi=fc(h6 'pi' nact)<line_sep>vf=fc(h6 'v' 1)<block_end>self.pdtype=make_pdtype(ac_space)<line_sep>self.pd=self.pdtype.pdfromflat(pi)<if_stmt>exploration_config['sample_actions_from_softmax']<block_start>a0=self.pd.sample_temp(temperature=temperature)<block_end><else_stmt><block_start>a0=tf.argmax(pi axis=-1)<block_end>v0=vf[: 0]<line_sep>neglogp0=self.pd.neglogp(a0)<line_sep>self.initial_state=np.zeros((nenv nlstm<times>2) dtype=np.float32)<def_stmt>step ob state mask<block_start>a,v,s,neglogp=tf_session.run([a0 v0 snew neglogp0] {X:ob S:state M:mask})<line_sep><return>a v s neglogp<block_end><def_stmt>value ob state mask<block_start><return>tf_session.run(v0 {X:ob S:state M:mask})<block_end><def_stmt>action ob state mask *_args **_kwargs<block_start>a,s,neglogp=tf_session.run([a0 snew neglogp0] {X:ob S:state M:mask})<line_sep><return>a s neglogp<block_end>#
# Placeholders for exploration
#
n_envs=pi.shape.as_list()[0]<line_sep>exploration_timesteps_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>prev_actions_pl=tf.placeholder(dtype=tf.int64 shape=(n_envs ))<line_sep>gamelengths_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>keep_prev_action_pl=tf.placeholder(dtype=tf.bool shape=(n_envs ))<line_sep>prev_action_count_pl=tf.placeholder(dtype=tf.int64 shape=(n_envs ))<line_sep>exploration_durations_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>#
# Setting up safe exploration
#
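# Safe exploration setup below is identical to the one in LstmPolicy (see the comments there).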
explore=tf.logical_and(tf.logical_and(tf.less_equal(exploration_timesteps_pl gamelengths_pl) tf.less_equal(gamelengths_pl exploration_timesteps_pl+exploration_durations_pl)) tf.not_equal(exploration_timesteps_pl tf.constant(-1 dtype=tf.float32)))<line_sep>safe_pi=pi-tf.reduce_min(pi axis=-1 keep_dims=<true>)<line_sep>safe_pi<augdiv>tf.reduce_max(safe_pi axis=-1 keep_dims=<true>)<line_sep>save_pi_thresholds=(1-(tf.expand_dims(tf.range(n_envs dtype=tf.float32) axis=1)/(n_envs+(n_envs<eq>1)-1))<times>(1-exploration_config['save_pi_threshold']))<line_sep>safe_pi=tf.cast(tf.greater_equal(safe_pi save_pi_thresholds) dtype=tf.float32)<line_sep>safe_pi<augdiv>tf.reduce_sum(safe_pi)<line_sep>rand_safe_a=tf.multinomial(safe_pi 1)[: 0]<line_sep>safe_pi_flat=tf.reshape(safe_pi (-1 ))<line_sep>prev_action_is_safe=tf.gather(safe_pi_flat prev_actions_pl+tf.range(safe_pi.shape.as_list()[0] dtype=tf.int64)<times>safe_pi.shape.as_list()[1])<line_sep>prev_action_is_safe=tf.greater(prev_action_is_safe tf.constant(0 dtype=tf.float32))<line_sep>a_explore=tf.where(tf.logical_and(tf.logical_and(keep_prev_action_pl tf.not_equal(gamelengths_pl exploration_timesteps_pl)) prev_action_is_safe) prev_actions_pl rand_safe_a)<line_sep>a_explore=tf.where(explore a_explore a0)<line_sep># Make sure the actor doesn't repeat an action too often (otherwise screensaver might start)
rand_a=tf.random_uniform(shape=a0.get_shape() minval=0 maxval=ac_space.n dtype=a0.dtype)<line_sep>a_explore=tf.where(tf.greater(prev_action_count_pl tf.constant(20 dtype=tf.int64)) rand_a a_explore)<if_stmt><not>exploration_config['temporal_safe_exploration']<block_start>a_explore=a0<block_end>neglogp_explore=self.pd.neglogp(a_explore)<def_stmt>action_exploration ob state mask *_args exploration_timesteps prev_actions gamelengths keep_prev_action prev_action_count exploration_durations **_kwargs<block_start>"""Get actions with exploration for long-term reward"""<line_sep>a,s,neglogp=tf_session.run([a_explore snew neglogp_explore] {X:ob S:state M:mask exploration_timesteps_pl:exploration_timesteps prev_actions_pl:prev_actions gamelengths_pl:gamelengths exploration_durations_pl:exploration_durations keep_prev_action_pl:keep_prev_action prev_action_count_pl:prev_action_count})<line_sep><return>a s neglogp<block_end>self.X=X<line_sep>self.M=M<line_sep>self.S=S<line_sep>self.pi=pi<line_sep>self.vf=vf<line_sep>self.step=step<line_sep>self.value=value<line_sep>self.action=action<line_sep>self.action_exploration=action_exploration<line_sep>self.seq_ob_shape=seq_ob_shape<line_sep>self.exploration_config=exploration_config<block_end><def_stmt>get_observation_features self frame delta<block_start>"""Get output features of observation network (to be fed into reward redistribution network)"""<with_stmt>tf.variable_scope("model" reuse=tf.AUTO_REUSE)<block_start>_,observation_features=observation_network(single_frame=frame[<ellipsis> -1:] delta_frame=delta additional_inputs=[] observation_network_config=self.exploration_config['observation_network_config'])<line_sep>observation_features=observation_features.get_output()<block_end><return>observation_features<block_end><block_end><class_stmt>CnnPolicy(object)<block_start><def_stmt>__init__ self tf_session ob_space ac_space nbatch reward_redistribution_config observation_network_config lstm_network_config training_config exploration_config reuse=<false> **kwargs<block_start>"""CNN policy network, as described in RUDDER paper
Based on baselines.ppo2.policies.py; Dense layer sees features from its own trainable observation network and
the features from the reward redistribution observation network;
Parameters
-------
tf_session : tensorflow session
tensorflow session to compute the graph in
ob_space
Baselines ob_space object (see ppo2_rudder.py); must provide .shape attribute for (x, y, c) shapes;
ac_space
Baselines ac_space object (see ppo2_rudder.py); must provide .n attribute for number of possible actions;
nbatch : int
Batchsize
reward_redistribution_config : dict
Dictionary containing config for reward redistribution:
-----
lambda_eligibility_trace : float
Eligibility trace value for redistributed reward
vf_contrib : float
Weighting of original value function (vf) vs. redistributed reward (rr), s.t.
:math:`reward = vf \cdot vf\_contrib + rr \cdot (1-vf\_contrib)`
use_reward_redistribution_quality_threshold : float
Quality of reward redistribution has to exceed use_reward_redistribution_quality_threshold to be used;
use_reward_redistribution_quality_threshold range is [0,1]; Quality measure is the squared prediction
error, as described in RUDDER paper;
use_reward_redistribution : bool
Use reward redistribution?
rr_junksize : int
Junk (chunk) size for reward redistribution; junks overlap by one half each
cont_pred_w : float
Weighting of continuous prediction loss vs. prediction loss of final return at last timestep
intgrd_steps : int
Stepsize for integrated gradients
intgrd_batchsize : int
Integrated gradients is computed batch-wise if intgrd_batchsize > 1
observation_network_config : dict
Dictionary containing config for observation network that processes observations and feeds them to LSTM
network:
-----
show_states : bool
Show frames to network?
show_statedeltas : bool
Show frame deltas to network?
prepoc_states : list of dicts
Network config to preprocess frames
prepoc_deltas : list of dicts
Network config to preprocess frame deltas
prepoc_observations : list of dicts
Network config to preprocess features from frame and frame-delta preprocessing networks
lstm_network_config : dict
Dictionary containing config for LSTM network:
-----
show_actions : bool
Show taken actions to LSTM?
reversed : bool
Process game sequence in reversed order?
layers : list of dicts
Network config for LSTM network and optional additional dense layers
initializations : dict
Initialization config for LSTM network
timestep_encoding : dict
Set "max_value" and "triangle_span" for TeLL.utiltiy.misc_tensorflow.TriangularValueEncoding class
training_config : dict
Dictionary containing config for training and update procedure:
-----
n_no_rr_updates : int
Number of updates to perform without training or using reward redistribution network
n_pretrain_games : int
Number of games to pretrain the reward redistribution network without using it;
downscale_lr_policylag : bool
Downscale learning rate permanently if policy lag gets too large?
optimizer : tf.train optimizer
Optimizer in tf.train, e.g. "AdamOptimizer"
optimizer_params : dict
Kwargs for optimizer
l1 : float
Weighting for l1 weight regularization
l2 : float
Weighting for l2 weight regularization
clip_gradients : float
Threshold for clipping gradients (clipping by norm)
exploration_config : dict
Dictionary containing config for exploration:
-----
sample_actions_from_softmax : bool
True: Apply softmax to policy network output and use it as probabilities to pick an action
False: Use the max. policy network output as action
temporal_safe_exploration : bool
Use RUDDER safe exploration
save_pi_threshold : float
Threshold value in range [0,1] for safe actions in RUDDER safe exploration
reuse : bool
Reuse tensorflow variables?
"""<line_sep>#
# Shapes
#
nh,nw,nc=ob_space.shape<line_sep>activ=tf.nn.relu<line_sep>ob_shape=(nbatch nh nw nc)<line_sep>nact=ac_space.n<line_sep>#
# Placeholders
#
X=tf.placeholder(tf.uint8 ob_shape)#obs
M=tf.placeholder(tf.float32 [nbatch])#mask (done t-1)
#
# Prepare input
#
single_frames=tf.cast(tf.expand_dims(X[<ellipsis> -1:] axis=1) dtype=tf.float32)<line_sep>delta_frames=single_frames-tf.cast(tf.expand_dims(X[<ellipsis> -2:-1] axis=1) dtype=tf.float32)<line_sep>delta_frames<augmul>tf.reshape(M shape=(nbatch 1 1 1 1))<line_sep>#
# Get observation features from RR model
#
rr_model=RewardRedistributionModel(reward_redistribution_config=reward_redistribution_config observation_network_config=observation_network_config lstm_network_config=lstm_network_config training_config=training_config scopename="RR")<line_sep>self.rr_observation_model=rr_model<line_sep>rr_observation_layer=rr_model.get_visual_features(single_frame=single_frames delta_frame=delta_frames additional_inputs=[])<line_sep># Get output tensor
rr_observations=rr_observation_layer.get_output()[: 0]<line_sep>#
# Build policy network
#
<with_stmt>tf.variable_scope("model" reuse=reuse)<block_start>temperature=tf.get_variable(initializer=tf.constant(1 dtype=tf.float32) trainable=<false> name='temperature')<line_sep>observation_features=nature_cnn(X)<block_end>self.observation_features_shape=tf.expand_dims(observation_features axis=0).shape<line_sep># Concat observation feature from RR model and A2C model
h_for_a2c=tf.concat([observation_features tf.stop_gradient(rr_observations)] axis=-1)<with_stmt>tf.variable_scope("model" reuse=reuse)<block_start>h_for_a2c=activ(fc(h_for_a2c 'fc1' nh=512 init_scale=np.sqrt(2)))<block_end><with_stmt>tf.variable_scope("model" reuse=reuse)<block_start>pi=fc(h_for_a2c 'pi' nact init_scale=0.01)<line_sep>vf=fc(h_for_a2c 'v' 1)[: 0]<block_end>self.pdtype=make_pdtype(ac_space)<line_sep>self.pd=self.pdtype.pdfromflat(pi)<if_stmt>exploration_config['sample_actions_from_softmax']<block_start>a0=self.pd.sample_temp(temperature=temperature)<block_end><else_stmt><block_start>a0=tf.argmax(pi axis=-1)<block_end>neglogp0=self.pd.neglogp(a0)<line_sep>self.initial_state=<none><def_stmt>step ob state mask *_args **_kwargs<block_start>a,v,neglogp=tf_session.run([a0 vf neglogp0] {X:ob M:mask})<line_sep><return>a v self.initial_state neglogp<block_end><def_stmt>value ob state mask *_args **_kwargs<block_start><return>tf_session.run(vf {X:ob M:mask})<block_end><def_stmt>action ob state mask *_args **_kwargs<block_start>a,neglogp=tf_session.run([a0 neglogp0] {X:ob M:mask})<line_sep><return>a self.initial_state neglogp<block_end>#
# Placeholders for exploration
#
n_envs=pi.shape.as_list()[0]<line_sep>exploration_timesteps_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>prev_actions_pl=tf.placeholder(dtype=tf.int64 shape=(n_envs ))<line_sep>gamelengths_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>keep_prev_action_pl=tf.placeholder(dtype=tf.bool shape=(n_envs ))<line_sep>prev_action_count_pl=tf.placeholder(dtype=tf.int64 shape=(n_envs ))<line_sep>exploration_durations_pl=tf.placeholder(dtype=tf.float32 shape=(n_envs ))<line_sep>#
# Setting up safe exploration
#
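# Safe exploration setup below is identical to the one in LstmPolicy (see the comments there).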
explore=tf.logical_and(tf.logical_and(tf.less_equal(exploration_timesteps_pl gamelengths_pl) tf.less_equal(gamelengths_pl exploration_timesteps_pl+exploration_durations_pl)) tf.not_equal(exploration_timesteps_pl tf.constant(-1 dtype=tf.float32)))<line_sep>safe_pi=pi-tf.reduce_min(pi axis=-1 keep_dims=<true>)<line_sep>safe_pi<augdiv>tf.reduce_max(safe_pi axis=-1 keep_dims=<true>)<line_sep>save_pi_thresholds=(1-(tf.expand_dims(tf.range(n_envs dtype=tf.float32) axis=1)/(n_envs+(n_envs<eq>1)-1))<times>(1-exploration_config['save_pi_threshold']))<line_sep>safe_pi=tf.cast(tf.greater_equal(safe_pi save_pi_thresholds) dtype=tf.float32)<line_sep>safe_pi<augdiv>tf.reduce_sum(safe_pi)<line_sep>rand_safe_a=tf.multinomial(safe_pi 1)[: 0]<line_sep>safe_pi_flat=tf.reshape(safe_pi (-1 ))<line_sep>prev_action_is_safe=tf.gather(safe_pi_flat prev_actions_pl+tf.range(safe_pi.shape.as_list()[0] dtype=tf.int64)<times>safe_pi.shape.as_list()[1])<line_sep>prev_action_is_safe=tf.greater(prev_action_is_safe tf.constant(0 dtype=tf.float32))<line_sep>a_explore=tf.where(tf.logical_and(tf.logical_and(keep_prev_action_pl tf.not_equal(gamelengths_pl exploration_timesteps_pl)) prev_action_is_safe) prev_actions_pl rand_safe_a)<line_sep>a_explore=tf.where(explore a_explore a0)<line_sep># Make sure the actor doesn't repeat an action too often (otherwise screensaver might start)
rand_a=tf.random_uniform(shape=a0.get_shape() minval=0 maxval=ac_space.n dtype=a0.dtype)<line_sep>a_explore=tf.where(tf.greater(prev_action_count_pl tf.constant(20 dtype=tf.int64)) rand_a a_explore)<if_stmt><not>exploration_config['temporal_safe_exploration']<block_start>a_explore=a0<block_end>neglogp_explore=self.pd.neglogp(a_explore)<def_stmt>action_exploration ob state mask *_args exploration_timesteps prev_actions gamelengths keep_prev_action prev_action_count exploration_durations **_kwargs<block_start>"""Exploration for long-term reward"""<line_sep>a,neglogp=tf_session.run([a_explore neglogp_explore] {X:ob M:mask exploration_timesteps_pl:exploration_timesteps prev_actions_pl:prev_actions gamelengths_pl:gamelengths exploration_durations_pl:exploration_durations keep_prev_action_pl:keep_prev_action prev_action_count_pl:prev_action_count})<line_sep><return>a self.initial_state neglogp<block_end>self.X=X<line_sep>self.M=M<line_sep>self.pi=pi<line_sep>self.vf=vf<line_sep>self.step=step<line_sep>self.action=action<line_sep>self.value=value<line_sep>self.action_exploration=action_exploration<block_end><def_stmt>get_observation_features self frame delta=<none><block_start>"""Get output features of observation network (to be fed into reward redistribution network)"""<with_stmt>tf.variable_scope("model" reuse=tf.AUTO_REUSE)<block_start><return>tf.expand_dims(nature_cnn(frame[: 0]) dim=1)<block_end><block_end><block_end> |
<import_stmt>os<import_from_stmt>conans ConanFile CMake tools<class_stmt>IrrXMLConan(ConanFile)<block_start>name="irrxml"<line_sep>license="ZLIB"<line_sep>homepage="http://www.ambiera.com/irrxml"<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>description="irrXML is a simple and fast open source xml parser for C++"<line_sep>topics=("xml" "xml-parser" "parser" "xml-reader")<line_sep>exports_sources=["CMakeLists.txt" "patches/*"]<line_sep>generators="cmake"<line_sep>settings="os" "compiler" "build_type" "arch"<line_sep>options={"shared":[<true> <false>] "fPIC":[<true> <false>]}<line_sep>default_options={"shared":<false> "fPIC":<true>}<line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end>@property<def_stmt>_build_subfolder self<block_start><return>"build_subfolder"<block_end><def_stmt>config_options self<block_start><if_stmt>self.settings.os<eq>"Windows"<block_start><del_stmt>self.options.fPIC<block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version])<line_sep>extracted_folder=self.name+"-"+self.version<line_sep>os.rename(extracted_folder self._source_subfolder)<block_end><def_stmt>_extract_license self<block_start>header=tools.load(os.path.join(self.package_folder "include" "irrXML.h"))<line_sep>license_contents=header[header.find(r"\section license License")+25:header.find(r"\section history" 1)]<line_sep>tools.save("LICENSE" license_contents)<block_end><def_stmt>_configure_cmake self<block_start>cmake=CMake(self)<line_sep>cmake.configure(build_folder=self._build_subfolder)<line_sep><return>cmake<block_end><def_stmt>build self<block_start><for_stmt>patch self.conan_data["patches"][self.version]<block_start>tools.patch(**patch)<block_end>cmake=self._configure_cmake()<line_sep>cmake.build()<block_end><def_stmt>package self<block_start>cmake=self._configure_cmake()<line_sep>cmake.install()<line_sep>self._extract_license()<line_sep>self.copy(pattern="LICENSE" dst="licenses")<block_end><def_stmt>package_info self<block_start>self.cpp_info.libs=tools.collect_libs(self)<if_stmt>self.settings.os<eq>"Linux"<block_start>self.cpp_info.system_libs=["m"]<block_end><block_end><block_end> |
# Generated by Django 3.0.11 on 2021-02-02 15:51
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("posthog" "0121_person_email_index") ]<line_sep>operations=[migrations.AddField(model_name="organization" name="setup_section_2_completed" field=models.BooleanField(default=<true>) ) ]<block_end> |
<import_stmt>json<import_stmt>io<import_stmt>pytest<import_from_stmt>CommonServerPython DemistoException FeedIndicatorType CommandResults<def_stmt>util_load_json path<block_start><with_stmt>io.open(path mode='r' encoding='utf-8')<as>f<block_start><return>json.loads(f.read())<block_end><block_end>SOCRADAR_API_ENDPOINT='https://platform.socradar.com/api'<line_sep>CALCULATE_DBOT_SCORE_INPUTS=[(900 3) (800 2) (450 2) (300 1) (100 1) (0 0) ]<def_stmt>test_test_module requests_mock<block_start>"""Tests the test_module validation command.
"""<import_from_stmt>SOCRadarThreatFusion Client test_module<line_sep>mock_socradar_api_key="APIKey"<line_sep>suffix=f'threat/analysis/check/auth?key={mock_socradar_api_key}'<line_sep>mock_response=util_load_json('test_data/check_auth_response.json')<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>response=test_module(client)<assert_stmt>response<eq>'ok'<block_end><def_stmt>test_test_module_handles_authorization_error requests_mock<block_start>"""Tests the test_module validation command authorization error.
"""<import_from_stmt>SOCRadarThreatFusion Client test_module MESSAGES<line_sep>mock_socradar_api_key="WrongAPIKey"<line_sep>suffix=f'threat/analysis/check/auth?key={mock_socradar_api_key}'<line_sep>mock_response=util_load_json('test_data/check_auth_response_auth_error.json')<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response status_code=401)<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(DemistoException match=MESSAGES['AUTHORIZATION_ERROR'])<block_start>test_module(client)<block_end><block_end><def_stmt>test_ip_command requests_mock<block_start>"""Tests the ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""<import_from_stmt>SOCRadarThreatFusion Client ip_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_response=util_load_json('test_data/score_ip_response.json')<line_sep>suffix='threat/analysis'<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>mock_args={'ip':'1.1.1.1'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>result=ip_command(client=client args=mock_args )<line_sep>expected_output=util_load_json('test_data/score_ip_expected_output.json')<line_sep>expected_context=util_load_json('test_data/score_ip_expected_context_generic_command.json')<assert_stmt>isinstance(result list)<assert_stmt>result<ne>[]<assert_stmt>'### SOCRadar - Analysis results for IP: 1.1.1.1'<in>result[0].readable_output<assert_stmt>result[0].outputs<eq>expected_context<assert_stmt>result[0].raw_response<eq>expected_output<block_end><def_stmt>test_ip_command_handles_incorrect_entity_type <block_start>"""Tests the ip_command function incorrect entity type error.
"""<import_from_stmt>SOCRadarThreatFusion Client ip_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_args={'ip':'INCORRECT IP ADDRESS'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(ValueError)<block_start>ip_command(client=client args=mock_args )<block_end><block_end><def_stmt>test_domain_command requests_mock<block_start>"""Tests the domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""<import_from_stmt>SOCRadarThreatFusion Client domain_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_response=util_load_json('test_data/score_domain_response.json')<line_sep>suffix='threat/analysis'<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>mock_args={'domain':'paloaltonetworks.com'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>result=domain_command(client=client args=mock_args )<line_sep>expected_output=util_load_json('test_data/score_domain_expected_output.json')<line_sep>expected_context=util_load_json('test_data/score_domain_expected_context_generic_command.json')<assert_stmt>isinstance(result list)<assert_stmt>result<ne>[]<assert_stmt>'### SOCRadar - Analysis results for domain: paloaltonetworks.com'<in>result[0].readable_output<assert_stmt>result[0].outputs<eq>expected_context<assert_stmt>result[0].raw_response<eq>expected_output<block_end><def_stmt>test_domain_command_handles_incorrect_entity_type <block_start>"""Tests the domain_command function incorrect entity type error.
"""<import_from_stmt>SOCRadarThreatFusion Client domain_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_args={'domain':'INCORRECT DOMAIN'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(ValueError)<block_start>domain_command(client=client args=mock_args )<block_end><block_end><def_stmt>test_file_command requests_mock<block_start>"""Tests the file_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""<import_from_stmt>SOCRadarThreatFusion Client file_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_response=util_load_json('test_data/score_hash_response.json')<line_sep>suffix='threat/analysis'<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>mock_args={'file':'3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>result=file_command(client=client args=mock_args )<line_sep>expected_output=util_load_json('test_data/score_hash_expected_output.json')<line_sep>expected_context=util_load_json('test_data/score_hash_expected_context_generic_command.json')<assert_stmt>isinstance(result list)<assert_stmt>result<ne>[]<assert_stmt>'### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'<in>result[0].readable_output<assert_stmt>result[0].outputs<eq>expected_context<assert_stmt>result[0].raw_response<eq>expected_output<block_end><def_stmt>test_file_command_handles_incorrect_entity_type <block_start>"""Tests the file_command function incorrect entity type error.
"""<import_from_stmt>SOCRadarThreatFusion Client file_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_args={'file':'INCORRECT HASH'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(ValueError)<block_start>file_command(client=client args=mock_args )<block_end><block_end><def_stmt>test_score_ip requests_mock<block_start>"""Tests the score_ip_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""<import_from_stmt>SOCRadarThreatFusion Client score_ip_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_response=util_load_json('test_data/score_ip_response.json')<line_sep>suffix='threat/analysis'<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>mock_args={'ip':'1.1.1.1'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>result=score_ip_command(client=client args=mock_args )<line_sep>expected_output=util_load_json('test_data/score_ip_expected_output.json')<line_sep>expected_context=util_load_json('test_data/score_ip_expected_context.json')<assert_stmt>isinstance(result CommandResults)<assert_stmt>'### SOCRadar - Analysis results for IP: 1.1.1.1'<in>result.readable_output<assert_stmt>result.outputs<eq>expected_context<assert_stmt>result.raw_response<eq>expected_output<block_end><def_stmt>test_score_ip_handles_incorrect_entity_type <block_start>"""Tests the score_ip_command function incorrect entity type error.
"""<import_from_stmt>SOCRadarThreatFusion Client score_ip_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_args={'ip':'INCORRECT IP ADDRESS'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(ValueError)<block_start>score_ip_command(client=client args=mock_args )<block_end><block_end><def_stmt>test_score_domain requests_mock<block_start>"""Tests the score_domain_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""<import_from_stmt>SOCRadarThreatFusion Client score_domain_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_response=util_load_json('test_data/score_domain_response.json')<line_sep>suffix='threat/analysis'<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>mock_args={'domain':'paloaltonetworks.com'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>result=score_domain_command(client=client args=mock_args )<line_sep>expected_output=util_load_json('test_data/score_domain_expected_output.json')<line_sep>expected_context=util_load_json('test_data/score_domain_expected_context.json')<assert_stmt>isinstance(result CommandResults)<assert_stmt>'### SOCRadar - Analysis results for domain: paloaltonetworks.com'<in>result.readable_output<assert_stmt>result.outputs<eq>expected_context<assert_stmt>result.raw_response<eq>expected_output<block_end><def_stmt>test_score_domain_handles_incorrect_entity_type <block_start>"""Tests the score_domain_command function incorrect entity type error.
"""<import_from_stmt>SOCRadarThreatFusion Client score_domain_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_args={'domain':'INCORRECT DOMAIN'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(ValueError)<block_start>score_domain_command(client=client args=mock_args )<block_end><block_end><def_stmt>test_score_hash requests_mock<block_start>"""Tests the score_hash_command function.
Configures requests_mock instance to generate the appropriate
SOCRadar ThreatFusion API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""<import_from_stmt>SOCRadarThreatFusion Client score_hash_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_response=util_load_json('test_data/score_hash_response.json')<line_sep>suffix='threat/analysis'<line_sep>requests_mock.get(f'{SOCRADAR_API_ENDPOINT}/{suffix}' json=mock_response)<line_sep>mock_args={'hash':'3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<line_sep>result=score_hash_command(client=client args=mock_args)<line_sep>expected_output=util_load_json('test_data/score_hash_expected_output.json')<line_sep>expected_context=util_load_json('test_data/score_hash_expected_context.json')<assert_stmt>isinstance(result CommandResults)<assert_stmt>'### SOCRadar - Analysis results for hash: 3b7b359ea17ac76341957573e332a2d6bcac363401ac71c8df94dac93df6d792'<in>result.readable_output<assert_stmt>result.outputs<eq>expected_context<assert_stmt>result.raw_response<eq>expected_output<block_end><def_stmt>test_score_hash_handles_incorrect_entity_type <block_start>"""Tests the score_hash_command function incorrect entity type error.
"""<import_from_stmt>SOCRadarThreatFusion Client score_hash_command<line_sep>mock_socradar_api_key="APIKey"<line_sep>mock_args={'hash':'INCORRECT HASH'}<line_sep>client=Client(base_url=SOCRADAR_API_ENDPOINT api_key=mock_socradar_api_key verify=<false> proxy=<false>)<with_stmt>pytest.raises(ValueError)<block_start>score_hash_command(client=client args=mock_args )<block_end><block_end>@pytest.mark.parametrize('socradar_score, dbot_score' CALCULATE_DBOT_SCORE_INPUTS)<def_stmt>test_calculate_dbot_score socradar_score dbot_score<block_start><import_from_stmt>SOCRadarThreatFusion calculate_dbot_score<assert_stmt>calculate_dbot_score(socradar_score)<eq>dbot_score<block_end><def_stmt>test_map_indicator_type <block_start><import_from_stmt>SOCRadarThreatFusion map_indicator_type<assert_stmt>FeedIndicatorType.IP<eq>map_indicator_type('ipv4')<assert_stmt>FeedIndicatorType.IPv6<eq>map_indicator_type('ipv6')<assert_stmt>FeedIndicatorType.Domain<eq>map_indicator_type('hostname')<assert_stmt>FeedIndicatorType.File<eq>map_indicator_type('hash')<assert_stmt><none><is>map_indicator_type('IP')<assert_stmt><none><is>map_indicator_type('invalid')<block_end> |
<class_stmt>FilterIntegerRule(FilterNumericValueRule IDisposable)<block_start>"""
A filter rule that operates on integer values in a Revit project.
FilterIntegerRule(valueProvider: FilterableValueProvider,evaluator: FilterNumericRuleEvaluator,ruleValue: int)
"""<def_stmt>Dispose self<block_start>""" Dispose(self: FilterRule,A_0: bool) """<line_sep><pass><block_end><def_stmt>ReleaseUnmanagedResources self *args<block_start>""" ReleaseUnmanagedResources(self: FilterRule,disposing: bool) """<line_sep><pass><block_end><def_stmt>__enter__ self *args<block_start>""" __enter__(self: IDisposable) -> object """<line_sep><pass><block_end><def_stmt>__exit__ self *args<block_start>""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end>@staticmethod<def_stmt>__new__ self valueProvider evaluator ruleValue<block_start>""" __new__(cls: type,valueProvider: FilterableValueProvider,evaluator: FilterNumericRuleEvaluator,ruleValue: int) """<line_sep><pass><block_end>RuleValue=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""The user-supplied value against which values from a Revit document will be tested.
Get: RuleValue(self: FilterIntegerRule) -> int
Set: RuleValue(self: FilterIntegerRule)=value
"""<block_end> |
<import_stmt>uuid<import_from_stmt>logging getLogger<import_from_stmt>typing Any Dict List<import_from_stmt>fastapi APIRouter HTTPException<import_from_stmt>src.ml.data Data<import_from_stmt>src.ml.outlier_detection outlier_detector<import_from_stmt>src.ml.prediction classifier<import_from_stmt>src.utils.profiler log_decorator<line_sep>logger=getLogger(__name__)<line_sep>router=APIRouter()<line_sep>@router.get("/health")<def_stmt>health <arrow>Dict[str str]<block_start><return>{"health":"ok"}<block_end>@router.get("/metadata")<def_stmt>metadata <arrow>Dict[str Any]<block_start><return>{"data_type":"float32" "data_structure":"(1,4)" "data_sample":Data().data "prediction_type":"float32" "prediction_structure":"(1,3)" "prediction_sample":[0.97093159 0.01558308 0.01348537] "outlier_type":"bool, float32" "outlier_structure":"(1,2)" "outlier_sample":[<false> 0.4] }<block_end>@router.get("/label")<def_stmt>label <arrow>Dict[int str]<block_start><return>classifier.label<block_end>@log_decorator(endpoint="/predict/test" logger=logger)<def_stmt>_predict_test job_id:str<arrow>Dict[str Any]<block_start>logger.info(f"execute: [{job_id}]")<line_sep>prediction=classifier.predict(data=Data().data)<line_sep>is_outlier,outlier_score=outlier_detector.predict(data=Data().data)<line_sep>prediction_list=list(prediction)<line_sep><return>{"job_id":job_id "prediction":prediction_list "is_outlier":is_outlier "outlier_score":outlier_score }<block_end>@router.get("/predict/test")<def_stmt>predict_test <arrow>Dict[str Any]<block_start>job_id=str(uuid.uuid4())[:6]<line_sep><return>_predict_test(job_id=job_id)<block_end>@log_decorator(endpoint="/predict/test/label" logger=logger)<def_stmt>_predict_test_label job_id:str<arrow>Dict[str Any]<block_start>logger.info(f"execute: [{job_id}]")<line_sep>prediction=classifier.predict_label(data=Data().data)<line_sep>is_outlier,outlier_score=outlier_detector.predict(data=Data().data)<line_sep><return>{"job_id":job_id "prediction":prediction "is_outlier":is_outlier "outlier_score":outlier_score }<block_end>@router.get("/predict/test/label")<def_stmt>predict_test_label <arrow>Dict[str Any]<block_start>job_id=str(uuid.uuid4())[:6]<line_sep><return>_predict_test_label(job_id=job_id)<block_end>@log_decorator(endpoint="/predict" logger=logger)<def_stmt>_predict data:Data job_id:str<arrow>Dict[str Any]<block_start>logger.info(f"execute: [{job_id}]")<if_stmt>len(data.data)<ne>1<or>len(data.data[0])<ne>4<block_start><raise>HTTPException(status_code=404 detail="Invalid input data")<block_end>prediction=classifier.predict(data.data)<line_sep>is_outlier,outlier_score=outlier_detector.predict(data=data.data)<line_sep>prediction_list=list(prediction)<line_sep><return>{"job_id":job_id "prediction":prediction_list "is_outlier":is_outlier "outlier_score":outlier_score }<block_end>@router.post("/predict")<def_stmt>predict data:Data<arrow>Dict[str Any]<block_start>job_id=str(uuid.uuid4())[:6]<line_sep><return>_predict(data=data job_id=job_id)<block_end>@log_decorator(endpoint="/predict/label" logger=logger)<def_stmt>_predict_label data:Data job_id:str<arrow>Dict[str str]<block_start>logger.info(f"execute: [{job_id}]")<if_stmt>len(data.data)<ne>1<or>len(data.data[0])<ne>4<block_start><raise>HTTPException(status_code=404 detail="Invalid input data")<block_end>prediction=classifier.predict_label(data.data)<line_sep>is_outlier,outlier_score=outlier_detector.predict(data=data.data)<line_sep><return>{"job_id":job_id "prediction":prediction "is_outlier":is_outlier "outlier_score":outlier_score 
}<block_end>@router.post("/predict/label")<def_stmt>predict_label data:Data<arrow>Dict[str Any]<block_start>job_id=str(uuid.uuid4())[:6]<line_sep><return>_predict_label(data=data job_id=job_id)<block_end> |
<import_from_stmt>dataclasses dataclass<import_from_stmt>pygears.sim.sim_gear SimGear<import_from_stmt>pygears.sim delta timestep log clk<import_from_stmt>pygears.sim.sim SimPlugin<import_from_stmt>pygears.conf Inject inject<import_from_stmt>pygears GearDone reg<import_from_stmt>.cosim_port CosimNoData InCosimPort OutCosimPort<class_stmt>CosimBase(SimGear)<block_start>@inject<def_stmt>__init__ self gear timeout=-1 sim_map=Inject('sim/map')<block_start>super().__init__(gear)<line_sep>self.timeout=timeout<line_sep>self.in_cosim_ports=[InCosimPort(self p)<for>p gear.in_ports]<line_sep>self.out_cosim_ports=[OutCosimPort(self p)<for>p gear.out_ports]<line_sep>self.eval_needed=<false><for_stmt>p (self.in_cosim_ports+self.out_cosim_ports)<block_start>sim_map[p.port]=p<block_end><block_end><def_stmt>cycle self<block_start><raise>NotImplementedError()<block_end><def_stmt>forward self<block_start><raise>NotImplementedError()<block_end><def_stmt>back self<block_start><raise>NotImplementedError()<block_end><def_stmt>read_out self port<block_start><if_stmt><not>port<in>self.handlers<block_start><raise>ConnectionResetError<block_end><if_stmt>self.eval_needed<block_start>self.forward()<block_end>self.eval_needed=<true><line_sep>hout=self.handlers[port]<line_sep>hout.reset()<line_sep><return>hout.read()<block_end><def_stmt>ack_out self port<block_start><if_stmt><not>port<in>self.handlers<block_start><raise>ConnectionResetError<block_end>self.eval_needed=<true><line_sep>hout=self.handlers[port]<line_sep>hout.ack()<line_sep>self.activity_monitor=0<block_end><def_stmt>write_in self port data<block_start><if_stmt><not>port<in>self.handlers<block_start><raise>ConnectionResetError<block_end>self.eval_needed=<true><line_sep>hin=self.handlers[port]<line_sep><return>hin.send(data)<block_end><def_stmt>reset_in self port<block_start><if_stmt><not>port<in>self.handlers<block_start><raise>ConnectionResetError<block_end>self.eval_needed=<true><line_sep>hin=self.handlers[port]<line_sep>hin.reset()<block_end><def_stmt>ready_in self port<block_start><if_stmt><not>port<in>self.handlers<block_start><raise>ConnectionResetError<block_end><if_stmt>self.eval_needed<block_start>self.back()<line_sep>self.eval_needed=<false><block_end>hin=self.handlers[port]<if_stmt>hin.ready()<block_start>self.activity_monitor=0<line_sep><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><async_keyword><def_stmt>func self *args **kwds<block_start>self.activity_monitor=0<line_sep>self.eval_needed=<false><try_stmt><block_start><while_stmt><true><block_start>phase=<none><while_stmt>phase<ne>'back'<block_start>phase=<await>delta()<block_end><if_stmt>self.eval_needed<block_start>self.forward()<line_sep>self.eval_needed=<false><block_end><if_stmt>self.activity_monitor<eq>self.timeout<block_start><raise>GearDone<block_end>self.cycle()<line_sep>self.activity_monitor<augadd>1<block_end><block_end><except_stmt>(GearDone BrokenPipeError)# print(f"SimGear canceling: {self.gear.name}")
<block_start><for_stmt>p self.gear.out_ports<block_start>p.producer.finish()<block_end>self._finish()<line_sep><raise>GearDone<block_end><block_end><block_end>@dataclass<class_stmt>AuxClock<block_start>name:str<line_sep>frequency:int<block_end><class_stmt>CosimPlugin(SimPlugin)<block_start>@classmethod<def_stmt>bind cls<block_start>reg.confdef('sim/aux_clock' default=[])<block_end><block_end> |
""" opentrons_shared_data.module: functions and types for module defs """<import_stmt>json<import_from_stmt>pathlib Path<import_from_stmt>typing overload TYPE_CHECKING<import_from_stmt>..load load_shared_data<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>.dev_types SchemaVersions ModuleSchema SchemaV1 SchemaV2 ModuleDefinitionV1 ModuleDefinitionV2 ModuleModel <block_end><class_stmt>ModuleNotFoundError(KeyError)<block_start><def_stmt>__init__ self version:str model_or_loadname:str<block_start>super().__init__(model_or_loadname)<line_sep>self.requested_version=version<line_sep>self.requested_module=model_or_loadname<block_end><def_stmt>__str__ self<arrow>str<block_start><return>f'No such version {self.requested_version} module '<concat>f'{self.requested_module}'<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>f'{self.__class__.__name__}: {self.requested_module} '<concat>f'at version {self.requested_version}'<block_end><block_end><def_stmt>load_schema version:'SchemaVersions'<arrow>'ModuleSchema'<block_start>path=Path('module')/'schemas'/f'{version}.json'<line_sep><return>json.loads(load_shared_data(path))<block_end>@overload<def_stmt>load_definition version:'SchemaV1' model_or_loadname:str<arrow>'ModuleDefinitionV1'<block_start><ellipsis><block_end>@overload<def_stmt>load_definition version:'SchemaV2' model_or_loadname:'ModuleModel'<arrow>'ModuleDefinitionV2'<block_start><ellipsis><block_end><def_stmt>load_definition version model_or_definition<block_start><if_stmt>version<eq>'1'<block_start>path=Path('module')/'definitions'/'1.json'<line_sep>data=json.loads(load_shared_data(path))<try_stmt><block_start><return>data[model_or_definition]<block_end><except_stmt>KeyError<block_start><raise>ModuleNotFoundError('1' model_or_definition)<block_end><block_end><else_stmt><block_start>path=Path(f'module/definitions/2/{model_or_definition}.json')<try_stmt><block_start>data=load_shared_data(path)<block_end><except_stmt>FileNotFoundError<block_start><raise>ModuleNotFoundError('2' model_or_definition)<block_end><return>json.loads(data)<block_end><block_end> |
<import_stmt>asyncio<import_from_stmt>grpclib.utils graceful_exit<import_from_stmt>grpclib.server Server<import_from_stmt>grpclib.reflection.service ServerReflection<import_from_stmt>helloworld.server Greeter<async_keyword><def_stmt>main * host:str='127.0.0.1' port:int=50051<arrow><none><block_start>services=ServerReflection.extend([Greeter()])<line_sep>server=Server(services)<with_stmt>graceful_exit([server])<block_start><await>server.start(host port)<line_sep>print(f'Serving on {host}:{port}')<line_sep><await>server.wait_closed()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>asyncio.run(main())<block_end> |
# -*- coding: utf-8 -*-
"""This file imports Python modules that register analyzers."""<import_from_stmt>plaso.analyzers hashing_analyzer<import_from_stmt>plaso.analyzers yara_analyzer<line_sep> |
<import_from_stmt>..compat mock<def_stmt>test_loop_aborts_without_checks app settings<block_start><assert_stmt>app.run()<eq>1<block_end><def_stmt>test_main_executes_all_checks_before_loop app settings<block_start><with_stmt>mock.patch.object(app "check_forever" side_effect=app.on_interrupt)<as>the_loop<block_start>settings.checks.append({'name':'A' 'script':{'python':'ok, content = True, "ok"'}})<assert_stmt>app.run()<eq>1<block_end><assert_stmt>the_loop.call_count<eq>1<assert_stmt>the_loop.call_args[0][0][0].check.call_count<eq>1<block_end><def_stmt>test_main_filters_names app settings<block_start><with_stmt>mock.patch.object(app "check_forever" side_effect=app.on_interrupt)<as>the_loop<block_start>settings.checks.extend([{'name':'A' 'url':'A'} {'name':'B' 'url':'B'} ])<assert_stmt>app.run(names=['B'])<eq>1<block_end><assert_stmt>the_loop.call_count<eq>1<assert_stmt>the_loop.call_args[0][0][0].check.call_count<eq>1<block_end> |
"""
2-layer controller.
"""<import_from_stmt>aw_nas utils assert_rollout_type<import_from_stmt>aw_nas.utils DistributedDataParallel<import_from_stmt>aw_nas.controller.base BaseController<import_from_stmt>aw_nas.btcs.layer2.search_space Layer2Rollout Layer2DiffRollout DenseMicroRollout DenseMicroDiffRollout StagewiseMacroRollout StagewiseMacroDiffRollout SinkConnectMacroDiffRollout <import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_stmt>os<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<as>optim<try_stmt># from torch.nn.SyncBatchNorm import convert_sync_batch_norm as convert_sync_bn
<block_start><import_from_stmt>torch.nn SyncBatchNorm<line_sep>convert_sync_bn=SyncBatchNorm.convert_sync_batchnorm<block_end><except_stmt>ImportError<block_start>convert_sync_bn=<lambda>m:m<block_end><class_stmt>Layer2Optimizer(optim.Optimizer)<block_start><def_stmt>__init__ self params **opt_cfg<block_start>super(Layer2Optimizer self).__init__([torch.tensor([])] defaults={})<line_sep>macro_opt_type=opt_cfg["macro"].pop("type")<line_sep>micro_opt_type=opt_cfg["micro"].pop("type")<line_sep># currently width alphas & macro-alpha share the same optimizer
self.macro_optimizer=getattr(optim macro_opt_type)(nn.ParameterList(params[0:2]) **opt_cfg["macro"])<line_sep># after adding width-alphas, as 2nd
self.micro_optimizer=getattr(optim micro_opt_type)(nn.ParameterList(params[2:]) **opt_cfg["micro"])<block_end><def_stmt>step self<block_start>self.macro_optimizer.step()<line_sep>self.micro_optimizer.step()<block_end><block_end>torch.optim.layer2=Layer2Optimizer# add patch the torch optim
<class_stmt>Layer2DiffController(BaseController nn.Module)<block_start>NAME="layer2-differentiable"<def_stmt>__init__ self search_space rollout_type mode="eval" device="cuda" macro_controller_type="random_sample" macro_controller_cfg={} micro_controller_type="random_sample" micro_controller_cfg={} inspect_hessian_every=-1 save_alphas_every=-1 multiprocess=<false> schedule_cfg=<none> <block_start>super(Layer2DiffController self).__init__(search_space rollout_type schedule_cfg=schedule_cfg)<line_sep>nn.Module.__init__(self)<line_sep>self.search_space=search_space<line_sep>self.rollout_type=rollout_type<line_sep>self.device=device<line_sep>self.to(self.device)<line_sep>self.inspect_hessian_every=inspect_hessian_every<line_sep>self.inspect_hessian=<false><line_sep>self.save_alphas_every=save_alphas_every<line_sep>self.save_alphas=<false><line_sep>self.saved_dict={"macro":[] "micro":[] "width":[] }<line_sep>self.multiprocess=multiprocess<line_sep># the macro/micro controllers
<if_stmt>macro_controller_type<eq>"macro-stagewise-diff"<block_start>self.macro_controller=MacroStagewiseDiffController(self.search_space.macro_search_space macro_controller_type device=self.device multiprocess=self.multiprocess **macro_controller_cfg )<block_end><elif_stmt>macro_controller_type<eq>"macro-sink-connect-diff"<block_start>self.macro_controller=MacroSinkConnectDiffController(self.search_space.macro_search_space macro_controller_type device=self.device multiprocess=self.multiprocess **macro_controller_cfg )<block_end><else_stmt><block_start><raise>NotImplementedError()<block_end><if_stmt>micro_controller_type<eq>"micro-dense-diff"<block_start>self.micro_controller=MicroDenseDiffController(self.search_space.micro_search_space micro_controller_type device=self.device multiprocess=self.multiprocess **micro_controller_cfg )<block_end><else_stmt><block_start><raise>NotImplementedError()<block_end>object.__setattr__(self "parallel_model" self)<line_sep>self._parallelize()<block_end><def_stmt>_parallelize self<block_start><if_stmt>self.multiprocess<block_start>net=convert_sync_bn(self).to(self.device)<line_sep>object.__setattr__(self "parallel_model" DistributedDataParallel(self (self.device ) find_unused_parameters=<true>) )<block_end><block_end><def_stmt>on_epoch_start self epoch<block_start>super(Layer2DiffController self).on_epoch_start(epoch)<if_stmt>self.inspect_hessian_every<ge>0<and>epoch%self.inspect_hessian_every<eq>0<block_start>self.inspect_hessian=<true><block_end><if_stmt>self.save_alphas_every<ge>0<and>epoch%self.save_alphas_every<eq>0<block_start>self.save_alphas=<true><block_end># save alphas every epoch
<if_stmt>self.save_alphas<block_start>self.saved_dict["macro"].append([alpha.data.cpu()<for>alpha self.macro_controller.cg_alphas])<line_sep>self.saved_dict["micro"].append([alpha.data.cpu()<for>alpha self.micro_controller.cg_alphas])<line_sep>self.saved_dict["width"].append([width_alpha.cpu()<for>width_alpha self.macro_controller.width_alphas])<block_end>self.macro_controller.on_epoch_start(epoch)<line_sep>self.micro_controller.on_epoch_start(epoch)<block_end><def_stmt>set_device self device<block_start>self.device=device<line_sep>self.to(device)<block_end><def_stmt>set_mode self mode<block_start>super(Layer2DiffController self).set_mode(mode)<if_stmt>mode<eq>"train"<block_start>nn.Module.train(self)<block_end><elif_stmt>mode<eq>"eval"<block_start>nn.Module.eval(self)<block_end><else_stmt><block_start><raise>Exception("Unrecognized mode: {}".format(mode))<block_end><block_end><def_stmt>parameters self recurse=<false># FIXME: normal nn.module.parameters() use recurse=True to acquire all params
<block_start>param_list=nn.ParameterList([])<line_sep>param_list.extend(self.macro_controller.parameters())<line_sep>param_list.extend(self.micro_controller.parameters())<line_sep><return>param_list<block_end><def_stmt>_entropy_loss self<block_start><return>(self.macro_controller._entropy_loss()+self.micro_controller._entropy_loss())<block_end><def_stmt>sample self n=1 batch_size=1<block_start><if_stmt>self.multiprocess<block_start><return>self.parallel_model.forward(n=n batch_size=batch_size)<block_end><else_stmt><block_start><return>self.forward(n=n batch_size=batch_size)<block_end><block_end><def_stmt>forward self n=1 batch_size=1<block_start>rollouts=[]<line_sep>macro_rollouts=self.macro_controller.forward(n=n batch_size=batch_size)<line_sep>micro_rollouts=self.micro_controller.forward(n=n batch_size=batch_size)<for_stmt>i range(n)<block_start>rollouts.append(Layer2DiffRollout(macro_rollouts[i] micro_rollouts[i] self.search_space))<block_end><return>rollouts<block_end><def_stmt>gradient self loss return_grads=<true> zero_grads=<true><block_start><if_stmt>zero_grads<block_start>self.zero_grad()<block_end><if_stmt>self.inspect_hessian<block_start><for_stmt>name,param self.named_parameters()<block_start>max_eig=utils.torch_utils.max_eig_of_hessian(loss param)<line_sep>self.logger.info("Max eigenvalue of Hessian of %s: %f" name max_eig)<block_end><block_end>_loss=loss+self._entropy_loss()<line_sep>_loss.backward()<if_stmt>return_grads<block_start><return>utils.get_numpy(_loss) [(k v.grad.clone())<for>k,v self.named_parameters()]<block_end><return>utils.get_numpy(_loss)<block_end><def_stmt>step_current_gradient self optimizer<block_start>self.macro_controller.step_current_gradient(optimizer.macro_optimizer)<line_sep>self.micro_controller.step_current_gradient(optimizer.micro_optimizer)<block_end><def_stmt>step_gradient self gradients optimizer<block_start>self.macro_controller.step_gradient(gradients[0] optimizer.macro_optimizer)<line_sep>self.micro_controller.step_gradient(gradients[1] optimizer.micro_optimizer)<block_end><def_stmt>step self rollouts optimizer perf_name<block_start>macro_rollouts=[r.macro<for>r rollouts]<line_sep>micro_rollouts=[r.micro<for>r rollouts]<line_sep>macro_loss=self.macro_controller.step(macro_rollouts optimizer.macro_optimizer perf_name)<line_sep>micro_loss=self.micro_controller.step(micro_rollouts optimizer.micro_optimizer perf_name)<line_sep><return>macro_loss micro_loss<block_end><def_stmt>summary self rollouts log=<false> log_prefix="" step=<none><block_start>macro_rollouts=[r.macro<for>r rollouts]<line_sep>micro_rollouts=[r.micro<for>r rollouts]<line_sep>self.macro_controller.summary(macro_rollouts log=log log_prefix=log_prefix step=<none>)<line_sep>self.micro_controller.summary(micro_rollouts log=log log_prefix=log_prefix step=<none>)<block_end><def_stmt>save self path<block_start>"""Save the parameters to disk."""<line_sep>torch.save({"epoch":self.epoch "state_dict":self.state_dict()} path)<line_sep>self.logger.info("Saved controller network to %s" path)<line_sep>"""save alphas"""<if_stmt>self.save_alphas_every<is><not><none># os.path.dirname means the parent path of the `PATH`
<block_start>torch.save(self.saved_dict os.path.join(os.path.dirname(os.path.dirname(path)) "alphas.pth") )<block_end><block_end><def_stmt>load self path<block_start>"""Load the parameters from disk."""<line_sep>checkpoint=torch.load(path map_location=torch.device("cpu"))<line_sep>self.load_state_dict(checkpoint["state_dict"])<line_sep>self.on_epoch_start(checkpoint["epoch"])<line_sep>self.logger.info("Loaded controller network from %s" path)<block_end># since the layer2controller.parameters() is a list of [macro_parameters(), micro_parameters()], we need to override the zero_grad() since it used model.parameters()
<def_stmt>zero_grad self<block_start><for_stmt>param self.parameters()<block_start><for_stmt>p param<block_start><if_stmt>p.grad<is><not><none><block_start>p.grad.detach_()<line_sep>p.grad.zero_()<block_end><block_end><block_end><block_end>@classmethod<def_stmt>supported_rollout_types cls<block_start><return>["layer2" "layer2-differentiable"]<block_end><block_end><class_stmt>GetArchMacro(torch.autograd.Function)<block_start>@staticmethod<def_stmt>forward ctx search_space op_weights device i_stage <block_start>stage_conn=torch.zeros((search_space.stage_node_nums[i_stage] search_space.stage_node_nums[i_stage] )).to(device)<line_sep>stage_conn[search_space.idxes[i_stage]]=op_weights<line_sep>ctx.save_for_backward(torch.as_tensor(op_weights) torch.as_tensor(search_space.idxes[i_stage]))<line_sep><return>stage_conn<block_end>@staticmethod<def_stmt>backward ctx grad_output<block_start>op_weights,idxes=ctx.saved_tensors<line_sep>op_weights_grad=grad_output[idxes[0] idxes[1]]<line_sep><return><none> op_weights_grad <none> <none> <none><block_end><block_end><class_stmt>MacroStagewiseDiffController(BaseController nn.Module)<block_start>NAME="macro-stagewise-diff"<line_sep>SCHEDULABLE_ATTRS=["gumbel_temperature" "entropy_coeff" "force_uniform" "width_gumbel_temperature" "width_entropy_coeff" ]<def_stmt>__init__ self search_space rollout_type mode="eval" device="cuda" use_prob=<false> gumbel_hard=<false> gumbel_temperature=1.0 use_sigmoid=<false> use_edge_normalization=<false> entropy_coeff=0.01 max_grad_norm=<none> force_uniform=<false> full_init=<false> # use all-one initialization and big flops reg
progressive_pruning_th=<none> multiprocess=<false> per_stage_width=<true> # default use per stage width
width_entropy_coeff=0.01 width_gumbel_temperature=1.0 schedule_cfg=<none> <block_start>super(MacroStagewiseDiffController self).__init__(search_space rollout_type schedule_cfg=schedule_cfg)<line_sep>nn.Module.__init__(self)<line_sep>self.device=device<line_sep># sampling
self.use_prob=use_prob<line_sep>self.gumbel_hard=gumbel_hard<line_sep>self.gumbel_temperature=gumbel_temperature<line_sep>self.use_sigmoid=use_sigmoid<line_sep># use_prob / use_sigmoid should not both be True at the same time
# if both are False, use plain gumbel softmax
<assert_stmt><not>(use_prob<and>use_sigmoid)<line_sep># edge normalization
self.use_edge_normalization=use_edge_normalization<line_sep># training
self.entropy_coeff=entropy_coeff<line_sep>self.max_grad_norm=max_grad_norm<line_sep>self.force_uniform=force_uniform<line_sep>self.progressive_pruning_th=progressive_pruning_th<line_sep>self.width_choice=self.search_space.width_choice<line_sep>self.multiprocess=multiprocess<line_sep>self.per_stage_width=per_stage_width<line_sep>self.width_gumbel_temperature=width_gumbel_temperature<line_sep>self.width_entropy_coeff=width_entropy_coeff<line_sep># generate parameters
self.full_init=full_init<if_stmt><not>self.full_init<block_start>init_value=1.0e-3<block_end><else_stmt><block_start>init_value=1.0<block_end>self.cg_alphas=nn.ParameterList([nn.Parameter(init_value<times>torch.randn(sum(self.search_space.num_possible_edges)))])<line_sep># width choices [#cells , #width_choice]
<if_stmt>self.width_choice<is><not><none><block_start><if_stmt><not>self.per_stage_width<block_start>self.width_alphas=nn.ParameterList([nn.Parameter(init_value<times>torch.randn(len(self.search_space.cell_layout) len(self.width_choice) ))])<block_end><else_stmt><block_start>self.width_alphas=nn.ParameterList([nn.Parameter(init_value<times>torch.randn(len(self.search_space.stage_node_nums) len(self.width_choice) ))])<block_end><block_end>self.stage_num_alphas=(self.search_space.num_possible_edges)<line_sep># used for competible with sink-connecting ss
<if_stmt>self.use_edge_normalization<block_start><raise>NotImplementedError("MacroDiffController does not support edge-norm")<block_end><else_stmt><block_start>self.cg_betas=<none><block_end>self.get_arch=GetArchMacro()<line_sep>self.to(self.device)<block_end><def_stmt>set_mode self mode<block_start>super(MacroStagewiseDiffController self).set_mode(mode)<if_stmt>mode<eq>"train"<block_start>nn.Module.train(self)<block_end><elif_stmt>mode<eq>"eval"<block_start>nn.Module.eval(self)<block_end><else_stmt><block_start><raise>Exception("Unrecognized mode: {}".format(mode))<block_end><block_end><def_stmt>set_device self device<block_start>self.device=device<line_sep>self.to(device)<block_end><def_stmt>progressive_pruning self<block_start><for_stmt>alpha self.cg_alphas# inpalce replace alphas that smaller than the pruning threshold, no grad
<block_start>alpha.data=alpha<times>(alpha.gt(self.progressive_pruning_th).float())<block_end><block_end><def_stmt>forward self n=1 batch_size=1<block_start><return>self.sample(n=n batch_size=batch_size)<block_end><def_stmt>sample self n=1 batch_size=1<block_start><if_stmt>self.progressive_pruning_th<is><not><none><block_start>self.progressive_pruning()<block_end>width_arch,width_logits=self.sample_width(n=n batch_size=batch_size)<line_sep>rollouts=[]<for_stmt>i_sample range(n)# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
<block_start>op_weights_list=[]<line_sep>edge_norms_list=[]<line_sep>sampled_list=[]<line_sep>logits_list=[]<for_stmt>alphas self.cg_alphas<block_start><if_stmt>(self.progressive_pruning_th<is><not><none><and>self.progressive_pruning_th<g>0)<block_start>alphas=alphas.clamp(self.progressive_pruning_th 1.0e4)<block_end><else_stmt><block_start><pass><block_end><if_stmt>self.force_uniform# cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affect edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
<block_start>alphas=torch.zeros_like(alphas)<block_end><if_stmt>batch_size<g>1<block_start>expanded_alpha=(alphas.reshape([alphas.shape[0] 1 alphas.shape[1]]).repeat([1 batch_size 1]).reshape([-1 alphas.shape[-1]]))<block_end><else_stmt><block_start>expanded_alpha=alphas<block_end><if_stmt>self.use_prob<block_start>sampled=F.softmax(expanded_alpha/self.gumbel_temperature dim=-1)<block_end><elif_stmt>self.use_sigmoid<block_start>sampled=utils.relaxed_bernoulli_sample(expanded_alpha self.gumbel_temperature)<block_end><else_stmt># gumbel sampling
<block_start>sampled,_=utils.gumbel_softmax(expanded_alpha self.gumbel_temperature hard=<false>)<block_end><if_stmt>self.gumbel_hard<block_start>op_weights=utils.straight_through(sampled)<block_end><else_stmt><block_start>op_weights=sampled<block_end><if_stmt>batch_size<g>1<block_start>sampled=sampled.reshape([-1 batch_size op_weights.shape[-1]])<line_sep>op_weights=op_weights.reshape([-1 batch_size op_weights.shape[-1]])<block_end>op_weights_list.append(op_weights)<line_sep>sampled_list.append(utils.get_numpy(sampled))<line_sep># logits_list.append(utils.get_numpy(alphas))
logits_list.append(alphas)<line_sep>stage_conns=[]<line_sep>split_op_weights=torch.split(op_weights self.stage_num_alphas)<for_stmt>i_stage range(self.search_space.stage_num)<block_start>stage_conn=self.get_arch.apply(self.search_space split_op_weights[i_stage] self.device i_stage )<line_sep>stage_conns.append(stage_conn)<block_end><block_end>rollouts.append(StagewiseMacroDiffRollout(arch=stage_conns sampled=sampled_list logits=logits_list width_arch=width_arch[i_sample] width_logits=width_logits[i_sample] search_space=self.search_space ))<block_end><return>rollouts<block_end><def_stmt>sample_width self n=1 batch_size=1<block_start><assert_stmt>batch_size<eq>1 "sample_width should not have batch size > 1"<line_sep>width_sampled_list=[]<line_sep>width_logits_list=[]<line_sep>width_op_weights_list=[]<for_stmt>_ range(n)# sample the width alphas
<block_start><for_stmt>width_alphas self.width_alphas<block_start><if_stmt>self.force_uniform# cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affect edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
<block_start>width_alphas=torch.zeros_like(width_alphas)<block_end><if_stmt>batch_size<g>1<block_start>expanded_width_alpha=(width_alphas.reshape([width_alphas.shape[0] 1 width_alphas.shape[1]]).repeat([1 batch_size 1]).reshape([-1 width_alphas.shape[-1]]))<block_end><else_stmt><block_start>expanded_width_alpha=width_alphas<block_end><if_stmt>self.use_prob<block_start>width_sampled=F.softmax(expanded_width_alpha/self.width_gumbel_temperature dim=-1)<block_end><elif_stmt>self.use_sigmoid<block_start>width_sampled=utils.relaxed_bernoulli_sample(expanded_width_alpha self.width_gumbel_temperature)<block_end><else_stmt># gumbel sampling
<block_start>width_sampled,_=utils.gumbel_softmax(expanded_width_alpha self.width_gumbel_temperature hard=<false>)<block_end><if_stmt>self.gumbel_hard<block_start>width_op_weights=utils.straight_through(width_sampled)<block_end><else_stmt><block_start>width_op_weights=width_sampled<block_end><if_stmt>batch_size<g>1<block_start>width_sampled=width_sampled.reshape([-1 batch_size width_op_weights.shape[-1]])<line_sep>width_op_weights=width_op_weights.reshape([-1 batch_size width_op_weights.shape[-1]])<block_end><if_stmt><not>self.per_stage_width<block_start>width_op_weights_full=width_op_weights<line_sep>width_sampled_full=width_sampled<line_sep>width_alphas_full=width_alphas<block_end><else_stmt># the last stage has one more node
<block_start>node_list=self.search_space.stage_node_nums.copy()<line_sep># reduce the 1st stage num_node by 1
# to let all reduction cells use the width-alphas of the next stage
node_list[0]=node_list[0]-1<line_sep>width_op_weights_full=torch.cat([width_op_weights[idx_stage].repeat(num_nodes-1 1)<for>idx_stage,num_nodes enumerate(node_list)])<line_sep>width_sampled_full=torch.cat([width_sampled[idx_stage].repeat(num_nodes-1 1)<for>idx_stage,num_nodes enumerate(node_list)])<line_sep>width_alphas_full=torch.cat([width_alphas[idx_stage].repeat(num_nodes-1 1)<for>idx_stage,num_nodes enumerate(node_list)])<block_end><block_end>width_op_weights_list.append(width_op_weights_full)<line_sep>width_sampled_list.append(utils.get_numpy(width_sampled_full))<line_sep># logits_list.append(utils.get_numpy(alphas))
width_logits_list.append(width_alphas_full)<block_end><return>width_op_weights_list width_logits_list<block_end><def_stmt>save self path<block_start>"""Save the parameters to disk."""<line_sep>torch.save({"epoch":self.epoch "state_dict":self.state_dict()} path)<line_sep>self.logger.info("Saved controller network to %s" path)<block_end><def_stmt>load self path<block_start>"""Load the parameters from disk."""<line_sep>checkpoint=torch.load(path map_location=torch.device("cpu"))<line_sep>self.load_state_dict(checkpoint["state_dict"])<line_sep>self.on_epoch_start(checkpoint["epoch"])<line_sep>self.logger.info("Loaded controller network from %s" path)<block_end><def_stmt>_entropy_loss self<block_start>ent_loss=0.0<if_stmt>self.entropy_coeff<g>0<block_start>alphas=self.cg_alphas[0].split([i-1<for>i self.search_space.stage_node_nums])<line_sep>probs=[F.softmax(alpha dim=-1)<for>alpha self.cg_alphas]<line_sep>ent_loss=(self.entropy_coeff<times>sum(-(torch.log(prob)<times>prob).sum()<for>prob probs)+ent_loss)<block_end><if_stmt>self.width_entropy_coeff<g>0<block_start>width_alphas=self.width_alphas<line_sep>probs=[F.softmax(alpha dim=-1)<for>alpha self.width_alphas]<line_sep>ent_loss=(self.width_entropy_coeff<times>sum(-(torch.log(prob)<times>prob).sum()<for>prob probs)+ent_loss)<block_end><return>ent_loss<block_end><def_stmt>gradient self loss return_grads=<true> zero_grads=<true><block_start><raise>NotImplementedError("the grad function is implemented in the layer2diffcontroller.gradient()")<block_end><def_stmt>step_current_gradient self optimizer<block_start><if_stmt>self.max_grad_norm<is><not><none><block_start>torch.nn.utils.clip_grad_norm_(self.parameters() self.max_grad_norm)<block_end>optimizer.step()<block_end><def_stmt>step_gradient self gradients optimizer<block_start>self.zero_grad()<line_sep>named_params=dict(self.named_parameters())<for_stmt>k,grad gradients<block_start>named_params[k].grad=grad<block_end># clip the gradients
<if_stmt>self.max_grad_norm<is><not><none><block_start>torch.nn.utils.clip_grad_norm_(self.parameters() self.max_grad_norm)<block_end># apply the gradients
optimizer.step()<block_end><def_stmt>step self rollouts optimizer perf_name# very memory inefficient
<block_start>self.zero_grad()<line_sep>losses=[r.get_perf(perf_name)<for>r rollouts]<line_sep>optimizer.step()<line_sep>[l.backward()<for>l losses]<line_sep><return>np.mean([l.detach().cpu().numpy()<for>l losses])<block_end><def_stmt>__getstate__ self<block_start>state=super(MacroStagewiseDiffController self).__getstate__().copy()<del_stmt>state["get_arch"]<line_sep><return>state<block_end><def_stmt>__setstate__ self state<block_start>super(MacroStagewiseDiffController self).__setstate__(state)<line_sep>self.get_arch=GetArchMacro()<block_end><def_stmt>summary self rollouts log=<false> log_prefix="" step=<none><block_start>num=len(rollouts)<line_sep>logits_list=[[utils.get_numpy(logits)<for>logits r.logits]<for>r rollouts]<line_sep>_ss=self.search_space<if_stmt>self.gumbel_hard<block_start>cg_logprobs=[0.0<for>_ range(_ss.num_cell_groups)]<block_end>cg_entros=[0.0<for>_ range(_ss.num_cell_groups)]<for_stmt>rollout,logits zip(rollouts logits_list)<block_start><for_stmt>cg_idx,(vec cg_logits) enumerate(zip(rollout.arch logits))<block_start>prob=utils.softmax(cg_logits)<line_sep>logprob=np.log(prob)<if_stmt>self.gumbel_hard<block_start>inds=np.argmax(utils.get_numpy(vec.op_weights) axis=-1)<line_sep>cg_logprobs[cg_idx]<augadd>np.sum(logprob[range(len(inds)) inds])<block_end>cg_entros[cg_idx]<augadd>-(prob<times>logprob).sum()<block_end><block_end># mean across rollouts
<if_stmt>self.gumbel_hard<block_start>cg_logprobs=[s/num<for>s cg_logprobs]<line_sep>total_logprob=sum(cg_logprobs)<line_sep>cg_logprobs_str=",".join(["{:.2f}".format(n)<for>n cg_logprobs])<block_end>cg_entros=[s/num<for>s cg_entros]<line_sep>total_entro=sum(cg_entros)<line_sep>cg_entro_str=",".join(["{:.2f}".format(n)<for>n cg_entros])<if_stmt>log# maybe log the summary
<block_start>self.logger.info("%s%d rollouts: %s ENTROPY: %2f (%s)" log_prefix num "-LOG_PROB: %.2f (%s) ;"%(-total_logprob cg_logprobs_str)<if>self.gumbel_hard<else>"" total_entro cg_entro_str )<if_stmt>step<is><not><none><and><not>self.writer.is_none()<block_start><if_stmt>self.gumbel_hard<block_start>self.writer.add_scalar("log_prob" total_logprob step)<block_end>self.writer.add_scalar("entropy" total_entro step)<block_end><block_end>stats=[(n+" ENTRO" entro)<for>n,entro zip(_ss.cell_group_names cg_entros)]<if_stmt>self.gumbel_hard<block_start>stats<augadd>[(n+" LOGPROB" logprob)<for>n,logprob zip(_ss.cell_group_names cg_logprobs)]<block_end><return>OrderedDict(stats)<block_end>@classmethod<def_stmt>supported_rollout_types cls<block_start><return>["macro-stagewise" "macro-stagewise-diff" "macro-sink-connect-diff"]<block_end><block_end><class_stmt>GetArchMacroSinkConnect(torch.autograd.Function)<block_start>@staticmethod<def_stmt>forward ctx search_space op_weights device i_stage <block_start>stage_conn=torch.zeros((search_space.stage_node_nums[i_stage] search_space.stage_node_nums[i_stage] )).to(device)<line_sep>stage_conn[np.arange(len(op_weights))+1 np.arange(len(op_weights))]=1<line_sep>stage_conn[-1 :len(op_weights)]=op_weights<line_sep>ctx.save_for_backward(torch.as_tensor(op_weights) torch.as_tensor(search_space.idxes[i_stage]))<line_sep><return>stage_conn<block_end>@staticmethod<def_stmt>backward ctx grad_output<block_start>op_weights,idxes=ctx.saved_tensors<line_sep>op_weights_grad=grad_output[-1 :len(op_weights)]<line_sep><return><none> op_weights_grad <none> <none> <none><block_end><block_end><class_stmt>MacroSinkConnectDiffController(MacroStagewiseDiffController)<block_start>NAME="macro-sink-connect-diff"<line_sep># The TF_NAS-like macro search space(sink-based connecting)
# during each stage, before the reduction node, a `sinking point` aggregates each node's output with a softmax
# note that cg-alpha here should denote whether an edge is connected or not
<def_stmt>__init__ self *args **kwargs<block_start>super(MacroSinkConnectDiffController self).__init__(*args **kwargs)<if_stmt><not>self.full_init<block_start>self.cg_alphas=nn.ParameterList([nn.Parameter(1e-3<times>torch.randn(sum([n-1<for>n self.search_space.stage_node_nums])))])<block_end><else_stmt><block_start>self.cg_alphas=nn.ParameterList([nn.Parameter(torch.ones(sum([n-1<for>n self.search_space.stage_node_nums])))])<block_end><assert_stmt>(self.use_sigmoid<eq><false>)<line_sep># sink-connecting should introduce competition in edges
self.get_arch=GetArchMacroSinkConnect()<line_sep>self.stage_num_alphas=[n-1<for>n self.search_space.stage_node_nums]<line_sep>self.to(self.device)<block_end># move the newly generated cg_alphas to cuda
# The only difference from MacroStagewiseDiffController's sample is that the arch is packed into `sink-connect-diff-rollout`
<def_stmt>sample self n=1 batch_size=1# apply progressive pruning first if enabled
<block_start><if_stmt>self.progressive_pruning_th<is><not><none><block_start>self.progressive_pruning()<block_end>width_arch,width_logits=self.sample_width(n=n batch_size=batch_size)<line_sep>rollouts=[]<for_stmt>i_sample range(n)# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
<block_start>op_weights_list=[]<line_sep>edge_norms_list=[]<line_sep>sampled_list=[]<line_sep>logits_list=[]<for_stmt>alphas self.cg_alphas<block_start>splits=[i-1<for>i self.search_space.stage_node_nums]<line_sep>op_weights=[]<line_sep>sampleds=[]<for_stmt>alpha alphas.split(splits)<block_start><if_stmt>(self.force_uniform)# cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affect edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
<block_start>alpha=torch.zeros_like(alpha)<block_end><if_stmt>batch_size<g>1<block_start>expanded_alpha=(alpha.reshape([alpha.shape[0] 1 alpha.shape[1]]).repeat([1 batch_size 1]).reshape([-1 alpha.shape[-1]]))<block_end><else_stmt><block_start>expanded_alpha=alpha<block_end><if_stmt>self.use_prob<block_start>sampled=F.softmax(expanded_alpha/self.gumbel_temperature dim=-1)<block_end><elif_stmt>self.use_sigmoid<block_start>sampled=utils.relaxed_bernoulli_sample(expanded_alpha self.gumbel_temperature)<block_end><else_stmt># gumbel sampling
<block_start>sampled,_=utils.gumbel_softmax(expanded_alpha self.gumbel_temperature hard=<false>)<block_end><if_stmt>self.gumbel_hard<block_start>op_weight=utils.straight_through(sampled)<block_end><else_stmt><block_start>op_weight=sampled<block_end><if_stmt>batch_size<g>1<block_start>sampled=sampled.reshape([-1 batch_size op_weight.shape[-1]])<line_sep>op_weight=op_weight.reshape([-1 batch_size op_weight.shape[-1]])<block_end>op_weights.append(op_weight)<line_sep>sampleds.append(sampled)<block_end>op_weights=torch.cat(op_weights)<line_sep>sampleds=torch.cat(sampleds)<line_sep>op_weights_list.append(op_weights)<line_sep>sampled_list.append(utils.get_numpy(sampleds))<line_sep>logits_list.append(alphas)<line_sep>stage_conns=[]<line_sep>split_op_weights=torch.split(op_weights self.stage_num_alphas)<for_stmt>i_stage range(self.search_space.stage_num)<block_start>stage_conn=self.get_arch.apply(self.search_space split_op_weights[i_stage] self.device i_stage )<line_sep>stage_conns.append(stage_conn)<block_end><block_end>rollouts.append(SinkConnectMacroDiffRollout(arch=stage_conns sampled=sampled_list logits=logits_list width_arch=width_arch[i_sample] width_logits=width_logits[i_sample] search_space=self.search_space ))<block_end><return>rollouts<block_end><def_stmt>__setstate__ self state<block_start>super(MacroSinkConnectDiffController self).__setstate__(state)<line_sep>self.get_arch=GetArchMacroSinkConnect()<block_end><block_end><class_stmt>GetArchMicro(torch.autograd.Function)<block_start>@staticmethod<def_stmt>forward ctx search_space op_weights device<block_start>empty_arch=torch.zeros((search_space._num_nodes search_space._num_nodes search_space.num_op_choices )).to(device)<line_sep>empty_arch[search_space.idx]=op_weights<line_sep>ctx.save_for_backward(torch.as_tensor(op_weights) torch.as_tensor(search_space.idx))<line_sep><return>empty_arch<block_end>@staticmethod<def_stmt>backward ctx grad_output<block_start>op_weights,idxes=ctx.saved_tensors<line_sep>op_weights_grad=grad_output[idxes[0] idxes[1]]<line_sep><return><none> op_weights_grad <none><block_end><block_end><class_stmt>MicroDenseDiffController(BaseController nn.Module)<block_start>NAME="micro-dense-diff"<line_sep>SCHEDULABLE_ATTRS=["gumbel_temperature" "entropy_coeff" "force_uniform"]<def_stmt>__init__ self search_space rollout_type mode="eval" device="cuda" use_prob=<false> gumbel_hard=<false> gumbel_temperature=1.0 use_sigmoid=<true> use_edge_normalization=<false> entropy_coeff=0.01 max_grad_norm=<none> force_uniform=<false> full_init=<false> progressive_pruning_th=<none> multiprocess=<false> schedule_cfg=<none> <block_start>super(MicroDenseDiffController self).__init__(search_space rollout_type schedule_cfg=schedule_cfg)<line_sep>nn.Module.__init__(self)<line_sep>self.device=device<line_sep># sampling
self.use_prob=use_prob<line_sep>self.use_sigmoid=use_sigmoid<line_sep>self.gumbel_hard=gumbel_hard<line_sep>self.gumbel_temperature=gumbel_temperature<assert_stmt><not>(use_prob<and>use_sigmoid)<line_sep># edge normalization
self.use_edge_normalization=use_edge_normalization<line_sep># training
self.entropy_coeff=entropy_coeff<line_sep>self.max_grad_norm=max_grad_norm<line_sep>self.force_uniform=force_uniform<line_sep>self.full_init=full_init<line_sep>self.progressive_pruning_th=progressive_pruning_th<line_sep>self.multiprocess=multiprocess<line_sep>_num_init_nodes=self.search_space.num_init_nodes<line_sep>_num_edges_list=[sum(_num_init_nodes+i<for>i range(self.search_space.get_num_steps(i_cg)))<for>i_cg range(self.search_space.num_cell_groups)]<if_stmt><not>self.full_init<block_start>self.cg_alphas=nn.ParameterList([nn.Parameter(1e-3<times>torch.randn(_num_edges len(self.search_space.cell_shared_primitives[i_cg]) ))# shape: [num_edges, num_ops]
<for>i_cg,_num_edges enumerate(_num_edges_list)])<block_end><else_stmt><block_start>self.cg_alphas=nn.ParameterList([nn.Parameter(1<times>torch.ones(_num_edges len(self.search_space.cell_shared_primitives[i_cg]) ))# shape: [num_edges, num_ops]
<for>i_cg,_num_edges enumerate(_num_edges_list)])<block_end><if_stmt>self.use_edge_normalization<block_start><raise>NotImplementedError("MicroDenseController does not support edge-norm")<block_end><else_stmt><block_start>self.cg_betas=<none><block_end>self.get_arch=GetArchMicro()<line_sep>self.to(self.device)<block_end><def_stmt>set_mode self mode<block_start>super(MicroDenseDiffController self).set_mode(mode)<if_stmt>mode<eq>"train"<block_start>nn.Module.train(self)<block_end><elif_stmt>mode<eq>"eval"<block_start>nn.Module.eval(self)<block_end><else_stmt><block_start><raise>Exception("Unrecognized mode: {}".format(mode))<block_end><block_end><def_stmt>set_device self device<block_start>self.device=device<line_sep>self.to(device)<block_end><def_stmt>progressive_pruning self<block_start><for_stmt>alpha self.cg_alphas# inpalce replace alphas that smaller than the pruning threshold, no grad
<block_start>alpha.data=alpha<times>(alpha.gt(self.progressive_pruning_th).float())<block_end><block_end><def_stmt>forward self n=1 batch_size=1# pylint: disable=arguments-differ
<block_start><return>self.sample(n=n batch_size=batch_size)<block_end><def_stmt>sample self n=1 batch_size=1<block_start><if_stmt>self.progressive_pruning_th<is><not><none><block_start>self.progressive_pruning()<block_end>rollouts=[]<for_stmt>_ range(n)# op_weights.shape: [num_edges, [batch_size,] num_ops]
# edge_norms.shape: [num_edges] do not have batch_size.
<block_start>op_weights_list=[]<line_sep>edge_norms_list=[]<line_sep>sampled_list=[]<line_sep>logits_list=[]<for_stmt>alphas self.cg_alphas<block_start><if_stmt>self.force_uniform# cg_alpha parameters will not be in the graph
# NOTE: `force_uniform` config does not affect edge_norms (betas),
# if one wants a force_uniform search, keep `use_edge_normalization=False`
<block_start>alphas=torch.zeros_like(alphas)<block_end><if_stmt>batch_size<g>1<block_start>expanded_alpha=(alphas.reshape([alphas.shape[0] 1 alphas.shape[1]]).repeat([1 batch_size 1]).reshape([-1 alphas.shape[-1]]))<block_end><else_stmt><block_start>expanded_alpha=alphas<block_end><if_stmt>self.use_prob# probability as sample
<block_start>sampled=F.softmax(expanded_alpha/self.gumbel_temperature dim=-1)<block_end><elif_stmt>self.use_sigmoid<block_start>sampled=utils.relaxed_bernoulli_sample(expanded_alpha self.gumbel_temperature)<block_end><else_stmt># gumbel sampling
<block_start>sampled,_=utils.gumbel_softmax(expanded_alpha self.gumbel_temperature hard=<false>)<block_end><if_stmt>self.gumbel_hard<block_start>op_weights=utils.straight_through(sampled)<block_end><else_stmt><block_start>op_weights=sampled<block_end><if_stmt>batch_size<g>1<block_start>sampled=sampled.reshape([-1 batch_size op_weights.shape[-1]])<line_sep>op_weights=op_weights.reshape([-1 batch_size op_weights.shape[-1]])<block_end>op_weights_list.append(op_weights)<line_sep>sampled_list.append(utils.get_numpy(sampled))<line_sep># logits_list.append(utils.get_numpy(alphas))
logits_list.append((alphas))<block_end><if_stmt>self.use_edge_normalization<block_start><raise>NotImplementedError<block_end><else_stmt><block_start>arch_list=[]<line_sep>logits_arch_list=[]<for_stmt>op_weights op_weights_list<block_start>arch=self.get_arch.apply(self.search_space op_weights self.device)<line_sep>arch_list.append(arch)<block_end><for_stmt>logits logits_list<block_start>logits_arch=self.get_arch.apply(self.search_space logits self.device)<line_sep>logits_arch_list.append(logits_arch)<block_end><block_end>rollouts.append(DenseMicroDiffRollout(arch_list sampled_list logits_list logits_arch_list search_space=self.search_space ))<block_end><return>rollouts<block_end><def_stmt>save self path<block_start>"""Save the parameters to disk."""<line_sep>torch.save({"epoch":self.epoch "state_dict":self.state_dict()} path)<line_sep>self.logger.info("Saved controller network to %s" path)<block_end><def_stmt>load self path<block_start>"""Load the parameters from disk."""<line_sep>checkpoint=torch.load(path map_location=torch.device("cpu"))<line_sep>self.load_state_dict(checkpoint["state_dict"])<line_sep>self.on_epoch_start(checkpoint["epoch"])<line_sep>self.logger.info("Loaded controller network from %s" path)<block_end><def_stmt>_entropy_loss self<block_start><if_stmt>self.entropy_coeff<g>0<block_start>probs=[F.softmax(alpha dim=-1)<for>alpha self.cg_alphas]<line_sep><return>self.entropy_coeff<times>sum(-(torch.log(prob)<times>prob).sum()<for>prob probs)<block_end><return>0.0<block_end><def_stmt>gradient self loss return_grads=<true> zero_grads=<true><block_start><raise>NotImplementedError("the grad function is implemented in the layer2diffcontroller.gradient()")<block_end><def_stmt>step_current_gradient self optimizer<block_start><if_stmt>self.max_grad_norm<is><not><none><block_start>torch.nn.utils.clip_grad_norm_(self.parameters() self.max_grad_norm)<block_end>optimizer.step()<block_end><def_stmt>step_gradient self gradients optimizer<block_start>self.zero_grad()<line_sep>named_params=dict(self.named_parameters())<for_stmt>k,grad gradients<block_start>named_params[k].grad=grad<block_end># clip the gradients
<if_stmt>self.max_grad_norm<is><not><none><block_start>torch.nn.utils.clip_grad_norm_(self.parameters() self.max_grad_norm)<block_end># apply the gradients
optimizer.step()<block_end><def_stmt>step self rollouts optimizer perf_name# very memory inefficient
<block_start>self.zero_grad()<line_sep>losses=[r.get_perf(perf_name)<for>r rollouts]<line_sep>optimizer.step()<line_sep>[l.backward()<for>l losses]<line_sep><return>np.mean([l.detach().cpu().numpy()<for>l losses])<block_end><def_stmt>__getstate__ self<block_start>state=super(MicroDenseDiffController self).__getstate__().copy()<del_stmt>state["get_arch"]<line_sep><return>state<block_end><def_stmt>__setstate__ self state<block_start>super(MicroDenseDiffController self).__setstate__(state)<line_sep>self.get_arch=GetArchMicro()<block_end><def_stmt>summary self rollouts log=<false> log_prefix="" step=<none><block_start>num=len(rollouts)<line_sep>logits_list=[[utils.get_numpy(logits)<for>logits r.logits]<for>r rollouts]<line_sep>_ss=self.search_space<if_stmt>self.gumbel_hard<block_start>cg_logprobs=[0.0<for>_ range(_ss.num_cell_groups)]<block_end>cg_entros=[0.0<for>_ range(_ss.num_cell_groups)]<for_stmt>rollout,logits zip(rollouts logits_list)<block_start><for_stmt>cg_idx,(vec cg_logits) enumerate(zip(rollout.arch logits))<block_start>prob=utils.softmax(cg_logits)<line_sep>logprob=np.log(prob)<if_stmt>self.gumbel_hard<block_start>inds=np.argmax(utils.get_numpy(vec) axis=-1)<line_sep>cg_logprobs[cg_idx]<augadd>np.sum(logprob[range(len(inds)) inds])<block_end>cg_entros[cg_idx]<augadd>-(prob<times>logprob).sum()<block_end><block_end># mean across rollouts
<if_stmt>self.gumbel_hard<block_start>cg_logprobs=[s/num<for>s cg_logprobs]<line_sep>total_logprob=sum(cg_logprobs)<line_sep>cg_logprobs_str=",".join(["{:.2f}".format(n)<for>n cg_logprobs])<block_end>cg_entros=[s/num<for>s cg_entros]<line_sep>total_entro=sum(cg_entros)<line_sep>cg_entro_str=",".join(["{:.2f}".format(n)<for>n cg_entros])<if_stmt>log# maybe log the summary
<block_start>self.logger.info("%s%d rollouts: %s ENTROPY: %2f (%s)" log_prefix num "-LOG_PROB: %.2f (%s) ;"%(-total_logprob cg_logprobs_str)<if>self.gumbel_hard<else>"" total_entro cg_entro_str )<if_stmt>step<is><not><none><and><not>self.writer.is_none()<block_start><if_stmt>self.gumbel_hard<block_start>self.writer.add_scalar("log_prob" total_logprob step)<block_end>self.writer.add_scalar("entropy" total_entro step)<block_end><block_end>stats=[(n+" ENTRO" entro)<for>n,entro zip(_ss.cell_group_names cg_entros)]<if_stmt>self.gumbel_hard<block_start>stats<augadd>[(n+" LOGPROB" logprob)<for>n,logprob zip(_ss.cell_group_names cg_logprobs)]<block_end><return>OrderedDict(stats)<block_end>@classmethod<def_stmt>supported_rollout_types cls<block_start><return>["micro-dense" "micro-dense-diff"]<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>tgming.auth MingAuthenticatorPlugin<def_stmt>create_default_authenticator user_class translations=<none> **unused<block_start>mingauth=MingAuthenticatorPlugin(user_class)<line_sep><return>unused mingauth<block_end> |
# Copyright (c) 2015, <NAME>
# All rights reserved.
<import_stmt>datetime<import_stmt>sys<import_stmt>time<line_sep>perf_counter_available=(sys.version_info.minor<ge>3)<class_stmt>Job<block_start><def_stmt>__init__ self job_name<block_start><if_stmt>perf_counter_available<block_start>self.start=time.perf_counter()<block_end><else_stmt><block_start>self.start=0<block_end>self.job_name=job_name<line_sep>self.stopped=<false><block_end><def_stmt>stop self<block_start><if_stmt>self.stopped<block_start>sys.exit('Already stopped')<block_end>self.stopped=<true><if_stmt>perf_counter_available<block_start>self.total=time.perf_counter()-self.start<block_end><else_stmt><block_start>self.total=0<block_end><block_end><def_stmt>result self<block_start><if_stmt><not>self.stopped<block_start>sys.exit("Stop the job before result")<block_end>print('{}: {}s'.format(self.job_name datetime.timedelta(seconds=self.total)))<block_end><block_end><class_stmt>Timer<block_start><def_stmt>__init__ self<block_start>self.jobs=[]<line_sep>self.total=Job('Total')<block_end><def_stmt>start self job_name<block_start><if_stmt>job_name<eq>'Total'<block_start>sys.exit('Name reserved')<block_end><for_stmt>i self.jobs<block_start><if_stmt>i.job_name<eq>job_name<block_start>sys.exit('Job already exists: {}'.format(job_name))<block_end><block_end>self.jobs.append(Job(job_name))<block_end><def_stmt>stop self<block_start><if_stmt>len(self.jobs)<eq>0<block_start>sys.exit("No jobs to stop")<block_end>self.jobs[-1].stop()<block_end><def_stmt>result self<block_start><if_stmt><not>perf_counter_available<block_start>print('timer.perf_counter is not available (update to python 3.3+)')<line_sep><return><block_end><for_stmt>i self.jobs<block_start>i.result()<block_end>print('-')<line_sep>self.total.stop()<line_sep>self.total.result()<block_end><block_end> |
<import_stmt>yaml<line_sep># TODO: you know, work on it...
|
<import_stmt>numpy<as>_np<import_stmt>scipy.sparse<as>_sp<import_from_stmt>._basis_utils _shuffle_sites<line_sep>####################################################
# set of helper functions to implement the partial #
# trace of lattice density matrices. They do not #
# have any checks and states are assumed to be #
# in the non-symmetry reduced basis. #
####################################################
<def_stmt>_lattice_partial_trace_pure psi sub_sys_A L sps return_rdm="A"<block_start>"""
This function computes the partial trace of a dense pure state psi over the set of sites sub_sys_A and returns
reduced DM. Vectorisation available.
"""<line_sep>psi_v=_lattice_reshape_pure(psi sub_sys_A L sps)<if_stmt>return_rdm<eq>"A"<block_start><return>_np.squeeze(_np.einsum("...ij,...kj->...ik" psi_v psi_v.conj())) <none><block_end><elif_stmt>return_rdm<eq>"B"<block_start><return><none> _np.squeeze(_np.einsum("...ji,...jk->...ik" psi_v.conj() psi_v))<block_end><elif_stmt>return_rdm<eq>"both"<block_start><return>_np.squeeze(_np.einsum("...ij,...kj->...ik" psi_v psi_v.conj())) _np.squeeze(_np.einsum("...ji,...jk->...ik" psi_v.conj() psi_v))<block_end><block_end><def_stmt>_lattice_partial_trace_mixed rho sub_sys_A L sps return_rdm="A"<block_start>"""
This function computes the partial trace of a set of dense mixed states rho over the set of sites sub_sys_A
and returns reduced DM. Vectorisation available.
"""<line_sep>rho_v=_lattice_reshape_mixed(rho sub_sys_A L sps)<if_stmt>return_rdm<eq>"A"<block_start><return>_np.einsum("...jlkl->...jk" rho_v) <none><block_end><elif_stmt>return_rdm<eq>"B"<block_start><return><none> _np.einsum("...ljlk->...jk" rho_v.conj())<block_end><elif_stmt>return_rdm<eq>"both"<block_start><return>_np.einsum("...jlkl->...jk" rho_v) _np.einsum("...ljlk->...jk" rho_v.conj())<block_end><block_end><def_stmt>_lattice_partial_trace_sparse_pure psi sub_sys_A L sps return_rdm="A"<block_start>"""
This function computes the partial trace of a sparse pure state psi over the set of sites sub_sys_A and returns
reduced DM.
"""<line_sep>psi=_lattice_reshape_sparse_pure(psi sub_sys_A L sps)<if_stmt>return_rdm<eq>"A"<block_start><return>psi.dot(psi.H) <none><block_end><elif_stmt>return_rdm<eq>"B"<block_start><return><none> psi.H.dot(psi)<block_end><elif_stmt>return_rdm<eq>"both"<block_start><return>psi.dot(psi.H) psi.H.dot(psi)<block_end><block_end><def_stmt>_lattice_reshape_pure psi sub_sys_A L sps<block_start>"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""<line_sep>extra_dims=psi.shape[:-1]<line_sep>n_dims=len(extra_dims)<line_sep>sub_sys_B=set(range(L))-set(sub_sys_A)<line_sep>sub_sys_A=tuple(sub_sys_A)<line_sep>sub_sys_B=tuple(sub_sys_B)<line_sep>L_A=len(sub_sys_A)<line_sep>L_B=len(sub_sys_B)<line_sep>Ns_A=(sps<power>L_A)<line_sep>Ns_B=(sps<power>L_B)<line_sep>T_tup=sub_sys_A+sub_sys_B<line_sep>psi_v=_shuffle_sites(sps T_tup psi)<line_sep>psi_v=psi_v.reshape(extra_dims+(Ns_A Ns_B))<line_sep><return>psi_v<block_end>'''
def _lattice_reshape_pure(psi,sub_sys_A,L,sps):
"""
This function reshapes the dense pure state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = psi.shape[:-1]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(n_dims + s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(L))
psi_v = psi.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
psi_v = psi_v.transpose(T_tup) # take transpose to reshuffle indices
psi_v = psi_v.reshape(extra_dims+(Ns_A,Ns_B))
return psi_v
'''<def_stmt>_lattice_reshape_mixed rho sub_sys_A L sps<block_start>"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""<line_sep>extra_dims=rho.shape[:-2]<line_sep>n_dims=len(extra_dims)<line_sep>sub_sys_B=set(range(L))-set(sub_sys_A)<line_sep>sub_sys_A=tuple(sub_sys_A)<line_sep>sub_sys_B=tuple(sub_sys_B)<line_sep>L_A=len(sub_sys_A)<line_sep>L_B=len(sub_sys_B)<line_sep>Ns_A=(sps<power>L_A)<line_sep>Ns_B=(sps<power>L_B)<line_sep># T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-tensor rho_{ik,jl}, where i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup=sub_sys_A+sub_sys_B<line_sep>T_tup=tuple(T_tup)+tuple(L+s<for>s T_tup)<line_sep>rho=rho.reshape(extra_dims+(-1 ))<line_sep>rho_v=_shuffle_sites(sps T_tup rho)<line_sep><return>rho_v.reshape(extra_dims+(Ns_A Ns_B Ns_A Ns_B))<block_end>'''
def _lattice_reshape_mixed(rho,sub_sys_A,L,sps):
"""
This function reshapes the dense mixed state psi over the Hilbert space defined by sub_sys_A and its complement.
Vectorisation available.
"""
extra_dims = rho.shape[:-2]
n_dims = len(extra_dims)
sub_sys_B = set(range(L))-set(sub_sys_A)
sub_sys_A = tuple(sub_sys_A)
sub_sys_B = tuple(sub_sys_B)
L_A = len(sub_sys_A)
L_B = len(sub_sys_B)
Ns_A = (sps**L_A)
Ns_B = (sps**L_B)
# T_tup tells numpy how to reshuffle the indices such that when I reshape the array to the
# 4-_tensor rho_{ik,jl} i,j are for sub_sys_A and k,l are for sub_sys_B
# which means I need (sub_sys_A,sub_sys_B,sub_sys_A+L,sub_sys_B+L)
T_tup = sub_sys_A+sub_sys_B
T_tup = tuple(range(n_dims)) + tuple(s+n_dims for s in T_tup) + tuple(L+n_dims+s for s in T_tup)
R_tup = extra_dims + tuple(sps for i in range(2*L))
rho_v = rho.reshape(R_tup) # DM where index is given per site as rho_v[i_1,...,i_L,j_1,...j_L]
rho_v = rho_v.transpose(T_tup) # take transpose to reshuffle indices
return rho_v.reshape(extra_dims+(Ns_A,Ns_B,Ns_A,Ns_B))
'''<def_stmt>_lattice_reshape_sparse_pure psi sub_sys_A L sps<block_start>"""
This function reshapes the sparse pure state psi over the Hilbert space defined by sub_sys_A and its complement.
"""<line_sep>sub_sys_B=set(range(L))-set(sub_sys_A)<line_sep>sub_sys_A=tuple(sub_sys_A)<line_sep>sub_sys_B=tuple(sub_sys_B)<line_sep>L_A=len(sub_sys_A)<line_sep>L_B=len(sub_sys_B)<line_sep>Ns_A=(sps<power>L_A)<line_sep>Ns_B=(sps<power>L_B)<line_sep>psi=psi.tocoo()<line_sep>T_tup=sub_sys_A+sub_sys_B<line_sep># reshuffle indices for the sub-systems.
# j = sum( j[i]*(sps**i) for i in range(L))
# this reshuffles the j[i] similar to the transpose operation
# on the dense arrays psi_v.transpose(T_tup)
<if_stmt>T_tup<ne>tuple(range(L))<block_start>indx=_np.zeros(psi.col.shape dtype=psi.col.dtype)<for_stmt>i_old,i_new enumerate(T_tup)<block_start>indx<augadd>((psi.col<floordiv>(sps<power>(L-i_new-1)))%sps)<times>(sps<power>(L-i_old-1))<block_end><block_end><else_stmt><block_start>indx=psi.col<block_end># A = _np.array([0,1,2,3,4,5,6,7,8,9,10,11])
# print("make shift way of reshaping array")
# print("A = {}".format(A))
# print("A.reshape((3,4)): \n {}".format(A.reshape((3,4))))
# print("rows: A.reshape((3,4))/4: \n {}".format(A.reshape((3,4))/4))
# print("cols: A.reshape((3,4))%4: \n {}".format(A.reshape((3,4))%4))
psi._shape=(Ns_A Ns_B)<line_sep>psi.row[:]=indx/Ns_B<line_sep>psi.col[:]=indx%Ns_B<line_sep><return>psi.tocsr()<block_end><def_stmt>_tensor_reshape_pure psi sub_sys_A Ns_l Ns_r<block_start>extra_dims=psi.shape[:-1]<if_stmt>sub_sys_A<eq>"left"<block_start><return>psi.reshape(extra_dims+(Ns_l Ns_r))<block_end><else_stmt><block_start>n_dims=len(extra_dims)<line_sep>T_tup=tuple(range(n_dims))+(n_dims+1 n_dims)<line_sep>psi_v=psi.reshape(extra_dims+(Ns_l Ns_r))<line_sep><return>psi_v.transpose(T_tup)<block_end><block_end><def_stmt>_tensor_reshape_sparse_pure psi sub_sys_A Ns_l Ns_r<block_start>psi=psi.tocoo()<line_sep># make shift way of reshaping array
# j = j_r + Ns_r * j_l
# j_l = j / Ns_r
# j_r = j % Ns_r
<if_stmt>sub_sys_A<eq>"left"<block_start>psi._shape=(Ns_l Ns_r)<line_sep>psi.row[:]=psi.col/Ns_r<line_sep>psi.col[:]=psi.col%Ns_r<line_sep><return>psi.tocsr()<block_end><else_stmt><block_start>psi._shape=(Ns_l Ns_r)<line_sep>psi.row[:]=psi.col/Ns_r<line_sep>psi.col[:]=psi.col%Ns_r<line_sep><return>psi.T.tocsr()<block_end><block_end><def_stmt>_tensor_reshape_mixed rho sub_sys_A Ns_l Ns_r<block_start>extra_dims=rho.shape[:-2]<if_stmt>sub_sys_A<eq>"left"<block_start><return>rho.reshape(extra_dims+(Ns_l Ns_r Ns_l Ns_r))<block_end><else_stmt><block_start>n_dims=len(extra_dims)<line_sep>T_tup=tuple(range(n_dims))+(n_dims+1 n_dims)+(n_dims+3 n_dims+2)<line_sep>rho_v=rho.reshape(extra_dims+(Ns_l Ns_r Ns_l Ns_r))<line_sep><return>rho_v.transpose(T_tup)<block_end><block_end><def_stmt>_tensor_partial_trace_pure psi sub_sys_A Ns_l Ns_r return_rdm="A"<block_start>psi_v=_tensor_reshape_pure(psi sub_sys_A Ns_l Ns_r)<if_stmt>return_rdm<eq>"A"<block_start><return>_np.squeeze(_np.einsum("...ij,...kj->...ik" psi_v psi_v.conj())) <none><block_end><elif_stmt>return_rdm<eq>"B"<block_start><return><none> _np.squeeze(_np.einsum("...ji,...jk->...ik" psi_v.conj() psi_v))<block_end><elif_stmt>return_rdm<eq>"both"<block_start><return>_np.squeeze(_np.einsum("...ij,...kj->...ik" psi_v psi_v.conj())) _np.squeeze(_np.einsum("...ji,...jk->...ik" psi_v.conj() psi_v))<block_end><block_end><def_stmt>_tensor_partial_trace_sparse_pure psi sub_sys_A Ns_l Ns_r return_rdm="A"<block_start>psi=_tensor_reshape_sparse_pure(psi sub_sys_A Ns_l Ns_r)<if_stmt>return_rdm<eq>"A"<block_start><return>psi.dot(psi.H) <none><block_end><elif_stmt>return_rdm<eq>"B"<block_start><return><none> psi.H.dot(psi)<block_end><elif_stmt>return_rdm<eq>"both"<block_start><return>psi.dot(psi.H) psi.H.dot(psi)<block_end><block_end><def_stmt>_tensor_partial_trace_mixed rho sub_sys_A Ns_l Ns_r return_rdm="A"<block_start>rho_v=_tensor_reshape_mixed(rho sub_sys_A Ns_l Ns_r)<if_stmt>return_rdm<eq>"A"<block_start><return>_np.squeeze(_np.einsum("...ijkj->...ik" rho_v)) <none><block_end><elif_stmt>return_rdm<eq>"B"<block_start><return><none> _np.squeeze(_np.einsum("...jijk->...ik" rho_v.conj()))<block_end><elif_stmt>return_rdm<eq>"both"<block_start><return>_np.squeeze(_np.einsum("...ijkj->...ik" rho_v)) _np.squeeze(_np.einsum("...jijk->...ik" rho_v.conj()))<block_end><block_end> |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines, unused-argument
<import_stmt>json<def_stmt>peering_legacy_list cmd client peering_location kind asn=<none><block_start><return>client.list(peering_location=peering_location kind=kind asn=asn)<block_end><def_stmt>peering_asn_list cmd client<block_start><return>client.list_by_subscription()<block_end><def_stmt>peering_asn_show cmd client peer_asn_name<block_start><return>client.get(peer_asn_name=peer_asn_name)<block_end><def_stmt>peering_asn_create cmd client peer_asn_name peer_asn=<none> peer_contact_detail=<none> peer_name=<none> validation_state=<none><block_start><return>client.create_or_update(peer_asn_name=peer_asn_name peer_asn=peer_asn peer_contact_detail=peer_contact_detail peer_name=peer_name validation_state=validation_state)<block_end><def_stmt>peering_asn_delete cmd client peer_asn_name<block_start><return>client.delete(peer_asn_name=peer_asn_name)<block_end><def_stmt>peering_location_list cmd client kind direct_peering_type=<none><block_start><return>client.list(kind=kind direct_peering_type=direct_peering_type)<block_end><def_stmt>peering_registered_asn_list cmd client resource_group_name peering_name<block_start><return>client.list_by_peering(resource_group_name=resource_group_name peering_name=peering_name)<block_end><def_stmt>peering_registered_asn_show cmd client resource_group_name peering_name registered_asn_name<block_start><return>client.get(resource_group_name=resource_group_name peering_name=peering_name registered_asn_name=registered_asn_name)<block_end><def_stmt>peering_registered_asn_create cmd client resource_group_name peering_name registered_asn_name asn=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_name=peering_name registered_asn_name=registered_asn_name asn=asn)<block_end><def_stmt>peering_registered_asn_update cmd client resource_group_name peering_name registered_asn_name asn=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_name=peering_name registered_asn_name=registered_asn_name asn=asn)<block_end><def_stmt>peering_registered_asn_delete cmd client resource_group_name peering_name registered_asn_name<block_start><return>client.delete(resource_group_name=resource_group_name peering_name=peering_name registered_asn_name=registered_asn_name)<block_end><def_stmt>peering_registered_prefix_list cmd client resource_group_name peering_name<block_start><return>client.list_by_peering(resource_group_name=resource_group_name peering_name=peering_name)<block_end><def_stmt>peering_registered_prefix_show cmd client resource_group_name peering_name registered_prefix_name<block_start><return>client.get(resource_group_name=resource_group_name peering_name=peering_name registered_prefix_name=registered_prefix_name)<block_end><def_stmt>peering_registered_prefix_create cmd client resource_group_name peering_name registered_prefix_name prefix=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_name=peering_name registered_prefix_name=registered_prefix_name prefix=prefix)<block_end><def_stmt>peering_registered_prefix_update cmd client resource_group_name peering_name registered_prefix_name prefix=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_name=peering_name registered_prefix_name=registered_prefix_name prefix=prefix)<block_end><def_stmt>peering_registered_prefix_delete cmd client resource_group_name peering_name 
registered_prefix_name<block_start><return>client.delete(resource_group_name=resource_group_name peering_name=peering_name registered_prefix_name=registered_prefix_name)<block_end><def_stmt>peering_peering_list cmd client resource_group_name=<none><block_start><if_stmt>resource_group_name<is><not><none><block_start><return>client.list_by_resource_group(resource_group_name=resource_group_name)<block_end><return>client.list_by_subscription()<block_end><def_stmt>peering_peering_show cmd client resource_group_name peering_name<block_start><return>client.get(resource_group_name=resource_group_name peering_name=peering_name)<block_end><def_stmt>peering_peering_create cmd client resource_group_name peering_name sku kind location tags=<none> direct=<none> exchange=<none> peering_location=<none><block_start><if_stmt>isinstance(direct str)<block_start>direct=json.loads(direct)<block_end><if_stmt>isinstance(exchange str)<block_start>exchange=json.loads(exchange)<block_end><return>client.create_or_update(resource_group_name=resource_group_name peering_name=peering_name sku=sku kind=kind location=location tags=tags direct=direct exchange=exchange peering_location=peering_location)<block_end><def_stmt>peering_peering_update cmd client resource_group_name peering_name tags=<none><block_start><return>client.update(resource_group_name=resource_group_name peering_name=peering_name tags=tags)<block_end><def_stmt>peering_peering_delete cmd client resource_group_name peering_name<block_start><return>client.delete(resource_group_name=resource_group_name peering_name=peering_name)<block_end><def_stmt>peering_received_route_list cmd client resource_group_name peering_name prefix=<none> as_path=<none> origin_as_validation_state=<none> rpki_validation_state=<none> skip_token=<none><block_start><return>client.list_by_peering(resource_group_name=resource_group_name peering_name=peering_name prefix=prefix as_path=as_path origin_as_validation_state=origin_as_validation_state rpki_validation_state=rpki_validation_state skip_token=skip_token)<block_end><def_stmt>peering_service_country_list cmd client<block_start><return>client.list()<block_end><def_stmt>peering_service_location_list cmd client country=<none><block_start><return>client.list(country=country)<block_end><def_stmt>peering_service_prefix_list cmd client resource_group_name peering_service_name expand=<none><block_start><return>client.list_by_peering_service(resource_group_name=resource_group_name peering_service_name=peering_service_name expand=expand)<block_end><def_stmt>peering_service_prefix_show cmd client resource_group_name peering_service_name prefix_name expand=<none><block_start><return>client.get(resource_group_name=resource_group_name peering_service_name=peering_service_name prefix_name=prefix_name expand=expand)<block_end><def_stmt>peering_service_prefix_create cmd client resource_group_name peering_service_name prefix_name prefix=<none> peering_service_prefix_key=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_service_name=peering_service_name prefix_name=prefix_name prefix=prefix peering_service_prefix_key=peering_service_prefix_key)<block_end><def_stmt>peering_service_prefix_update cmd client resource_group_name peering_service_name prefix_name prefix=<none> peering_service_prefix_key=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_service_name=peering_service_name prefix_name=prefix_name prefix=prefix 
peering_service_prefix_key=peering_service_prefix_key)<block_end><def_stmt>peering_service_prefix_delete cmd client resource_group_name peering_service_name prefix_name<block_start><return>client.delete(resource_group_name=resource_group_name peering_service_name=peering_service_name prefix_name=prefix_name)<block_end><def_stmt>peering_service_provider_list cmd client<block_start><return>client.list()<block_end><def_stmt>peering_service_list cmd client resource_group_name=<none><block_start><if_stmt>resource_group_name<is><not><none><block_start><return>client.list_by_resource_group(resource_group_name=resource_group_name)<block_end><return>client.list_by_subscription()<block_end><def_stmt>peering_service_show cmd client resource_group_name peering_service_name<block_start><return>client.get(resource_group_name=resource_group_name peering_service_name=peering_service_name)<block_end><def_stmt>peering_service_create cmd client resource_group_name peering_service_name location sku=<none> tags=<none> peering_service_location=<none> peering_service_provider=<none><block_start><return>client.create_or_update(resource_group_name=resource_group_name peering_service_name=peering_service_name sku=sku location=location tags=tags peering_service_location=peering_service_location peering_service_provider=peering_service_provider)<block_end><def_stmt>peering_service_update cmd client resource_group_name peering_service_name tags=<none><block_start><return>client.update(resource_group_name=resource_group_name peering_service_name=peering_service_name tags=tags)<block_end><def_stmt>peering_service_delete cmd client resource_group_name peering_service_name<block_start><return>client.delete(resource_group_name=resource_group_name peering_service_name=peering_service_name)<block_end> |
<import_from_stmt>pydantic ConstrainedFloat<import_from_stmt>pydantic_factories.value_generators.constrained_number generate_constrained_number get_constrained_number_range <import_from_stmt>pydantic_factories.value_generators.primitives create_random_float<def_stmt>handle_constrained_float field:ConstrainedFloat<arrow>float<block_start>"""
Handles 'ConstrainedFloat' instances
"""<line_sep>multiple_of=field.multiple_of<if_stmt>multiple_of<eq>0<block_start><return>0<block_end>minimum,maximum=get_constrained_number_range(gt=field.gt ge=field.ge lt=field.lt le=field.le t_type=float multiple_of=multiple_of)<line_sep><return>generate_constrained_number(minimum=minimum maximum=maximum multiple_of=multiple_of method=create_random_float )<block_end> |
OPERATION_VALIDATION_PLUGIN_PATH_VALUE="operation_verification"<line_sep>AUCTION_REQ_VALIDATION_PLUGIN_PATH_VALUE="auction_req_validation"<line_sep>AUCTION_REQ_PROCESSOR_PLUGIN_PATH_VALUE="auction_req_processor"<line_sep>BANK_REQ_VALIDATION_PLUGIN_PATH_VALUE="bank_req_validation"<line_sep>BANK_REQ_PROCESSOR_PLUGIN_PATH_VALUE="bank_req_processor"<line_sep>STATS_CONSUMER_PLUGIN_PATH_VALUE="stats_consumer"<line_sep> |
<class_stmt>Solution<block_start><def_stmt>reformat self s:str<arrow>str<block_start>str_list=[]<line_sep>digit_list=[]<for_stmt>i s<block_start><if_stmt>i.isdigit()<block_start>digit_list<augadd>i <block_end><elif_stmt>i.isalpha()<block_start>str_list<augadd>i <block_end><block_end><if_stmt>abs(len(str_list)-len(digit_list))<g>1<block_start><return>""<block_end>res=""<if_stmt>len(str_list)<l>len(digit_list)<block_start>str_list,digit_list=digit_list str_list<block_end>#str_list always bigger than digit_list
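# interleave one character from each list; when the lengths differ by one, the longer list supplies the final trailing character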
<for_stmt>i,j zip(str_list digit_list)<block_start>res<augadd>i+j<block_end><return>res+(""<if>len(str_list)<eq>len(digit_list)<else>str_list[-1])<block_end><block_end><class_stmt>Solution(object)<block_start><def_stmt>reformat self s<block_start>"""
:type s: str
:rtype: str
"""<def_stmt>char_gen start end count<block_start><for_stmt>c range(ord(start) ord(end)+1)<block_start>c=chr(c)<for_stmt>i range(count[c])<block_start><yield>c<block_end><block_end><yield>''<block_end>count=collections.defaultdict(int)<line_sep>alpha_cnt=0<for_stmt>c s<block_start>count[c]<augadd>1<if_stmt>c.isalpha()<block_start>alpha_cnt<augadd>1<block_end><block_end><if_stmt>abs(len(s)-2<times>alpha_cnt)<g>1<block_start><return>""<block_end>result=[]<line_sep>it1,it2=char_gen('a' 'z' count) char_gen('0' '9' count)<if_stmt>alpha_cnt<l>len(s)-alpha_cnt<block_start>it1,it2=it2 it1<block_end><while_stmt>len(result)<l>len(s)<block_start>result.append(next(it1))<line_sep>result.append(next(it2))<block_end><return>"".join(result)<block_end><block_end><class_stmt>Solution<block_start><def_stmt>reformat self s:str<arrow>str<block_start>a,b=[] []<block_end><for_stmt>c s<block_start><if_stmt>'a'<le>c<le>'z'<block_start>a.append(c)<block_end><else_stmt><block_start>b.append(c)<block_end><block_end><if_stmt>len(a)<l>len(b)<block_start>a,b=b a<block_end><if_stmt>len(a)-len(b)<ge>2<block_start><return>''<block_end>ans=''<for_stmt>i range(len(a)+len(b))<block_start><if_stmt>i%2<eq>0<block_start>ans<augadd>a[i<floordiv>2]<block_end><else_stmt><block_start>ans<augadd>b[i<floordiv>2]<block_end><block_end><return>ans<block_end> |
<import_stmt>os<import_from_stmt>pkg_resources resource_isdir resource_listdir resource_string<import_stmt>yaml<import_from_stmt>nose.tools nottest<import_from_stmt>dusty.compiler.spec_assembler get_specs_from_path<line_sep>@nottest<def_stmt>get_all_test_configs <block_start><return>resource_listdir(__name__ 'test_configs')<block_end>@nottest<def_stmt>resources_for_test_config test_config<block_start>resources={}<for_stmt>key [constants.CONFIG_BUNDLES_KEY 'apps' 'libs' 'services']<block_start>key_path='test_configs/{}/{}'.format(test_config key)<if_stmt>resource_isdir(__name__ key_path)<block_start>resources[key]={resource_name:resource_string(__name__ '{}/{}'.format(key_path resource_name))<for>resource_name resource_listdir(__name__ key_path)}<block_end><block_end><return>resources<block_end>@nottest<def_stmt>specs_for_test_config test_config<block_start>case_path='{}/test_configs/{}/'.format(__path__[0] test_config)<line_sep><return>get_specs_from_path(case_path)<block_end>@nottest<def_stmt>assembled_specs_for_test_config test_config<block_start>assembled_file="{}/test_configs/{}/assembled_spec.yml".format(__path__[0] test_config)<with_stmt>open(assembled_file 'r')<as>f<block_start><return>yaml.load(f.read())<block_end><block_end>@nottest<def_stmt>nginx_config_for_test_config test_config<block_start><return>resource_string(__name__ 'test_configs/{}/nginx.conf'.format(test_config))<block_end>@nottest<def_stmt>docker_compose_yaml_for_test_config test_config<block_start><return>resource_string(__name__ 'test_configs/{}/docker-compose.yml'.format(test_config))<block_end> |
<import_from_stmt>.tidy_verbs *# noqa
<import_from_stmt>.. _get_all_imports<line_sep>__all__=_get_all_imports(globals())<line_sep> |
"""
Created by leeorz.
Description:
Collector:
- WeChat official-account collection based on src/collector/wechat_sougou/items/wechat_item.py
- RSS parsing from feeddd, based on feedparser
Changelog: all notable changes to this file will be documented
"""<import_stmt>asyncio<import_stmt>feedparser<import_from_stmt>ruia Response Spider<import_from_stmt>ruia_ua middleware<as>ua_middleware<import_from_stmt>src.collector.utils load_data_to_articlles<import_from_stmt>src.collector.wechat.items WechatItem<import_from_stmt>src.processor html_to_text_h2t<import_from_stmt>src.utils.log LOGGER<import_from_stmt>src.utils.tools md5_encryption<class_stmt>WeiXinSpider(Spider)<block_start>"""微信公众号文章抓取爬虫
Args:
collect_config (dict, optional): collector configuration
"""<line_sep>name="WeiXinSpider"<line_sep>request_config={"RETRIES":3 "DELAY":3 "TIMEOUT":5}<line_sep>concurrency=10<line_sep>wechat_name=""<line_sep># aiohttp config
aiohttp_kwargs={}<async_keyword><def_stmt>parse self response:Response<block_start>"""Parse the official-account article metadata"""<line_sep>html=<await>response.text()<line_sep>wechat_item:WechatItem=<await>WechatItem.get_item(html=html)<line_sep>wechat_data={**wechat_item.results **{"doc_id":md5_encryption(f"{wechat_item.doc_name}_{self.wechat_name}") "doc_keywords":"" "doc_source_name":self.wechat_name "doc_link":response.url "doc_source":"liuli_wechat" "doc_source_account_nick":wechat_item.doc_source_account_nick "doc_source_account_intro":wechat_item.doc_source_account_intro "doc_content":html_to_text_h2t(html) "doc_html":"" } }<line_sep><await>asyncio.coroutine(load_data_to_articlles)(input_data=wechat_data)<block_end><block_end><def_stmt>run collect_config:dict<block_start>"""Parse the RSS feeds, crawl each entry with WeiXinSpider, and persist the results
Args:
collect_config (dict, optional): collector configuration
"""<line_sep>feeds_dict:dict=collect_config.get("feeds_dict")<line_sep>feeds_name:list=list(feeds_dict)<line_sep>delta_time=collect_config.get("delta_time" 3)<line_sep>WeiXinSpider.request_config={"RETRIES":3 "DELAY":delta_time "TIMEOUT":5 }<for_stmt>name feeds_name<block_start>WeiXinSpider.wechat_name=name<line_sep>LOGGER.info(f"rss源 {name}: {feeds_dict[name]}")<line_sep>fd=feedparser.parse(feeds_dict[name])<line_sep>urls=[]<for_stmt>entry fd.entries<block_start>LOGGER.info(entry.link)<line_sep>urls.append(entry.link)<block_end>WeiXinSpider.start_urls=urls<line_sep>WeiXinSpider.start(middleware=ua_middleware)<block_end><block_end> |
'''
Subclass of Detectron2's Checkpointer, able to load
model states from an in-memory Python dict according
to states as saved in AIDE.
2020-21 <NAME>
'''<import_from_stmt>detectron2.checkpoint DetectionCheckpointer<import_from_stmt>typing List Optional<class_stmt>DetectionCheckpointerInMem(DetectionCheckpointer)<block_start><def_stmt>loadFromObject self stateDict:dict checkpointables:Optional[List[str]]=<none><arrow>object<block_start>'''
Customized routine that loads a model state dict
from an object, rather than a file path.
Most of the remaining code is just copied from
https://detectron2.readthedocs.io/_modules/fvcore/common/checkpoint.html#Checkpointer.load
'''<if_stmt>stateDict<is><none><or>'model'<not><in>stateDict# nothing to load; return
<block_start><return>{}<block_end>incompatible=self._load_model(stateDict)<if_stmt>(incompatible<is><not><none>)# handle some existing subclasses that returns None
<block_start>self._log_incompatible_keys(incompatible)<block_end><for_stmt>key self.checkpointables<if>checkpointables<is><none><else>checkpointables<block_start><if_stmt>key<in>stateDict# pyre-ignore
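# any additional checkpointables registered on the checkpointer (e.g. optimizer or scheduler state) are restored from the same in-memory dict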
<block_start>self.logger.info("Loading {} from state dict".format(key))<line_sep>obj=self.checkpointables[key]<line_sep>obj.load_state_dict(stateDict.pop(key))<block_end><block_end># pyre-ignore
# return any further checkpoint data
<return>stateDict<block_end><block_end> |
<import_stmt>base64<import_from_stmt>cryptography.fernet Fernet InvalidToken<def_stmt>encrypt_string string key<block_start>"""
Encrypts a string with the specified key.
The key must be 32 raw bytes.
"""<line_sep>f=Fernet(key)<line_sep># Fernet() works only on byte objects. Convert the string to bytes.
unencrypted_bytes=string.encode()<line_sep>encrypted_bytes=f.encrypt(unencrypted_bytes)<line_sep># Fernet() returns a byte object. Convert it to a string before returning.
encrypted_string=encrypted_bytes.decode()<line_sep><return>encrypted_string<block_end><def_stmt>decrypt_string string key ttl=<none><block_start>"""
Decrypts an encrypted string with the specified key.
The key must be 32 raw bytes.
"""<line_sep>f=Fernet(key)<line_sep># Fernet() works only on byte objects. Convert the string to bytes before decrypting.
encrypted_bytes=string.encode()# str -> bytes
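# decrypt the token; when a ttl (in seconds) is supplied, Fernet rejects tokens older than the ttl by raising InvalidToken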
<try_stmt><block_start>decrypted_bytes=f.decrypt(encrypted_bytes ttl=ttl)<block_end><except_stmt>InvalidToken<block_start>"""
From the the Cryptography's library documentation:
If the token is in any way invalid, this exception is raised.
A token may be invalid for a number of reasons: it is older than the
ttl, it is malformed, or it does not have a valid signature.
"""<line_sep><return><none><block_end># TODO(kmullins): Shall we log this case? Is it expected?
decrypted_string=decrypted_bytes.decode()# bytes -> str
<return>decrypted_string<block_end> |
<import_from_stmt>setuptools setup<line_sep>exec(open('docker_replay/version.py').read())<line_sep>setup(name='docker-replay' version=version packages=['docker_replay'] description='Generate docker run commands from running containers' author='<NAME>' author_email='<EMAIL>' url='https://github.com/bcicen/docker-replay' install_requires=['docker>=2.4.2'] license='http://opensource.org/licenses/MIT' classifiers=('Intended Audience :: Developers' 'License :: OSI Approved :: MIT License ' 'Natural Language :: English' 'Programming Language :: Python' 'Programming Language :: Python :: 2.7' 'Programming Language :: Python :: 3.4' ) keywords='docker docker-py devops' entry_points={'console_scripts':['docker-replay = docker_replay:main']})<line_sep> |
<import_stmt>os<import_from_stmt>scipy.io loadmat<import_from_stmt>menpo.shape.pointcloud PointCloud<import_from_stmt>menpo.transform ThinPlateSplines<import_stmt>menpo.transform<as>mt<import_stmt>menpo.io<as>mio<import_from_stmt>glob glob<import_from_stmt>deformation_functions *<line_sep># landmark indices by facial feature
jaw_indices=np.arange(0 17)<line_sep>lbrow_indices=np.arange(17 22)<line_sep>rbrow_indices=np.arange(22 27)<line_sep>upper_nose_indices=np.arange(27 31)<line_sep>lower_nose_indices=np.arange(31 36)<line_sep>leye_indices=np.arange(36 42)<line_sep>reye_indices=np.arange(42 48)<line_sep>outer_mouth_indices=np.arange(48 60)<line_sep>inner_mouth_indices=np.arange(60 68)<line_sep># flipped landmark indices
mirrored_parts_68=np.hstack([jaw_indices[::-1] rbrow_indices[::-1] lbrow_indices[::-1] upper_nose_indices lower_nose_indices[::-1] np.roll(reye_indices[::-1] 4) np.roll(leye_indices[::-1] 4) np.roll(outer_mouth_indices[::-1] 7) np.roll(inner_mouth_indices[::-1] 5)])<def_stmt>load_bb_files bb_file_dirs<block_start>"""load bounding box mat file for challenging, common, full & training datasets"""<line_sep>bb_files_dict={}<for_stmt>bb_file bb_file_dirs<block_start>bb_mat=loadmat(bb_file)['bounding_boxes']<line_sep>num_imgs=np.max(bb_mat.shape)<for_stmt>i range(num_imgs)<block_start>name=bb_mat[0][i][0][0][0][0]<line_sep>bb_init=bb_mat[0][i][0][0][1]-1# MATLAB indices
bb_gt=bb_mat[0][i][0][0][2]-1# MATLAB indices
<if_stmt>str(name)<in>bb_files_dict.keys()<block_start>print(str(name)+' already exists')<block_end><else_stmt><block_start>bb_files_dict[str(name)]=(bb_init bb_gt)<block_end><block_end><block_end><return>bb_files_dict<block_end><def_stmt>load_bb_dictionary bb_dir mode test_data='full'<block_start>"""create bounding box dictionary of input dataset: train/common/full/challenging"""<if_stmt>mode<eq>'TRAIN'<block_start>bb_dirs=['bounding_boxes_afw.mat' 'bounding_boxes_helen_trainset.mat' 'bounding_boxes_lfpw_trainset.mat']<block_end><else_stmt><block_start><if_stmt>test_data<eq>'common'<block_start>bb_dirs=['bounding_boxes_helen_testset.mat' 'bounding_boxes_lfpw_testset.mat']<block_end><elif_stmt>test_data<eq>'challenging'<block_start>bb_dirs=['bounding_boxes_ibug.mat']<block_end><elif_stmt>test_data<eq>'full'<block_start>bb_dirs=['bounding_boxes_ibug.mat' 'bounding_boxes_helen_testset.mat' 'bounding_boxes_lfpw_testset.mat']<block_end><elif_stmt>test_data<eq>'training'<block_start>bb_dirs=['bounding_boxes_afw.mat' 'bounding_boxes_helen_trainset.mat' 'bounding_boxes_lfpw_trainset.mat']<block_end><else_stmt><block_start>bb_dirs=<none><block_end><block_end><if_stmt>mode<eq>'TEST'<and>test_data<not><in>['full' 'challenging' 'common' 'training']<block_start>bb_files_dict=<none><block_end><else_stmt><block_start>bb_dirs=[os.path.join(bb_dir dataset)<for>dataset bb_dirs]<line_sep>bb_files_dict=load_bb_files(bb_dirs)<block_end><return>bb_files_dict<block_end><def_stmt>center_margin_bb bb img_bounds margin=0.25<block_start>"""create new bounding box with input margin"""<line_sep>bb_size=([bb[0 2]-bb[0 0] bb[0 3]-bb[0 1]])<line_sep>margins=(np.max(bb_size)<times>(1+margin)-bb_size)/2<line_sep>bb_new=np.zeros_like(bb)<line_sep>bb_new[0 0]=np.maximum(bb[0 0]-margins[0] 0)<line_sep>bb_new[0 2]=np.minimum(bb[0 2]+margins[0] img_bounds[1])<line_sep>bb_new[0 1]=np.maximum(bb[0 1]-margins[1] 0)<line_sep>bb_new[0 3]=np.minimum(bb[0 3]+margins[1] img_bounds[0])<line_sep><return>bb_new<block_end><def_stmt>crop_to_face_image img bb_dictionary=<none> gt=<true> margin=0.25 image_size=256 normalize=<true> return_transform=<false><block_start>"""crop face image using bounding box dictionary, or GT landmarks"""<line_sep>name=img.path.name<line_sep>img_bounds=img.bounds()[1]<line_sep># if there is no bounding-box dict and GT landmarks are available, use it to determine the bounding box
<if_stmt>bb_dictionary<is><none><and>img.has_landmarks<block_start>grp_name=img.landmarks.group_labels[0]<line_sep>bb_menpo=img.landmarks[grp_name].bounding_box().points<line_sep>bb=np.array([[bb_menpo[0 1] bb_menpo[0 0] bb_menpo[2 1] bb_menpo[2 0]]])<block_end><elif_stmt>bb_dictionary<is><not><none><block_start><if_stmt>gt<block_start>bb=bb_dictionary[name][1]# ground truth
<block_end><else_stmt><block_start>bb=bb_dictionary[name][0]# init from face detector
<block_end><block_end><else_stmt><block_start>bb=<none><block_end><if_stmt>bb<is><not><none># add margin to bounding box
<block_start>bb=center_margin_bb(bb img_bounds margin=margin)<line_sep>bb_pointcloud=PointCloud(np.array([[bb[0 1] bb[0 0]] [bb[0 3] bb[0 0]] [bb[0 3] bb[0 2]] [bb[0 1] bb[0 2]]]))<if_stmt>return_transform<block_start>face_crop,bb_transform=img.crop_to_pointcloud(bb_pointcloud return_transform=<true>)<block_end><else_stmt><block_start>face_crop=img.crop_to_pointcloud(bb_pointcloud)<block_end><block_end><else_stmt># if there is no bounding box/gt landmarks, use entire image
<block_start>face_crop=img.copy()<line_sep>bb_transform=<none><block_end># if face crop is not a square - pad borders with mean pixel value
h,w=face_crop.shape<line_sep>diff=h-w<if_stmt>diff<l>0<block_start>face_crop.pixels=np.pad(face_crop.pixels ((0 0) (0 -1<times>diff) (0 0)) 'mean')<block_end><elif_stmt>diff<g>0<block_start>face_crop.pixels=np.pad(face_crop.pixels ((0 0) (0 0) (0 diff)) 'mean')<block_end><if_stmt>return_transform<block_start>face_crop,rescale_transform=face_crop.resize([image_size image_size] return_transform=<true>)<if_stmt>bb_transform<is><none><block_start>transform_chain=rescale_transform<block_end><else_stmt><block_start>transform_chain=mt.TransformChain(transforms=(rescale_transform bb_transform))<block_end><block_end><else_stmt><block_start>face_crop=face_crop.resize([image_size image_size])<block_end><if_stmt>face_crop.n_channels<eq>4<block_start>face_crop.pixels=face_crop.pixels[:3 : :]<block_end><if_stmt>normalize<block_start>face_crop.pixels=face_crop.rescale_pixels(0. 1.).pixels<block_end><if_stmt>return_transform<block_start><return>face_crop transform_chain<block_end><else_stmt><block_start><return>face_crop<block_end><block_end><def_stmt>augment_face_image img image_size=256 crop_size=248 angle_range=30 flip=<true><block_start>"""basic image augmentation: random crop, rotation and horizontal flip"""<line_sep># taken from MDM: https://github.com/trigeorgis/mdm
<def_stmt>mirror_landmarks_68 lms im_size<block_start><return>PointCloud(abs(np.array([0 im_size[1]])-lms.as_vector().reshape(-1 2))[mirrored_parts_68])<block_end># taken from MDM: https://github.com/trigeorgis/mdm
<def_stmt>mirror_image im<block_start>im=im.copy()<line_sep>im.pixels=im.pixels[<ellipsis> ::-1].copy()<for_stmt>group im.landmarks<block_start>lms=im.landmarks[group]<if_stmt>lms.points.shape[0]<eq>68<block_start>im.landmarks[group]=mirror_landmarks_68(lms im.shape)<block_end><block_end><return>im<block_end>flip_rand=np.random.random()<g>0.5<line_sep># rot_rand = np.random.random() > 0.5
# crop_rand = np.random.random() > 0.5
rot_rand=<true># like ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
crop_rand=<true># like ECT: https://github.com/HongwenZhang/ECT-FaceAlignment
<if_stmt>crop_rand<block_start>lim=image_size-crop_size<line_sep>min_crop_inds=np.random.randint(0 lim 2)<line_sep>max_crop_inds=min_crop_inds+crop_size<line_sep>img=img.crop(min_crop_inds max_crop_inds)<block_end><if_stmt>flip<and>flip_rand<block_start>img=mirror_image(img)<block_end><if_stmt>rot_rand<block_start>rot_angle=2<times>angle_range<times>np.random.random_sample()-angle_range<line_sep>img=img.rotate_ccw_about_centre(rot_angle)<block_end>img=img.resize([image_size image_size])<line_sep><return>img<block_end><def_stmt>augment_menpo_img_ns img img_dir_ns p_ns=0.<block_start>"""texture style image augmentation using stylized copies in *img_dir_ns*"""<line_sep>img=img.copy()<if_stmt>p_ns<g>0.5<block_start>ns_augs=glob(os.path.join(img_dir_ns img.path.name.split('.')[0]+'_ns*'))<line_sep>num_augs=len(ns_augs)<if_stmt>num_augs<g>0<block_start>ns_ind=np.random.randint(0 num_augs)<line_sep>ns_aug=mio.import_image(ns_augs[ns_ind])<line_sep>ns_pixels=ns_aug.pixels<line_sep>img.pixels=ns_pixels<block_end><block_end><return>img<block_end><def_stmt>augment_menpo_img_geom img p_geom=0.<block_start>"""geometric style image augmentation using random face deformations"""<line_sep>img=img.copy()<if_stmt>p_geom<g>0.5<block_start>grp_name=img.landmarks.group_labels[0]<line_sep>lms_geom_warp=deform_face_geometric_style(img.landmarks[grp_name].points.copy() p_scale=p_geom p_shift=p_geom)<line_sep>img=warp_face_image_tps(img PointCloud(lms_geom_warp) grp_name)<block_end><return>img<block_end><def_stmt>warp_face_image_tps img new_shape lms_grp_name='PTS' warp_mode='constant'<block_start>"""warp image to new landmarks using TPS interpolation"""<line_sep>tps=ThinPlateSplines(new_shape img.landmarks[lms_grp_name])<try_stmt><block_start>img_warp=img.warp_to_shape(img.shape tps mode=warp_mode)<line_sep>img_warp.landmarks[lms_grp_name]=new_shape<line_sep><return>img_warp<block_end><except_stmt>np.linalg.linalg.LinAlgError<as>err<block_start>print('Error:'+str(err)+'\nUsing original landmarks for:\n'+str(img.path))<line_sep><return>img<block_end><block_end><def_stmt>load_menpo_image_list img_dir train_crop_dir img_dir_ns mode bb_dictionary=<none> image_size=256 margin=0.25 bb_type='gt' test_data='full' augment_basic=<true> augment_texture=<false> p_texture=0 augment_geom=<false> p_geom=0 verbose=<false> return_transform=<false><block_start>"""load images from image dir to create menpo-type image list"""<def_stmt>crop_to_face_image_gt img<block_start><return>crop_to_face_image(img bb_dictionary gt=<true> margin=margin image_size=image_size return_transform=return_transform)<block_end><def_stmt>crop_to_face_image_init img<block_start><return>crop_to_face_image(img bb_dictionary gt=<false> margin=margin image_size=image_size return_transform=return_transform)<block_end><def_stmt>crop_to_face_image_test img<block_start><return>crop_to_face_image(img bb_dictionary=<none> margin=margin image_size=image_size return_transform=return_transform)<block_end><def_stmt>augment_menpo_img_ns_rand img<block_start><return>augment_menpo_img_ns(img img_dir_ns p_ns=1.<times>(np.random.rand()<l>p_texture)[0])<block_end><def_stmt>augment_menpo_img_geom_rand img<block_start><return>augment_menpo_img_geom(img p_geom=1.<times>(np.random.rand()<l>p_geom)[0])<block_end><if_stmt>mode<is>'TRAIN'<block_start><if_stmt>train_crop_dir<is><none><block_start>img_set_dir=os.path.join(img_dir 'training')<line_sep>out_image_list=mio.import_images(img_set_dir verbose=verbose 
normalize=<false>)<if_stmt>bb_type<is>'gt'<block_start>out_image_list=out_image_list.map(crop_to_face_image_gt)<block_end><elif_stmt>bb_type<is>'init'<block_start>out_image_list=out_image_list.map(crop_to_face_image_init)<block_end><block_end><else_stmt><block_start>img_set_dir=os.path.join(img_dir train_crop_dir)<line_sep>out_image_list=mio.import_images(img_set_dir verbose=verbose)<block_end># perform image augmentation
<if_stmt>augment_texture<and>p_texture<g>0<block_start>out_image_list=out_image_list.map(augment_menpo_img_ns_rand)<block_end><if_stmt>augment_geom<and>p_geom<g>0<block_start>out_image_list=out_image_list.map(augment_menpo_img_geom_rand)<block_end><if_stmt>augment_basic<block_start>out_image_list=out_image_list.map(augment_face_image)<block_end><block_end><else_stmt># if mode is 'TEST', load test data
<block_start><if_stmt>test_data<in>['full' 'challenging' 'common' 'training' 'test']<block_start>img_set_dir=os.path.join(img_dir test_data)<line_sep>out_image_list=mio.import_images(img_set_dir verbose=verbose normalize=<false>)<if_stmt>bb_type<is>'gt'<block_start>out_image_list=out_image_list.map(crop_to_face_image_gt)<block_end><elif_stmt>bb_type<is>'init'<block_start>out_image_list=out_image_list.map(crop_to_face_image_init)<block_end><block_end><else_stmt><block_start>img_set_dir=os.path.join(img_dir test_data)<line_sep>out_image_list=mio.import_images(img_set_dir verbose=verbose normalize=<false>)<line_sep>out_image_list=out_image_list.map(crop_to_face_image_test)<block_end><block_end><return>out_image_list<block_end> |
TRAIN_BEGIN='TRAIN_BEGIN'<line_sep>TRAIN_END='TRAIN_END'<line_sep>EPOCH_BEGIN='EPOCH_BEGIN'<line_sep>EPOCH_END='EPOCH_END'<line_sep>BATCH_BEGIN='BATCH_BEGIN'<line_sep>BATCH_END='BATCH_END'<line_sep> |
<import_from_stmt>unittest TestCase<import_stmt>moderngl<import_stmt>numpy<import_stmt>platform<class_stmt>ContextTests(TestCase)<block_start><def_stmt>test_create_destroy self<block_start>"""Create and destroy a context"""<for_stmt>_ range(25)<block_start>ctx=moderngl.create_context(standalone=<true>)<line_sep>ctx.release()<block_end><block_end><def_stmt>test_context_switch self<block_start>"""Ensure context switching is working"""<line_sep>ctx1=moderngl.create_context(standalone=<true>)<line_sep>ctx2=moderngl.create_context(standalone=<true>)<with_stmt>ctx1<as>ctx<block_start>buffer1=ctx.buffer(reserve=1024)<block_end><with_stmt>ctx2<as>ctx<block_start>buffer2=ctx.buffer(reserve=1024)<block_end>self.assertEqual(buffer1.glo buffer2.glo)<line_sep>ctx1.release()<line_sep>ctx2.release()<block_end><def_stmt>test_exit self<block_start>"""Ensure the previous context was activated on exit"""<line_sep>ctx1=moderngl.create_context(standalone=<true>)<line_sep>ctx2=moderngl.create_context(standalone=<true>)<with_stmt>ctx1<as>ctx<block_start>ctx.buffer(reserve=1024)<block_end># Will error out if no context is active "moderngl.error.Error: cannot create buffer"
ctx1.buffer(reserve=1024)<line_sep>ctx1.release()<line_sep>ctx2.release()<block_end><def_stmt>test_share self<block_start>"""Create resources with shared context"""<if_stmt>platform.system().lower()<in>["darwin" "linux"]<block_start>self.skipTest('Context sharing not supported on darwin')<block_end>data1=numpy.array([1 2 3 4] dtype='u1')<line_sep>data2=numpy.array([4 3 2 1] dtype='u1')<line_sep>ctx1=moderngl.create_context(standalone=<true>)<line_sep>ctx2=moderngl.create_context(standalone=<true> share=<true>)<with_stmt>ctx1<as>ctx<block_start>b1=ctx.buffer(data=data1)<block_end><with_stmt>ctx2<as>ctx<block_start>b2=ctx.buffer(data=data2)<block_end># Because the resources are shared the name should increment
self.assertEqual(b1.glo 1)<line_sep>self.assertEqual(b2.glo 2)<line_sep># Ensure we can read the same buffer data in both contexts
<with_stmt>ctx1<block_start>self.assertEqual(b1.read() b'\x01\x02\x03\x04')<line_sep>self.assertEqual(b2.read() b'\x04\x03\x02\x01')<block_end><with_stmt>ctx2<block_start>self.assertEqual(b1.read() b'\x01\x02\x03\x04')<line_sep>self.assertEqual(b2.read() b'\x04\x03\x02\x01')<block_end>ctx1.release()<line_sep>ctx2.release()<block_end><def_stmt>test_extensions self<block_start>ctx=moderngl.create_context(standalone=<true>)<line_sep># self.assertTrue("GL_ARB_vertex_array_object" in ctx.extensions)
# self.assertTrue("GL_ARB_transform_feedback2" in ctx.extensions)
# self.assertTrue("GL_ARB_shader_subroutine" in ctx.extensions)
self.assertIsInstance(ctx.extensions set)<line_sep>self.assertTrue(len(ctx.extensions)<g>0)<line_sep>ctx.release()<block_end><def_stmt>test_attributes self<block_start>"""Ensure enums are present in the context instance"""<line_sep>ctx=moderngl.create_context(standalone=<true>)<line_sep># Flags
self.assertIsInstance(ctx.NOTHING int)<line_sep>self.assertIsInstance(ctx.BLEND int)<line_sep>self.assertIsInstance(ctx.DEPTH_TEST int)<line_sep>self.assertIsInstance(ctx.CULL_FACE int)<line_sep>self.assertIsInstance(ctx.RASTERIZER_DISCARD int)<line_sep>self.assertIsInstance(ctx.PROGRAM_POINT_SIZE int)<line_sep># Primitive modes
self.assertIsInstance(ctx.POINTS int)<line_sep>self.assertIsInstance(ctx.LINES int)<line_sep>self.assertIsInstance(ctx.LINE_LOOP int)<line_sep>self.assertIsInstance(ctx.LINE_STRIP int)<line_sep>self.assertIsInstance(ctx.TRIANGLES int)<line_sep>self.assertIsInstance(ctx.TRIANGLE_STRIP int)<line_sep>self.assertIsInstance(ctx.TRIANGLE_FAN int)<line_sep>self.assertIsInstance(ctx.LINES_ADJACENCY int)<line_sep>self.assertIsInstance(ctx.LINE_STRIP_ADJACENCY int)<line_sep>self.assertIsInstance(ctx.TRIANGLES_ADJACENCY int)<line_sep>self.assertIsInstance(ctx.TRIANGLE_STRIP_ADJACENCY int)<line_sep>self.assertIsInstance(ctx.PATCHES int)<line_sep># Texture filters
self.assertIsInstance(ctx.LINEAR int)<line_sep>self.assertIsInstance(ctx.NEAREST int)<line_sep>self.assertIsInstance(ctx.NEAREST_MIPMAP_NEAREST int)<line_sep>self.assertIsInstance(ctx.LINEAR_MIPMAP_LINEAR int)<line_sep>self.assertIsInstance(ctx.LINEAR_MIPMAP_NEAREST int)<line_sep>self.assertIsInstance(ctx.NEAREST_MIPMAP_LINEAR int)<line_sep># Blend functions
self.assertIsInstance(ctx.ZERO int)<line_sep>self.assertIsInstance(ctx.ONE int)<line_sep>self.assertIsInstance(ctx.SRC_COLOR int)<line_sep>self.assertIsInstance(ctx.ONE_MINUS_SRC_COLOR int)<line_sep>self.assertIsInstance(ctx.SRC_ALPHA int)<line_sep>self.assertIsInstance(ctx.ONE_MINUS_SRC_ALPHA int)<line_sep>self.assertIsInstance(ctx.DST_ALPHA int)<line_sep>self.assertIsInstance(ctx.ONE_MINUS_DST_ALPHA int)<line_sep>self.assertIsInstance(ctx.DST_COLOR int)<line_sep>self.assertIsInstance(ctx.ONE_MINUS_DST_COLOR int)<line_sep># Blend shortcuts
self.assertIsInstance(ctx.DEFAULT_BLENDING tuple)<line_sep>self.assertIsInstance(ctx.ADDITIVE_BLENDING tuple)<line_sep>self.assertIsInstance(ctx.PREMULTIPLIED_ALPHA tuple)<line_sep># Blend equations
self.assertIsInstance(ctx.FUNC_ADD int)<line_sep>self.assertIsInstance(ctx.FUNC_SUBTRACT int)<line_sep>self.assertIsInstance(ctx.FUNC_REVERSE_SUBTRACT int)<line_sep>self.assertIsInstance(ctx.MIN int)<line_sep>self.assertIsInstance(ctx.MAX int)<line_sep># Provoking vertex
self.assertIsInstance(ctx.FIRST_VERTEX_CONVENTION int)<line_sep>self.assertIsInstance(ctx.LAST_VERTEX_CONVENTION int)<block_end><def_stmt>test_enable_direct self<block_start>ctx=moderngl.create_context(standalone=<true>)<line_sep>ctx.error# consume error during initialization
# We already support this, but it's a safe value
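# enable_direct()/disable_direct() pass the raw OpenGL enum straight to glEnable/glDisable; reading ctx.error afterwards confirms the driver accepted the call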
GL_PROGRAM_POINT_SIZE=0x8642<line_sep>ctx.enable_direct(GL_PROGRAM_POINT_SIZE)<line_sep>self.assertEqual(ctx.error "GL_NO_ERROR")<line_sep>ctx.disable_direct(GL_PROGRAM_POINT_SIZE)<line_sep>self.assertEqual(ctx.error "GL_NO_ERROR")<block_end><block_end> |
<import_stmt>biplist<import_stmt>pathlib<import_stmt>plistlib<import_stmt>sys<import_from_stmt>scripts.artifact_report ArtifactHtmlReport<import_from_stmt>scripts.ilapfuncs logfunc tsv is_platform_windows<def_stmt>get_appGrouplisting files_found report_folder seeker<block_start>data_list=[]<for_stmt>file_found files_found<block_start>file_found=str(file_found)<with_stmt>open(file_found "rb")<as>fp<block_start><if_stmt>sys.version_info<ge>(3 9)<block_start>plist=plistlib.load(fp)<block_end><else_stmt><block_start>plist=biplist.readPlist(fp)<block_end>bundleid=plist['MCMMetadataIdentifier']<line_sep>p=pathlib.Path(file_found)<line_sep>appgroupid=p.parent.name<line_sep>fileloc=str(p.parents[1])<line_sep>typedir=str(p.parents[1].name)<line_sep>data_list.append((bundleid typedir appgroupid fileloc))<block_end><block_end><if_stmt>len(data_list)<g>0<block_start>filelocdesc='Path column in the report'<line_sep>description='List can included once installed but not present apps. Each file is named .com.apple.mobile_container_manager.metadata.plist'<line_sep>report=ArtifactHtmlReport('Bundle ID by AppGroup & PluginKit IDs')<line_sep>report.start_artifact_report(report_folder 'Bundle ID by AppGroup & PluginKit IDs' description)<line_sep>report.add_script()<line_sep>data_headers=('Bundle ID' 'Type' 'Directory GUID' 'Path')<line_sep>report.write_artifact_data_table(data_headers data_list filelocdesc)<line_sep>report.end_artifact_report()<line_sep>tsvname='Bundle ID - AppGroup ID - PluginKit ID'<line_sep>tsv(report_folder data_headers data_list tsvname)<block_end><else_stmt><block_start>logfunc('No data on Bundle ID - AppGroup ID - PluginKit ID')<block_end><block_end> |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>.io *<import_from_stmt>.layer *<import_from_stmt>.tensor *<import_from_stmt>.optimizer *<import_from_stmt>.nn *<import_from_stmt>.nn_utils *<import_from_stmt>.nn_functional *<import_from_stmt>.nn_init *<import_from_stmt>.varbase *<import_from_stmt>.vision_transforms *<import_from_stmt>.device *<import_from_stmt>.vision_utils *<import_from_stmt>.vision_datasets *<import_from_stmt>.ops *<import_from_stmt>.learning_rate_scheduler *<import_from_stmt>.parambase *<line_sep> |
<import_stmt>copy<def_stmt>load_deform_exp_setting selected_deform_exp<block_start>def_setting=dict()<if_stmt>selected_deform_exp<eq>'3D_max20_D14'<block_start>def_setting=dict()<line_sep>def_setting['MaxDeform']=20# The maximum amplitude of deformations
def_setting['DeformMethods']=['respiratory_motion' 'respiratory_motion' 'respiratory_motion' 'respiratory_motion' 'single_frequency' 'single_frequency' 'single_frequency' 'single_frequency' 'single_frequency' 'mixed_frequency' 'mixed_frequency' 'mixed_frequency' 'mixed_frequency' 'zero']<line_sep>def_setting['UseLungMask']=<true># The peaks of synthetic deformation can only be inside the mask
def_setting['verbose_image']=<false># Detailed writing of images: writing the DVF of the nextFixedImage
def_setting['DVFNormalization']=<true><line_sep>def_setting['MaskToZero']='Torso'<line_sep>def_setting['WriteIntermediateIntensityAugmentation']=<false><line_sep># stages
def_setting['DeleteStage1Images']=<true># After downsampling, delete all images in the original resolution.
# images
def_setting['Canny_LowerThreshold']=50.0<line_sep>def_setting['Canny_UpperThreshold']=100.0<line_sep>def_setting['Im_NoiseSigma']=10# Sigma for adding noise after deformation
def_setting['Im_NoiseAverage']=10# Mean for adding noise after deformation
# occlusion
def_setting['Occlusion']=<true><line_sep>def_setting['Occlusion_NumberOfEllipse']=10<line_sep>def_setting['Occlusion_IntensityRange']=[-800 -780]<line_sep>def_setting['Occlusion_Max_a']=15<line_sep>def_setting['Occlusion_Max_b']=15<line_sep>def_setting['Occlusion_Max_c']=15<line_sep># NextIm
def_setting['NextIm_SigmaN']=2# The intensity noise is lower than for normal deformed images in order to prevent accumulating noise.
# Since we are going to generate several deformed images on the NextIm
def_setting['NextIm_MaxDeform']=15<line_sep># Single Frequency
def_setting['SingleFrequency_BSplineGridSpacing']=[[80 80 80] [70 70 70] [60 60 60] [50 50 50] [45 45 45]]# in mm approximately
def_setting['SingleFrequency_SetGridBorderToZero']=[[1 1 1] [1 1 1] [2 2 2] [2 2 2] [2 2 2]]<line_sep>def_setting['SingleFrequency_GridSmoothingSigma']=[[0.8 0.8 0.8] [0.8 0.8 0.8] [0.8 0.8 0.8] [0.8 0.8 0.8] [0.8 0.8 0.8]]# in voxel not in mm
def_setting['SingleFrequency_BackgroundSmoothingSigma']=[8 8 8 8 8]# in voxel not in mm
def_setting['SingleFrequency_MaxDeformRatio']=[1 1 1 1 1]<line_sep># Mixed Frequency
def_setting['MixedFrequency_BlockRadius']=20# in voxel not in mm
def_setting['MixedFrequency_Np']=[200 150 150 150]<line_sep>def_setting['MixedFrequency_BSplineGridSpacing']=[[80 80 80] [60 60 60] [50 50 50] [45 45 60]]<line_sep>def_setting['MixedFrequency_SigmaRange']=[[10 15] [10 15] [10 15] [10 15]]# in voxel not in mm
def_setting['MixedFrequency_GridSmoothingSigma']=[[0.5 0.5 0.5] [0.5 0.5 0.5] [0.5 0.5 0.5] [0.5 0.5 0.5]]# in voxel not in mm
def_setting['MixedFrequency_SetGridBorderToZero']=[[0 0 0] [0 0 0] [0 0 0] [0 0 0]]# in voxel not in mm
def_setting['MixedFrequency_MaxDeformRatio']=[1 1 1 1]<line_sep># Respiratory Motion
def_setting['RespiratoryMotion_t0']=[30 30 30 30 30]# in mm
def_setting['RespiratoryMotion_s0']=[0.12 0.12 0.12 0.12 0.12]<line_sep>def_setting['RespiratoryMotion_BSplineGridSpacing']=[[80 80 80] [70 70 70] [60 60 60] [50 50 50] [45 45 45]]# in mm approximately
def_setting['RespiratoryMotion_SetGridBorderToZero']=[[1 1 1] [1 1 1] [2 2 2] [2 2 2] [2 2 2]]<line_sep>def_setting['RespiratoryMotion_GridSmoothingSigma']=[[0.8 0.8 0.8] [0.8 0.8 0.8] [0.8 0.8 0.8] [0.8 0.8 0.8] [0.8 0.8 0.8]]# in voxel not in mm
def_setting['RespiratoryMotion_BackgroundSmoothingSigma']=[8 8 8 8 8]# in voxel not in mm
def_setting['RespiratoryMotion_MaxDeformRatio']=[1 1 1 1 1]<line_sep>def_setting['RespiratoryMotion_SingleFrequency_MaxDeformRatio']=[0.5 0.5 0.5 0.5 0.5]<line_sep># translation
def_setting['Translation_MaxDeformRatio']=[1 1 1 1]<line_sep># translation_old
def_setting['BsplineGridSpacing_translation']=[[40 40 40] [40 40 40] [40 40 40] [40 40 40]]<line_sep>def_setting['setGridBorderToZero_translation']=[[1 1 1] [1 1 1] [1 1 1] [1 1 1]]<block_end><elif_stmt>selected_deform_exp<eq>'3D_max20_D14_K'<block_start>deform_exp_setting_temp=load_deform_exp_setting('3D_max20_D14')<line_sep>def_setting=copy.deepcopy(deform_exp_setting_temp)<block_end><elif_stmt>selected_deform_exp<in>['3D_max7_D14_K']<block_start>deform_exp_setting_temp=load_deform_exp_setting('3D_max20_D14')<line_sep>def_setting=copy.deepcopy(deform_exp_setting_temp)<line_sep>def_setting['MaxDeform']=7<line_sep>def_setting['SingleFrequency_BSplineGridSpacing']=[[50 50 50] [45 45 45] [35 35 35] [25 25 25] [20 20 20]]<line_sep>def_setting['SingleFrequency_MaxDeformRatio']=[0.5 1 1 1 1]<line_sep>def_setting['MixedFrequency_BSplineGridSpacing']=[[50 50 50] [40 40 40] [25 25 35] [20 20 30]]<line_sep>def_setting['MixedFrequency_SigmaRange']=[[5 10] [5 10] [5 10] [5 10]]<line_sep>def_setting['MixedFrequency_MaxDeformRatio']=[1 1 1 1]<line_sep>def_setting['RespiratoryMotion_t0']=[15 15 15 15 15]# in mm
def_setting['RespiratoryMotion_s0']=[0.12 0.12 0.12 0.12 0.12]<line_sep>def_setting['RespiratoryMotion_BSplineGridSpacing']=[[50 50 50] [45 45 45] [35 35 35] [25 25 25] [20 20 20]]<line_sep>def_setting['RespiratoryMotion_MaxDeformRatio']=[1 1 1 1 1]<line_sep>def_setting['RespiratoryMotion_SingleFrequency_MaxDeformRatio']=[0.5 0.5 0.5 0.5 0.5]<block_end><elif_stmt>selected_deform_exp<in>['3D_max15_D14_K']<block_start>deform_exp_setting_temp=load_deform_exp_setting('3D_max20_D14')<line_sep>def_setting=copy.deepcopy(deform_exp_setting_temp)<line_sep>def_setting['MaxDeform']=15<line_sep>def_setting['SingleFrequency_BSplineGridSpacing']=[[60 60 60] [50 50 50] [45 45 45] [40 40 40] [35 35 35]]<line_sep>def_setting['SingleFrequency_MaxDeformRatio']=[0.5 1 1 1 1]<line_sep>def_setting['MixedFrequency_BSplineGridSpacing']=[[60 60 60] [50 50 40] [40 40 80] [35 35 80]]<line_sep>def_setting['MixedFrequency_SigmaRange']=[[7 12] [7 12] [7 12] [7 12]]<line_sep>def_setting['MixedFrequency_MaxDeformRatio']=[1 1 1 1]<line_sep>def_setting['RespiratoryMotion_t0']=[22 22 22 22 22]# in mm
def_setting['RespiratoryMotion_s0']=[0.12 0.12 0.12 0.12 0.12]<line_sep>def_setting['RespiratoryMotion_BSplineGridSpacing']=[[60 60 60] [50 50 50] [45 45 45] [40 40 40] [35 35 35]]<line_sep>def_setting['RespiratoryMotion_MaxDeformRatio']=[1 1 1 1 1]<line_sep>def_setting['RespiratoryMotion_SingleFrequency_MaxDeformRatio']=[0.5 0.5 0.5 0.5 0.5]<block_end><else_stmt><block_start>print('warning: -------- selected_deform_exp not found')<block_end><return>def_setting<block_end> |
<import_from_stmt>concurrent.futures ThreadPoolExecutor<import_from_stmt>concurrent.futures ProcessPoolExecutor<import_from_stmt>requests get<import_from_stmt>functools partial<import_from_stmt>os getpid<line_sep>l_urls=['https://google.com']<times>6<line_sep># executor = ThreadPoolExecutor(max_workers=3)
# result = executor.map(get, l_urls)
# print(result)
print('Threads')<with_stmt>ThreadPoolExecutor(max_workers=3)<as>executor<block_start>"""
executor.__enter__ -> self (ThreadPoolExecutor)
executor.__exit__ -> executor.shutdown(wait=True)
"""<line_sep>result=executor.map(get l_urls)<line_sep>print(result)<block_end>print(list(result))<line_sep>print('Processo')<with_stmt>ProcessPoolExecutor()<as>executor<block_start>result=executor.map(partial(print getpid()))<line_sep>print(result)<block_end>print(list(result))<line_sep> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_from_stmt>typing Dict Optional Sequence Tuple<import_from_stmt>numpy ndarray<import_from_stmt>torch Tensor<import_from_stmt>..builder FLOW_ESTIMATORS<import_from_stmt>.pwcnet PWCNet<line_sep>@FLOW_ESTIMATORS.register_module()<class_stmt>LiteFlowNet(PWCNet)<block_start>"""LiteFlowNet model."""<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><def_stmt>extract_feat self imgs:Tensor<arrow>Tuple[Tensor Tensor Dict[str Tensor] Dict[str Tensor]]<block_start>"""Extract features from images.
Args:
imgs (Tensor): The concatenated input images.
Returns:
Tuple[Tensor, Tensor, Dict[str, Tensor], Dict[str, Tensor]]: The
first input image, the second input image, the feature pyramid
of the first input image and the feature pyramid of the second
input image.
"""<line_sep>in_channels=self.encoder.in_channels<line_sep># take from github.com:sniklaus/pytorch-liteflownet.git
imgs_mean=[0.411618 0.434631 0.454253]<for_stmt>ich range(in_channels)<block_start>imgs[: ich : :]=imgs[: ich : :]-imgs_mean[ich]<line_sep>imgs[: ich+in_channels : :]=(imgs[: ich+in_channels : :]-imgs_mean[ich])<block_end>img1=imgs[: :in_channels <ellipsis>]<line_sep>img2=imgs[: in_channels: <ellipsis>]<line_sep><return>img1 img2 self.encoder(img1) self.encoder(img2)<block_end><def_stmt>forward_train self imgs:Tensor flow_gt:Tensor valid:Optional[Tensor]=<none> img_metas:Optional[Sequence[dict]]=<none><arrow>Dict[str Tensor]<block_start>"""Forward function for LiteFlowNet when model training.
Args:
imgs (Tensor): The concatenated input images.
flow_gt (Tensor): The ground truth of optical flow.
Defaults to None.
valid (Tensor, optional): The valid mask. Defaults to None.
img_metas (Sequence[dict], optional): meta data of image to revert
the flow to original ground truth size. Defaults to None.
Returns:
Dict[str, Tensor]: The losses of output.
"""<line_sep>img1,img2,feat1,feat2=self.extract_feat(imgs)<line_sep><return>self.decoder.forward_train(img1 img2 feat1 feat2 flow_gt=flow_gt valid=valid)<block_end><def_stmt>forward_test self imgs:Tensor img_metas:Optional[Sequence[dict]]=<none><arrow>Sequence[ndarray]<block_start>"""Forward function for LiteFlowNet when model testing.
Args:
imgs (Tensor): The concatenated input images.
img_metas (Sequence[dict], optional): meta data of image to revert
the flow to original ground truth size. Defaults to None.
Returns:
Sequence[Dict[str, ndarray]]: the batch of predicted optical flow
with the same size of images after augmentation.
"""<line_sep>img1,img2,feat1,feat2=self.extract_feat(imgs)<line_sep><return>self.decoder.forward_test(img1 img2 feat1 feat2 img_metas=img_metas)<block_end><block_end> |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The author of this file is: https://github.com/mg2015started
<import_stmt>numpy<as>np<def_stmt>get_split_batch batch<block_start>"""memory.sample() returns a batch of experiences, but we want an array
for each element in the memory (s, a, r, s', done)"""<line_sep>states_mb=np.array([each[0][0]<for>each batch])<line_sep># print(states_mb.shape)
actions_mb=np.array([each[0][1]<for>each batch])<line_sep># print(actions_mb.shape)
rewards_mb=np.array([each[0][2]<for>each batch])<line_sep># print(rewards_mb.shape)
next_states_mb=np.array([each[0][3]<for>each batch])<line_sep># print(next_states_mb.shape)
dones_mb=np.array([each[0][4]<for>each batch])<line_sep><return>states_mb actions_mb rewards_mb next_states_mb dones_mb<block_end><def_stmt>OU action mu=0 theta=0.15 sigma=0.3# noise = np.ones(action_dim) * mu
<block_start>noise=theta<times>(mu-action)+sigma<times>np.random.randn(1)<line_sep># noise = noise + d_noise
<return>noise<block_end><def_stmt>calculate_angle ego_location goal_location ego_direction# calculate vector direction
<block_start>goal_location=np.array(goal_location)<line_sep>ego_location=np.array(ego_location)<line_sep>goal_vector=goal_location-ego_location<line_sep>L_g_vector=np.sqrt(goal_vector.dot(goal_vector))<line_sep>ego_vector=np.array([np.cos(ego_direction<times>np.pi/180) np.sin(ego_direction<times>np.pi/180)])<line_sep>L_e_vector=np.sqrt(ego_vector.dot(ego_vector))<line_sep>cos_angle=goal_vector.dot(ego_vector)/(L_g_vector<times>L_e_vector)<line_sep>angle=(np.arccos(cos_angle))<times>180/np.pi<if_stmt>np.cross(goal_vector ego_vector)<g>0<block_start>angle=-angle<block_end><return>angle<block_end><def_stmt>calculate_distance location_a location_b<block_start>""" calculate distance between a and b"""<line_sep><return>np.linalg.norm(location_a-location_b)<block_end> |
# Copyright (c) 2020 Trail of Bits, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
<class_stmt>Colors<block_start><class_stmt>c<block_start>green='\033[92m'<line_sep>yellow='\033[93m'<line_sep>red='\033[91m'<line_sep>magneta='\033[95m'<line_sep>bg_yellow='\033[43m'<line_sep>orange='\033[38;5;202m'<block_end>RESET='\033[0m'<block_end><def_stmt>get_result_color total success<block_start><if_stmt>total<eq>0<block_start><return>Colors.c.magneta<block_end><if_stmt>total<eq>success<block_start><return>Colors.c.green<block_end><if_stmt>success<eq>0<block_start><return>Colors.c.red<block_end><return>Colors.c.yellow<block_end><def_stmt>get_bin_result result<block_start><if_stmt>result<eq>1<block_start><return>Colors.c.green<block_end><if_stmt>result<eq>0<block_start><return>Colors.c.red<block_end><return>Colors.c.magneta<block_end><def_stmt>clean <block_start><return>Colors.RESET<block_end><def_stmt>c color message<block_start><return>color+message+clean()<block_end><def_stmt>fail <block_start><return>Colors.c.red<block_end><def_stmt>succ <block_start><return>Colors.c.green<block_end>#TODO: Not sure if it's worth to generate these for each color from attrs dynamically
<def_stmt>green message<block_start><return>c(Colors.c.green message)<block_end><def_stmt>red message<block_start><return>c(Colors.c.red message)<block_end><def_stmt>yellow message<block_start><return>c(Colors.c.yellow message)<block_end><def_stmt>magneta message<block_start><return>c(Colors.c.magneta message)<block_end><def_stmt>bg_yellow message<block_start><return>c(Colors.c.bg_yellow message)<block_end><def_stmt>orange message<block_start><return>c(Colors.c.orange message)<block_end><def_stmt>id message<block_start><return>message<block_end> |
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>typed_python.internals isCompiled typeKnownToCompiler localVariableTypesKnownToCompiler <import_from_stmt>typed_python.compiler.type_wrappers.wrapper Wrapper<import_stmt>typed_python.compiler.native_ast<as>native_ast<import_stmt>typed_python<class_stmt>IsCompiledWrapper(Wrapper)<block_start>is_pod=<true><line_sep>is_empty=<false><line_sep>is_pass_by_ref=<false><def_stmt>__init__ self<block_start>super().__init__(isCompiled)<block_end><def_stmt>getNativeLayoutType self<block_start><return>native_ast.Type.Void()<block_end><def_stmt>convert_call self context expr args kwargs<block_start><if_stmt>args<or>kwargs<block_start>context.pushException(TypeError "isCompiled() accepts no arguments")<block_end><return>context.constant(<true>)<block_end><block_end><class_stmt>TypeKnownToCompiler(Wrapper)<block_start>is_pod=<true><line_sep>is_empty=<false><line_sep>is_pass_by_ref=<false><def_stmt>__init__ self<block_start>super().__init__(typeKnownToCompiler)<block_end><def_stmt>getNativeLayoutType self<block_start><return>native_ast.Type.Void()<block_end><def_stmt>convert_call self context expr args kwargs<block_start><if_stmt>len(args)<ne>1<or>kwargs<block_start>context.pushException(TypeError "typeKnownToCompiler() accepts 1 positional argument")<block_end><return>typed_python.compiler.python_object_representation.pythonObjectRepresentation(context args[0].expr_type.typeRepresentation)<block_end><block_end><class_stmt>LocalVariableTypesKnownToCompiler(Wrapper)<block_start>is_pod=<true><line_sep>is_empty=<false><line_sep>is_pass_by_ref=<false><def_stmt>__init__ self<block_start>super().__init__(localVariableTypesKnownToCompiler)<block_end><def_stmt>getNativeLayoutType self<block_start><return>native_ast.Type.Void()<block_end><def_stmt>convert_call self context expr args kwargs<block_start><if_stmt>args<or>kwargs<block_start>context.pushException(TypeError "localVariableTypesKnownToCompiler() accepts no arguments")<block_end><return>context.constant(dict(context.variableStates._types) allowArbitrary=<true>)<block_end><block_end> |
# Lint as: python3
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Terminates currently running Glazier processes."""<import_stmt>logging<import_stmt>os<import_stmt>sys<import_stmt>traceback<import_from_stmt>typing Optional<import_from_stmt>glazier.lib actions<import_from_stmt>glazier.lib buildinfo<import_from_stmt>glazier.lib constants<import_from_stmt>glazier.lib logs<import_from_stmt>glazier.lib winpe<def_stmt>log_and_exit msg:str build_info:buildinfo.BuildInfo code:int=4000 exception:Optional[Exception]=<none> collect:bool=<true><block_start>"""Logs a user-facing error message and exits.
This function handles all Glazier Exceptions by sequentially:
- (Optional) Collecting logs to a zip folder on disk
- Logging the full traceback to the debug log
- Constructing the user-facing failure string, consisting of:
* The message to accompany the failure
* (Optional) The exception object, and if available, the file and line
number of the root exception
* The user-facing help message containing where to look for logs and
where to go for further assistance.
- Log the user-facing failure string
- Exit Glazier with code 1
Args:
msg: The error message to accompany the failure.
build_info: The active BuildInfo class.
code: Error code to append to the failure message.
exception: The exception object.
collect: Whether to collect log files.
"""<if_stmt>collect<block_start><try_stmt><block_start>logs.Collect(os.path.join(build_info.CachePath() r'\glazier_logs.zip'))<block_end><except_stmt>logs.LogError<as>e<block_start>logging.error('logs collection failed with %s' e)<block_end><block_end># Log the full traceback to _BUILD_LOG to assist in troubleshooting
logging.debug(traceback.format_exc())<line_sep>string=f'{msg}\n\n'<if_stmt>exception# Index 2 contains the traceback from the sys.exc_info() tuple
<block_start>trace=sys.exc_info()[2]<if_stmt>trace# Index -1 contains the traceback object of the root exception
<block_start>trace_obj=traceback.extract_tb(trace)[-1]<line_sep># The trace object contains the full file path, grab just the file name
file=os.path.split(trace_obj.filename)[1]<line_sep>lineno=trace_obj.lineno<line_sep>string<augadd>f'Exception: {file}:{lineno}] {exception}\n\n'<block_end><else_stmt><block_start>string<augadd>f'Exception] {exception}\n\n'<block_end><block_end>build_log=constants.SYS_BUILD_LOG<if_stmt>winpe.check_winpe()<block_start>build_log=constants.WINPE_BUILD_LOG<block_end>string<augadd>(f'See {build_log} for more info. '<concat>f'Need help? Visit {constants.HELP_URI}#{code}')<line_sep>logging.critical(string)<line_sep>sys.exit(1)<block_end> |
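log_and_exit above is only defined, never called, in this record. A hedged sketch of a call site follows; the task function, exception class and error code 4301 are placeholders invented for the example, not part of Glazier.

# Hypothetical caller; apply_image and ImageError are placeholders.
def run_step(build_info):
    try:
        apply_image()
    except ImageError as e:
        # Collects logs into glazier_logs.zip, writes the traceback to the
        # debug log, prints the user-facing message plus help URI, exits 1.
        log_and_exit('Failed to apply the base image.', build_info,
                     code=4301, exception=e, collect=True)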
<def_stmt>test_non_standard_default_desired_privilege_level iosxe_conn# purpose of this test is to ensure that when a user sets a non-standard default desired priv
# level, that there is nothing in genericdriver/networkdriver that will prevent that from
# actually being set as the default desired priv level
<block_start>iosxe_conn.close()<line_sep>iosxe_conn.default_desired_privilege_level="configuration"<line_sep>iosxe_conn.open()<line_sep>current_prompt=iosxe_conn.get_prompt()<assert_stmt>current_prompt<eq>"csr1000v(config)#"<line_sep>iosxe_conn.close()<block_end> |
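The test above checks that a non-standard default_desired_privilege_level survives reconnection. A stand-alone sketch of the same idea, assuming scrapli's IOSXEDriver and placeholder host/credentials:

# Sketch: host and credentials are placeholders, not taken from the test above.
from scrapli.driver.core import IOSXEDriver

conn = IOSXEDriver(
    host="172.16.0.1",
    auth_username="user",
    auth_password="pass",
    auth_strict_key=False,
    default_desired_privilege_level="configuration",  # non-standard default
)
conn.open()
print(conn.get_prompt())  # expected to end with "(config)#" on IOS-XE
conn.close()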
<import_stmt>theano.tensor<as>T<import_stmt>theano<import_from_stmt>mozi.utils.utils theano_unique<import_from_stmt>mozi.utils.theano_utils asfloatX<line_sep>floatX=theano.config.floatX<if_stmt>floatX<eq>'float64'<block_start>epsilon=1.0e-8<block_end><else_stmt><block_start>epsilon=1.0e-6<block_end><def_stmt>accuracy y y_pred<block_start>L=T.eq(y_pred.argmax(axis=1) y.argmax(axis=1))<line_sep><return>T.mean(L)<line_sep># L = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1))
# return T.sum(L) / y.shape[0].astype(floatX)
<block_end><def_stmt>mse y y_pred<block_start><return>T.mean(T.sqr(y-y_pred))<block_end><def_stmt>entropy y y_pred<block_start>y_pred=T.clip(y_pred epsilon 1.0-epsilon)<line_sep>L=-(y<times>T.log(y_pred)+(1-y)<times>T.log(1-y_pred))<line_sep><return>T.mean(L)<line_sep># L = - T.sum(y * T.log(y_pred) + (1-y) * T.log(1-y_pred), axis=1)
# return T.mean(L)
<block_end><def_stmt>error y y_pred<block_start>L=T.neq(y_pred.argmax(axis=1) y.argmax(axis=1))<line_sep><return>T.mean(L)<block_end><def_stmt>recall y y_pred<block_start>L=T.eq(y_pred.argmax(axis=1) y.argmax(axis=1))<line_sep><return>T.sum(L)/y.shape[0].astype(floatX)<block_end><def_stmt>precision y y_pred<block_start>L=T.eq(y_pred.argmax(axis=1) y.argmax(axis=1))<line_sep><return>T.sum(L)/y_pred.shape[0].astype(floatX)<block_end><def_stmt>f1 y y_pred<block_start>r=recall(y y_pred)<line_sep>p=precision(y y_pred)<line_sep><return>2<times>p<times>r/(p+r)<block_end><def_stmt>hingeloss y y_pred<block_start>y_pred=T.clip(y_pred 0. 1.0)<line_sep>L=T.max(0 1-y<times>y_pred)<line_sep><return>T.mean(L)<block_end><def_stmt>abs y y_pred<block_start><return>T.mean(T.abs_(y-y_pred))<block_end><def_stmt>SGVB_bin y y_pred<block_start>'''
This cost function is for variational autoencoder with binary inputs
'''<line_sep>ypred,miu_e,logsig_e=y_pred<line_sep>ypred=T.clip(ypred epsilon 1.0-epsilon)<line_sep>logpxz=-T.nnet.binary_crossentropy(ypred y).sum(axis=1)<line_sep>L=logpxz+0.5<times>(1+2<times>logsig_e-miu_e<power>2-T.exp(2<times>logsig_e)).sum(axis=1)<line_sep><return>L.mean()<block_end> |
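SGVB_bin above adds the binary cross-entropy reconstruction term to 0.5 * sum(1 + 2*log_sigma - mu^2 - sigma^2), which is the negative KL divergence between the encoder's Gaussian and a standard normal. A small NumPy check of that identity, with arbitrary values and independent of Theano:

import numpy as np

mu = np.array([0.3, -1.2])
log_sigma = np.array([-0.5, 0.1])
sigma2 = np.exp(2 * log_sigma)

# Term as written in SGVB_bin
term = 0.5 * np.sum(1 + 2 * log_sigma - mu**2 - sigma2)
# Closed-form KL(N(mu, sigma^2) || N(0, 1)), summed over dimensions
kl = np.sum(0.5 * (mu**2 + sigma2 - 2 * log_sigma - 1))

assert np.isclose(term, -kl)  # so the cost is reconstruction log-likelihood minus KL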
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 13:14:28 2020
@author: derek.bickhart-adm
"""<import_stmt>matplotlib<import_from_stmt>matplotlib pyplot<as>plt<line_sep>matplotlib.use('Agg')<import_from_stmt>matplotlib.collections BrokenBarHCollection<import_from_stmt>matplotlib cm<import_from_stmt>itertools cycle<import_from_stmt>collections defaultdict<import_stmt>argparse<import_stmt>pandas<import_stmt>numpy<as>np<import_stmt>pysam<def_stmt>arg_parse <block_start>parser=argparse.ArgumentParser(description="A tool to plot bin and contig level read depth differences in strain assignment")<line_sep>parser.add_argument('-f' '--fai' help="Input reference fasta index file for the bin" required=<true> type=str)<line_sep>parser.add_argument('-o' '--output' help="Output file basename. Output files are {output}.wins and {output}.pdf" required=<true> type=str )<line_sep>parser.add_argument('-b' '--bam' help="Input CCS read depth bam file" required=<true> type=str)<line_sep>parser.add_argument('-h' '--human' help="Input human-readable variant call file" required=<true> type=str)<line_sep>parser.add_argument('-i' '--binsize' help="Bin size in bases [5000 bp]" type=int default=5000)<line_sep><return>parser.parse_args() parser<block_end><def_stmt>main args parser# Get the contig length list
<block_start>ctglens=dict()<with_stmt>open(args.fai 'r')<as>fai<block_start><for_stmt>l fai<block_start>s=l.rstrip().split()<line_sep>ctglens[s[0]]=int(s[1])<block_end><block_end># Create windows
winlist=defaultdict(list)<line_sep># offset bp to add for stitching contigs together in one line
ctgoffset=dict()<line_sep>lastbp=0<for_stmt>c ctglens<block_start>ctgoffset[c]=lastbp+100<for_stmt>i range(0 ctglens[c] args.binsize)<block_start>winlist[c].append(window(c i i+args.binsize))<block_end>lastbp<augadd>ctglens[c]<block_end># read each sam region and count the reads
<with_stmt>pysam.AlignmentFile(args.bam 'rb')<as>bamfile<block_start><for_stmt>c,w winlist.items()<block_start><for_stmt>i,win enumerate(w)<block_start>count=0<for_stmt>s bamfile.fetch(c win.start win.end)<block_start><if_stmt>s.is_secondary<block_start><continue><block_end>count<augadd>1<block_end>winlist=updateWin(winlist c i count)<block_end><block_end><block_end># Now, read in the human readable text file and process that
hapset=set()<with_stmt>open(args.human 'r')<as>human<block_start>human.readline()<for_stmt>l human<block_start>s=l.rstrip().split()<line_sep># determine where the contig start falls
<for_stmt>i,win enumerate(winlist[s[2]])<block_start><if_stmt>int(s[3])<l>win.end<and>int(s[3])<ge>win.start<block_start>winlist=updateWin(winlist s[2] i int(s[6]) s[4])<line_sep>print(f'Updating window: {s[2]} {win.start} {win.end} to {s[6]} for Hap {s[4]}')<line_sep>hapset.add(s[4])<block_end><block_end><block_end><block_end># OK, data is in! Let's try plotting
raw=defaultdict(list)<line_sep>bars=list()<for_stmt>c,w winlist.items()<block_start>bars.append([ctgoffset[c] ctglens[c]])<for_stmt>win w<block_start><for_stmt>h hapset<block_start>raw["contig"].append(c)<line_sep>raw["start"].append(win.start+ctgoffset[c])<line_sep>raw["end"].append(win.end+ctgoffset[c])<line_sep>raw["hap"].append(h)<line_sep>raw["count"].append(win.getCount(h))<block_end><block_end><block_end>df=pandas.DataFrame(raw)<line_sep>df.to_csv(args.output+'.wins' sep='\t' header=<true>)<line_sep>fig=plt.figure(figsize=(6 8))<line_sep>ax=df[['start' 'hap' 'count']].plot.area(x='start' y='count' colormap='viridis')<line_sep>ax.add_collection(BrokenBarHCollection(bars [-1 1] facecolors=plt.get_cmap('tab20')))<line_sep>ax.axis('tight')<line_sep>plt.savefig(args.output+'.pdf')<block_end><def_stmt>updateWin winlist contig winidx count haplotype='REF'<block_start>winlist[contig][winidx].count[haplotype]=count<line_sep><return>winlist<block_end><class_stmt>window<block_start><def_stmt>__init__ self contig start end<block_start>self.contig=contig<line_sep>self.start=start<line_sep>self.end=end<line_sep>self.count=defaultdict(int)<block_end><def_stmt>getCount self hap<block_start><if_stmt>hap<in>self.count<block_start><return>self.count[hap]<block_end><else_stmt><block_start><return>0<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>args,parser=arg_parse()<line_sep>main(args parser)<block_end> |
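The script above tiles each contig into fixed-size windows and counts primary alignments per window before layering on the haplotype calls. A reduced sketch of just that counting step, with the BAM path and bin size as placeholders:

# Minimal sketch of per-window primary-read counting; the path is a placeholder.
import pysam

binsize = 5000
counts = {}
with pysam.AlignmentFile("reads.ccs.bam", "rb") as bam:
    for contig, length in zip(bam.references, bam.lengths):
        for start in range(0, length, binsize):
            end = min(start + binsize, length)
            n = sum(1 for r in bam.fetch(contig, start, end) if not r.is_secondary)
            counts[(contig, start, end)] = n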
#
# Ignores AWS ACM validation CNAME records.
#
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_from_stmt>logging getLogger<import_from_stmt>.base BaseProcessor<class_stmt>AwsAcmMangingProcessor(BaseProcessor)<block_start>'''
processors:
awsacm:
class: octodns.processor.acme.AwsAcmMangingProcessor
...
zones:
something.com.:
...
processors:
- awsacm
...
'''<line_sep>log=getLogger('AwsAcmMangingProcessor')<def_stmt>_ignore_awsacm_cnames self zone<block_start><for_stmt>r zone.records<block_start><if_stmt>r._type<eq>'CNAME'<and>r.name.startswith('_')<and>r.value.endswith('.acm-validations.aws.')<block_start>self.log.info('_process: ignoring %s' r.fqdn)<line_sep>zone.remove_record(r)<block_end><block_end><return>zone<block_end><def_stmt>process_source_zone self desired *args **kwargs<block_start><return>self._ignore_awsacm_cnames(desired)<block_end><def_stmt>process_target_zone self existing *args **kwargs<block_start><return>self._ignore_awsacm_cnames(existing)<block_end><block_end> |
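The processor above drops CNAMEs whose name starts with an underscore and whose value ends in .acm-validations.aws., in both the desired and existing zones. A tiny stand-alone version of that predicate, with made-up record data:

# Stand-alone filtering predicate; the sample records are invented.
def is_acm_validation_cname(rtype, name, value):
    return (
        rtype == "CNAME"
        and name.startswith("_")
        and value.endswith(".acm-validations.aws.")
    )

assert is_acm_validation_cname("CNAME", "_x1", "_abc.acm-validations.aws.")
assert not is_acm_validation_cname("CNAME", "www", "other.example.com.")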