import datetime
from datetime import datetime
from executor.executor import dispatch_web_vuln, dispatch_service_vuln, dispatch_statistics
from model.vuln import Statistics, WebVuln, WebParam, WebParamPosition, WebRequest, WebResponse, ServiceVuln


def process_web_vuln(instance, data):
    """Convert a web vulnerability JSON payload into the corresponding models."""
    detail = data["detail"]
    p = detail["param"]
    if p:
        param = WebParam(key=p["key"], value=p["value"], position=WebParamPosition(p["position"]))
    else:
        param = None
    request = []
    response = []
    extra = {}
    for i in range(0, 10):
        req_key = f"request{i}" if i else "request"
        resp_key = f"response{i}" if i else "response"
        req = detail.get(req_key)
        resp = detail.get(resp_key)
        if req == "" or resp == "":
            continue
        if req is None or resp is None:
            break
        request.append(WebRequest(raw=req))
        response.append(WebResponse(raw=resp))
    # Any other fields may be custom data, so collect them separately.
    not_extra_key = ["request", "response", "param", "payload", "url"]
    for k, v in detail.items():
        for item in not_extra_key:
            if item in k:
                break
        else:
            extra[k] = v
    vuln = WebVuln(
        create_time=datetime.fromtimestamp(data["create_time"] / 1000),
        plugin=data["plugin"],
        vuln_class=data["vuln_class"],
        url=data["target"]["url"],
        param=param,
        request=request,
        response=response,
        extra=extra,
        raw_json=data,
    )
    dispatch_web_vuln(instance, vuln)


def process_statistics(instance, data):
    """Convert the statistics JSON payload into the corresponding model."""
    s = Statistics(
        num_found_urls=data["num_found_urls"],
        num_scanned_urls=data["num_scanned_urls"],
        num_sent_http_requests=data["num_sent_http_requests"],
        average_response_time=data["average_response_time"],
        ratio_failed_http_requests=data["ratio_failed_http_requests"],
        ratio_progress=data["ratio_progress"],
        raw_json=data,
    )
    dispatch_statistics(instance, s)


def process_host_vuln(instance, data):
    """Convert a service vulnerability JSON payload into the corresponding model."""
    detail = data["detail"]
    extra = {}
    not_extra_key = ["host", "port"]
    for k, v in detail.items():
        for item in not_extra_key:
            if item in k:
                break
        else:
            extra[k] = v
    vuln = ServiceVuln(
        create_time=datetime.fromtimestamp(data["create_time"] / 1000),
        plugin=data["plugin"],
        vuln_class=data["vuln_class"],
        host=detail["host"],
        port=detail["port"],
        extra=extra,
        raw_json=data,
    )
    dispatch_service_vuln(instance, vuln)
|
# Copyright (c) 2012-2016 Seafile Ltd.
<import_stmt>logging<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>rest_framework.permissions IsAdminUser<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.views APIView<import_from_stmt>rest_framework status<import_from_stmt>seaserv seafile_api<import_from_stmt>seahub.api2.endpoints.utils check_time_period_valid get_log_events_by_type_and_time<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.permissions IsProVersion<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seahub.api2.endpoints.utils get_user_name_dict get_user_contact_email_dict<import_from_stmt>seahub.utils.timeutils datetime_to_isoformat_timestr<line_sep>logger=logging.getLogger(__name__)<class_stmt>FileAudit(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAdminUser IsProVersion)<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request# check the date format, should be like '2015-10-10'
<block_start>start=request.GET.get('start' <none>)<line_sep>end=request.GET.get('end' <none>)<if_stmt><not>check_time_period_valid(start end)<block_start>error_msg='start or end date invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end><try_stmt><block_start>events=get_log_events_by_type_and_time('file_audit' start end)<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep>error_msg='Internal Server Error'<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR error_msg)<block_end>result=[]<if_stmt>events# get name/contact_email dict for events user/repo_owner
<block_start>ev_user_list=[]<line_sep>ev_repo_owner_list=[]<for_stmt>ev events<block_start>repo_id=ev.repo_id<line_sep>repo=seafile_api.get_repo(repo_id)<if_stmt>repo<block_start>ev.repo_name=repo.name<line_sep>ev.repo_owner=seafile_api.get_repo_owner(repo_id)<or>seafile_api.get_org_repo_owner(repo_id)<block_end><else_stmt><block_start>ev.repo_name=''<line_sep>ev.repo_owner=''<block_end>ev_user_list.append(ev.user)<line_sep>ev_repo_owner_list.append(ev.repo_owner)<block_end>ev_user_name_dict=get_user_name_dict(ev_user_list)<line_sep>ev_user_contact_email_dict=get_user_contact_email_dict(ev_user_list)<line_sep>ev_repo_owner_name_dict=get_user_name_dict(ev_repo_owner_list)<line_sep>ev_repo_owner_contact_email_dict=get_user_contact_email_dict(ev_repo_owner_list)<for_stmt>ev events<block_start>result.append({'repo_id':ev.repo_id 'repo_name':ev.repo_name 'repo_owner_email':ev.repo_owner 'repo_owner_name':ev_repo_owner_name_dict[ev.repo_owner] 'repo_owner_contact_email':ev_repo_owner_contact_email_dict[ev.repo_owner] 'time':datetime_to_isoformat_timestr(ev.timestamp) 'ip':ev.ip 'file_path':ev.file_path 'etype':ev.etype 'user_email':ev.user 'user_name':ev_user_name_dict[ev.user] 'user_contact_email':ev_user_contact_email_dict[ev.user] })<block_end><block_end><return>Response(result)<block_end><block_end>
|
# This sample tests bidirectional inference when the
# type derives from the expected type and both are
# generic.
from typing import Mapping, Optional, Union

v0: Optional[Mapping[str, Union[int, str]]] = dict([("test1", 1), ("test2", 2)])

v1: Optional[Mapping[str, float]] = dict([("test1", 1), ("test2", 2)])

# This should generate an error because of a type mismatch.
v2: Mapping[str, str] = dict([("test1", 1), ("test2", 2)])
|
class BatchTaskCreateOutDTO(object):

    def __init__(self):
        self.taskID = None

    def getTaskID(self):
        return self.taskID

    def setTaskID(self, taskID):
        self.taskID = taskID
|
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.windows.windows_enable_new_adapter_disrupter import WindowsEnableNewAdapterDisrupter


class TestWindowsPacketCaptureDisruptEnableNewAdapter(LocalPacketCaptureTestCaseWithDisrupter):

    '''Summary:
Tests whether traffic leaving the user's device leaks outside of the VPN tunnel when a higher
priority network adapter becomes active after connecting.
Details:
The test first identifies the highest priority adapter and disables it. It then connects to the
VPN and re-enables that adapter. The test looks for leaking traffic once the interface has been
re-enabled.
Discussion:
There are several ways in which an adapter could become active after connect:
* The adapter is "enabled" via Network Connections (in Control Panel)
* The adapter is enabled but there's no connectivity, e.g. the Ethernet cable is unplugged or
Wi-Fi isn't connected to a Wi-Fi network. We refer to this situation as the adapter having
"no network".
* The adapter never existed in the first place and is created after connect.
This test uses the first method to disable/re-enable the adapter to test for leaks. The other
two scenarios are valid test cases and should also be implemented.
Weaknesses:
Packet capture tests can be noisy. Traffic can be detected as a leak but in actual fact may not
be. For example, traffic might go to a server owned by the VPN provider to re-establish
connections. In general, this test is best used for manually exploring leaks rather than for
automation.
Scenarios:
Requires two active adapters.
TODO:
Add tests for inactive and newly created adapters.
    '''

    def __init__(self, devices, parameters):
        super().__init__(WindowsEnableNewAdapterDisrupter, devices, parameters)
|
import typing

from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import BasePermission
from rest_framework.response import Response


def create_view_with_permissions(*classes: typing.Type[BasePermission]) -> typing.Callable:
    @api_view()
    @permission_classes(classes)
    def view(*args: typing.Any) -> Response:
        return Response()

    return view
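# Hypothetical usage sketch (not part of the original module): wiring the factory into a
# Django urlconf. `IsAuthenticated` is only an illustrative permission class.
from django.urls import path
from rest_framework.permissions import IsAuthenticated

ping_view = create_view_with_permissions(IsAuthenticated)

urlpatterns = [
    path("ping/", ping_view),
]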
|
<import_stmt>toolshed<import_stmt>collections<import_stmt>hashlib<line_sep>slivar_found=collections.defaultdict(list)<line_sep># first file is tsv from slivar
<for_stmt>d toolshed.reader(1)<block_start>slivar_found[d["sample_id"]].append(d["chr:pos:ref:alt"])<block_end>shared_affected=0<line_sep>shared_affected_solved=0<line_sep>exact_match=0<line_sep>sv_del=0<line_sep>oocyte=0<line_sep>indel_plus_sv_comphet=0<line_sep># 2nd file is participant_details.tsv
<for_stmt>d toolshed.reader(2)<block_start>sample_id=d["entity:participant_details_id"]<if_stmt>sample_id<not><in>slivar_found<block_start><continue><block_end><if_stmt>d["07_affected_status"]<ne>"Affected"<block_start><continue><block_end>shared_affected<augadd>1<line_sep>key="chr%s:%s:%s:%s"%(d["13_Chrom-1"] d["14_Pos-1"] d["15_Ref-1"] d["16_Alt-1"])<if_stmt>key<eq>"chr:::"<block_start><continue><block_end>shared_affected_solved<augadd>1<if_stmt>key<in>slivar_found[sample_id]<block_start>print("OK" sample_id key)<line_sep>exact_match<augadd>1<block_end><else_stmt><block_start>sha=hashlib.sha256(sample_id.encode()).hexdigest()<line_sep>#print("WTF", sample_id, key)
<if_stmt>key.endswith("del")<block_start>sv_del<augadd>1<block_end><elif_stmt>sha<in>("c1b669b32e2b899a15bcd8e5d3e2cc9f5eb457a1b8a1c27fce2ab8f26750c050" "8145446cdae4964156aefbb0eb1ab03f2866905de14942cffedc48b782de5086")<block_start>oocyte<augadd>1<block_end><elif_stmt>sha<in>("2b2f722dcb68c22c654c4cc2a9d8db8bda08a88461d5d5d7d89c985ba726eb62" "c52f9645ec80ce4c0c712bb4d8fad246e864b04d53ba94e9a29e3aac15f1985c" )<block_start>indel_plus_sv_comphet<augadd>1<block_end><elif_stmt>sha<eq>"6503b96da638ddab193fa7cbc1e5c20d626f5d2dda7dabe74b09ed4d3e2a677f"<block_start>print("mom:0/0 dad:0/1 kid:1/1 (because of sv deletion but other filters passed)")<block_end><elif_stmt>sha<eq>"8d853c417e5d02c5362e0ddce3666689f2feb8672e2293ff9483d4bd6d7ead42"<block_start>print(sample_id key)<line_sep>print("X. ref: CACCCTCCACGAT")<line_sep>print("X. reported by RGP: pos:802 var:TCCAC/A")<line_sep>print("X. found by our pipelines:")<line_sep>print("X. pos:802 CCCT/C")<line_sep>print("X. pos:808 AC/A")<block_end><else_stmt><block_start>print("BAD" sample_id key)<line_sep>1/0<block_end><block_end><block_end>print("shared_affected" shared_affected)<line_sep>print("shared_affected_solved: " shared_affected_solved)<line_sep>print("exact_match:" exact_match)<line_sep>print("SV deletion (not sought here):" sv_del)<line_sep>print("autosome het 2 girls shared with dad" oocyte)<line_sep>print("comphet missed because 1 side was deletion:" indel_plus_sv_comphet)<line_sep>"""
entity:participant_details_id 01_project_id 02_family_id 03_Individual_ID 06_sex 07_affected_status 08_phenotype_description 09_hpo_present 29_Date-Uploaded 04_paternal_id 05_maternal_id 11_Gene-1 12_Zygosity-1 13_Chrom-1 14_Pos-1 15_Ref-1 16_Alt-1 17_hgvsc-1 18_hgvsp-1 19_Transcript-1 10_hpo_absent 20_Gene-2 21_Zygosity-2 22_Chrom-2 23_Pos-2 24_Ref-2 25_Alt-2 26_hgvsc-2 27_hgvsp-2 28_Transcript-2 31_Notes 30_Other_seq_data
RGP_1003_3 Rare Genomes Project_Genomes RGP_1003 RGP_1003_3 Male Affected Limb-girdle muscular dystrophy HP:0003236 (Elevated serum creatine kinase)|HP:0012378 (Fatigue)|HP:0003325 (Limb-girdle muscle weakness)|HP:0003701 (Proximal muscle weakness) 3/1/2019
RGP_1004_3 Rare Genomes Project_Genomes RGP_1004 RGP_1004_3 Female Affected Limb-girdle muscular dystrophy HP:0012432 (Chronic fatigue)|HP:0006785 (Limb-girdle muscular dystrophy)|HP:0001324 (Muscle weakness)|HP:0003202 (Skeletal muscle atrophy) 3/1/2019
RGP_1004_4 Rare Genomes Project_Genomes RGP_1004 RGP_1004_4 Female Affected Limb-girdle muscular dystrophy HP:0006785 (Limb-girdle muscular dystrophy) 3/1/2019
RGP_1004_5 Rare Genomes Project_Genomes RGP_1004 RGP_1004_5 Female Affected Limb-girdle muscular dystrophy HP:0006785 (Limb-girdle muscular dystrophy) 3/1/2019
RGP_1006_3 Rare Genomes Project_Genomes RGP_1006 RGP_1006_3 Male Affected Myopathy HP:0002355 (Difficulty walking)|HP:0003473 (Fatigable weakness)|HP:0002359 (Frequent falls)|HP:0030237 (Hand muscle weakness)|HP:0007340 (Lower limb muscle weakness)|HP:0001324 (Muscle weakness)|HP:0003484 (Upper limb muscle weakness) 3/1/2019
RGP_1012_1 Rare Genomes Project_Genomes RGP_1012 RGP_1012_1 Female Unaffected Overgrowth; autism 3/1/2019
RGP_1012_2 Rare Genomes Project_Genomes RGP_1012 RGP_1012_2 Male Unaffected Overgrowth; autism 8/16/2019
RGP_1012_3 Rare Genomes Project_Genomes RGP_1012 RGP_1012_3 Male Affected Overgrowth; autism HP:0000729 (Autistic behavior)|HP:0001548 (Overgrowth) 3/1/2019 RGP_1012_2 RGP_1012_1
RGP_1013_3 Rare Genomes Project_Genomes RGP_1013 RGP_1013_3 Male Affected Myopathy HP:0003198 (Myopathy) 3/1/2019
"""<line_sep>
|
import collections
import logging

import mujoco_py.cymj as cymj

logger = logging.getLogger(__name__)


class MujocoErrorException(Exception):
    """ Exception raised when mujoco error is called. """
    pass


def error_callback(message):
    """ Mujoco error callback """
    message = message.decode()
    full_message = f"MUJOCO ERROR: {message}"
    logger.error(full_message)
    raise MujocoErrorException(full_message)


# Set it once for all the processes
cymj.set_error_callback(error_callback)


class MjWarningBuffer:
    """
    Buffering MuJoCo warnings.

    That way they don't cause an exception being thrown which crashes the process,
    but at the same time we store them in memory and can process them later.

    One can potentially specify buffer capacity if one wants to use a circular buffer.
    """

    def __init__(self, maxlen=None):
        self.maxlen = maxlen
        self._buffer = collections.deque(maxlen=self.maxlen)
        self._prev_user_callback = None

    def _intercept_warning(self, warn_bytes):
        """ Intercept a warning """
        warn = warn_bytes.decode()  # Convert bytes to string
        logger.warning("MUJOCO WARNING: %s", str(warn))
        self._buffer.append(warn)

    @property
    def warnings(self):
        """ Return a list of warnings to the user """
        return list(self._buffer)

    def enter(self):
        """ Enable collecting warnings """
        if self._prev_user_callback is None:
            self._prev_user_callback = cymj.get_warning_callback()
        cymj.set_warning_callback(self._intercept_warning)

    def clear(self):
        """ Reset warning buffer """
        self._buffer.clear()

    def exit(self):
        """ Stop collecting warnings """
        if self._prev_user_callback is not None:
            cymj.set_warning_callback(self._prev_user_callback)
            self._prev_user_callback = None

    def __enter__(self):
        """ Enter - context manager magic method """
        self.enter()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """ Exit - context manager magic method """
        self.exit()

    def __repr__(self):
        """ Text representation """
        return "<{} warnings:{}>".format(self.__class__.__name__, len(self.warnings))
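# Usage sketch (assumes mujoco_py is installed; the simulation-stepping call is illustrative only):
# warnings raised inside the `with` block are buffered instead of reaching the default callback.
buffer = MjWarningBuffer(maxlen=100)
with buffer:
    pass  # step the simulation here, e.g. sim.step()
print(buffer.warnings)  # list of warning strings captured while the buffer was active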
|
# model
model = dict(type='ResNet', depth=18, num_classes=10, maxpool=False)
loss = dict(type='CrossEntropyLoss')

# dataset
root = '/path/to/your/dataset'
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
batch_size = 512
num_workers = 4
data = dict(
    train=dict(
        ds_dict=dict(
            type='CIFAR10',
            root=root,
            train=True,
        ),
        trans_dict=dict(type='cifar_linear', mean=mean, std=std),
    ),
    test=dict(
        ds_dict=dict(
            type='CIFAR10',
            root=root,
            train=False,
        ),
        trans_dict=dict(type='cifar_test', mean=mean, std=std),
    ),
)

# training optimizer & scheduler
epochs = 100
lr = 10.0
optimizer = dict(type='SGD', lr=lr, momentum=0.9, weight_decay=0)
lr_cfg = dict(  # passed to adjust_learning_rate()
    type='MultiStep',
    steps=epochs,
    lr=lr,
    decay_rate=0.1,
    decay_steps=[60, 80],
)

# log, load & save
log_interval = 20
work_dir = None
resume = None
load = None
port = 10001
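# Hypothetical sketch of the adjust_learning_rate() helper mentioned in the comment above;
# the real implementation lives elsewhere in this repo. This only illustrates how the
# MultiStep fields (lr, decay_rate, decay_steps) could be consumed.
def adjust_learning_rate(cfg, optimizer, epoch):
    lr = cfg['lr']
    if cfg['type'] == 'MultiStep':
        for milestone in cfg['decay_steps']:
            if epoch >= milestone:
                lr *= cfg['decay_rate']
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr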
|
import datetime
from typing import Dict, Optional

from pydantic import BaseModel


class ProjectBase(BaseModel):
    project_name: str
    description: Optional[str]


class ProjectCreate(ProjectBase):
    pass


class Project(ProjectBase):
    project_id: int
    created_datetime: datetime.datetime

    class Config:
        orm_mode = True


class ModelBase(BaseModel):
    project_id: str
    model_name: str
    description: Optional[str]


class ModelCreate(ModelBase):
    pass


class Model(ModelBase):
    model_id: int
    created_datetime: datetime.datetime

    class Config:
        orm_mode = True


class ExperimentBase(BaseModel):
    model_id: str
    model_version_id: str
    parameters: Optional[Dict]
    training_dataset: Optional[str]
    validation_dataset: Optional[str]
    test_dataset: Optional[str]
    evaluations: Optional[Dict]
    artifact_file_paths: Optional[Dict]


class ExperimentCreate(ExperimentBase):
    pass


class ExperimentEvaluations(BaseModel):
    evaluations: Dict


class ExperimentArtifactFilePaths(BaseModel):
    artifact_file_paths: Dict


class Experiment(ExperimentBase):
    experiment_id: int
    created_datetime: datetime.datetime

    class Config:
        orm_mode = True
|
import os
import json

import polymetis

__version__ = ""

# Conda installed: Get version of conda pkg (assigned $GIT_DESCRIBE_NUMBER during build)
if "CONDA_PREFIX" in os.environ and os.environ["CONDA_PREFIX"] in polymetis.__file__:
    # Search conda pkgs for polymetis & extract version number
    stream = os.popen("conda list | grep polymetis")
    for line in stream:
        info_fields = [s for s in line.strip("\n").split(" ") if len(s) > 0]
        if info_fields[0] == "polymetis":  # pkg name == polymetis
            __version__ = info_fields[1]
            break

# Built locally: Retrieve git tag description of Polymetis source code
else:
    # Navigate to polymetis pkg dir, which should be within the git repo
    original_cwd = os.getcwd()
    os.chdir(os.path.dirname(polymetis.__file__))

    # Git describe output
    stream = os.popen("git describe --tags")
    version_string = [line for line in stream][0]

    # Modify to same format as conda env variable GIT_DESCRIBE_NUMBER
    version_items = version_string.strip("\n").split("-")
    __version__ = f"{version_items[-2]}_{version_items[-1]}"

    # Reset cwd
    os.chdir(original_cwd)

if not __version__:
    raise Exception("Cannot locate Polymetis version!")
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return a list node
    def detectCycle(self, head):
        if head is None or head.next is None:
            return None

        slow = head
        fast = head
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if fast == slow:
                break

        # No cycle
        if fast is None or fast.next is None:
            return None

        # Has a cycle, put `slow` back to head
        slow = head
        while True:
            if fast == slow:
                break
            slow = slow.next
            fast = fast.next
        return slow
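# Usage sketch (assumes the ListNode definition from the header comment): build
# 1 -> 2 -> 3 -> 4 with 4 linking back to 2, then recover the start of the cycle.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


nodes = [ListNode(i) for i in (1, 2, 3, 4)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[1]  # close the cycle at the node with value 2

print(Solution().detectCycle(nodes[0]).val)  # prints 2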
|
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.publishing.site_page import SitePage
from tests import test_client_credentials, test_team_site_url

ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)

new_page = ctx.site_pages.pages.add()
new_page.save_draft(title="Latest News 456")
new_page.publish().execute_query()

pages = ctx.site_pages.pages.get().execute_query()
for page in pages:  # type: SitePage
    print(page.file_name)
|
from . import local

import unittest


class LocalDestinationTestCase(unittest.TestCase):

    def test_out_dir(self):
        config = local.Config(out_dir='~/test/')
        destination = local.LocalDestination(config)
        # Weakly verify out_dir is expanded.
        self.assertNotIn('~', destination.out_dir)


if __name__ == '__main__':
    unittest.main()
|
<import_from_stmt>starcluster.clustersetup DefaultClusterSetup<import_from_stmt>starcluster.logger log<class_stmt>PySPHInstallerBase(DefaultClusterSetup)<block_start>PYSPH_PROFILE="/etc/profile.d/pysph.sh"<line_sep>PYSPH_HOSTS="/home/pysph/PYSPH_HOSTS"<line_sep>PYSPH_USER="pysph"<def_stmt>_create_env self master<block_start>master.ssh.execute(r"""
echo $HOME
if [ ! -d ~/pysph_env ]; then
mkdir ~/pysph_env &&
virtualenv --system-site-packages ~/pysph_env;
fi
""")<block_end><def_stmt>_install_pysph self master<block_start>commands=r"""
. ~/pysph_env/bin/activate
if ! python -c "import pysph" &> /dev/null; then
export USE_TRILINOS=1
export ZOLTAN_INCLUDE=/usr/include/trilinos
export ZOLTAN_LIBRARY=/usr/lib/x86_64-linux-gnu
cd ~ &&
git clone https://github.com/pypr/pysph &&
cd pysph &&
python setup.py install
fi
"""<line_sep>master.ssh.execute(commands)<block_end><def_stmt>_configure_profile self node<block_start>pysph_profile=node.ssh.remote_file(self.PYSPH_PROFILE 'w')<line_sep>pysph_profile.write("test -e ~/.bashrc && . ~/.bashrc")<line_sep>pysph_profile.close()<block_end><block_end><class_stmt>PySPHInstaller(PySPHInstallerBase)<block_start><def_stmt>run self nodes master user user_shell volumes<block_start>aliases=[n.alias<for>n nodes]<line_sep>log.info("Configuring PYSPH Profile")<for_stmt>node nodes<block_start>self.pool.simple_job(self._configure_profile (node ))<block_end>self.pool.wait(len(nodes))<line_sep>master.ssh.switch_user(self.PYSPH_USER)<line_sep>log.info("Creating virtual environment")<line_sep>self._create_env(master)<line_sep>master.ssh.execute("echo '. ~/pysph_env/bin/activate' > ~/.bashrc")<line_sep>log.info("Installing PySPH")<line_sep>self._install_pysph(master)<line_sep>log.info("Adding nodes to PYSPH hosts file")<line_sep>pysph_hosts=master.ssh.remote_file(self.PYSPH_HOSTS 'w')<line_sep>pysph_hosts.write('\n'.join(aliases)+'\n')<block_end><def_stmt>on_add_node self new_node nodes master user user_shell volumes<block_start>log.info("Configuring PYSPH Profile")<line_sep>self._configure_profile(new_node)<line_sep>master.ssh.switch_user(self.PYSPH_USER)<line_sep>log.info("Adding %s to PYSPH hosts file"%new_node.alias)<line_sep>pysph_hosts=master.ssh.remote_file(self.PYSPH_HOSTS 'a')<line_sep>pysph_hosts.write(new_node.alias+'\n')<line_sep>pysph_hosts.close()<block_end><def_stmt>on_remove_node self remove_node nodes master user user_shell volumes<block_start>master.switch_user(self.PYSPH_USER)<line_sep>log.info("Removing %s from PYSPH hosts file"%remove_node.alias)<line_sep>master.ssh.remove_lines_from_file(self.PYSPH_HOSTS remove_node.alias)<block_end><block_end>
|
# Copyright 2020 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>pytest<import_stmt>re<import_from_stmt>endtoend_tests.helpers.forseti_cli ForsetiCli<class_stmt>TestExplainerWhyDenied<block_start>"""Explainer why_denied tests."""<line_sep>@[email protected]@pytest.mark.explainer<def_stmt>test_why_denied self forseti_cli:ForsetiCli forseti_model_readonly forseti_server_service_account:str project_id:str<block_start>"""Test why_denied for why the Forseti SA doesn't have the
storage.buckets.delete permission.
Args:
forseti_cli (ForsetiCli): Instance of the forseti cli helper
forseti_model_readonly (Tuple): Model name & process result
forseti_server_service_account (str): Server service account email
project_id (str): Project id being scanned
"""<line_sep># Arrange
model_name,_,_=forseti_model_readonly<line_sep>forseti_cli.model_use(model_name=model_name)<line_sep># Act
result=forseti_cli.explainer_why_denied(forseti_server_service_account f'project/{project_id}' permissions=['storage.buckets.delete'])<line_sep># Assert
<assert_stmt>result.returncode<eq>0 f'Forseti stdout: {str(result.stdout)}'<assert_stmt>re.search(r'roles\/cloudmigration.inframanager' str(result.stdout))<assert_stmt>re.search(r'roles\/owner' str(result.stdout))<assert_stmt>re.search(r'roles\/storage.admin' str(result.stdout))<block_end><block_end>
|
from setuptools import setup, find_packages

with open('requirements.txt', encoding='utf-8') as f:
    required = f.read().splitlines()

with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='dropblock',
    version='0.3.0',
    packages=find_packages(),
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=required,
    url='https://github.com/miguelvr/dropblock',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Implementation of DropBlock: A regularization method '
                'for convolutional networks in PyTorch.',
)
|
<import_from_stmt>build_pack_utils utils<line_sep>extn=utils.load_extension('lib/additional_commands')<class_stmt>TestAdditionalCommandsExtension(object)<block_start><def_stmt>test_no_additional_commands self<block_start>ctx={}<line_sep>tmp=extn.preprocess_commands(ctx)<assert_stmt>tmp<eq>[]<block_end><def_stmt>test_one_command_as_string self<block_start>ctx={'ADDITIONAL_PREPROCESS_CMDS':'env'}<line_sep>tmp=extn.preprocess_commands(ctx)<assert_stmt>len(tmp)<eq>1<assert_stmt>tmp[0]<eq>['env']<block_end><def_stmt>test_one_additional_command self<block_start>ctx={'ADDITIONAL_PREPROCESS_CMDS':['env']}<line_sep>tmp=extn.preprocess_commands(ctx)<assert_stmt>len(tmp)<eq>1<assert_stmt>tmp[0]<eq>['env']<block_end><def_stmt>test_two_additional_commands self<block_start>ctx={'ADDITIONAL_PREPROCESS_CMDS':['env' 'run_something']}<line_sep>tmp=extn.preprocess_commands(ctx)<assert_stmt>len(tmp)<eq>2<assert_stmt>tmp[0]<eq>['env']<assert_stmt>tmp[1]<eq>['run_something']<block_end><def_stmt>test_command_with_arguments_as_string self<block_start>ctx={'ADDITIONAL_PREPROCESS_CMDS':['echo "Hello World"']}<line_sep>tmp=extn.preprocess_commands(ctx)<assert_stmt>len(tmp)<eq>1<assert_stmt>tmp[0]<eq>['echo "Hello World"']<block_end><def_stmt>test_command_with_arguments_as_list self<block_start>ctx={'ADDITIONAL_PREPROCESS_CMDS':[['echo' '"Hello World!"']]}<line_sep>tmp=extn.preprocess_commands(ctx)<assert_stmt>len(tmp)<eq>1<assert_stmt>len(tmp[0])<eq>2<assert_stmt>tmp[0][0]<eq>'echo'<assert_stmt>tmp[0][1]<eq>'"Hello World!"'<block_end><block_end>
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>unittest<import_from_stmt>mock patch<import_from_stmt>click.testing CliRunner<import_from_stmt>slo_generator.cli main<import_from_stmt>slo_generator.utils load_config<import_from_stmt>.test_stubs CTX mock_sd<line_sep>cwd=os.path.dirname(os.path.abspath(__file__))<line_sep>root=os.path.dirname(os.path.dirname(cwd))<class_stmt>TestCLI(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><for_stmt>key,value CTX.items()<block_start>os.environ[key]=value<block_end>slo_config=f'{root}/samples/cloud_monitoring/slo_gae_app_availability.yaml'# noqa: E501
config=f'{root}/samples/config.yaml'<line_sep>self.slo_config=slo_config<line_sep>self.slo_metadata_name=load_config(slo_config ctx=CTX)['metadata']['name']<line_sep>self.config=config<line_sep>self.cli=CliRunner()<block_end>@patch('google.api_core.grpc_helpers.create_channel' return_value=mock_sd(8))<def_stmt>test_cli_compute self mock<block_start>args=['compute' '-f' self.slo_config '-c' self.config]<line_sep>result=self.cli.invoke(main args)<line_sep>self.assertEqual(result.exit_code 0)<block_end>@patch('google.api_core.grpc_helpers.create_channel' return_value=mock_sd(40))<def_stmt>test_cli_compute_folder self mock<block_start>args=['compute' '-f' f'{root}/samples/cloud_monitoring' '-c' self.config]<line_sep>result=self.cli.invoke(main args)<line_sep>self.assertEqual(result.exit_code 0)<block_end><def_stmt>test_cli_compute_no_config self<block_start>args=['compute' '-f' f'{root}/samples' '-c' f'{root}/samples/config.yaml']<line_sep>result=self.cli.invoke(main args)<line_sep>self.assertEqual(result.exit_code 1)<block_end><def_stmt>test_cli_api self# TODO: Write test
<block_start><pass><block_end><def_stmt>test_cli_migrate self# TODO: Write test
<block_start><pass><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
<import_from_stmt>ebcli.containers.envvarcollector EnvvarCollector<import_from_stmt>mock patch<import_from_stmt>unittest TestCase<class_stmt>TestEnvvarCollector(TestCase)<block_start><def_stmt>test_empty_environment self<block_start>self.assertDictEqual({} EnvvarCollector().map)<line_sep>self.assertSetEqual(set() EnvvarCollector().to_remove)<block_end><def_stmt>test_merge_non_overlapping_envs self<block_start>env0=EnvvarCollector({'a':'0' 'b':'1'})<line_sep>env1=EnvvarCollector({'c':'3' 'd':'4'})<line_sep>expected_envvars={'a':'0' 'b':'1' 'c':'3' 'd':'4'}<line_sep>self.assertDictEqual(expected_envvars env0.merge(env1).filtered().map)<line_sep>self.assertDictEqual(expected_envvars env1.merge(env0).filtered().map)<line_sep>self.assertSetEqual(set() env0.merge(env1).to_remove)<line_sep>self.assertSetEqual(set() env1.merge(env0).to_remove)<block_end><def_stmt>test_merge_overlapping_and_vars_to_remove self<block_start>env0=EnvvarCollector({'a':'0' 'd':'1'})<line_sep>env1=EnvvarCollector({'a':'5' 'd':'5'} {'d' 'c'})<line_sep>self.assertEqual({'a':'5'} env0.merge(env1).filtered().map)<line_sep>self.assertEqual({'a':'0'} env1.merge(env0).filtered().map)<line_sep>self.assertSetEqual({'d' 'c'} env0.merge(env1).to_remove)<line_sep>self.assertSetEqual({'d' 'c'} env1.merge(env0).to_remove)<block_end><def_stmt>test_fitered_removed_all_envvars self<block_start>env=EnvvarCollector({'a':'5' 'd':'5'} {'a' 'd'})<line_sep>result=env.filtered()<line_sep>self.assertDictEqual({} result.map)<line_sep>self.assertSetEqual(set() result.to_remove)<block_end><def_stmt>test_fitered_removed_some_envvars self<block_start>env=EnvvarCollector({'a':'5' 'd':'5'} {'a'})<line_sep>result=env.filtered()<line_sep>self.assertDictEqual({'d':'5'} result.map)<line_sep>self.assertSetEqual(set() result.to_remove)<block_end><def_stmt>test_fitered_removed_no_envvars self<block_start>envvars={'a':'5' 'd':'5'}<line_sep>env=EnvvarCollector(envvars)<line_sep>result=env.filtered()<line_sep>self.assertDictEqual(envvars result.map)<line_sep>self.assertSetEqual(set() result.to_remove)<block_end><block_end>
|
# coding=utf-8
import gzip

import paddle.v2 as paddle

from network_conf import Model
from reader import DataGenerator
from decoder import ctc_greedy_decoder
from utils import get_file_list, load_dict, load_reverse_dict


def infer_batch(inferer, test_batch, labels, reversed_char_dict):
    # Get the raw inference results
    infer_results = inferer.infer(input=test_batch)
    num_steps = len(infer_results) // len(test_batch)
    probs_split = [
        infer_results[i * num_steps:(i + 1) * num_steps]
        for i in xrange(0, len(test_batch))
    ]
    results = []
    # Best-path (greedy) decoding
    for i, probs in enumerate(probs_split):
        output_transcription = ctc_greedy_decoder(
            probs_seq=probs, vocabulary=reversed_char_dict)
        results.append(output_transcription)
    # Print the predictions
    for result, label in zip(results, labels):
        print("\nPredicted text: %s\nActual text: %s" % (result, label))


def infer(model_path, image_shape, label_dict_path, infer_file_list_path):
    infer_file_list = get_file_list(infer_file_list_path)
    # Load the label dictionary
    char_dict = load_dict(label_dict_path)
    # Load the reversed label dictionary
    reversed_char_dict = load_reverse_dict(label_dict_path)
    # Get the dictionary size
    dict_size = len(char_dict)
    # Build the data reader
    data_generator = DataGenerator(char_dict=char_dict, image_shape=image_shape)

    # Initialize PaddlePaddle
    paddle.init(use_gpu=True, trainer_count=2)
    # Load the trained parameters
    parameters = paddle.parameters.Parameters.from_tar(gzip.open(model_path))
    # Build the network model
    model = Model(dict_size, image_shape, is_infer=True)
    # Create the inferer
    inferer = paddle.inference.Inference(output_layer=model.log_probs, parameters=parameters)

    # Run inference
    test_batch = []
    labels = []
    for i, (image, label) in enumerate(data_generator.infer_reader(infer_file_list)()):
        test_batch.append([image])
        labels.append(label)
    infer_batch(inferer, test_batch, labels, reversed_char_dict)


if __name__ == "__main__":
    # Images to run inference on
    infer_file_list_path = '../data/test_data/Challenge2_Test_Task3_GT.txt'
    # Path to the model
    model_path = '../models/params_pass.tar.gz'
    # Image size
    image_shape = (173, 46)
    # Path to the label dictionary
    label_dict_path = '../data/label_dict.txt'
    # Run inference
    infer(model_path, image_shape, label_dict_path, infer_file_list_path)
|
<import_stmt>json<import_stmt>requests<line_sep>url='https://api.github.com/repos/OWASP/owasp-mstg/stats/contributors'<line_sep>headers={'Accept':'application/vnd.github.v3+json'}<line_sep>r=requests.get(url headers=headers)<line_sep>data=r.json()<line_sep>coAuthor="Author/Co-Authors: "<line_sep>topContributors="Top Contributors: "<line_sep>contributors="Contributors: "<line_sep>miniContributors="Mini Contributors: "<line_sep>additions=''<for_stmt>authors data[:]# print(authors['weeks'])
# print(authors['author']['login'])
# print(authors['weeks'])
<block_start>count=0<line_sep># count additions for each author
<for_stmt>allWeeks authors['weeks']<block_start>count<augadd>allWeeks['a']<block_end><if_stmt>(count<ge>2000)# author = "Co-Author: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
<block_start>coAuthor<augadd>authors['author']['login']+", "<block_end><elif_stmt>((count<ge>500)<and>(count<l>2000))# author = "Top Contributors: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
<block_start>topContributors<augadd>authors['author']['login']+", "<block_end><elif_stmt>((count<ge>50)<and>(count<l>500))# author = "Contributors: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
<block_start>contributors<augadd>authors['author']['login']+", "<block_end><elif_stmt>((count<ge>1)<and>(count<l>50))# author = "Mini Contributors: "+authors['author']['login']
# additions = author + " Additions:" + str(count)
# print(additions)
<block_start>miniContributors<augadd>authors['author']['login']+", "<block_end><block_end>print(coAuthor+"\n")<line_sep>print(topContributors+"\n")<line_sep>print(contributors+"\n")<line_sep>print(miniContributors+"\n")<line_sep>
|
#
# class TrainDataset(Dataset):
#
# def __init__(self, args, raw_datasets):
# # This tab processor is for table truncation and linearize.
# self.raw_datasets = raw_datasets
#
# def __getitem__(self, index) -> T_co:
# raw_data = self.raw_datasets[index]
#
# return raw_data.update({"struct_in": struct_in, "text_in": text_in, "seq_out": seq_out})
#
#
# class DevDataset(Dataset):
#
# def __init__(self, args, raw_datasets):
# # This tab processor is for table truncation and linearize.
# self.raw_datasets = raw_datasets
#
# def __getitem__(self, index):
# raw_data = self.raw_datasets[index]
#
# return raw_data.update({"struct_in": struct_in, "text_in": text_in, "seq_out": seq_out})
#
#
# class TestDataset(Dataset):
#
# def __init__(self, args, raw_datasets):
# # This tab processor is for table truncation and linearize.
# self.raw_datasets = raw_datasets
#
# def __getitem__(self, index):
# raw_data = self.raw_datasets[index]
#
# return raw_data.update({"struct_in": struct_in, "text_in": text_in, "seq_out": seq_out})
|
<import_stmt>pytest<import_from_stmt>common.mayastor container_mod mayastor_mod<import_from_stmt>common.nvme nvme_connect nvme_disconnect nvme_disconnect_all nvme_list_subsystems identify_namespace <import_stmt>uuid<import_stmt>mayastor_pb2<as>pb<import_stmt>os<line_sep>POOL_NAME="pool1"<line_sep>NEXUS_GUID="afebdeb9-ff44-1111-2222-254f810ba34a"<line_sep>@pytest.fixture<def_stmt>create_replicas mayastor_mod<block_start>ms0=mayastor_mod.get("ms0")<line_sep>ms1=mayastor_mod.get("ms1")<line_sep>replicas=[]<for_stmt>m (ms0 ms1)<block_start>p=m.pool_create(POOL_NAME "malloc:///disk0?size_mb=100")<assert_stmt>p.state<eq>pb.POOL_ONLINE<line_sep>r=m.replica_create(POOL_NAME str(uuid.uuid4()) 32<times>1024<times>1024)<line_sep>replicas.append(r.uri)<block_end><yield>replicas<for_stmt>m (ms0 ms1)<block_start><try_stmt><block_start>m.pool_destroy(POOL_NAME)<block_end><except_stmt>Exception<block_start><pass><block_end><block_end><block_end>@pytest.fixture<def_stmt>create_nexuses mayastor_mod create_replicas<block_start>uris=[]<line_sep>nvme_disconnect_all()<for_stmt>n ["ms2" "ms3"]<block_start>ms=mayastor_mod.get(n)<line_sep>ms.nexus_create(NEXUS_GUID 32<times>1024<times>1024 create_replicas)<line_sep>uri=ms.nexus_publish(NEXUS_GUID)<line_sep>uris.append(uri)<block_end><yield>uris<line_sep>nvme_disconnect_all()<for_stmt>n ["ms2" "ms3"]<block_start>ms=mayastor_mod.get(n)<line_sep>ms.nexus_destroy(NEXUS_GUID)<block_end><block_end><def_stmt>connect_multipath_nexuses uris<block_start>dev1=nvme_connect(uris[0])<line_sep>dev2=<none><try_stmt><block_start>dev2=nvme_connect(uris[1])<block_end><except_stmt>Exception# The first connect is allowed to fail due to controller ID collision.
<block_start><pass><block_end><if_stmt>dev2<is><none><block_start>dev2=nvme_connect(uris[1])<block_end><return>(dev1 dev2)<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_io_policy create_replicas create_nexuses mayastor_mod<block_start>devs=connect_multipath_nexuses(create_nexuses)<assert_stmt>devs[0]<eq>devs[1] "Paths are different for multipath nexus"<line_sep># Make sure all we see exactly 2 paths and all paths are 'live optimized'
device=devs[0]<line_sep>descr=nvme_list_subsystems(device)<line_sep>paths=descr["Subsystems"][0]["Paths"]<assert_stmt>len(paths)<eq>2 "Number of paths to Nexus mismatches"<for_stmt>p paths<block_start><assert_stmt>p["State"]<eq>"live"<assert_stmt>p["ANAState"]<eq>"optimized"<block_end># Make sure there are 2 virtual NVMe controllers for the namespace.
ns=os.path.basename(device)<for_stmt>i range(2)<block_start>cname=ns.replace("n1" "c%dn1"%i)<line_sep>cpath="/sys/block/%s"%cname<line_sep>l=os.readlink(cpath)<assert_stmt>l.startswith("../devices/virtual/nvme-fabrics/ctl/") "Path device is not a virtual controller"<block_end># Make sure virtual NVMe namespace exists for multipath nexus.
l=os.readlink("/sys/block/%s"%ns)<assert_stmt>l.startswith("../devices/virtual/nvme-subsystem/nvme-subsys") "No virtual NVMe subsystem exists for multipath Nexus"<line_sep># Make sure I/O policy is NUMA.
subsys=descr["Subsystems"][0]["Name"]<line_sep>pfile="/sys/class/nvme-subsystem/%s/iopolicy"%subsys<assert_stmt>os.path.isfile(pfile) "No iopolicy file exists"<with_stmt>open(pfile)<as>f<block_start>iopolicy=f.read().strip()<assert_stmt>iopolicy<eq>"numa" "I/O policy is not NUMA"<block_end># Make sure ANA state is reported properly for both nexuses.
<for_stmt>n ["ms2" "ms3"]<block_start>ms=mayastor_mod.get(n)<line_sep>nexuses=ms.nexus_list_v2()<assert_stmt>len(nexuses)<eq>1 "Number of nexuses mismatches"<assert_stmt>(nexuses[0].ana_state<eq>pb.NVME_ANA_OPTIMIZED_STATE) "ANA state of nexus mismatches"<block_end><block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_namespace_guid create_replicas create_nexuses mayastor_mod<block_start>uri=create_nexuses[0]<line_sep>device=nvme_connect(uri)<line_sep>ns=identify_namespace(device)<line_sep>nvme_disconnect(uri)<line_sep># Namespace's GUID must match Nexus GUID.
<assert_stmt>uuid.UUID(ns["nguid"])<eq>uuid.UUID(NEXUS_GUID) "Namespace NGID doesn't match Nexus GUID"<line_sep># Extended Unique Identifier must be zero.
<assert_stmt>ns["eui64"]<eq>"0000000000000000" "Namespace EUI64 is not zero"<block_end>
|
def foo(a, b=1, c=3):
    pass
|
<import_from_stmt>django.db.models.expressions RawSQL<import_from_stmt>corehq toggles<import_from_stmt>corehq.apps.accounting.utils domain_has_privilege<import_from_stmt>corehq.apps.domain.models Domain<import_from_stmt>corehq.apps.linked_domain.models DomainLink DomainLinkHistory<import_from_stmt>corehq.apps.linked_domain.util is_available_upstream_domain is_domain_available_to_link user_has_admin_access_in_all_domains <import_from_stmt>corehq.privileges RELEASE_MANAGEMENT<import_from_stmt>corehq.util.quickcache quickcache<line_sep>@quickcache(['domain'] timeout=60<times>60)<def_stmt>get_upstream_domain_link domain<block_start>"""
:returns: ``DomainLink`` object linking this domain to its upstream
or None if no link exists
"""<line_sep><return>DomainLink.objects.filter(linked_domain=domain).first()<block_end>@quickcache(['domain'] timeout=60<times>60)<def_stmt>is_active_upstream_domain domain<block_start><return>DomainLink.objects.filter(master_domain=domain).exists()<block_end>@quickcache(['domain'] timeout=60<times>60)<def_stmt>is_active_downstream_domain domain<block_start><return>DomainLink.objects.filter(linked_domain=domain).exists()<block_end>@quickcache(['domain'] timeout=60<times>60)<def_stmt>get_linked_domains domain include_deleted=<false><block_start>"""
:param domain:
:return: List of ``DomainLink`` objects for each domain linked to this one.
"""<line_sep>manager=DomainLink.all_objects<if>include_deleted<else>DomainLink.objects<line_sep><return>list(manager.filter(master_domain=domain).all())<block_end><def_stmt>get_actions_in_domain_link_history link<block_start><return>DomainLinkHistory.objects.filter(link=link).annotate(row_number=RawSQL('row_number() OVER (PARTITION BY model, model_detail ORDER BY date DESC)' []))<block_end><def_stmt>get_available_domains_to_link upstream_domain_name user billing_account=<none><block_start>"""
This supports both the old feature flagged version of linked projects and the GAed version
The GAed version is only available to enterprise customers and only usable by admins, but the feature flagged
version is available to anyone who can obtain access (the wild west)
:param upstream_domain_name: potential upstream domain candidate
:param user: user object
:param billing_account: optional parameter to limit available domains to within an enterprise account
:return: list of domain names available to link as downstream projects
"""<if_stmt>domain_has_privilege(upstream_domain_name RELEASE_MANAGEMENT)<block_start><return>get_available_domains_to_link_for_account(upstream_domain_name user billing_account)<block_end><elif_stmt>toggles.LINKED_DOMAINS.enabled(upstream_domain_name)<block_start><return>get_available_domains_to_link_for_user(upstream_domain_name user)<block_end><return>[]<block_end><def_stmt>get_available_domains_to_link_for_account upstream_domain_name user account<block_start>"""
Finds available domains to link based on domains associated with the provided account
"""<line_sep>domains=account.get_domains()<if>account<else>[]<line_sep><return>list({domain<for>domain domains<if>is_domain_available_to_link(upstream_domain_name domain user)})<block_end><def_stmt>get_available_domains_to_link_for_user upstream_domain_name user<block_start>"""
Finds available domains to link based on domains that the provided user is active in
"""<line_sep>domains=[d.name<for>d Domain.active_for_user(user)]<line_sep><return>list({domain<for>domain domains<if>is_domain_available_to_link(upstream_domain_name domain user should_enforce_admin=<false>)})<block_end><def_stmt>get_available_upstream_domains downstream_domain user billing_account=<none><block_start>"""
This supports both the old feature flagged version of linked projects and the GAed version
The GAed version is only available to enterprise customers and only usable by admins, but the feature flagged
version is available to anyone who can obtain access
:param downstream_domain: potential upstream domain candidate
:param user: user object
:param billing_account: optional parameter to limit available domains to within an enterprise account
:return: list of domain names available to link as downstream projects
"""<if_stmt>domain_has_privilege(downstream_domain RELEASE_MANAGEMENT)<block_start><return>get_available_upstream_domains_for_account(downstream_domain user billing_account)<block_end><elif_stmt>toggles.LINKED_DOMAINS.enabled(downstream_domain)<block_start><return>get_available_upstream_domains_for_user(downstream_domain user)<block_end><return>[]<block_end><def_stmt>get_available_upstream_domains_for_account downstream_domain user account<block_start>domains=account.get_domains()<if>account<else>[]<line_sep><return>list({d<for>d domains<if>is_available_upstream_domain(d downstream_domain user)})<block_end><def_stmt>get_available_upstream_domains_for_user domain_name user<block_start>domains=[d.name<for>d Domain.active_for_user(user)]<line_sep><return>list({domain<for>domain domains<if>is_available_upstream_domain(domain domain_name user should_enforce_admin=<false>)})<block_end><def_stmt>get_accessible_downstream_domains upstream_domain_name user<block_start>"""
Returns a list of domain names that actively linked downstream of the provided upstream domain
NOTE: if the RELEASE_MANAGEMENT privilege is enabled, ensure user has admin access
"""<line_sep>downstream_domains=[d.linked_domain<for>d get_linked_domains(upstream_domain_name)]<if_stmt>domain_has_privilege(upstream_domain_name RELEASE_MANAGEMENT)<block_start><return>[domain<for>domain downstream_domains<if>user_has_admin_access_in_all_domains(user [upstream_domain_name domain])]<block_end><return>downstream_domains<block_end>
|
a < b < c
x in y
x not in y
x is y
x is not y
x < y
x > y
x >= y
x <= y
x == y
x != y
|
# Copyright 2016 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>, <NAME>, <NAME>'

MESSAGE_CODE_ANNOUNCE = 500
MESSAGE_CODE_NOTIFY = 501
MESSAGE_CODE_NOTIFICATION = 502
MESSAGE_CODE_VALIDATION = 503
MESSAGE_CODE_PEER_REQUEST = 510
MESSAGE_CODE_PEER_RESPONSE = 511
MESSAGE_CODE_PEER_UPDATE = 512
MESSAGE_CODE_PEER_INIT = 513
MESSAGE_CODE_GOSSIP_MIN = 500
MESSAGE_CODE_GOSSIP_MAX = 520
|
<import_stmt>torch<import_stmt>losswise<class_stmt>AverageMeter(object)<block_start>"""Computes and stores the average and current value"""<def_stmt>__init__ self<block_start>self.reset()<block_end><def_stmt>reset self<block_start>self.val=0<line_sep>self.avg=0<line_sep>self.sum=0<line_sep>self.count=0<block_end><def_stmt>update self val n=1<block_start>self.val=val<line_sep>self.sum<augadd>val<times>n<line_sep>self.count<augadd>n<line_sep>self.avg=self.sum/self.count<block_end><block_end><class_stmt>Metric(object)<block_start><def_stmt>__init__ self<block_start>self.RMSELIs=AverageMeter()<line_sep>self.RMSELGs=AverageMeter()<line_sep>self.ABSRs=AverageMeter()<line_sep>self.SQRs=AverageMeter()<line_sep>self.DELTA=AverageMeter()<line_sep>self.DELTASQ=AverageMeter()<line_sep>self.DELTACU=AverageMeter()<line_sep>self.losses=AverageMeter()<block_end><def_stmt>update self loss RMSE_Linear RMSE_Log abs_relative sq_relative delta delta_sq delta_cu<block_start><if_stmt>loss<block_start>self.losses.update(loss)<block_end>self.RMSELIs.update(RMSE_Linear)<line_sep>self.RMSELGs.update(RMSE_Log)<line_sep>self.ABSRs.update(abs_relative)<line_sep>self.SQRs.update(sq_relative)<line_sep>self.DELTA.update(delta)<line_sep>self.DELTASQ.update(delta_sq)<line_sep>self.DELTACU.update(delta_cu)<block_end><def_stmt>get_info self<block_start><return>[self.losses.avg self.RMSELIs.avg self.RMSELGs.avg self.ABSRs.avg self.SQRs.avg self.DELTA.avg self.DELTASQ.avg self.DELTACU.avg]<block_end><def_stmt>calculate self depth predict loss=<none># only consider 1~80 meters
<block_start>mask=(depth<ge>1)<times>(depth<le>80)<line_sep>RMSE_Linear=((((predict[mask]-depth[mask])<power>2).mean())<power>0.5).cpu().detach().item()<line_sep>RMSE_Log=((((torch.log(predict[mask])-torch.log(depth[mask]))<power>2).mean())<power>0.5).cpu().detach().item()<line_sep>abs_relative=(torch.abs(predict[mask]-depth[mask])/depth[mask]).mean().cpu().detach().item()<line_sep>sq_relative=((predict[mask]-depth[mask])<power>2/depth[mask]).mean().cpu().detach().item()<line_sep>delta=(torch.max(predict[mask]/depth[mask] depth[mask]/predict[mask])<l>1.25).float().mean().cpu().detach().item()<line_sep>delta_sq=(torch.max(predict[mask]/depth[mask] depth[mask]/predict[mask])<l>1.25<power>2).float().mean().cpu().detach().item()<line_sep>delta_cu=(torch.max(predict[mask]/depth[mask] depth[mask]/predict[mask])<l>1.25<power>3).float().mean().cpu().detach().item()<line_sep>self.update(loss RMSE_Linear RMSE_Log abs_relative sq_relative delta delta_sq delta_cu)<block_end><def_stmt>tensorboard self writer epoch token='train'<block_start>writer.add_scalar(token+'/RMSELIs' self.RMSELIs.avg epoch)<line_sep>writer.add_scalar(token+'/RMSELGs' self.RMSELGs.avg epoch)<line_sep>writer.add_scalar(token+'/ABSRs' self.ABSRs.avg epoch)<line_sep>writer.add_scalar(token+'/SQRs' self.SQRs.avg epoch)<line_sep>writer.add_scalar(token+'/DELTA' self.DELTA.avg epoch)<line_sep>writer.add_scalar(token+'/DELTASQ' self.DELTASQ.avg epoch)<line_sep>writer.add_scalar(token+'/DELTACU' self.DELTACU.avg epoch)<block_end><def_stmt>print self iter token<block_start>string='{}:{}\tL {:.3f} RLI {:.3f} RLO {:.3f} ABS {:.3f} SQ {:.3f} DEL {:.3f} DELQ {:.3f} DELC {:.3f}'.format(token iter *self.get_info())<line_sep><return>string<block_end><block_end><class_stmt>LossWise(object)<block_start><def_stmt>__init__ self key=<none> tag=<none> epochs=300<block_start>self.key=key<line_sep>print(self.key)<if_stmt>len(self.key)<g>0<block_start>losswise.set_api_key(self.key)<line_sep>session=losswise.Session(tag=tag max_iter=epochs)<line_sep>self.error=session.graph('Error' kind='min' display_interval=1)<line_sep>self.loss=session.graph('Loss' kind='min' display_interval=1)<line_sep>self.delta=session.graph('Delta' kind='min' display_interval=1)<line_sep>self.session=session<block_end><block_end><def_stmt>update self info epoch tag='Train'<block_start><if_stmt>len(self.key)<g>0<block_start>self.loss.append(epoch {tag+'/loss':info[0]})<line_sep>self.error.append(epoch {tag+'/RMSE':info[1] tag+'/RMSELog':info[2] tag+'/ABSR':info[3] tag+'/SQUR':info[4]})<line_sep>self.delta.append(epoch {tag+'/1.25':info[5] tag+'/1.25^2':info[6] tag+'/1.25^3':info[7]})<block_end><block_end><def_stmt>done self<block_start>self.session.done()<block_end><block_end><class_stmt>LossWise1(object)<block_start><def_stmt>__init__ self key=<none> tag=<none> epochs=300<block_start>self.key=key<line_sep>print(self.key)<if_stmt>len(self.key)<g>0<block_start>losswise.set_api_key(self.key)<line_sep>session=losswise.Session(tag=tag max_iter=epochs)<line_sep>self.error=session.graph('Error' kind='min' display_interval=1)<line_sep>self.loss_total=session.graph('Loss' kind='min' display_interval=1)<line_sep>self.delta=session.graph('Delta' kind='min' display_interval=1)<line_sep>self.session=session<block_end><block_end><def_stmt>update self info epoch tag='Train'<block_start><if_stmt>len(self.key)<g>0<block_start>self.loss_total.append(epoch {tag+'/loss_gt':info[0] tag+'/loss_pseudo':info[1] tag+'/loss_total':info[2]})<line_sep>self.error.append(epoch {tag+'/RMSE':info[3] 
tag+'/RMSELog':info[4] tag+'/ABSR':info[5] tag+'/SQUR':info[6]})<line_sep>self.delta.append(epoch {tag+'/1.25':info[7] tag+'/1.25^2':info[8] tag+'/1.25^3':info[9]})<block_end><block_end><def_stmt>done self<block_start>self.session.done()<block_end><block_end><class_stmt>Metric1(object)<block_start><def_stmt>__init__ self<block_start>self.RMSELIs=AverageMeter()<line_sep>self.RMSELGs=AverageMeter()<line_sep>self.ABSRs=AverageMeter()<line_sep>self.SQRs=AverageMeter()<line_sep>self.DELTA=AverageMeter()<line_sep>self.DELTASQ=AverageMeter()<line_sep>self.DELTACU=AverageMeter()<line_sep>self.losses_gt=AverageMeter()<line_sep>self.losses_pseudo=AverageMeter()<line_sep>self.losses_total=AverageMeter()<block_end><def_stmt>update self loss_gt loss_pseudo loss_total RMSE_Linear RMSE_Log abs_relative sq_relative delta delta_sq delta_cu<block_start>self.losses_gt.update(loss_gt)<line_sep>self.losses_pseudo.update(loss_pseudo)<line_sep>self.losses_total.update(loss_total)<line_sep>self.RMSELIs.update(RMSE_Linear)<line_sep>self.RMSELGs.update(RMSE_Log)<line_sep>self.ABSRs.update(abs_relative)<line_sep>self.SQRs.update(sq_relative)<line_sep>self.DELTA.update(delta)<line_sep>self.DELTASQ.update(delta_sq)<line_sep>self.DELTACU.update(delta_cu)<block_end><def_stmt>get_info self<block_start><return>[self.losses_gt.avg self.losses_pseudo.avg self.losses_total.avg self.RMSELIs.avg self.RMSELGs.avg self.ABSRs.avg self.SQRs.avg self.DELTA.avg self.DELTASQ.avg self.DELTACU.avg]<block_end><def_stmt>calculate self depth predict loss_gt=0 loss_psuedo=0 loss_total=0# only consider 1~80 meters
<block_start>mask=(depth<ge>1)<times>(depth<le>80)<line_sep>RMSE_Linear=((((predict[mask]-depth[mask])<power>2).mean())<power>0.5).cpu().data<line_sep>RMSE_Log=((((torch.log(predict[mask])-torch.log(depth[mask]))<power>2).mean())<power>0.5).cpu().data<line_sep>abs_relative=(torch.abs(predict[mask]-depth[mask])/depth[mask]).mean().cpu().data<line_sep>sq_relative=((predict[mask]-depth[mask])<power>2/depth[mask]).mean().cpu().data<line_sep>delta=(torch.max(predict[mask]/depth[mask] depth[mask]/predict[mask])<l>1.25).float().mean().cpu().data<line_sep>delta_sq=(torch.max(predict[mask]/depth[mask] depth[mask]/predict[mask])<l>1.25<power>2).float().mean().cpu().data<line_sep>delta_cu=(torch.max(predict[mask]/depth[mask] depth[mask]/predict[mask])<l>1.25<power>3).float().mean().cpu().data<line_sep>self.update(loss_gt loss_psuedo loss_total RMSE_Linear RMSE_Log abs_relative sq_relative delta delta_sq delta_cu)<block_end><def_stmt>tensorboard self writer epoch token='train'<block_start>writer.add_scalar(token+'/RMSELIs' self.RMSELIs.avg epoch)<line_sep>writer.add_scalar(token+'/RMSELGs' self.RMSELGs.avg epoch)<line_sep>writer.add_scalar(token+'/ABSRs' self.ABSRs.avg epoch)<line_sep>writer.add_scalar(token+'/SQRs' self.SQRs.avg epoch)<line_sep>writer.add_scalar(token+'/DELTA' self.DELTA.avg epoch)<line_sep>writer.add_scalar(token+'/DELTASQ' self.DELTASQ.avg epoch)<line_sep>writer.add_scalar(token+'/DELTACU' self.DELTACU.avg epoch)<block_end><def_stmt>print self iter token<block_start>string='{}:{}\tL {:.3f} {:.3f} {:.3f} RLI {:.3f} RLO {:.3f} ABS {:.3f} SQ {:.3f} DEL {:.3f} DELQ {:.3f} DELC {:.3f}'.format(token iter *self.get_info())<line_sep><return>string<block_end><block_end><def_stmt>roty_pth t<block_start>''' Rotation about the y-axis. '''<line_sep>c=torch.cos(t)<line_sep>s=torch.sin(t)<line_sep><return>torch.FloatTensor([[c 0 s] [0 1 0] [-s 0 c]])<block_end><class_stmt>torchCalib(object)<block_start><def_stmt>__init__ self calib h_shift=0<block_start>self.P2=torch.from_numpy(calib.P).cuda().float()# 3 x 4
self.P2[1 2]<augsub>h_shift<line_sep># self.P3 = torch.from_numpy(calib.P3).cuda() # 3 x 4
self.R0=torch.from_numpy(calib.R0).cuda().float()# 3 x 3
self.V2C=torch.from_numpy(calib.V2C).cuda().float()# 3 x 4
self.C2V=torch.from_numpy(calib.C2V).cuda().float()<line_sep># Camera intrinsics and extrinsics
self.cu=self.P2[0 2]<line_sep>self.cv=self.P2[1 2]<line_sep>self.fu=self.P2[0 0]<line_sep>self.fv=self.P2[1 1]<line_sep>self.tx=self.P2[0 3]/(-self.fu)<line_sep>self.ty=self.P2[1 3]/(-self.fv)<block_end><def_stmt>cart_to_hom self pts<block_start>"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""<line_sep>ones=torch.ones((pts.shape[0] 1) dtype=torch.float32).cuda()<line_sep>pts_hom=torch.cat((pts ones) dim=1)<line_sep><return>pts_hom<block_end><def_stmt>rect_to_lidar self pts_rect<block_start>"""
:param pts_rect: (N, 3)
:return pts_lidar: (N, 3)
"""<line_sep>pts_hom=self.cart_to_hom(torch.matmul(pts_rect torch.inverse(self.R0.t())))<line_sep>pts_rect=torch.matmul(pts_hom self.C2V.t())<line_sep><return>pts_rect<block_end><def_stmt>lidar_to_rect self pts_lidar<block_start>"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""<line_sep>pts_lidar_hom=self.cart_to_hom(pts_lidar)<line_sep>pts_rect=torch.matmul(pts_lidar_hom torch.matmul(self.V2C.t() self.R0.t()))<line_sep><return>pts_rect<block_end><def_stmt>rect_to_img self pts_rect<block_start>"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""<line_sep>pts_rect_hom=self.cart_to_hom(pts_rect)<line_sep>pts_2d_hom=torch.matmul(pts_rect_hom self.P2.t())<line_sep>pts_img=(pts_2d_hom[: 0:2].t()/pts_rect_hom[: 2]).t()# (N, 2)
pts_rect_depth=pts_2d_hom[: 2]-self.P2.t()[3 2]<line_sep># depth in rect camera coord
<return>pts_img pts_rect_depth<block_end><def_stmt>lidar_to_img self pts_lidar<block_start>"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""<line_sep>pts_rect=self.lidar_to_rect(pts_lidar)<line_sep>pts_img,pts_depth=self.rect_to_img(pts_rect)<line_sep><return>pts_img pts_depth<block_end><def_stmt>img_to_rect self u v depth_rect<block_start>"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""<line_sep>x=((u-self.cu)<times>depth_rect)/self.fu+self.tx<line_sep>y=((v-self.cv)<times>depth_rect)/self.fv+self.ty<line_sep>pts_rect=torch.cat((x.reshape(-1 1) y.reshape(-1 1) depth_rect.reshape(-1 1)) dim=1)<line_sep><return>pts_rect<block_end><def_stmt>img_to_lidar self u v depth_rect<block_start>pts_rect=self.img_to_rect(u v depth_rect)<line_sep><return>self.rect_to_lidar(pts_rect)<block_end><block_end>
|
# std
<import_stmt>os<import_stmt>unittest<line_sep># project
<import_from_stmt>src.notifier.grafana_notifier GrafanaNotifier<import_from_stmt>.dummy_events DummyEvents<class_stmt>TestGrafanaNotifier(unittest.TestCase)<block_start><def_stmt>setUp self<arrow><none><block_start>base_url=os.getenv("GRAFANA_BASE_URL")<line_sep>api_token=os.getenv("GRAFANA_API_TOKEN")<line_sep>self.assertIsNotNone(base_url "You must export GRAFANA_BASE_URL as env variable")<line_sep>self.assertIsNotNone(api_token "You must export GRAFANA_API_TOKEN as env variable")<line_sep>self.notifier=GrafanaNotifier(title_prefix="Test" config={"enable":<true> "credentials":{"base_url":base_url "api_token":api_token } } )<block_end>@unittest.skipUnless(os.getenv("GRAFANA_BASE_URL")<and>os.getenv("GRAFANA_API_TOKEN") "Run only if credentials available")<def_stmt>testGrafanaLowPriorityNotifications self<block_start>success=self.notifier.send_events_to_user(events=DummyEvents.get_low_priority_events())<line_sep>self.assertTrue(success)<block_end>@unittest.skipUnless(os.getenv("GRAFANA_BASE_URL")<and>os.getenv("GRAFANA_API_TOKEN") "Run only if credentials available")<def_stmt>testGrafanaNormalPriorityNotifications self<block_start>success=self.notifier.send_events_to_user(events=DummyEvents.get_normal_priority_events())<line_sep>self.assertTrue(success)<block_end>@unittest.skipUnless(os.getenv("GRAFANA_BASE_URL")<and>os.getenv("GRAFANA_API_TOKEN") "Run only if credentials available")<def_stmt>testGrafanaHighPriorityNotifications self<block_start>success=self.notifier.send_events_to_user(events=DummyEvents.get_high_priority_events())<line_sep>self.assertTrue(success)<block_end><block_end>
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>re<import_from_stmt>oslo_config cfg<import_from_stmt>sahara conductor<as>c<import_from_stmt>sahara context<import_stmt>sahara.exceptions<as>ex<import_from_stmt>sahara.i18n _<import_stmt>sahara.service.edp.data_sources.manager<as>ds_manager<import_stmt>sahara.service.validations.edp.base<as>b<line_sep>CONF=cfg.CONF<def_stmt>check_data_source_create data **kwargs<block_start>b.check_data_source_unique_name(data['name'])<line_sep>_check_data_source(data)<block_end><def_stmt>_check_datasource_placeholder url<block_start><if_stmt>url<is><none><block_start><return><block_end>total_length=0<line_sep>substrings=re.findall(r"%RANDSTR\(([\-]?\d+)\)%" url)<for_stmt>length map(int substrings)<block_start><if_stmt>length<le>0<block_start><raise>ex.InvalidDataException(_("Requested RANDSTR length"<concat>" must be positive."))<block_end>total_length<augadd>length<block_end><if_stmt>total_length<g>1024<block_start><raise>ex.InvalidDataException(_("Requested RANDSTR length is"<concat>" too long, please choose a "<concat>"value less than 1024."))<block_end><block_end><def_stmt>_check_data_source data<block_start>_check_datasource_placeholder(data["url"])<if_stmt>data["type"]<in>CONF.data_source_types<block_start>ds_manager.DATA_SOURCES.get_data_source(data["type"]).validate(data)<block_end><block_end><def_stmt>check_data_source_update data data_source_id<block_start>ctx=context.ctx()<line_sep>jobs=c.API.job_execution_get_all(ctx)<line_sep>pending_jobs=[job<for>job jobs<if>job.info["status"]<eq>"PENDING"]<for_stmt>job pending_jobs<block_start><if_stmt>data_source_id<in>job.data_source_urls<block_start><raise>ex.UpdateFailedException(_("DataSource is used in a "<concat>"PENDING Job and can not be updated."))<block_end><block_end>ds=c.API.data_source_get(ctx data_source_id)<if_stmt>'name'<in>data<and>data['name']<ne>ds.name<block_start>b.check_data_source_unique_name(data['name'])<block_end>check_data={'type':data.get('type' <none>)<or>ds.type 'url':data.get('url' <none>)<or>ds.url 'credentials':data.get('credentials' <none>)<or>ds.credentials}<line_sep>_check_data_source(check_data)<block_end>
|
# tests/test_provider_Ouest-France_ldap.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:59 UTC)
<def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.Ouest_France.ldap<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.Ouest_France.ldap ldap_group<block_end><def_stmt>test_datasource_import <block_start><import_from_stmt>terrascript.data.Ouest_France.ldap ldap_group<import_from_stmt>terrascript.data.Ouest_France.ldap ldap_user<block_end># TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.Ouest_France.ldap
#
# t = terrascript.provider.Ouest_France.ldap.ldap()
# s = str(t)
#
# assert 'https://github.com/Ouest-France/terraform-provider-ldap' in s
# assert '0.7.2' in s
|
<def_stmt>find searchList elem<block_start>endList=[]<for_stmt>indElem range(0 len(elem))<block_start>resultList=[]<for_stmt>ind range(0 len(searchList))<block_start><if_stmt>searchList[ind]<eq>elem[indElem]<block_start>resultList.append(ind)<block_end><block_end>endList.extend([resultList])<block_end><return>endList<block_end>
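# Usage sketch (illustrative): find(['a', 'b', 'a'], ['a', 'c']) returns [[0, 2], []] --
# one list of matching indices in searchList for each element of elem.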
|
<import_from_stmt>collections OrderedDict<line_sep>threads=24<line_sep>default_params=OrderedDict([############ Common params:
('wn' 'noun') # mammal or noun
('print_every' 20) ('save' <false>) # Whether to save the model in the folder saved_models/
('num_negative' 10) # Number of negative samples to use
('batch_size' 10) # Size of batch to use for training
('epsilon' 1e-5) ('seed' 0) ('dim' 5) ('opt' 'rsgd') # rsgd or exp_map or sgd . Used for all hyperbolic models. #### rsgd always better
('where_not_to_sample' 'ancestors') # both or ancestors or children. Has no effect if neg_sampl_strategy = 'all'.
('neg_edges_attach' 'child') # How to form negative edges: 'parent' (u,v') or 'child' (u', v) or 'both'
############## Angle loss:
('class' 'HypCones') # 'EuclCones' , 'HypCones' , 'OrderEmb'
('neg_sampl_strategy' 'true_neg_non_leaves') ########## true_neg_non_leaves worse than true_neg when init uses true_neg_non_leaves ?????
('lr' 0.0001) ### 1e-4 the best for Hyp cones with rsgd ; 3e-4 better for Eucl cones
('resc_vecs' 0.7) ## 0.7 and 0.8 are similar
('epochs' 300) ('K' 0.1) ('margin' 0.01) ############### Init loss:
('init_class' 'PoincareNIPS') # PoincareNIPS, EuclNIPS
('lr_init' 0.03) # 0.3, 0.03, 0.1 all kind of good; 0.03 the best 94%, but with 1/10 factor for burnin
('epochs_init' 100) ('neg_sampl_strategy_init' 'true_neg') # 'true_neg' always better!
('epochs_init_burn_in' 20) ('neg_sampling_power_init' 0.75) # 0 for uniform, 1 for unigram, 0.75 much better than 0 !!!!!! Do not put 0.
])<line_sep>### We run 3 different jobs, but each of them will be run in all.py:291 for all training settings (percentage of transitive closure).
non_default_params=[### Our method : hyperbolic entailment cones
# File: task_50percent#dim_5#class_HypCones#init_class_PoincareNIPS#neg_sampl_strategy_true_neg#lr_0.0003#epochs_300#opt_rsgd#where_not_to_sample_children#neg_edges_attach_parent#lr_init_0.03#epochs_init_100#neg_sampl_strategy_init_true_neg
# ======> best OVERALL f1 CONES test = 92.80; CONES valid = 92.60 - after 260 epochs.
# To see the above result at the end of the training, one needs to run the following:
# for i in `ls ./logs/task_50percent#dim_5#*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done | grep -A1 'HypCones' ;for i in `ls ./logs/task_50percent#epochs*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done
[('dim' 5) ('class' 'HypCones') ('init_class' 'PoincareNIPS') ('neg_sampl_strategy' 'true_neg') ('lr' 0.0003) ('epochs' 300) ('opt' 'rsgd') ('where_not_to_sample' 'children') ('neg_edges_attach' 'parent') ('lr_init' 0.03) ('epochs_init' 100) ('neg_sampl_strategy_init' 'true_neg')] ### Poincare embeddings of Nickel et al., NIPS'18 - we look for the INIT results in this log file.
# File: task_50percent#dim_5#class_HypCones#init_class_PoincareNIPS#neg_sampl_strategy_true_neg_non_leaves#lr_0.0001#epochs_300#opt_exp_map#where_not_to_sample_ancestors#neg_edges_attach_child#lr_init_0.03#epochs_init_100#neg_sampl_strategy_init_true_neg
# ======> best OVERALL f1 INIT test = 83.60; INIT valid = 83.60 - after 80 epochs.
# To see the above result at the end of the training, one needs to run the following:
# for i in `ls ./logs/task_50percent#dim_5#*` ; do echo $i; cat $i | grep best | grep INIT | grep OVERALL ; done | grep -A1 'PoincareNIPS'; for i in `ls ./logs/task_50percent#epochs*` ; do echo $i; cat $i | grep best | grep INIT | grep OVERALL ; done
[('dim' 5) ('class' 'HypCones') ('init_class' 'PoincareNIPS') ('neg_sampl_strategy' 'true_neg_non_leaves') ('lr' 0.0001) ('epochs' 300) ('opt' 'exp_map') ('where_not_to_sample' 'ancestors') ('neg_edges_attach' 'child') ('lr_init' 0.03) ('epochs_init' 100) ('neg_sampl_strategy_init' 'true_neg')] ### Order embeddings of Vendrov et al, ICLR'16
# File: task_50percent#dim_5#class_OrderEmb#neg_sampl_strategy_true_neg#lr_0.1#margin_1#epochs_500#where_not_to_sample_children#neg_edges_attach_parent
# ======> best OVERALL f1 CONES test = 81.70; CONES valid = 81.60 - after 460 epochs.
# To see the above result at the end of the training, one needs to run the following:
# for i in `ls ./logs/task_50percent#dim_5#*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done | grep -A1 'OrderEmb' ;for i in `ls ./logs/task_50percent#*` ; do echo $i; cat $i | grep best | grep CONES | grep OVERALL ; done | grep -A1 'OrderEmb'
[('dim' 5) ('class' 'OrderEmb') ('neg_sampl_strategy' 'true_neg') ('lr' 0.1) ('margin' 1) ('epochs' 500) ('where_not_to_sample' 'children') ('neg_edges_attach' 'parent')] ]<line_sep>### Remove duplicate commands
p=[]<for_stmt>i range(len(non_default_params))<block_start>has_copy=<false><for_stmt>j range(i+1 len(non_default_params))<block_start><if_stmt>non_default_params[i]<eq>non_default_params[j]<block_start>has_copy=<true><block_end><block_end><if_stmt><not>has_copy<block_start>p.append(non_default_params[i])<block_end><block_end>non_default_params=p<line_sep>
|
# SPDX-License-Identifier: ISC
# Copyright (c) <NAME> <<EMAIL>>
<import_stmt>os<import_stmt>re<import_stmt>stat<import_stmt>logging<import_stmt>platform<import_stmt>queue<import_stmt>threading<import_stmt>notmuch<import_stmt>pyinotify<import_stmt>ctypes<import_stmt>contextlib<if_stmt>platform.system()<ne>'Linux'<block_start><raise>ImportError('Unsupported platform: {!r}'.format(platform.system()))<block_end><class_stmt>EventHandler(pyinotify.ProcessEvent)<block_start><def_stmt>__init__ self options database<block_start>self.options=options<line_sep>self.database=database<line_sep>super().__init__()<block_end>ignore_re=re.compile('(/xapian/.*(base.|tmp)$)|(\.lock$)|(/dovecot)')<def_stmt>process_IN_DELETE self event<block_start><if_stmt>self.ignore_re.search(event.pathname)<block_start><return><block_end>logging.debug("Detected file removal: {!r}".format(event.pathname))<line_sep>self.database.remove_message(event.pathname)<line_sep>self.database.close()<block_end><def_stmt>process_IN_MOVED_TO self event<block_start><if_stmt>self.ignore_re.search(event.pathname)<block_start><return><block_end>src_pathname=event.src_pathname<if>hasattr(event 'src_pathname')<else><none><line_sep>logging.debug("Detected file rename: {!r} -> {!r}".format(src_pathname event.pathname))<def_stmt>new_mail message<block_start><for_stmt>filter_ self.options.enable_filters<block_start><try_stmt><block_start>filter_.run('id:"{}"'.format(message.get_message_id()))<line_sep>filter_.commit(self.options.dry_run)<block_end><except_stmt>Exception<as>e<block_start>logging.warn('Error processing mail with filter {!r}: {}'.format(filter_.message e))<block_end><block_end><block_end><try_stmt><block_start>self.database.add_message(event.pathname sync_maildir_flags=<true> new_mail_handler=new_mail)<block_end><except_stmt>notmuch.FileError<as>e<block_start>logging.warn('Error opening mail file: {}'.format(e))<line_sep><return><block_end><except_stmt>notmuch.FileNotEmailError<as>e<block_start>logging.warn('File does not look like an email: {}'.format(e))<line_sep><return><block_end><else_stmt><block_start><if_stmt>src_pathname<block_start>self.database.remove_message(src_pathname)<block_end><block_end><finally_stmt><block_start>self.database.close()<block_end><block_end><block_end><def_stmt>watch_for_new_files options database paths daemonize=<false><block_start>wm=pyinotify.WatchManager()<line_sep>mask=(pyinotify.IN_DELETE|pyinotify.IN_MOVED_FROM|pyinotify.IN_MOVED_TO)<line_sep>handler=EventHandler(options database)<line_sep>notifier=pyinotify.Notifier(wm handler)<line_sep>logging.debug('Registering inotify watch descriptors')<line_sep>wdds=dict()<for_stmt>path paths<block_start>wdds[path]=wm.add_watch(path mask)<block_end># TODO: honor daemonize
logging.debug('Running mainloop')<line_sep>notifier.loop()<block_end><try_stmt><block_start>libc=ctypes.CDLL(ctypes.util.find_library("c"))<block_end><except_stmt>ImportError<as>e<block_start><raise>ImportError('Could not load libc: {}'.format(e))<block_end><class_stmt>Libc<block_start><class_stmt>c_dir(ctypes.Structure)<block_start><pass><block_end>c_dir_p=ctypes.POINTER(c_dir)<line_sep>opendir=libc.opendir<line_sep>opendir.argtypes=[ctypes.c_char_p]<line_sep>opendir.restype=c_dir_p<line_sep>closedir=libc.closedir<line_sep>closedir.argtypes=[c_dir_p]<line_sep>closedir.restype=ctypes.c_int<line_sep>@[email protected]<def_stmt>open_directory cls path<block_start>handle=cls.opendir(path)<line_sep><yield>handle<line_sep>cls.closedir(handle)<block_end><class_stmt>c_dirent(ctypes.Structure)<block_start>'''
man 3 readdir says::
On Linux, the dirent structure is defined as follows:
struct dirent {
ino_t d_ino; /* inode number */
off_t d_off; /* offset to the next dirent */
unsigned short d_reclen; /* length of this record */
unsigned char d_type; /* type of file; not supported
by all file system types */
char d_name[256]; /* filename */
};
'''<line_sep>_fields_=(('d_ino' ctypes.c_long) ('d_off' ctypes.c_long) ('d_reclen' ctypes.c_ushort) ('d_type' ctypes.c_byte) ('d_name' ctypes.c_char<times>4096) )<block_end>c_dirent_p=ctypes.POINTER(c_dirent)<line_sep>readdir=libc.readdir<line_sep>readdir.argtypes=[c_dir_p]<line_sep>readdir.restype=c_dirent_p<line_sep># magic value for directory
DT_DIR=4<block_end>blacklist={'.' '..' 'tmp'}<def_stmt>walk_linux channel path<block_start>channel.put(path)<with_stmt>Libc.open_directory(path)<as>handle<block_start><while_stmt><true><block_start>dirent_p=Libc.readdir(handle)<if_stmt><not>dirent_p<block_start><break><block_end><if_stmt>dirent_p.contents.d_type<eq>Libc.DT_DIR<and>dirent_p.contents.d_name<not><in>blacklist<block_start>walk_linux(channel os.path.join(path dirent_p.contents.d_name))<block_end><block_end><block_end><block_end><def_stmt>walk channel path<block_start>channel.put(path)<for_stmt>child_path (os.path.join(path child)<for>child os.listdir(path)<if>child<not><in>blacklist)<block_start><try_stmt><block_start>stat_result=os.stat(child_path)<block_end><except_stmt>Exception<block_start><continue><block_end><if_stmt>stat_result.st_mode&stat.S_IFDIR<block_start>walk(channel child_path)<block_end><block_end><block_end><def_stmt>walker channel path<block_start>walk_linux(channel path)<line_sep>channel.put(<none>)<block_end><def_stmt>quick_find_dirs_hack path<block_start>results=queue.Queue()<line_sep>walker_thread=threading.Thread(target=walker args=(results path))<line_sep>walker_thread.daemon=<true><line_sep>walker_thread.start()<while_stmt><true><block_start>result=results.get()<if_stmt>result<is><not><none><block_start><yield>result<block_end><else_stmt><block_start><break><block_end><block_end><block_end>
|
<import_from_stmt>django_elasticsearch_dsl_drf.viewsets DocumentViewSet<import_from_stmt>django_elasticsearch_dsl_drf.filter_backends FilteringFilterBackend CompoundSearchFilterBackend DefaultOrderingFilterBackend OrderingFilterBackend <import_from_stmt>search.documents NewsDocument<import_from_stmt>search.serializers NewsDocumentSerializer<class_stmt>NewsDocumentView(DocumentViewSet)<block_start>document=NewsDocument<line_sep>serializer_class=NewsDocumentSerializer<line_sep>lookup_field="id"<line_sep>filter_backends=[CompoundSearchFilterBackend FilteringFilterBackend DefaultOrderingFilterBackend OrderingFilterBackend]<line_sep>search_fields=("title" "description")<line_sep>filter_fields={"language":"language"}<line_sep>ordering_fields={"published":"published" "author":"author" "language":"language"}<line_sep>ordering=("published" )<block_end>
|
"""Utility modules for distributed and parallel training. """<import_stmt>torch<class_stmt>SingleDeviceDistributedParallel(torch.nn.parallel.distributed.DistributedDataParallel)<block_start>"""This module implements a module similar to `DistributedDataParallel`, but it accepts
inputs of any shape, and only supports a single device per instance.
"""<def_stmt>__init__ self module device_id find_unused_parameters=<false><block_start>super(SingleDeviceDistributedParallel self).__init__(module [device_id] find_unused_parameters=find_unused_parameters)<block_end><def_stmt>forward self *inputs **kwargs<block_start><if_stmt>self.require_forward_param_sync<block_start>self._sync_params()<block_end>output=self.module(*inputs **kwargs)<if_stmt>torch.is_grad_enabled()<and>self.require_backward_grad_sync<block_start>self.require_forward_param_sync=<true><if_stmt>self.find_unused_parameters<block_start>self.reducer.prepare_for_backward(list(torch.nn.parallel.distributed._find_tensors(output)))<block_end><else_stmt><block_start>self.reducer.prepare_for_backward([])<block_end><block_end><return>output<block_end><def_stmt>state_dict self destination=<none> prefix='' keep_vars=<false><block_start><return>self.module.state_dict(destination prefix keep_vars)<block_end><def_stmt>load_state_dict self state_dict strict=<true><block_start><return>self.module.load_state_dict(state_dict strict)<block_end><block_end>
|
'''Module containing function for computing Julia sets'''<import_from_stmt>numba guvectorize void complex128 int32 float64<line_sep>@guvectorize([void(complex128[:] float64[:] int32[:] int32[:])] '(n),(),()->(n)')<def_stmt>julia_set domain max_norm max_iters iterations<block_start><for_stmt>i,z enumerate(domain)<block_start>iterations[i]=0<while_stmt>(iterations[i]<le>max_iters[0]<and>z.real<times>z.real+z.imag<times>z.imag<le>max_norm[0]<times>max_norm[0])<block_start>z=z<power>2-0.622772+0.42193j<line_sep>iterations[i]<augadd>1<block_end><block_end><block_end>
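# Usage sketch (assumed, with numpy imported as np): for a complex128 array such as
# domain = np.linspace(-1.5, 1.5, 1000) + 0.0j, calling julia_set(domain, 2.0, 255)
# returns an int32 array giving, per point, how many iterations of
# z -> z**2 - 0.622772 + 0.42193j run before |z| exceeds max_norm (capped at max_iters).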
|
"""cleanup of submission id
Revision ID: 0b8739ab2097
Revises: <KEY>
Create Date: 2020-09-03 16:19:38.703377
"""<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<line_sep># revision identifiers, used by Alembic.
revision='0b8739ab2097'<line_sep>down_revision='<KEY>'<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start>op.drop_column('application' 'form_submission_id')<line_sep># ### end Alembic commands ###
<block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start>op.add_column('application' sa.Column('form_submission_id' sa.VARCHAR(length=100) autoincrement=<false> nullable=<false>))<line_sep># ### end Alembic commands ###
<block_end>
|
<import_stmt>sys<import_stmt>numpy<as>np<import_from_stmt>sklearn.cluster DBSCAN<import_stmt>time<import_stmt>matplotlib.pyplot<as>plt<def_stmt>read_vecs path<block_start>out=[]<for_stmt>l open(path)<block_start>out.append([float(x)<for>x l.strip().split()])<block_end><return>np.array(out)<block_end>difs=read_vecs(sys.argv[1])<line_sep>mu=np.mean(difs)<line_sep>sd=np.std(difs)<line_sep>print(mu)<line_sep>print(sd)<line_sep>eps=sd<times>0.01<line_sep>min_samples=4#? total guess...do better?
print('dbscanning...')<line_sep>start=time.time()<line_sep>clustering=DBSCAN(eps=eps min_samples=min_samples n_jobs=4).fit(difs)<line_sep>print('done! took ' time.time()-start)<line_sep>labels=list(clustering.labels_)<line_sep>print(labels)<line_sep>plt.hist(labels)<line_sep>plt.title('labels')<line_sep>plt.show()<line_sep>
|
'''
Created on Jul 3, 2014
author: <NAME>
License: BSD
Description: Module containing some (technical) indicators
'''<import_stmt>pandas<as>pd<def_stmt>rsi price n=14<block_start>''' rsi indicator '''<line_sep>gain=price.diff().fillna(0)# difference between day n and n-1, replace nan (first value) with 0
<def_stmt>rsiCalc p# subfunction for calculating rsi for one lookback period
<block_start>avgGain=p[p<g>0].sum()/n<line_sep>avgLoss=-p[p<l>0].sum()/n<line_sep>rs=avgGain/avgLoss<line_sep><return>100-100/(1+rs)<block_end># run for all periods with rolling_apply
<return>pd.rolling_apply(gain n rsiCalc)<block_end>
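# Usage sketch (illustrative): rsi(close_prices, n=14) returns a Series of RSI values
# (NaN for the first n-1 rows). Note that pd.rolling_apply was removed in later pandas
# releases; the equivalent modern call would be gain.rolling(n).apply(rsiCalc).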
|
<import_stmt>functools<import_stmt>inspect<import_stmt>warnings<import_from_stmt>typing Iterable<import_stmt>torch<def_stmt>print_table rows header=['Operation' 'OPS']<block_start>r"""Simple helper function to print a list of lists as a table
:param rows: a :class:`list` of :class:`list` containing the data to be printed. Each entry in the list
represents an individual row
:param header: (optional) a :class:`list` containing the header of the table
"""<if_stmt>len(rows)<eq>0<block_start><return><block_end>col_max=[max([len(str(val[i]))<for>val rows])+3<for>i range(len(rows[0]))]<line_sep>row_format=''.join(["{:<"+str(length)+"}"<for>length col_max])<if_stmt>len(header)<g>0<block_start>print(row_format.format(*header))<line_sep>print(row_format.format(*['-'<times>(val-2)<for>val col_max]))<block_end><for_stmt>row rows<block_start>print(row_format.format(*row))<block_end>print(row_format.format(*['-'<times>(val-3)<for>val col_max]))<block_end><def_stmt>same_device model input# Remove dataparallel wrapper if present
<block_start><if_stmt>isinstance(model torch.nn.DataParallel)<block_start>model=model.module<block_end># Make sure that the input is on the same device as the model
<if_stmt>len(list(model.parameters()))<block_start>input_device=input.device<if><not>isinstance(input Iterable)<else>input[0].device<if_stmt>next(model.parameters()).device<ne>input_device<block_start><if_stmt>isinstance(input Iterable)<block_start><for_stmt>inp input<block_start>inp.to(next(model.parameters()).device)<block_end><block_end><else_stmt><block_start>input.to(next(model.parameters()).device)<block_end><block_end><block_end><return>model input<block_end># Workaround for scopename in pytorch 1.4 and newer
# see: https://github.com/pytorch/pytorch/issues/33463
<class_stmt>scope_name_workaround(object)<block_start><def_stmt>__init__ self<block_start>self.backup=<none><block_end><def_stmt>__enter__ self<block_start><def_stmt>_tracing_name self_ tracing_state<block_start><if_stmt><not>tracing_state._traced_module_stack<block_start><return><none><block_end>module=tracing_state._traced_module_stack[-1]<for_stmt>name,child module.named_children()<block_start><if_stmt>child<is>self_<block_start><return>name<block_end><block_end><return><none><block_end><def_stmt>_slow_forward self_ *input **kwargs<block_start>tracing_state=torch._C._get_tracing_state()<if_stmt><not>tracing_state<or>isinstance(self_.forward torch._C.ScriptMethod)<block_start><return>self_.forward(*input **kwargs)<block_end><if_stmt><not>hasattr(tracing_state '_traced_module_stack')<block_start>tracing_state._traced_module_stack=[]<block_end>name=_tracing_name(self_ tracing_state)<if_stmt>name<block_start>tracing_state.push_scope('%s[%s]'%(self_._get_name() name))<block_end><else_stmt><block_start>tracing_state.push_scope(self_._get_name())<block_end>tracing_state._traced_module_stack.append(self_)<try_stmt><block_start>result=self_.forward(*input **kwargs)<block_end><finally_stmt><block_start>tracing_state.pop_scope()<line_sep>tracing_state._traced_module_stack.pop()<block_end><return>result<block_end>self.backup=torch.nn.Module._slow_forward<line_sep>setattr(torch.nn.Module '_slow_forward' _slow_forward)<block_end><def_stmt>__exit__ self type value tb<block_start>setattr(torch.nn.Module '_slow_forward' self.backup)<block_end><block_end># Source: https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
string_types=(type(b'') type(u''))<def_stmt>deprecated reason<block_start>"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""<if_stmt>isinstance(reason string_types)# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
<block_start><def_stmt>decorator func1<block_start><if_stmt>inspect.isclass(func1)<block_start>fmt1="Call to deprecated class {name} ({reason})."<block_end><else_stmt><block_start>fmt1="Call to deprecated function {name} ({reason})."<block_end>@functools.wraps(func1)<def_stmt>new_func1 *args **kwargs<block_start>warnings.simplefilter('always' DeprecationWarning)<line_sep>warnings.warn(fmt1.format(name=func1.__name__ reason=reason) category=DeprecationWarning stacklevel=2)<line_sep>warnings.simplefilter('default' DeprecationWarning)<line_sep><return>func1(*args **kwargs)<block_end><return>new_func1<block_end><return>decorator<block_end><elif_stmt>inspect.isclass(reason)<or>inspect.isfunction(reason)# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
<block_start>func2=reason<if_stmt>inspect.isclass(func2)<block_start>fmt2="Call to deprecated class {name}."<block_end><else_stmt><block_start>fmt2="Call to deprecated function {name}."<block_end>@functools.wraps(func2)<def_stmt>new_func2 *args **kwargs<block_start>warnings.simplefilter('always' DeprecationWarning)<line_sep>warnings.warn(fmt2.format(name=func2.__name__) category=DeprecationWarning stacklevel=2)<line_sep>warnings.simplefilter('default' DeprecationWarning)<line_sep><return>func2(*args **kwargs)<block_end><return>new_func2<block_end><else_stmt><block_start><raise>TypeError(repr(type(reason)))<block_end><block_end>
|
<import_stmt>glob<import_stmt>os<import_from_stmt>nltk.corpus.reader.bracket_parse BracketParseCorpusReader<import_stmt>nltk<import_stmt>tokenizations# pip install pytokenizations==0.7.2
TOKEN_MAPPING={"-LRB-":"(" "-RRB-":")" "-LCB-":"{" "-RCB-":"}" "-LSB-":"[" "-RSB-":"]" "``":'"' "''":'"' "`":"'" '«':'"' '»':'"' ''': "'",
''':"'" '"':'"' '"':'"' '„':'"' '‹':"'" '›':"'" "\u2013":"--" # en dash
"\u2014":"--" # em dash
}<line_sep>train_splits=["0"+str(i)<for>i range(2 10)]+[str(i)<for>i range(10 22)]<line_sep>test_splits=["23"]<line_sep>dev_22_splits=["22"]<line_sep>dev_24_splits=["24"]<def_stmt>glob_raw_files treebank_root splits# Get all applicable raw files
<block_start>results=[fname<for>split splits<for>fname sorted(glob.glob(os.path.join(treebank_root 'raw' 'wsj' split "wsj_????")))]<line_sep># Exclude raw files with no corresponding parse
mrg_results=[fname.replace('parsed/mrg/wsj' 'raw/wsj').replace('.mrg' '')<for>split splits<for>fname sorted(glob.glob(os.path.join(treebank_root 'parsed' 'mrg' 'wsj' split "wsj_????.mrg")))]<line_sep><return>[fname<for>fname results<if>fname<in>mrg_results]<block_end><def_stmt>glob_tree_files target_root splits<block_start><return>[fname<for>split splits<for>fname sorted(glob.glob(os.path.join(target_root split "wsj_????.tree"))+glob.glob(os.path.join(target_root 'parsed' 'mrg' 'wsj' split "wsj_????.mrg")))]<block_end><def_stmt>standardize_form word<block_start>word=word.replace('\\/' '/').replace('\\*' '*')<line_sep># Mid-token punctuation occurs in biomedical text
word=word.replace('-LSB-' '[').replace('-RSB-' ']')<line_sep>word=word.replace('-LRB-' '(').replace('-RRB-' ')')<line_sep>word=word.replace('-LCB-' '{').replace('-RCB-' '}')<line_sep>word=TOKEN_MAPPING.get(word word)<line_sep><return>word<block_end><def_stmt>get_raw_text_for_trees treebank_root splits tree_files<block_start>lines=[]<for_stmt>fname glob_raw_files(treebank_root splits)<block_start><with_stmt>open(fname 'r' encoding="windows-1252")<as>f<block_start><for_stmt>line f<block_start><if_stmt>line.strip()<and><not>line.startswith('.START')# Delete invalid characters caused by encoding issues
<block_start>line=line.replace("Õ" "").replace("å" "")<line_sep>lines.append(line)<block_end><block_end><block_end><block_end>reader=BracketParseCorpusReader('.' tree_files)<line_sep>target_sents=reader.sents()<line_sep>line_iter=iter(lines)<line_sep>line=""<line_sep>pairs=[]<for_stmt>target_sent target_sents<block_start><if_stmt><not>line.strip()<block_start>line=next(line_iter)<block_end># Handle PTB-style escaping mismatches
target_sent=[standardize_form(word)<for>word target_sent]<line_sep># Handle transpositions: sometimes the raw text transposes punctuation,
# while the parsed version cleans up this transposition
<if_stmt>'U.S..'<in>''.join(target_sent)<block_start>target_sent=[x.replace('U.S.' 'U.S')<for>x target_sent]<block_end><if_stmt>'Co.,'<in>''.join(target_sent)<and>'Co,.'<in>line<block_start>target_sent=[x.replace('Co.' 'Co')<for>x target_sent]<block_end><if_stmt>"But that 's"<in>' '.join(target_sent)<and>"But's that"<in>line<block_start>target_sent=[x.replace("that" "tha")<for>x target_sent]<line_sep>target_sent=[x.replace("'s" "t")<for>x target_sent]<block_end><if_stmt>('-- Freshman football player'<in>line<or>'-- Sophomore football player'<in>line<or>'-- Junior football player'<in>line<or>'-- Senior football player'<in>line<or>'-- Graduate-student football player'<in>line<or>'-- Football player'<in>line<or>'-- Freshman basketball player'<in>line<or>'-- Sophomore basketball player'<in>line<or>'-- Junior basketball player'<in>line<or>'-- Senior basketball player'<in>line<or>'-- Basketball player'<in>line)<and>('" .'<in>' '.join(target_sent)<and>target_sent[-1]<eq>'.')<block_start>target_sent=target_sent[:-1]<block_end># Attempt to align raw and parsed text
r2p,p2r=tokenizations.get_alignments(line.replace("`" "'") target_sent)<line_sep># Handle skips: some lines in the raw data are not parsed
<while_stmt><not>all(p2r)<block_start>go_next=<false><if_stmt>line.startswith('(See')<and>'-- WSJ'<in>line<block_start>go_next=<true><block_end><elif_stmt>line<eq>'San Diego '<block_start>go_next=<true><block_end><elif_stmt>line<eq>'" '<block_start>go_next=<true><block_end><if_stmt>go_next<block_start>line=next(line_iter)<line_sep>r2p,p2r=tokenizations.get_alignments(line.replace("`" "'") target_sent)<block_end><else_stmt><block_start><break><block_end><block_end># Handle line breaks in raw format that come in the middle of the sentence
# (such as mid-sentence line breaks in poems)
<for_stmt>_ range(12)# Loop limit is to aid in debugging
<block_start><if_stmt><not>all(p2r)<block_start>line=line+next(line_iter)<line_sep>r2p,p2r=tokenizations.get_alignments(line.replace("`" "'") target_sent)<block_end><block_end><assert_stmt>all(p2r)<line_sep>end=max([max(x)<for>x p2r])+1<line_sep># Trim excess raw text at the start
line_to_save=line[:end]<line_sep>r2p,p2r=tokenizations.get_alignments(line_to_save.replace("`" "'") target_sent)<while_stmt><true><block_start>_,alt_p2r=tokenizations.get_alignments('\n'.join(line_to_save.replace("`" "'").splitlines()[1:]) target_sent)<if_stmt>sum([len(x)<for>x p2r])<eq>sum([len(x)<for>x alt_p2r])<block_start>line_to_save='\n'.join(line_to_save.splitlines()[1:])<block_end><else_stmt><block_start><break><block_end><block_end>pairs.append((line_to_save target_sent))<line_sep>line=line[end:]<block_end><assert_stmt>len(pairs)<eq>len(target_sents)<line_sep><return>[line<for>(line target_sent) pairs]<block_end><def_stmt>get_words_and_whitespace treebank_root splits tree_files<block_start>reader=BracketParseCorpusReader('.' tree_files)<line_sep>target_sents=reader.sents()<line_sep>raw_sents=get_raw_text_for_trees(treebank_root splits tree_files)<line_sep>pairs=[]<for_stmt>line,target_sent zip(raw_sents target_sents)# Fix some errors in the raw text that are also fixed in the parsed trees
<block_start><if_stmt>"But's that just"<in>line<block_start>line=line.replace("But's that just" "But that's just")<block_end><if_stmt>'Co,.'<in>line<block_start>line=line.replace('Co,.' 'Co.,')<block_end><if_stmt>'U.S..'<in>''.join(target_sent)# Address cases where underlying "U.S." got tokenized as "U.S." ".""
# This is expected in the sentence-final position, but it seems to
# occur in other places, too.
<block_start>line=line.replace('U.S.' 'U.S..').replace('U.S.. market' 'U.S. market').replace('U.S.. agenda' 'U.S. agenda').replace('U.S.. even' 'U.S. even').replace('U.S.. counterpart' 'U.S. counterpart').replace('U.S.. unit' 'U.S. unit').replace('U.S..,' 'U.S.,')<block_end>words=target_sent[:]<line_sep>target_sent=[standardize_form(word).replace("``" '"')<for>word target_sent]<line_sep>r2p,p2r=tokenizations.get_alignments(line.replace("`" "'") target_sent)<line_sep>last_char_for_parsed=[max(x)<if>x<else><none><for>x p2r]<line_sep>have_space_after=[<none>]<times>len(words)<for_stmt>i,word enumerate(target_sent)<block_start><if_stmt>last_char_for_parsed[i]<is><none><block_start><continue><block_end>char_after_word=line[last_char_for_parsed[i]+1:last_char_for_parsed[i]+2]<line_sep>have_space_after[i]=(char_after_word<ne>char_after_word.lstrip())<line_sep># Fix the few cases where the word form in the parsed data is incorrect
<if_stmt>word<eq>"'T-"<and>target_sent[i+1]<eq>'is'<block_start>target_sent[i]="'T"<block_end><if_stmt>word<eq>"16"<and>target_sent[i+1:i+5]<eq>['64' '-' 'inch' 'opening']# This error occurs in the test set, and moreover would affect
# tokenization by introducing an extra '/', so we don't fix it.
# target_sent[i] = "16/"
<block_start>have_space_after[i]=<true><block_end><if_stmt>word<eq>"Gaming"<and>target_sent[i-1:i+2]<eq>['and' 'Gaming' 'company']<block_start>target_sent[i]="gaming"<block_end><block_end>pairs.append((target_sent have_space_after))<line_sep># For each token in the treebank, we have now queried the raw string to
# determine if the token should have whitespace following it. The lines
# below are a sanity check that the reconstructed text matches the raw
# version as closely as possible.
to_delete=set()<for_stmt>indices p2r<block_start><if_stmt><not>indices<block_start><continue><block_end>to_delete<augor>set(range(min(indices) max(indices)+1))-set(indices)<block_end>raw=list(line)<for_stmt>i sorted(to_delete reverse=<true>)<block_start><del_stmt>raw[i]<block_end>raw="".join(raw)<line_sep>raw=" ".join(x.strip()<for>x raw.split())<line_sep>guess="".join([w+(" "<if>sp<else>"")<for>(w sp) zip(target_sent have_space_after)])<if_stmt>"filings policy-making"<in>guess# The parsed version of this sentence drops an entire span from the raw
# text. Maybe we shouldn't be training on this bad example, but for now
# we'll just skip validating it.
<block_start><continue><block_end># Fix some issues with the raw text that are corrected in the parsed version
raw=raw.replace("`" "'")<line_sep>raw=raw.replace("and <Tourism" "and Tourism")<line_sep>raw=raw.replace("staf reporter" "staff reporter")<if_stmt>" S$"<in>raw<and>" S$"<not><in>guess<block_start>raw=raw.replace(" S$" " US$")<block_end>raw=raw.replace("16/ 64-inch opening" "16 64-inch opening")<if_stmt>raw<ne>guess<and>raw.replace('."' '".')<eq>guess<block_start>raw=raw.replace('."' '".')<block_end># assert raw == guess
<if_stmt>raw<ne>guess<block_start>print(raw)<line_sep>print(guess)<line_sep>print()<block_end><block_end><return>pairs<block_end><def_stmt>get_id_list target_root splits<block_start>res=[]<for_stmt>fname glob_tree_files(target_root splits)<block_start>reader=BracketParseCorpusReader('.' [fname])<line_sep>num_sents=len(reader.parsed_sents())<line_sep>doc_id=os.path.splitext(os.path.split(fname)[-1])[0]<for_stmt>sent_id range(num_sents)<block_start>sent_id="{}_{:03}".format(doc_id sent_id)<line_sep>res.append((doc_id sent_id))<block_end><block_end><return>res<block_end><def_stmt>write_to_file treebank3_root target_root splits tree_file outfile<block_start>words_and_whitespace=get_words_and_whitespace(treebank3_root splits [tree_file])<line_sep>doc_and_sent_ids=get_id_list(target_root splits)<line_sep># print(len(words_and_whitespace), len(doc_and_sent_ids))
<assert_stmt>len(words_and_whitespace)<eq>len(doc_and_sent_ids)<with_stmt>open(outfile 'w')<as>f<block_start>old_doc_id=<none><for_stmt>(doc_id sent_id),(words have_space_after) zip(doc_and_sent_ids words_and_whitespace)<block_start><if_stmt>doc_id<ne>old_doc_id<block_start>old_doc_id=doc_id<line_sep>f.write("# newdoc_id = {}\n".format(doc_id))<block_end>f.write("# sent_id = {}\n".format(sent_id))<line_sep>text="".join([w+(" "<if>sp<else>"")<for>w,sp zip(words have_space_after)])<line_sep>f.write("# text = {}\n".format(text))<for_stmt>word_id,(w sp) enumerate(zip(words have_space_after) start=1)<block_start><if_stmt>sp<block_start>misc="_"<block_end><else_stmt><block_start>misc="SpaceAfter=No"<block_end>f.write("{}\t{}\t{}\n".format(word_id w misc))<block_end>f.write("\n")<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--treebank3_root" required=<true>)<line_sep>parser.add_argument("--revised_root")<line_sep>args=parser.parse_args()<line_sep>write_to_file(args.treebank3_root args.treebank3_root train_splits 'train_02-21.LDC99T42' 'train_02-21.LDC99T42.text')<line_sep>write_to_file(args.treebank3_root args.treebank3_root test_splits 'test_23.LDC99T42' 'test_23.LDC99T42.text')<line_sep>write_to_file(args.treebank3_root args.treebank3_root dev_22_splits 'dev_22.LDC99T42' 'dev_22.LDC99T42.text')<if_stmt>args.revised_root<is><not><none><block_start>write_to_file(args.treebank3_root args.revised_root train_splits 'train_02-21.LDC2015T13' 'train_02-21.LDC2015T13.text')<line_sep>write_to_file(args.treebank3_root args.revised_root test_splits 'test_23.LDC2015T13' 'test_23.LDC2015T13.text')<line_sep>write_to_file(args.treebank3_root args.revised_root dev_22_splits 'dev_22.LDC2015T13' 'dev_22.LDC2015T13.text')<block_end><block_end>
|
<import_stmt>numpy<import_from_stmt>rdkit.ML.Cluster Murtagh<line_sep>print('1')<line_sep>d=numpy.array([[10.0 5.0] [20.0 20.0] [30.0 10.0] [30.0 15.0] [5.0 10.0]] numpy.float64)<line_sep>print('2')<line_sep># clusters = Murtagh.ClusterData(d,len(d),Murtagh.WARDS)
# for i in range(len(clusters)):
# clusters[i].Print()
# print('3')
dists=[]<for_stmt>i range(len(d))<block_start><for_stmt>j range(i)<block_start>dist=sum((d[i]-d[j])<power>2)<line_sep>dists.append(dist)<block_end><block_end>dists=numpy.array(dists)<line_sep>print('Wards:')<line_sep>clusters=Murtagh.ClusterData(dists len(d) Murtagh.WARDS isDistData=1)<line_sep>clusters[0].Print()<line_sep>print('SLINK:')<line_sep>clusters=Murtagh.ClusterData(dists len(d) Murtagh.SLINK isDistData=1)<line_sep>clusters[0].Print()<line_sep>print('CLINK:')<line_sep>clusters=Murtagh.ClusterData(dists len(d) Murtagh.CLINK isDistData=1)<line_sep>clusters[0].Print()<line_sep>print('UPGMA:')<line_sep>clusters=Murtagh.ClusterData(dists len(d) Murtagh.UPGMA isDistData=1)<line_sep>clusters[0].Print()<line_sep>
|
#@<OUT> get cluster status
{"clusterName":"testCluster" "defaultReplicaSet":{"name":"default" "topology":[{"address":"<<<hostname>>>:<<<__mysql_sandbox_port2>>>" "label":"<<<hostname>>>:<<<__mysql_sandbox_port2>>>" "role":"HA"} {"address":"<<<hostname>>>:<<<__mysql_sandbox_port1>>>" "label":"<<<hostname>>>:<<<__mysql_sandbox_port1>>>" "role":"HA"}] "topologyMode":"Single-Primary"}}<line_sep>
|
<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>torch<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<as>optim<import_from_stmt>torchvision datasets transforms<import_from_stmt>torch.utils.data Dataset DataLoader<import_from_stmt>torch.utils.data.sampler SubsetRandomSampler<import_stmt>numpy<as>np<import_stmt>pickle<import_from_stmt>models.model_resnet *<import_from_stmt>models.model_openbmi *<import_from_stmt>models.model_3dcnn *<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>scipy signal<line_sep>cuda=torch.cuda.is_available()<import_stmt>matplotlib<import_stmt>matplotlib.pyplot<as>plt<line_sep>giga_classes=['right' 'left']<line_sep>colors=['#1f77b4' '#ff7f0e' '#2ca02c' '#d62728' '#9467bd' '#8c564b' '#e377c2' '#7f7f7f' '#bcbd22' '#17becf']<def_stmt>plot_embeddings embeddings targets xlim=<none> ylim=<none><block_start>plt.figure(figsize=(10 10))<for_stmt>i range(10)<block_start>inds=np.where(targets<eq>i)[0]<line_sep>plt.scatter(embeddings[inds 0] embeddings[inds 1] alpha=0.5 color=colors[i])<block_end><if_stmt>xlim<block_start>plt.xlim(xlim[0] xlim[1])<block_end><if_stmt>ylim<block_start>plt.ylim(ylim[0] ylim[1])<block_end>plt.legend(giga_classes)<block_end><def_stmt>extract_embeddings dataloader model num_ftrs=2<block_start><with_stmt>torch.no_grad()<block_start>model.eval()<line_sep># num_ftrs = model.embedding_net.fc.out_features
embeddings=np.zeros((len(dataloader.dataset) num_ftrs))<line_sep>labels=np.zeros(len(dataloader.dataset))<line_sep>k=0<for_stmt>images,target dataloader<block_start><if_stmt>cuda<block_start>images=images.cuda()<block_end>embeddings[k:k+len(images)]=model.get_embedding(images).data.cpu().numpy()<line_sep>labels[k:k+len(images)]=target.numpy()<line_sep>k<augadd>len(images)<block_end><block_end><return>embeddings labels<block_end><def_stmt>train args model device train_loader optimizer epoch<block_start>model.train()<for_stmt>batch_idx,(data target) enumerate(train_loader)<block_start>data,target=data.to(device) target.to(device)<line_sep>#data = data.view(-1,1,62,301)
target=target.view(-1)<line_sep>#data = nn.functional.interpolate(data,size=[300,300])
optimizer.zero_grad()<line_sep>output=model(data)<line_sep>#output = nn.CrossEntropyLoss(output)
# output = F.log_softmax(output, dim=1)
loss=F.nll_loss(output target)<line_sep>loss.backward()<line_sep>optimizer.step()<if_stmt>batch_idx%args.log_interval<eq>0<block_start>print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch batch_idx<times>len(data) len(train_loader.dataset) 100.<times>batch_idx/len(train_loader) loss.item()))<block_end><block_end><block_end><def_stmt>eval args model device test_loader<block_start>model.eval()<line_sep>test_loss=0<line_sep>correct=0<with_stmt>torch.no_grad()<block_start><for_stmt>data,target test_loader<block_start>data,target=data.to(device) target.to(device)<line_sep>#data = data.view(-1,1,62,data.shape[4])
output=model(data)<line_sep>#output = nn.CrossEntropyLoss(output)
#output = F.log_softmax(output, dim=1)
test_loss<augadd>F.nll_loss(output target reduction='sum').item()# sum up batch loss
pred=output.argmax(dim=1 keepdim=<true>)# get the index of the max log-probability
correct<augadd>pred.eq(target.view_as(pred)).sum().item()<block_end><block_end>test_loss<augdiv>len(test_loader.dataset)<line_sep>#print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(test_loss correct len(test_loader.dataset) 100.<times>correct/len(test_loader.dataset)))<line_sep><return>test_loss correct<block_end><def_stmt>windows data size step<block_start>start=0<while_stmt>((start+size)<l>data.shape[0])<block_start><yield>int(start) int(start+size)<line_sep>start<augadd>step<block_end><block_end><def_stmt>segment_signal_without_transition data window_size step<block_start>segments=[]<for_stmt>(start end) windows(data window_size step)<block_start><if_stmt>(len(data[start:end])<eq>window_size)<block_start>segments=segments+[data[start:end]]<block_end><block_end><return>np.array(segments)<block_end><def_stmt>segment_dataset X window_size step<block_start>win_x=[]<for_stmt>i range(X.shape[0])<block_start>win_x=win_x+[segment_signal_without_transition(X[i] window_size step)]<block_end>win_x=np.array(win_x)<line_sep><return>win_x<block_end>#%%
<class_stmt>TripletGiga(Dataset)<block_start><def_stmt>__init__ self x y valtype transform=<none> istrain=<true> sess=1 subj=<none><block_start>self.transform=transform<line_sep>self.istrain=istrain<line_sep>x_data=x.copy()<line_sep>y_data=y.copy()<line_sep>x_data=x_data.reshape(108 -1 1 62 500)<line_sep>y_data=y_data.reshape(108 -1)<if_stmt>valtype<eq>'sess'<block_start><if_stmt>istrain<block_start>x_data=x_data[np.s_[0:54] : : : :]<line_sep>y_data=y_data[np.s_[0:54] :]<block_end><else_stmt><block_start>x_data=x_data[np.s_[0+54:54+54] 100:200 : : :]#tests sess2 online
y_data=y_data[np.s_[0+54:54+54] 100:200]<block_end><block_end><elif_stmt>valtype<eq>'loso'<block_start><if_stmt>subj<eq><none><block_start><raise>AssertionError()<block_end><if_stmt>istrain<block_start>x_data=np.delete(x_data np.s_[subj subj+54] 0)#leave one subj
y_data=np.delete(y_data np.s_[subj subj+54] 0)<block_end><else_stmt><block_start>x_data=x_data[np.s_[subj+54] 100:200 : : :]# tests sess2 online
y_data=y_data[np.s_[subj+54] 100:200]<block_end><block_end><elif_stmt>valtype<eq>'subj'<block_start><if_stmt>subj<eq><none><block_start><raise>AssertionError()<block_end><if_stmt>istrain<block_start>x_data=x_data[subj : : : :]<line_sep>y_data=y_data[subj :]<block_end><else_stmt><block_start>x_data=x_data[subj 100:200 : : :]# tests sess2 online
y_data=y_data[subj 100:200]<block_end><block_end><else_stmt><block_start><raise>AssertionError()<block_end>self.x_data=x_data.reshape(-1 1 62 500)<line_sep>self.y_data=y_data.reshape(-1)<line_sep>self.len=self.y_data.shape[0]<line_sep>self.label_to_indices={label:np.where(self.y_data<eq>label)[0]<for>label self.y_data}<line_sep>random_state=np.random.RandomState(29)<if_stmt><not>istrain<block_start>self.labels_set=set(self.y_data)<line_sep>self.label_to_indices={label:np.where(self.y_data<eq>label)[0]<for>label self.labels_set}<line_sep>triplets=[[i random_state.choice(self.label_to_indices[self.y_data[i].item()]) random_state.choice(self.label_to_indices[np.random.choice(list(self.labels_set-set([self.y_data[i].item()])))])]<for>i range(len(self.x_data))]<line_sep>self.test_triplets=triplets<block_end><block_end><def_stmt>__getitem__ self index<block_start><if_stmt>self.istrain<block_start>img1=self.x_data[index : : 100:500]<line_sep>y1=self.y_data[index]<line_sep>positive_index=index<while_stmt>positive_index<eq>index<block_start>positive_index=np.random.choice(self.label_to_indices[y1])<block_end><if_stmt>y1<eq>1<block_start>negative_index=np.random.choice(self.label_to_indices[0])<block_end><else_stmt><block_start>negative_index=np.random.choice(self.label_to_indices[1])<block_end>img2=self.x_data[positive_index : : 100:500]<line_sep>img3=self.x_data[negative_index : : 100:500]<line_sep>y2=self.y_data[positive_index]<line_sep>y3=self.y_data[negative_index]<block_end><else_stmt><block_start>img1=self.x_data[self.test_triplets[index][0] : : 100:500]<line_sep>img2=self.x_data[self.test_triplets[index][1] : : 100:500]<line_sep>img3=self.x_data[self.test_triplets[index][2] : : 100:500]<line_sep>y1=self.y_data[self.test_triplets[index][0]]<line_sep>y2=self.y_data[self.test_triplets[index][1]]<line_sep>y3=self.y_data[self.test_triplets[index][2]]<block_end>img1=torch.from_numpy(img1).type(torch.FloatTensor)<line_sep>img2=torch.from_numpy(img2).type(torch.FloatTensor)<line_sep>img3=torch.from_numpy(img3).type(torch.FloatTensor)<if_stmt>self.transform<is><not><none><block_start>img1=self.transform(img1)<line_sep>img2=self.transform(img2)<line_sep>img3=self.transform(img3)<block_end><return>(img1 img2 img3) []<block_end><def_stmt>__len__ self<block_start><return>self.len<block_end><block_end>#%%
<class_stmt>GigaDataset(Dataset)<block_start><def_stmt>__init__ self x y valtype transform=<none> istrain=<true> sess=1 subj=<none><block_start>self.transform=transform<line_sep>self.istrain=istrain<line_sep>x_data=x.copy()<line_sep>y_data=y.copy()<line_sep>x_data=x_data.reshape(108 -1 1 62 500)<line_sep>y_data=y_data.reshape(108 -1)<if_stmt>valtype<eq>'sess'<block_start><if_stmt>istrain<block_start>x_data=x_data[np.s_[0:54] : : : :]<line_sep>y_data=y_data[np.s_[0:54] :]<block_end><else_stmt><block_start>x_data=x_data[np.s_[0+54:54+54] 100:200 : : :]#tests sess2 online
y_data=y_data[np.s_[0+54:54+54] 100:200]<block_end><block_end><elif_stmt>valtype<eq>'loso'<block_start><if_stmt>subj<eq><none><block_start><raise>AssertionError()<block_end><if_stmt>istrain<block_start>x_data=np.delete(x_data np.s_[subj subj+54] 0)#leave one subj
y_data=np.delete(y_data np.s_[subj subj+54] 0)<block_end><else_stmt><block_start>x_data=x_data[np.s_[subj+54] 100:200 : : :]# tests sess2 online
y_data=y_data[np.s_[subj+54] 100:200]<block_end><block_end><elif_stmt>valtype<eq>'subj'<block_start><if_stmt>subj<eq><none><block_start><raise>AssertionError()<block_end><if_stmt>istrain<block_start>x_data=x_data[subj : : : :]<line_sep>y_data=y_data[subj :]<block_end><else_stmt><block_start>x_data=x_data[subj 100:200 : : :]# tests sess2 online
y_data=y_data[subj 100:200]<block_end><block_end><else_stmt><block_start><raise>AssertionError()<block_end>x_data=x_data.reshape(-1 1 62 500)<line_sep>y_data=y_data.reshape(-1)<line_sep>self.len=y_data.shape[0]<line_sep>x_data=torch.from_numpy(x_data)<line_sep>self.x_data=x_data.type(torch.FloatTensor)<line_sep>y_data=torch.from_numpy(y_data)<line_sep>self.y_data=y_data.long()<block_end><def_stmt>__getitem__ self index<block_start>x=self.x_data[index : : 100:500]<line_sep>y=self.y_data[index]<line_sep># fs =100
# N = 400
# import librosa
# import librosa.display
#
# xtemp = x.clone().view(-1)
# f, t, Zxx = signal.spectrogram(xtemp,fs=fs,mode='psd')
#
# D = np.abs(librosa.stft(xtemp.numpy(),n_fft=30,center=False))
#
# librosa.display.specshow(librosa.amplitude_to_db(D,ref=np.max),y_axis='log', x_axis='time')
# f, t, Zxx = signal.spectrogram(x[0,:,:],fs=fs,nperseg=60,noverlap=49,mode='psd')
#
# plt.pcolormesh(t, f,Zxx)
# plt.title('STFT Magnitude')
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.show()
# x = torch.from_numpy(Zxx)
# Normalize your data here
<if_stmt>self.transform<block_start>x=self.transform(x)<block_end><return>x y<block_end><def_stmt>__len__ self<block_start><return>self.len<block_end><block_end><def_stmt>load_smt path='C:/Users/dk/PycharmProjects/data/giga'<block_start><with_stmt>open(path+'/epoch_labels.pkl' 'rb')<as>f<block_start>y_data=pickle.load(f)<block_end><with_stmt>open(path+'/smt1_scale.pkl' 'rb')<as>f<block_start>x_data1=pickle.load(f)<block_end><with_stmt>open(path+'/smt2_scale.pkl' 'rb')<as>f<block_start>x_data2=pickle.load(f)<block_end>x_data=np.concatenate([x_data1 x_data2])<line_sep>x_data=np.expand_dims(x_data axis=1)<line_sep><return>x_data y_data<block_end><def_stmt>main <block_start><import_stmt>torch<import_from_stmt>torch.optim lr_scheduler<import_stmt>torch.optim<as>optim<import_from_stmt>torch.autograd Variable<import_from_stmt>trainer fit<import_stmt>numpy<as>np<line_sep>cuda=torch.cuda.is_available()<line_sep># Training settings
parser=argparse.ArgumentParser(description='PyTorch MNIST Example')<line_sep>parser.add_argument('--batch-size' type=int default=100 metavar='N' help='input batch size for training (default: 64)')<line_sep>parser.add_argument('--test-batch-size' type=int default=100 metavar='N' help='input batch size for testing (default: 1000)')<line_sep>parser.add_argument('--epochs' type=int default=100 metavar='N' help='number of epochs to train (default: 10)')<line_sep>parser.add_argument('--lr' type=float default=0.001 metavar='LR' help='learning rate (default: 0.01)')<line_sep>parser.add_argument('--momentum' type=float default=0.5 metavar='M' help='SGD momentum (default: 0.5)')<line_sep>parser.add_argument('--no-cuda' action='store_true' default=<false> help='disables CUDA training')<line_sep>parser.add_argument('--seed' type=int default=1 metavar='S' help='random seed (default: 1)')<line_sep>parser.add_argument('--log-interval' type=int default=10 metavar='N' help='how many batches to wait before logging training status')<line_sep>parser.add_argument('--save-model' action='store_true' default=<true> help='For Saving the current Model')<line_sep>args=parser.parse_args()<line_sep>use_cuda=<not>args.no_cuda<and>torch.cuda.is_available()<line_sep>torch.manual_seed(args.seed)<line_sep>np.random.seed(args.seed)<line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.backends.cudnn.benchmark=<false><line_sep>device=torch.device("cuda"<if>use_cuda<else>"cpu")<line_sep>kwargs={'num_workers':1 'pin_memory':<true>}<if>use_cuda<else>{}<import_from_stmt>datetime datetime<import_stmt>os<line_sep>loging=<false><line_sep>ismultitask=<false><line_sep>loso=<false><if_stmt>(args.save_model)<block_start>model_save_path='model/triplet/'<if_stmt><not>os.path.isdir(model_save_path)<block_start>os.makedirs(model_save_path)<block_end><block_end><if_stmt>loging<block_start>fname=model_save_path+datetime.today().strftime("%m_%d_%H_%M")+".txt"<line_sep>f=open(fname 'w')<block_end>x_data,y_data=load_smt()<line_sep># nonbciilli = np.s_[0,1,2,4,5,8,16,17,18,20,21,27,28,29,30,32,35,36,38,42,43,44,51]
valtype='sess'<if_stmt>valtype<eq>'loso'<block_start><for_stmt>subj range(0 54)<block_start>model=Deep4CNN(ismult=ismultitask).to(device)<line_sep>#model.load_state_dict(torch.load(model_save_path+ "J_" + str(subj) + 'basecnn.pt'))
optimizer=optim.SGD(model.parameters() lr=args.lr momentum=args.momentum)<line_sep>optimizer_fine=optim.SGD(model.parameters() lr=0.005 momentum=args.momentum)<line_sep>dataset_train=GigaDataset(x=x_data y=y_data valtype=valtype istrain=<true> sess=1 subj=subj)<line_sep>train_loader=torch.utils.data.DataLoader(dataset_train batch_size=args.batch_size shuffle=<true> **kwargs)<line_sep>dataset_test=GigaDataset(x=x_data y=y_data valtype=valtype istrain=<false> sess=2 subj=subj)<line_sep>test_loader=torch.utils.data.DataLoader(dataset_test batch_size=args.batch_size shuffle=<false> **kwargs)<line_sep># dataset_fine = GigaDataset_LOSO(x=x_data, y=y_data, fine=True, istrain=True, sess=2, subj=subj)
# fine_loader = torch.utils.data.DataLoader(dataset_fine, batch_size=args.batch_size, shuffle=True, **kwargs)
<for_stmt>epoch range(1 args.epochs+1)<block_start>train(args model device train_loader optimizer epoch)<line_sep>print("joint-train")<line_sep>#LOSO joint training
j_loss,j_score=eval(args model device test_loader)<if_stmt>epoch<g>30<block_start><if_stmt>(args.save_model)<block_start>torch.save(model.state_dict() model_save_path+"model_"+str(subj)+"_"+str(epoch)+'.pt')<block_end><block_end><block_end># #fine tuning
# for epoch in range(1, 10):
# train_mt(args, model, device, fine_loader, optimizer_fine, epoch)
#
# print("fine-tuning")
# f_loss, f_score = eval(args, model, device, test_loader)
<if_stmt>(args.save_model)<block_start>torch.save(model.state_dict() model_save_path+"F_"+str(subj)+'basecnn.pt')<block_end><if_stmt>loging<block_start>f=open(fname 'a')<line_sep>f.write(str(subj)+" "+"jl : "+str(j_loss)+" "+str(j_score)+'\n')<line_sep>f.close()<block_end><block_end><block_end><elif_stmt>valtype<eq>'sess'<block_start><import_from_stmt>networks EmbeddingDeep4CNN TripletNet FineShallowCNN EmbeddingDeepCNN<import_from_stmt>losses TripletLoss<line_sep>margin=1<line_sep>embedding_net=EmbeddingDeep4CNN()<line_sep>print(embedding_net)<line_sep>model=TripletNet(embedding_net)<if_stmt>cuda<block_start>model.cuda()<block_end>loss_fn=TripletLoss(margin)<line_sep>lr=1e-3<line_sep>#optimizer = optim.Adam(model.parameters(), lr=lr)
n_epochs=5<line_sep>#%%
log_interval=10<if_stmt>n_epochs<eq>0<block_start><pass><line_sep>#model.load_state_dict(torch.load('triplet_deep4_1000_2.pt'))
<block_end><else_stmt># train the triplet network
# For classification
<block_start>dataset_train=GigaDataset(x=x_data y=y_data valtype=valtype istrain=<true> sess=1)<line_sep>train_loader=torch.utils.data.DataLoader(dataset_train batch_size=args.batch_size shuffle=<true> **kwargs)<line_sep>dataset_test=GigaDataset(x=x_data y=y_data valtype=valtype istrain=<false> sess=2 subj=-1)<line_sep>test_loader=torch.utils.data.DataLoader(dataset_test batch_size=args.batch_size shuffle=<false> **kwargs)<line_sep>triplet_dataset_train=TripletGiga(x=x_data y=y_data valtype=valtype istrain=<true> sess=1)<line_sep>triplet_train_loader=torch.utils.data.DataLoader(triplet_dataset_train batch_size=args.batch_size shuffle=<true> **kwargs)<line_sep>triplet_dataset_test=TripletGiga(x=x_data y=y_data valtype=valtype istrain=<false> sess=2 subj=-1)<line_sep>triplet_test_loader=torch.utils.data.DataLoader(triplet_dataset_test batch_size=args.batch_size shuffle=<false> **kwargs)<line_sep>optimizer=optim.SGD(model.parameters() lr=0.001 momentum=0.9)<line_sep>scheduler=lr_scheduler.StepLR(optimizer 8 gamma=1 last_epoch=-1)<import_from_stmt>trainer fit<line_sep>fit(triplet_train_loader triplet_test_loader model loss_fn optimizer scheduler n_epochs cuda log_interval)<block_end>#%%
train_embeddings_tl,train_labels_tl=extract_embeddings(train_loader embedding_net 1000)<line_sep># plot_embeddings(train_embeddings_tl, train_labels_tl)
val_embeddings_tl,val_labels_tl=extract_embeddings(test_loader embedding_net 1000)<line_sep># plot_embeddings(val_embeddings_tl, val_labels_tl)
# #
<import_from_stmt>sklearn.pipeline Pipeline<import_from_stmt>sklearn.discriminant_analysis LinearDiscriminantAnalysis<import_from_stmt>sklearn.model_selection ShuffleSplit cross_val_score<line_sep>lda=LinearDiscriminantAnalysis()<line_sep>lda.fit(train_embeddings_tl train_labels_tl)<line_sep>print(lda.score(val_embeddings_tl val_labels_tl))<line_sep># from torchvision import datasets, models, transforms
# temp = model.embedding_net.children()
# newmodel = torch.nn.Sequential(*(list(model.embedding_net.children())[:]))
# for param in model.embedding_net.parameters():
# param.requires_grad = True
#newembedding_net = torch.nn.Sequential(*(list(model.embedding_net.children())[:]))
#
<import_from_stmt>sklearn.manifold TSNE<line_sep>tsne=TSNE(n_components=2 perplexity=30)<line_sep>train_tsne=tsne.fit_transform(val_embeddings_tl)<line_sep>plot_embeddings(train_tsne val_labels_tl)<for_stmt>param model.embedding_net.parameters()<block_start>param.requires_grad=<true><block_end>#embedding_net2 = EmbeddingDeep4CNN()
newmodel=nn.Sequential(model.embedding_net nn.Linear(1000 2) nn.LogSoftmax(dim=1)).to(device)<line_sep>print(newmodel)<line_sep>#newmodel.fc_lr = nn.Linear(1000,2)
newmodel.to(device)<line_sep>optimizer=optim.SGD(newmodel.parameters() lr=0.01 momentum=0.9)<line_sep>#optimizer = optim.Adam(newmodel.parameters())
<for_stmt>epoch range(1 20)<block_start>train(args newmodel device train_loader optimizer epoch)<line_sep>j_loss,j_score=eval(args newmodel device test_loader)<block_end><if_stmt>args.save_model<block_start>torch.save(model.state_dict() 'triplet_deep4_1000_2.pt')<block_end><block_end><block_end><class_stmt>FineNet(nn.Module)# shallowconv
<block_start><def_stmt>__init__ self EmbeddingNet<block_start>super(FineNet self).__init__()<line_sep>self.EmbeddingNet=EmbeddingNet<line_sep>self.fc_lr=nn.Linear(2000 2)<block_end><def_stmt>forward self x<block_start>x=self.EmbeddingNet(x)<line_sep>#x = x.view(x.size()[0], -1)
x=self.fc_lr(x)<line_sep>x=F.dropout(x training=self.training p=0.5)<line_sep>x=F.log_softmax(x dim=1)<line_sep><return>x<block_end><def_stmt>get_embedding self x<block_start><return>self.forward(x)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_stmt>logging<import_stmt>re<import_from_stmt>contextlib contextmanager<import_from_stmt>sys exc_info<import_from_stmt>mondrian term<line_sep>logger=logging.getLogger(__name__)<line_sep>@contextmanager<def_stmt>sweeten_errors <block_start><try_stmt><block_start><yield><block_end><except_stmt>Exception<as>exc<block_start>SPACES=2<line_sep>w=term.white<line_sep>prefix=w("║"+" "<times>(SPACES-1))<line_sep>suffix=w(" "<times>(SPACES-1)+"║")<line_sep>pre_re=re.compile("([^`]*)`([^`]*)`([^`]*)")<def_stmt>format_arg arg<block_start>length=len(pre_re.sub("\\1\\2\\3" arg))<line_sep>arg=pre_re.sub(w("\\1")+term.bold("\\2")+w("\\3") arg)<line_sep>arg=re.sub(r"^ \$ (.*)" term.lightblack(" $ ")+term.reset("\\1") arg)<line_sep><return>(arg length)<block_end><def_stmt>f *args<block_start><return>"".join(args)<block_end>term_width,term_height=term.get_size()<line_sep>line_length=min(80 term_width)<for_stmt>arg exc.args<block_start>line_length=max(min(line_length len(arg)+2<times>SPACES) 120)<block_end>print(f(w("╔"+"═"<times>(line_length-2)+"╗")))<for_stmt>i,arg enumerate(exc.args)<block_start><if_stmt>i<eq>1<block_start>print(f(prefix " "<times>(line_length-2<times>SPACES) suffix))<block_end>arg_formatted,arg_length=format_arg(arg)<if_stmt><not>i# first line
<block_start>print(f(prefix term.red_bg(term.bold(" "+type(exc).__name__+" ")) " " w(arg_formatted) " "<times>(line_length-(arg_length+3+len(type(exc).__name__)+2<times>SPACES)) suffix ))<block_end><else_stmt># other lines
<block_start>print(f(prefix arg_formatted+" "<times>(line_length-arg_length-2<times>SPACES) suffix))<block_end><block_end>print(f(w("╚"+"═"<times>(line_length-2)+"╝")))<line_sep>logging.getLogger().debug("This error was caused by the following exception chain." exc_info=exc_info())<block_end><block_end>
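# Usage sketch (illustrative, not part of the original module): wrap entry-point code
# so an uncaught exception is rendered in the framed, colourised box built above
# instead of a raw traceback. The config file name and shell hint are hypothetical.
#
#     with sweeten_errors():
#         raise RuntimeError('Cannot find `config.yml`.', ' $ touch config.yml')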
|
<import_from_stmt>haven haven_wizard<as>hw<import_stmt>wandb<import_stmt>sys<import_stmt>os<import_stmt>pprint<line_sep>path=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<line_sep>sys.path.insert(0 path)<if_stmt>__name__<eq>"__main__"# first way
<block_start>score_dict={"loss":loss}<line_sep>wandb.log(score_dict)<line_sep># second way
chk=load_checkpoint(savedir)<line_sep>hw.save_checkpoint(savedir score_dict=score_dict wandb_config={})<block_end>
|
<class_stmt>VectorizableBackedModel(object)<block_start>r"""
Mixin for models constructed from a set of :map:`Vectorizable` objects.
Supports models for which visualizing the meaning of a set of components
is trivial.
Requires that the following methods are implemented:
1. `component_vector(index)`
2. `instance_vector(weights)`
3. `project_vector(vector)`
4. `reconstruct_vector(vectors)`
5. `project_out_vector(vector)`
The constructor takes an instance of :map:`Vectorizable`. This is used for
all conversions to and from numpy vectors and instances.
Parameters
----------
template_instance : :map:`Vectorizable`
The template instance.
"""<def_stmt>__init__ self template_instance<block_start>self.template_instance=template_instance<block_end><def_stmt>component_vector self index<block_start>r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component_vector : `ndarray`
The component vector.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>component self index<block_start>r"""
A particular component of the model.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component : `type(self.template_instance)`
The component instance.
"""<line_sep><return>self.template_instance.from_vector(self.component_vector(index))<block_end><def_stmt>instance_vector self weights<block_start>"""
Creates a new vector instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance_vector : `ndarray`
An instance of the model, in vectorized form.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>instance self weights<block_start>"""
Creates a new instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance : `type(self.template_instance)`
An instance of the model.
"""<line_sep><return>self.template_instance.from_vector(self.instance_vector(weights))<block_end><def_stmt>project_vector self instance_vector<block_start>"""
Projects the `instance_vector` onto the model, retrieving the optimal
linear weightings.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
projected_vector : ``(n_components,)`` `ndarray`
A vector of optimal linear weightings.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>project self instance<block_start>"""
Projects the `instance` onto the model, retrieving the optimal
linear weightings.
Parameters
----------
instance : :map:`Vectorizable`
A novel instance.
Returns
-------
projected : ``(n_components,)`` `ndarray`
A vector of optimal linear weightings.
"""<line_sep><return>self.project_vector(instance.as_vector())<block_end><def_stmt>reconstruct_vector self instance_vector<block_start>"""
Projects an `instance_vector` onto the linear space and rebuilds from the
weights found.
Syntactic sugar for: ::
instance_vector(project_vector(instance_vector))
but faster, as it avoids the conversion that takes place each time.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
reconstructed_vector : `ndarray`
The reconstructed vector.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>reconstruct self instance<block_start>"""
Projects an `instance` onto the linear space and rebuilds from the
weights found.
Syntactic sugar for: ::
instance(project(instance))
but faster, as it avoids the conversion that takes place each time.
Parameters
----------
instance : :class:`Vectorizable`
A novel instance of :class:`Vectorizable`.
Returns
-------
reconstructed : `self.instance_class`
The reconstructed object.
"""<line_sep>reconstruction_vector=self.reconstruct_vector(instance.as_vector())<line_sep><return>instance.from_vector(reconstruction_vector)<block_end><def_stmt>project_out_vector self instance_vector<block_start>"""
Returns a version of `instance_vector` where all the bases of the model
have been projected out.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
projected_out_vector : `ndarray`
A copy of `instance_vector`, with all bases of the model projected out.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>project_out self instance<block_start>"""
Returns a version of `instance` where all the bases of the model
have been projected out.
Parameters
----------
instance : :class:`Vectorizable`
A novel instance of :class:`Vectorizable`.
Returns
-------
projected_out : `self.instance_class`
A copy of `instance`, with all bases of the model projected out.
"""<line_sep>vector_instance=self.project_out_vector(instance.as_vector())<line_sep><return>instance.from_vector(vector_instance)<block_end><block_end>
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
<import_stmt>pytest<import_from_stmt>datadog_checks.dev.utils get_metadata_metrics<import_from_stmt>.common ALLOWED_METRICS CONFIG LEGACY_CONFIG METRICS_TO_TEST assert_check<line_sep>pytestmark=pytest.mark.e2e<def_stmt>test_e2e_legacy dd_agent_check<block_start>aggregator=dd_agent_check(LEGACY_CONFIG rate=<true>)<line_sep>assert_check(aggregator ALLOWED_METRICS)<line_sep>aggregator.assert_metrics_using_metadata(get_metadata_metrics())<block_end><def_stmt>test_e2e dd_agent_check<block_start>aggregator=dd_agent_check(CONFIG rate=<true>)<line_sep>assert_check(aggregator METRICS_TO_TEST)<line_sep># Excluding gitlab.rack.http_requests_total because it is a distribution metric
# (its sum and count metrics are in the metadata)
aggregator.assert_metrics_using_metadata(get_metadata_metrics() exclude=["gitlab.rack.http_requests_total"])<block_end>
|
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
<import_stmt>reframe<as>rfm<import_stmt>reframe.utility.sanity<as>sn<import_stmt>os<class_stmt>HelloFixture(rfm.RunOnlyRegressionTest)<block_start>executable='echo hello from fixture'<line_sep>@sanity_function<def_stmt>assert_output self<block_start><return>sn.assert_found(r'hello from fixture' self.stdout)<block_end><block_end>@rfm.simple_test<class_stmt>HelloTest(HelloFixture)<block_start>valid_systems=['*']<line_sep>valid_prog_environs=['*']<block_end>@rfm.simple_test<class_stmt>TestA(rfm.RunOnlyRegressionTest)<block_start>valid_systems=['*']<line_sep>valid_prog_environs=['*']<line_sep>executable='/bin/true'<line_sep># Declare the fixture
f=fixture(HelloFixture scope='session')<line_sep>@sanity_function<def_stmt>inspect_fixture self<block_start><return>sn.assert_found(r'hello from fixture' os.path.join(self.f.stagedir self.f.stdout.evaluate()))<block_end><block_end>@rfm.simple_test<class_stmt>TestB(TestA)<block_start>'''Use a test as a fixture'''<line_sep>ff=fixture(HelloTest scope='session')<block_end>
|
# License: BSD 3 clause
<import_stmt>io unittest<import_stmt>numpy<as>np<import_stmt>pickle<import_from_stmt>scipy.sparse csr_matrix<import_from_stmt>tick.base_model.tests.generalized_linear_model TestGLM<import_from_stmt>tick.prox ProxL1<import_from_stmt>tick.linear_model ModelLinReg SimuLinReg<import_from_stmt>tick.linear_model ModelLogReg SimuLogReg<import_from_stmt>tick.linear_model ModelPoisReg SimuPoisReg<import_from_stmt>tick.linear_model ModelHinge ModelQuadraticHinge ModelSmoothedHinge<import_from_stmt>tick.robust ModelAbsoluteRegression ModelEpsilonInsensitive ModelHuber ModelLinRegWithIntercepts ModelModifiedHuber<import_from_stmt>tick.simulation weights_sparse_gauss<class_stmt>Test(TestGLM)<block_start><def_stmt>test_robust_model_serialization self<block_start>"""...Test serialization of robust models
"""<line_sep>model_map={ModelAbsoluteRegression:SimuLinReg ModelEpsilonInsensitive:SimuLinReg ModelHuber:SimuLinReg ModelLinRegWithIntercepts:SimuLinReg ModelModifiedHuber:SimuLogReg}<for_stmt>mod model_map<block_start>np.random.seed(12)<line_sep>n_samples,n_features=100 5<line_sep>w0=np.random.randn(n_features)<line_sep>intercept0=50<times>weights_sparse_gauss(n_weights=n_samples nnz=30)<line_sep>c0=<none><line_sep>X,y=SimuLinReg(w0 c0 n_samples=n_samples verbose=<false> seed=2038).simulate()<if_stmt>mod<eq>ModelLinRegWithIntercepts<block_start>y<augadd>intercept0<block_end>model=mod(fit_intercept=<false>).fit(X y)<line_sep>pickled=pickle.loads(pickle.dumps(model))<line_sep>self.assertTrue(model._model.compare(pickled._model))<if_stmt>mod<eq>ModelLinRegWithIntercepts<block_start>test_vector=np.hstack((X[0] np.ones(n_samples)))<line_sep>self.assertEqual(model.loss(test_vector) pickled.loss(test_vector))<block_end><else_stmt><block_start>self.assertEqual(model.loss(X[0]) pickled.loss(X[0]))<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
<import_stmt>traceback<import_from_stmt>antlr4 *<import_from_stmt>.parser.specLexer specLexer<import_from_stmt>.parser.specParser specParser<import_from_stmt>.specParserVisitorImpl *<def_stmt>isolationTestHandler testFile fileWriter logger<block_start>testName=testFile.name.split('.')[0]<try_stmt><block_start>logger.info("Starting : {}".format(testName))<try_stmt><block_start>testSpec=parseSpecInput(str(testFile))<if_stmt>(testSpec<is><none>)<block_start><raise>Exception("TestSpec object is not generated")<block_end><else_stmt><block_start>print(testSpec)<line_sep>logger.info("Successfully parsed")<block_end><block_end><except_stmt>Exception<as>e<block_start>logger.error("Error while parsing : {}".format(str(e)))<line_sep><return><false><block_end>testSpec.logger=logger<line_sep>testSpec.fileWriter=fileWriter<line_sep>testSpec.initTestRun()<line_sep>logger.info("Completed : {}".format(testName))<line_sep><return><true><block_end><except_stmt>Exception<as>e<block_start>logger.error(str(e))<line_sep>traceback.print_exc()<block_end><return><false><block_end><def_stmt>parseSpecInput filename<block_start>input_stream=FileStream(filename)<line_sep>lexer=specLexer(input_stream)<line_sep>token_stream=CommonTokenStream(lexer)<line_sep>parser=specParser(token_stream)<line_sep>tree=parser.parse()<line_sep>visitor=specParserVisitorImpl()<line_sep>visitor.visit(tree)<line_sep><return>visitor.testSpec<block_end>
|
'''
Copyright (c) 2017 HERE Europe B.V.
See the LICENSE file in the root of this project for license details.
'''<import_stmt>glob<import_from_stmt>flatdata.generator.generators.go GoGenerator<import_from_stmt>.assertions generate_and_assert_in<import_from_stmt>.schemas schemas_and_expectations<import_from_stmt>nose.plugins.skip SkipTest<def_stmt>generate_and_compare test_case<block_start><with_stmt>open(test_case[0] 'r')<as>test_file<block_start>test=test_file.read()<block_end>expectations=list()<for_stmt>file glob.glob(test_case[1]+'*')<block_start><with_stmt>open(file 'r')<as>expectation_file<block_start>expectations.append(expectation_file.read())<block_end><block_end>generate_and_assert_in(test GoGenerator *expectations)<block_end><def_stmt>skip test_case<block_start><raise>SkipTest("Test %s is skipped"%test_case[0])<block_end><def_stmt>test_against_expectations <block_start><for_stmt>x schemas_and_expectations(generator='go' extension='go')# Go does not yet support namespaces, enums, ranges, or constants, skip those tests
<block_start><if_stmt>"enums"<not><in>x[0]<and>"constants"<not><in>x[0]<and>"namespaces"<not><in>x[0]<and>"ranges"<not><in>x[0]<block_start><yield>generate_and_compare x<block_end><else_stmt><block_start><yield>skip x<block_end><block_end><block_end>
|
<import_stmt>argparse<import_stmt>sys<import_stmt>tensorflow<as>tf<import_stmt>functools<import_from_stmt>ops *<import_from_stmt>loader *<def_stmt>doublewrap function<block_start>@functools.wraps(function)<def_stmt>decorator *args **kwargs<block_start><if_stmt>len(args)<eq>1<and>len(kwargs)<eq>0<and>callable(args[0])<block_start><return>function(args[0])<block_end><else_stmt><block_start><return><lambda>wrapee:function(wrapee *args **kwargs)<block_end><block_end><return>decorator<block_end>@doublewrap<def_stmt>define_scope function scope=<none> *args **kwargs<block_start>"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""<line_sep>attribute='_cache_'+function.__name__<line_sep>name=scope<or>function.__name__<line_sep>@[email protected](function)<def_stmt>decorator self<block_start><if_stmt><not>hasattr(self attribute)<block_start><with_stmt>tf.variable_scope(name *args **kwargs)<block_start>setattr(self attribute function(self))<block_end><block_end><return>getattr(self attribute)<block_end><return>decorator<block_end><class_stmt>Model<block_start><def_stmt>__init__ self image label dropout=0.5 conv_size=9 conv_stride=1 ksize=2 pool_stride=2 filter_num=128 padding="SAME"<block_start>self.image=image<line_sep>self.label=label<line_sep>self.dropout=dropout<line_sep>self.conv_size=conv_size<line_sep>self.conv_stride=conv_stride<line_sep>self.ksize=ksize<line_sep>self.pool_stride=pool_stride<line_sep>self.padding=padding<line_sep>self.filter_num=filter_num<line_sep>self.prediction<line_sep>self.optimize<line_sep>self.accuracy<block_end>@define_scope<def_stmt>prediction self<block_start><with_stmt>tf.variable_scope("model")<as>scope#input image
<block_start>input_image=self.image<line_sep>layers=[]<line_sep># conv_1 [batch, ngf, 5] => [batch, 64, ngf]
<with_stmt>tf.variable_scope("conv_1")<block_start>output=relu(conv1d(input_image self.filter_num name='conv_1'))<line_sep>layers.append(output)<block_end># conv_2 - conv_6
layer_specs=[(self.filter_num<times>2 0.5) # conv_2: [batch, 64, ngf] => [batch, 32, ngf * 2]
(self.filter_num<times>4 0.5) # conv_3: [batch, 32, ngf * 2] => [batch, 16, ngf * 4]
(self.filter_num<times>8 0.5) # conv_4: [batch, 16, ngf * 4] => [batch, 8, ngf * 8]
(self.filter_num<times>8 0.5) # conv_5: [batch, 8, ngf * 8] => [batch, 4, ngf * 8]
(self.filter_num<times>8 0.5)# conv_6: [batch, 4, ngf * 8] => [batch, 2, ngf * 8]
]<line_sep># adding layers
<for_stmt>_,(out_channels dropout) enumerate(layer_specs)<block_start><with_stmt>tf.variable_scope("conv_%d"%(len(layers)+1))<block_start>rectified=lrelu(layers[-1] 0.2)<line_sep># [batch, in_width, in_channels] => [batch, in_width/2, out_channels]
convolved=conv1d(rectified out_channels)<line_sep># batchnormalize convolved
output=batchnorm(convolved is_2d=<false>)<line_sep># dropout
<if_stmt>dropout<g>0.0<block_start>output=tf.nn.dropout(output keep_prob=1-dropout)<block_end>layers.append(output)<block_end><block_end>#fc1
h_fc1=relu(fully_connected(layers[-1] 256 name='fc1'))<line_sep>#dropout
h_fc1_drop=tf.nn.dropout(h_fc1 self.dropout)<line_sep>#fc2
result=tf.sigmoid(fully_connected(h_fc1_drop 2 name='fc2'))<line_sep><return>result<block_end><block_end>@define_scope<def_stmt>optimize self<block_start>cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.label logits=self.prediction))<line_sep><return>tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)<block_end>@define_scope<def_stmt>accuracy self<block_start>correct_prediction=tf.equal(tf.argmax(self.label 1) tf.argmax(self.prediction 1))<line_sep><return>tf.reduce_mean(tf.cast(correct_prediction tf.float32))<block_end># @define_scope
# def optimize(self):
# with tf.name_scope("loss"):
# loss = tf.reduce_mean(tf.abs(self.p_loss))
# tvars = tf.trainable_variables()
# optim = tf.train.AdamOptimizer(0.0001)
# grads_and_vars = optim.compute_gradients(loss, var_list=tvars)
# print(grads_and_vars)
# train = optim.apply_gradients(grads_and_vars)
# @define_scope
# def p_loss(self):
# outputs = self.prediction
# loss = []
# for i in range(len(outputs.get_shape().as_list())):
# weights = tf.matmul(outputs[i], label[i])
# def if_up():
# return weights[0]
# def if_down():
# return weights[1]
# result = tf.cond(pred, if_true, if_false)
# if (outputs[i][0] > outputs[i][1]):
# if (label[i][0] > 0):
# loss.append(outputs[i][1] * label[i][0])
# else:
# loss.append(outputs[i][0] * label[i][0])
# else:
# if (label[i][0] < 0):
# loss.append(outputs[i][0] * label[i][0])
# else:
# loss.append(outputs[i][1] * label[i][0])
# loss = tf.cast(loss, tf.float32)
# loss = tf.abs(loss)
# return loss
<block_end><def_stmt>main # Import data
<block_start>db=load_stock_data("data/aapl/")<line_sep># Construct graph
image=tf.placeholder(tf.float32 [<none> 128 5])<line_sep>label=tf.placeholder(tf.float32 [<none> 2])<line_sep>dropout=tf.placeholder(tf.float32)<line_sep>model=Model(image label dropout=dropout)<line_sep># Saver
saver=tf.train.Saver()<line_sep># Session
config=tf.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><with_stmt>tf.Session(config=config)<as>sess<block_start>sess.run(tf.global_variables_initializer())<for_stmt>i range(500000)<block_start>images,labels=db.train.next_batch(10)<if_stmt>i%100<eq>0<block_start>images_eval,labels_eval=db.test.next_batch(1000)<line_sep>accuracy=sess.run(model.accuracy {image:images_eval label:labels_eval dropout:1.0})<line_sep>print('step %d, accuracy %g'%(i accuracy))<block_end>sess.run(model.optimize {image:images label:labels dropout:0.5})<if_stmt>i%10000<eq>0<block_start>save_path='checkpoints/'<line_sep>model_name='stocks_model.ckpt'<if_stmt><not>os.path.exists(save_path)<block_start>os.makedirs(save_path)<block_end>save_path_full=os.path.join(save_path model_name)<line_sep>saver.save(sess save_path_full global_step=i+1)<block_end><block_end>images_eval,labels_eval=db.test.next_batch(1000)<line_sep>accuracy=sess.run(model.accuracy {image:images_eval label:labels_eval dropout:1.0})<line_sep>print('final accuracy on testing set: %g'%(accuracy))<block_end>print("finished")<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_from_stmt>tests.integration.feature_repos.integration_test_repo_config IntegrationTestRepoConfig <import_from_stmt>tests.integration.feature_repos.universal.online_store.hbase HbaseOnlineStoreCreator <line_sep>FULL_REPO_CONFIGS=[IntegrationTestRepoConfig(online_store_creator=HbaseOnlineStoreCreator) ]<line_sep>
|
<import_from_stmt>torch optim<import_from_stmt>..models.model Model<import_from_stmt>..utils tolist<import_from_stmt>..losses ELBO<class_stmt>VI(Model)<block_start>"""
Variational Inference (Amortized inference)
The ELBO for given distributions (p, approximate_dist) is set as the loss class of this model.
"""<def_stmt>__init__ self p approximate_dist other_distributions=[] optimizer=optim.Adam optimizer_params={} clip_grad_norm=<none> clip_grad_value=<none><block_start>"""
Parameters
----------
p : torch.distributions.Distribution
Generative model (distribution).
approximate_dist : torch.distributions.Distribution
Approximate posterior distribution.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""<line_sep># set distributions (for training)
distributions=[p approximate_dist]+tolist(other_distributions)<line_sep># set losses
elbo=ELBO(p approximate_dist)<line_sep>loss=-elbo.mean()<line_sep>super().__init__(loss test_loss=loss distributions=distributions optimizer=optimizer optimizer_params=optimizer_params clip_grad_norm=clip_grad_norm clip_grad_value=clip_grad_value)<block_end><def_stmt>train self train_x_dict={} **kwargs<block_start><return>super().train(train_x_dict **kwargs)<block_end><def_stmt>test self test_x_dict={} **kwargs<block_start><return>super().test(test_x_dict **kwargs)<block_end><block_end>
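# Usage sketch (an assumption, not from the original source): given pixyz
# distributions `p` (generative model) and `q` (approximate posterior) defined
# elsewhere, amortized VI training reduces to:
#
#     model = VI(p, q)
#     for x_batch in loader:            # `loader` is a hypothetical data iterator
#         train_loss = model.train({"x": x_batch})
#     test_loss = model.test({"x": x_test})
#
# The dictionary keys are assumed to match the variable names declared on the
# distributions, as is usual in the pixyz API.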
|
<import_from_stmt>distutils.core setup<line_sep>setup(name='alphabet-detector' packages=['alphabet_detector'] version='0.0.7' description='A library to detect what alphabet something is written in.' author='<NAME>' author_email='<EMAIL>' url='https://github.com/EliFinkelshteyn/alphabet-detector' download_url='https://github.com/EliFinkelshteyn/'<concat>'alphabet-detector/tarball/0.0.7' keywords=['alphabet' 'charset' 'detect' 'islatin'] classifiers=['Development Status :: 5 - Production/Stable' 'Intended Audience :: Developers' 'Natural Language :: English' 'License :: OSI Approved :: MIT License' 'Operating System :: OS Independent' 'Programming Language :: Python' 'Programming Language :: Python :: 2' 'Programming Language :: Python :: 2.6' 'Programming Language :: Python :: 2.7' 'Programming Language :: Python :: 3' 'Programming Language :: Python :: 3.3' 'Topic :: Software Development :: Libraries :: Python Modules' ] )<line_sep>
|
# SPDX-License-Identifier: MIT
<import_stmt>os<import_stmt>ida_bytes<import_stmt>idaapi<import_stmt>idc<line_sep># definitions from PE file structure
IMAGE_FILE_MACHINE_IA64=0x8664<line_sep>IMAGE_FILE_MACHINE_I386=0x014C<line_sep>PE_OFFSET=0x3C<line_sep>IMAGE_SUBSYSTEM_EFI_APPLICATION=0xA<line_sep>IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER=0xB<line_sep>IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER=0xC<class_stmt>Table<block_start>"""build table from array"""<def_stmt>__init__ self table_data<block_start>self.table_data=table_data<line_sep>self.max_sizes=self._get_max_sizes()<line_sep>self.angle="+"<line_sep>self.gl="-"<line_sep>self.vl="|"<block_end><def_stmt>_get_max_sizes self<block_start>num=len(self.table_data[0])<line_sep>sizes=[0<for>_ range(num)]<for_stmt>i range(len(self.table_data[0]))<block_start><for_stmt>j range(len(self.table_data))<block_start><if_stmt>len(self.table_data[j][i])<g>sizes[i]<block_start>sizes[i]=len(self.table_data[j][i])<block_end><block_end><block_end><return>sizes<block_end>@classmethod<def_stmt>display cls table_data<block_start>cls=Table(table_data)<line_sep>table=cls.angle+f"{cls.angle}".join([((cls.gl<times>(size+2)))<for>size cls.max_sizes])<line_sep>table<augadd>f"{cls.angle}\n{cls.vl} "<line_sep>table<augadd>f"{cls.vl} ".join([f"{cls.table_data[0][i]}{' '<times>(cls.max_sizes[i]-len(cls.table_data[0][i])+1)}"<for>i range(len(cls.table_data[0]))])<line_sep>table<augadd>f"{cls.vl}\n{cls.angle}"<line_sep>table<augadd>f"{cls.angle}".join([((cls.gl<times>(size+2)))<for>size cls.max_sizes])<line_sep>table<augadd>f"{cls.angle}\n"<for_stmt>j range(1 len(cls.table_data))<block_start>table<augadd>f"{cls.vl} "<line_sep>table<augadd>f"{cls.vl} ".join([f"{cls.table_data[j][i]}{' '<times>(cls.max_sizes[i]-len(cls.table_data[j][i])+1)}"<for>i range(len(cls.table_data[j]))])<line_sep>table<augadd>f"{cls.vl}\n"<block_end>table<augadd>cls.angle<line_sep>table<augadd>f"{cls.angle}".join([((cls.gl<times>(size+2)))<for>size cls.max_sizes])<line_sep>table<augadd>f"{cls.angle}"<line_sep><return>table<block_end><block_end><def_stmt>set_hexrays_comment address text<block_start>"""set comment in decompiled code"""<line_sep>cfunc=idaapi.decompile(address)<line_sep>tl=idaapi.treeloc_t()<line_sep>tl.ea=address<line_sep>tl.itp=idaapi.ITP_SEMI<line_sep>cfunc.set_user_cmt(tl text)<line_sep>cfunc.save_user_cmts()<block_end><def_stmt>check_guid address<block_start>"""correctness is determined based on the number of unique bytes"""<line_sep><return>len(set(ida_bytes.get_bytes(address 16)))<g>8<block_end><def_stmt>get_guid address<block_start>"""get GUID located by address"""<line_sep>guid=list()<line_sep>guid.append(idc.get_wide_dword(address))<line_sep>guid.append(idc.get_wide_word(address+4))<line_sep>guid.append(idc.get_wide_word(address+6))<for_stmt>addr range(address+8 address+16 1)<block_start>guid.append(idc.get_wide_byte(addr))<block_end><return>guid<block_end><def_stmt>get_guid_str guid_struct<block_start>guid=f"{guid_struct[0]:08X}-"<line_sep>guid<augadd>f"{guid_struct[1]:04X}-"<line_sep>guid<augadd>f"{guid_struct[2]:04X}-"<line_sep>guid<augadd>"".join([f"{guid_struct[i]:02X}"<for>i range(3 11)])<line_sep><return>guid<block_end><def_stmt>get_num_le bytearr<block_start>"""translate a set of bytes into a number in the little endian format"""<line_sep>num_le=0<for_stmt>i range(len(bytearr))<block_start>num_le<augadd>bytearr[i]<times>pow(256 i)<block_end><return>num_le<block_end><def_stmt>rev_endian num<block_start>"""reorders bytes in number"""<line_sep>num_str=f"{num:x}"<line_sep># yapf: disable
num_ba=([int('0x'+num_str[i:i+2] 16)<for>i range(0 len(num_str)-1 2)])<line_sep># yapf: enable
<return>get_num_le(num_ba)<block_end><def_stmt>get_machine_type header<block_start>"""get the architecture of the investigated file"""<if_stmt>len(header)<l>PE_OFFSET+1<block_start><return>"unknown"<block_end>PE_POINTER=header[PE_OFFSET]<line_sep>FH_POINTER=PE_POINTER+4<if_stmt>len(header)<l>FH_POINTER+3<block_start><return>"unknown"<block_end>machine_type=header[FH_POINTER:FH_POINTER+2:]<line_sep>type_value=get_num_le(machine_type)<if_stmt>type_value<eq>IMAGE_FILE_MACHINE_I386<block_start><return>"x86"<block_end><if_stmt>type_value<eq>IMAGE_FILE_MACHINE_IA64<block_start><return>"x64"<block_end><return>"unknown"<block_end><def_stmt>check_subsystem header<block_start>"""get the subsystem of the investigated file"""<if_stmt>len(header)<l>PE_OFFSET+1<block_start><return><false><block_end>PE_POINTER=header[PE_OFFSET]<if_stmt>len(header)<l>PE_POINTER+0x5D<block_start><return><false><block_end>subsystem=header[PE_POINTER+0x5C]<line_sep><return>(subsystem<eq>IMAGE_SUBSYSTEM_EFI_APPLICATION<or>subsystem<eq>IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER<or>subsystem<eq>IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER)<block_end><def_stmt>get_header_idb <block_start>"""get file header from idb"""<if_stmt>idc.get_segm_name(0)<eq>"HEADER"<block_start>header=bytearray([idc.get_wide_byte(ea)<for>ea range(0 idc.get_segm_end(0))])<line_sep><return>header<block_end><return>bytearray(b"")<block_end><def_stmt>get_header_file <block_start>"""get file header from analysing file"""<line_sep>buf=bytes()<if_stmt>os.path.isfile(idaapi.get_input_file_path())<block_start><with_stmt>open(idaapi.get_input_file_path() "rb")<as>f<block_start>buf=f.read(512)<block_end><block_end><return>bytearray(buf)<block_end><def_stmt>get_dep_json res_json<block_start>"""get json for dependency browser and dependency graph"""<line_sep>CLIENT_PROTOCOL_SERVICES=("LocateProtocol" "OpenProtocol")<line_sep>dep_json=list()<for_stmt>module_info res_json<block_start><for_stmt>protocol module_info["protocols"]<block_start><if_stmt>(protocol["service"]<eq>"InstallProtocolInterface"<or>protocol["service"]<eq>"InstallMultipleProtocolInterfaces")<block_start>dep_json_item={"module_name":module_info["module_name"] "protocol_name":protocol["protocol_name"] "guid":protocol["guid"] "service":protocol["service"] }<line_sep>dep_json_item["used_by"]=list()<for_stmt>module_info res_json<block_start><for_stmt>protocol module_info["protocols"]<block_start><if_stmt>(protocol["service"]<in>CLIENT_PROTOCOL_SERVICES<and>protocol["guid"]<eq>dep_json_item["guid"])<block_start>dep_json_item["used_by"].append(module_info["module_name"])<block_end><block_end><block_end>dep_json.append(dep_json_item)<block_end><block_end><block_end><return>dep_json<block_end>
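# Worked example (added for clarity, not in the original file): get_num_le interprets
# a byte list as a little-endian integer, and rev_endian swaps the byte order of an
# even-length hex number.
#
#     get_num_le([0x78, 0x56, 0x34, 0x12])  # -> 0x12345678
#     rev_endian(0x1234)                    # -> 0x3412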
|
<import_from_stmt>pycocotools.coco COCO<import_stmt>os<import_stmt>sys<import_stmt>random<import_stmt>numpy<as>np<import_stmt>skimage.io<as>io<import_stmt>scipy<import_stmt>tensorflow<as>tf<import_from_stmt>dataset_utils int64_feature float_feature bytes_feature<line_sep># TFRecords conversion parameters.
SAMPLES_PER_FILES=5000<class_stmt>CoCoDataset(object)<block_start><def_stmt>__init__ self dataset_dir image_set='val2017'<block_start>super(CoCoDataset self).__init__()<line_sep>self._image_set=image_set<line_sep>self._ann_file=self.get_ann_file(dataset_dir self._image_set)<line_sep>self._filename_pattern=self.get_image_file_pattern(dataset_dir self._image_set)+'{}'<line_sep>self._coco=COCO(self._ann_file)<line_sep>self._cats=self._coco.loadCats(self._coco.getCatIds())<line_sep>self._classes=tuple(['none']+[c['name']<for>c self._cats])<line_sep>self._num_classes=len(self._classes)<line_sep>self._class_to_ind=dict(zip(self._classes list(range(self._num_classes))))<line_sep>self._ind_to_class=dict(zip(list(range(self._num_classes)) self._classes))<line_sep>self._super_classes=tuple(['background']+[c['supercategory']<for>c self._cats])<line_sep>self._class_to_coco_cat_id=dict(zip([c['name']<for>c self._cats] self._coco.getCatIds()))<line_sep>self._labels={'none':(0 'background') }<for_stmt>ind,cls enumerate(self._classes[1:])<block_start>self._labels[cls]=(self._class_to_ind[cls] self._super_classes[ind+1])<block_end>self._image_index=self._coco.getImgIds()<line_sep>self._num_examples=len(self._image_index)<block_end><def_stmt>get_ann_file self dataset_dir data_type<block_start><return>'{}/annotations/instances_{}.json'.format(dataset_dir data_type)<block_end><def_stmt>get_image_file_pattern self dataset_dir data_type<block_start><return>'{}/{}/'.format(dataset_dir data_type)<block_end><def_stmt>validate_boxes self boxes width=0 height=0<block_start>"""Check that a set of boxes are valid."""<line_sep>x1=boxes[: 0]<line_sep>y1=boxes[: 1]<line_sep>x2=boxes[: 2]<line_sep>y2=boxes[: 3]<assert_stmt>(x1<ge>0).all()<assert_stmt>(y1<ge>0).all()<assert_stmt>(x2<ge>x1).all()<assert_stmt>(y2<ge>y1).all()<assert_stmt>(x2<l>width).all()<assert_stmt>(y2<l>height).all()<block_end><def_stmt>_load_coco_annotation self index<block_start>"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""<line_sep>im_ann=self._coco.loadImgs(index)[0]<line_sep>filaname=im_ann['file_name']<line_sep>width=im_ann['width']<line_sep>height=im_ann['height']<line_sep>annIds=self._coco.getAnnIds(imgIds=index iscrowd=<none>)<line_sep>objs=self._coco.loadAnns(annIds)<line_sep># Sanitize bboxes -- some are invalid
valid_objs=[]<for_stmt>obj objs<block_start>x1=np.max((0 obj['bbox'][0]))<line_sep>y1=np.max((0 obj['bbox'][1]))<line_sep>x2=np.min((width-1 x1+np.max((0 obj['bbox'][2]-1))))<line_sep>y2=np.min((height-1 y1+np.max((0 obj['bbox'][3]-1))))<if_stmt>obj['iscrowd']<and>(self._image_set<eq>'train2017'<or>self._image_set<eq>'val2017')<block_start><continue><block_end><if_stmt>obj['area']<g>0<and>x2<ge>x1<and>y2<ge>y1#obj['clean_bbox'] = [x1, y1, x2, y2]
<block_start>obj['clean_bbox']=[y1/height x1/width y2/height x2/width]<line_sep>valid_objs.append(obj)<block_end><block_end>objs=valid_objs<line_sep>num_objs=len(objs)<line_sep>has_boxes=1<if_stmt>num_objs<eq>0<block_start>has_boxes=0<block_end>boxes=np.zeros((num_objs 4) dtype=np.float32)<line_sep>gt_classes=np.zeros((num_objs) dtype=np.int32)<line_sep>gt_iscrowd=np.zeros((num_objs) dtype=np.int32)<line_sep>seg_areas=np.zeros((num_objs) dtype=np.float32)<line_sep># Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind=dict([(self._class_to_coco_cat_id[cls] self._class_to_ind[cls])<for>cls self._classes[1:]])<for_stmt>ix,obj enumerate(objs)<block_start>cls=coco_cat_id_to_class_ind[obj['category_id']]<line_sep>boxes[ix :]=obj['clean_bbox']<line_sep>gt_classes[ix]=cls<line_sep>seg_areas[ix]=obj['area']<if_stmt>obj['iscrowd']<block_start>gt_iscrowd[ix]=1<block_end><block_end>self.validate_boxes(boxes width=width height=height)<line_sep><return>{'filaname':filaname 'boxes':boxes 'shape':(height width) 'gt_classes':gt_classes 'gt_iscrowd':gt_iscrowd 'has_boxes':has_boxes}<block_end><def_stmt>_get_statistic self<block_start>class_name_list=['none' 'total']<line_sep>class_name_list.extend([_<for>_ self._classes[1:]])<line_sep>stat_by_obj=dict(zip(class_name_list [0]<times>len(class_name_list)))<line_sep>stat_by_image=dict(zip(class_name_list [0]<times>len(class_name_list)))<for_stmt>index self._image_index<block_start>im_ann=self._coco.loadImgs(index)[0]<line_sep>width=im_ann['width']<line_sep>height=im_ann['height']<line_sep>annIds=self._coco.getAnnIds(imgIds=index iscrowd=<none>)<line_sep>objs=self._coco.loadAnns(annIds)<line_sep># Sanitize bboxes -- some are invalid
valid_objs=[]<for_stmt>obj objs<block_start>x1=np.max((0 obj['bbox'][0]))<line_sep>y1=np.max((0 obj['bbox'][1]))<line_sep>x2=np.min((width-1 x1+np.max((0 obj['bbox'][2]-1))))<line_sep>y2=np.min((height-1 y1+np.max((0 obj['bbox'][3]-1))))<if_stmt>obj['iscrowd']<and>(self._image_set<eq>'train'<or>self._image_set<eq>'trainval')<block_start><continue><block_end><if_stmt>obj['area']<g>0<and>x2<ge>x1<and>y2<ge>y1<block_start>valid_objs.append(obj)<block_end><block_end>objs=valid_objs<line_sep>num_objs=len(objs)<line_sep>coco_cat_id_to_name=dict(zip(self._coco.getCatIds() [c['name']<for>c self._cats]))<line_sep>cls_in_image_list={}<for_stmt>ix,obj enumerate(objs)<block_start>cls=coco_cat_id_to_name[obj['category_id']]<line_sep>stat_by_obj[cls]<augadd>1<line_sep>stat_by_obj['total']<augadd>1<line_sep>cls_in_image_list[cls]=0<block_end><for_stmt>key cls_in_image_list.keys()<block_start>stat_by_image[cls]<augadd>1<block_end>stat_by_image['total']<augadd>1<block_end>statistics=dict(zip(class_name_list [(stat_by_image[cls_name] stat_by_obj[cls_name])<for>cls_name class_name_list]))<line_sep><return>statistics<block_end><block_end># d = CoCoDataset(dataDir, dataType)
# STS = d._get_statistic()
# for k, v in STS.items():
# print('"%s": '%k, v, ',')
# print('ok')
# for k, v in d._labels.items():
# print('"%s": '%k, v, ',')
#print(len(d._image_index))
#print([d._load_coco_annotation(index) for index in d._image_index])
# {'filaname' : filaname,
# 'boxes' : boxes,
# 'shape' : (height, width),
# 'gt_classes': gt_classes,
# 'gt_iscrowd' : gt_iscrowd,
# 'has_boxes': has_boxes}
# boxes = np.zeros((num_objs, 4), dtype=np.float32)
# gt_classes = np.zeros((num_objs), dtype=np.int32)
# gt_iscrowd = np.zeros((num_objs), dtype=np.int32)
# seg_areas = np.zeros((num_objs), dtype=np.float32)
<def_stmt>_process_image filename_pattern ann_dict<block_start>"""Process an image and its annotation dictionary.
Args:
filename_pattern: string, path pattern with a '{}' placeholder for the image file name.
ann_dict: dict, annotation returned by CoCoDataset._load_coco_annotation.
Returns:
image_data: string, JPEG encoding of RGB image.
shape: 2 integers, image height and width in pixels.
bboxes: list of bounding boxes, each as (ymin, xmin, ymax, xmax) normalized to [0, 1].
labels: list of integers, class label for each box.
iscrowd: list of integers, crowd flag for each box.
"""<line_sep># Read the image file.
filename=filename_pattern.format(ann_dict['filaname'])<line_sep>image_data=tf.gfile.FastGFile(filename 'rb').read()<line_sep># Find annotations.
bboxes=[]<line_sep>labels=[]<line_sep>iscrowd=[]<for_stmt>index range(ann_dict['boxes'].shape[0])<block_start>labels.append(int(ann_dict['gt_classes'][index]))<line_sep>iscrowd.append(int(ann_dict['gt_iscrowd'][index]))<line_sep>bboxes.append((ann_dict['boxes'][index 0] ann_dict['boxes'][index 1] ann_dict['boxes'][index 2] ann_dict['boxes'][index 3]))<block_end><return>image_data ann_dict['shape'] bboxes labels iscrowd<block_end><def_stmt>_convert_to_example image_data labels bboxes shape iscrowd<block_start>"""Build an Example proto for an image example.
Args:
image_data: string, JPEG encoding of RGB image;
labels: list of integers, identifier for the ground truth;
bboxes: list of bounding boxes; each box is a list of integers;
specifying [ymin, xmin, ymax, xmax]. All boxes are assumed to belong
to the same label as the image label.
shape: 2 integers, image height and width in pixels.
Returns:
Example proto
"""<line_sep>xmin=[]<line_sep>ymin=[]<line_sep>xmax=[]<line_sep>ymax=[]<for_stmt>b bboxes<block_start><assert_stmt>len(b)<eq>4<line_sep># pylint: disable=expression-not-assigned
# [(ymin_0, xmin_0, ymax_0, xmax_0), (ymin_1, xmin_1, ymax_1, xmax_1), ....]
# |
# [ymin_0, ymin_1, ...], [xmin_0, xmin_1, ...], [ymax_0, ymax_1, ...], [xmax_0, xmax_1, ...]
[l.append(point)<for>l,point zip([ymin xmin ymax xmax] b)]<line_sep># pylint: enable=expression-not-assigned
<block_end>image_format=b'JPEG'<line_sep>example=tf.train.Example(features=tf.train.Features(feature={'image/height':int64_feature(shape[0]) 'image/width':int64_feature(shape[1]) 'image/channels':int64_feature(3) 'image/shape':int64_feature([shape[0] shape[1] 3]) 'image/object/bbox/xmin':float_feature(xmin) 'image/object/bbox/xmax':float_feature(xmax) 'image/object/bbox/ymin':float_feature(ymin) 'image/object/bbox/ymax':float_feature(ymax) 'image/object/bbox/label':int64_feature(labels) 'image/object/bbox/iscrowd':int64_feature(iscrowd) 'image/format':bytes_feature(image_format) 'image/encoded':bytes_feature(image_data)}))<line_sep><return>example<block_end><def_stmt>_add_to_tfrecord filename_pattern ann_dict tfrecord_writer<block_start>image_data,shape,bboxes,labels,iscrowd=_process_image(filename_pattern ann_dict)<line_sep>example=_convert_to_example(image_data labels bboxes shape iscrowd)<line_sep>tfrecord_writer.write(example.SerializeToString())<block_end><def_stmt>_get_output_filename output_dir name idx<block_start><return>os.path.join(output_dir '%s_%03d.tfrecord'%(name idx))<block_end><def_stmt>run dataset_dir output_dir output_name name='train2017'<block_start>coco_dataset=CoCoDataset(dataset_dir name)<line_sep>num_examples=coco_dataset._num_examples<line_sep># Process dataset files.
i=0<line_sep>fidx=0<while_stmt><true># Open new TFRecord file.
<block_start>tf_filename=_get_output_filename(output_dir output_name fidx)<with_stmt>tf.python_io.TFRecordWriter(tf_filename)<as>tfrecord_writer<block_start>j=0<while_stmt>i<l>num_examples<and>j<l>SAMPLES_PER_FILES<block_start>sys.stdout.write('\r>> Converting image %d/%d'%(i+1 num_examples))<line_sep>sys.stdout.flush()<line_sep>ann_dict=coco_dataset._load_coco_annotation(coco_dataset._image_index[i])<line_sep>_add_to_tfrecord(coco_dataset._filename_pattern ann_dict tfrecord_writer)<line_sep>i<augadd>1<line_sep>j<augadd>1<block_end>fidx<augadd>1<block_end><if_stmt><not>i<l>num_examples<block_start><break><block_end><block_end>print('\nFinished converting the CoCo dataset!')<block_end><if_stmt>__name__<eq>'__main__'<block_start>split_name='train2017'# 'train2017' or 'val2017'
output_name='coco_{}'.format(split_name)<line_sep>dataset_dir='/media/rs/7A0EE8880EE83EAF/Detections/CoCo'<line_sep>output_dir='../CoCo/tfrecords/{}/'.format(split_name)<line_sep>run(dataset_dir output_dir output_name split_name)<line_sep>split_name='val2017'# 'train2017' or 'val2017'
output_name='coco_{}'.format(split_name)<line_sep>dataset_dir='/media/rs/7A0EE8880EE83EAF/Detections/CoCo'<line_sep>output_dir='../CoCo/tfrecords/{}/'.format(split_name)<line_sep>run(dataset_dir output_dir output_name split_name)<block_end>
|
<import_from_stmt>commons variables<def_stmt>validate prediction_y_list actual_y_list<block_start>right_num_dict={}<line_sep>prediction_num_dict={}<line_sep>actual_num_dict={}<for_stmt>(p_y a_y) zip(prediction_y_list actual_y_list)<block_start><if_stmt><not>prediction_num_dict.has_key(p_y)<block_start>prediction_num_dict[p_y]=0<block_end>prediction_num_dict[p_y]<augadd>1<if_stmt><not>actual_num_dict.has_key(a_y)<block_start>actual_num_dict[a_y]=0<block_end>actual_num_dict[a_y]<augadd>1<if_stmt>p_y<eq>a_y<block_start><if_stmt><not>right_num_dict.has_key(p_y)<block_start>right_num_dict[p_y]=0<block_end>right_num_dict[p_y]<augadd>1<block_end><block_end><return>right_num_dict prediction_num_dict actual_num_dict<block_end>
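# Worked example (illustrative, not in the original file): per-class counts returned
# by validate for a two-sample prediction; the values assume the Python 2
# dict.has_key semantics used by the function.
#
#     validate(['a', 'b'], ['a', 'a'])
#     # -> right:      {'a': 1}
#     #    prediction: {'a': 1, 'b': 1}
#     #    actual:     {'a': 2}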
|
<import_from_stmt>seleniumbase.translate chinese# noqa
<import_from_stmt>seleniumbase.translate dutch# noqa
<import_from_stmt>seleniumbase.translate french# noqa
<import_from_stmt>seleniumbase.translate italian# noqa
<import_from_stmt>seleniumbase.translate japanese# noqa
<import_from_stmt>seleniumbase.translate korean# noqa
<import_from_stmt>seleniumbase.translate portuguese# noqa
<import_from_stmt>seleniumbase.translate russian# noqa
<import_from_stmt>seleniumbase.translate spanish# noqa
|
<import_from_stmt>.loco_interpreter LocoInterpreter<import_from_stmt>.get_memory_handler LocoGetMemoryHandler<import_from_stmt>.put_memory_handler PutMemoryHandler<line_sep>__all__=[LocoGetMemoryHandler PutMemoryHandler LocoInterpreter]<line_sep>
|
"""
mailthon.response
~~~~~~~~~~~~~~~~~
Response objects encapsulate responses returned
by SMTP servers.
:copyright: (c) 2015 by <NAME>
:license: MIT, see LICENSE for details.
"""<import_from_stmt>collections namedtuple<line_sep>_ResponseBase=namedtuple('Response' ['status_code' 'reason'])<class_stmt>Response(_ResponseBase)<block_start>"""
Encapsulates a (status_code, reason) tuple
returned by a server when the ``NOOP``
command is called.
:param status_code: status code returned by server.
:param reason: error/success message.
"""<line_sep>@property<def_stmt>ok self<block_start>"""
Returns true if the status code is 250, false
otherwise.
"""<line_sep><return>self.status_code<eq>250<block_end><block_end><class_stmt>SendmailResponse<block_start>"""
Encapsulates a (status_code, reason) tuple
as well as a mapping of email-address to
(status_code, reason) tuples that can be
attained by the NOOP and the SENDMAIL
command.
:param pair: The response pair.
:param rejected: Dictionary of rejected
addresses to status-code reason pairs.
"""<def_stmt>__init__ self status_code reason rejected<block_start>self.res=Response(status_code reason)<line_sep>self.rejected={}<for_stmt>addr,pair rejected.items()<block_start>self.rejected[addr]=Response(*pair)<block_end><block_end>@property<def_stmt>ok self<block_start>"""
Returns True only if no addresses were
rejected and if the status code is 250.
"""<line_sep><return>self.res.ok<and><not>self.rejected<block_end><block_end>
|
# terrascript/provider/clc.py
<import_stmt>terrascript<class_stmt>clc(terrascript.Provider)<block_start><pass><block_end>__all__=["clc"]<line_sep>
|
<import_from_stmt>datetime datetime timedelta<import_from_stmt>typing Any Callable Dict Iterable Union cast<import_from_stmt>iceberg.api types<as>IcebergTypes<import_from_stmt>iceberg.api.data_file DataFile<import_from_stmt>iceberg.api.manifest_file ManifestFile<import_from_stmt>iceberg.api.schema Schema<import_from_stmt>iceberg.api.snapshot Snapshot<import_from_stmt>iceberg.api.table Table<import_from_stmt>iceberg.api.types Conversions NestedField Type TypeID<import_from_stmt>iceberg.core.base_table BaseTable<import_from_stmt>iceberg.core.filesystem FileSystemInputFile<import_from_stmt>iceberg.core.manifest_reader ManifestReader<import_from_stmt>iceberg.exceptions.exceptions FileSystemNotFound<import_from_stmt>datahub.emitter.mce_builder get_sys_time<import_from_stmt>datahub.emitter.mcp MetadataChangeProposalWrapper<import_from_stmt>datahub.ingestion.api.workunit MetadataWorkUnit<import_from_stmt>datahub.ingestion.source.iceberg.iceberg_common IcebergProfilingConfig IcebergSourceReport <import_from_stmt>datahub.metadata.schema_classes ChangeTypeClass DatasetFieldProfileClass DatasetProfileClass <class_stmt>IcebergProfiler<block_start><def_stmt>__init__ self report:IcebergSourceReport config:IcebergProfilingConfig <arrow><none><block_start>self.report:IcebergSourceReport=report<line_sep>self.config:IcebergProfilingConfig=config<line_sep>self.platform:str="iceberg"<block_end><def_stmt>_aggregate_counts self aggregated_count:Dict[int int] manifest_counts:Dict[int int] <arrow>Dict[int int]<block_start><return>{k:aggregated_count.get(k 0)+manifest_counts.get(k 0)<for>k set(aggregated_count)|set(manifest_counts)}<block_end><def_stmt>_aggregate_bounds self schema:Schema aggregator:Callable aggregated_values:Dict[int Any] manifest_values:Dict[int Any] <arrow><none><block_start><for_stmt>field_id,value_encoded manifest_values.items()# type: int, Any
<block_start>field:NestedField=schema.find_field(field_id)<line_sep># Bounds in manifests can reference historical field IDs that are not part of the current schema.
# We simply do not profile those, since we only care about the current snapshot.
<if_stmt>field<and>IcebergProfiler._is_numeric_type(field.type)<block_start>value_decoded=Conversions.from_byte_buffer(field.type value_encoded)<if_stmt>value_decoded<block_start>agg_value=aggregated_values.get(field_id)<line_sep>aggregated_values[field_id]=(aggregator(agg_value value_decoded)<if>agg_value<else>value_decoded)<block_end><block_end><block_end><block_end><def_stmt>profile_table self dataset_name:str dataset_urn:str table:Table <arrow>Iterable[MetadataWorkUnit]<block_start>"""This method will profile the supplied Iceberg table by looking at the table's manifest.
The overall profile of the table is aggregated from the individual manifest files.
We can extract the following from those manifests:
- "field minimum values"
- "field maximum values"
- "field null occurences"
"field distinct value occurences" cannot be computed since the 'value_counts' only apply for
a manifest, making those values innacurate. For example, if manifest A has 2 unique values
and manifest B has 1, it is possible that the value in B is also in A, hence making the total
number of unique values 2 and not 3.
Args:
dataset_name (str): dataset name of the table to profile, mainly used in error reporting
dataset_urn (str): dataset urn of the table to profile
table (Table): Iceberg table to profile.
Raises:
Exception: Occurs when a table manifest cannot be loaded.
Yields:
Iterator[Iterable[MetadataWorkUnit]]: Workunits related to datasetProfile.
"""<if_stmt><not>table.snapshots()<or><not>isinstance(table BaseTable)# Table has no data, cannot profile, or we can't get current_snapshot.
<block_start><return><block_end>row_count:int=int(table.current_snapshot().summary["total-records"])<line_sep>column_count:int=len(table.schema()._id_to_name)<line_sep>dataset_profile=DatasetProfileClass(timestampMillis=get_sys_time() rowCount=row_count columnCount=column_count )<line_sep>dataset_profile.fieldProfiles=[]<line_sep>field_paths:Dict[int str]=table.schema()._id_to_name<line_sep>current_snapshot:Snapshot=table.current_snapshot()<line_sep>total_count:int=0<line_sep>null_counts:Dict[int int]={}<line_sep>min_bounds:Dict[int Any]={}<line_sep>max_bounds:Dict[int Any]={}<line_sep>manifest:ManifestFile<try_stmt><block_start><for_stmt>manifest current_snapshot.manifests<block_start>manifest_input_file=FileSystemInputFile.from_location(manifest.manifest_path table.ops.conf)<line_sep>manifest_reader=ManifestReader.read(manifest_input_file)<line_sep>data_file:DataFile<for_stmt>data_file manifest_reader.iterator()<block_start><if_stmt>self.config.include_field_null_count<block_start>null_counts=self._aggregate_counts(null_counts data_file.null_value_counts())<block_end><if_stmt>self.config.include_field_min_value<block_start>self._aggregate_bounds(table.schema() min min_bounds data_file.lower_bounds() )<block_end><if_stmt>self.config.include_field_max_value<block_start>self._aggregate_bounds(table.schema() max max_bounds data_file.upper_bounds() )<block_end>total_count<augadd>data_file.record_count()<block_end><block_end><block_end># TODO Work on error handling to provide better feedback. Iceberg exceptions are weak...
<except_stmt>FileSystemNotFound<as>e<block_start><raise>Exception("Error loading table manifests")<from>e<block_end><if_stmt>row_count# Iterating through fieldPaths introduces unwanted stats for list element fields...
<block_start><for_stmt>field_id,field_path field_paths.items()<block_start>field:NestedField=table.schema().find_field(field_id)<line_sep>column_profile=DatasetFieldProfileClass(fieldPath=field_path)<if_stmt>self.config.include_field_null_count<block_start>column_profile.nullCount=cast(int null_counts.get(field_id 0))<line_sep>column_profile.nullProportion=float(column_profile.nullCount/row_count)<block_end><if_stmt>self.config.include_field_min_value<block_start>column_profile.min=(self._renderValue(dataset_name field.type min_bounds.get(field_id))<if>field_id<in>min_bounds<else><none>)<block_end><if_stmt>self.config.include_field_max_value<block_start>column_profile.max=(self._renderValue(dataset_name field.type max_bounds.get(field_id))<if>field_id<in>max_bounds<else><none>)<block_end>dataset_profile.fieldProfiles.append(column_profile)<block_end><block_end># https://github.com/linkedin/datahub/blob/599edd22aeb6b17c71e863587f606c73b87e3b58/metadata-ingestion/src/datahub/ingestion/source/sql/sql_common.py#L829
mcp=MetadataChangeProposalWrapper(entityType="dataset" entityUrn=dataset_urn changeType=ChangeTypeClass.UPSERT aspectName="datasetProfile" aspect=dataset_profile )<line_sep>wu=MetadataWorkUnit(id=f"profile-{dataset_name}" mcp=mcp)<line_sep>self.report.report_workunit(wu)<line_sep>self.report.report_entity_profiled(dataset_name)<line_sep><yield>wu<block_end># The following will eventually be done by the Iceberg API (in the new Python refactored API).
<def_stmt>_renderValue self dataset_name:str value_type:Type value:Any<arrow>Union[str <none>]<block_start><try_stmt><block_start><if_stmt>value_type.type_id<eq>TypeID.TIMESTAMP<block_start><if_stmt>value_type.adjust_to_utc# TODO Deal with utc when required
<block_start>microsecond_unix_ts=value<block_end><else_stmt><block_start>microsecond_unix_ts=value<block_end><return>datetime.fromtimestamp(microsecond_unix_ts/1000000.0).strftime("%Y-%m-%d %H:%M:%S")<block_end><elif_stmt>value_type.type_id<eq>TypeID.DATE<block_start><return>(datetime(1970 1 1 0 0)+timedelta(value-1)).strftime("%Y-%m-%d")<block_end><return>str(value)<block_end><except_stmt>Exception<as>e<block_start>self.report.report_warning("profiling" f"Error in dataset {dataset_name} when profiling a {value_type} field with value {value}: {e}" )<line_sep><return><none><block_end><block_end>@staticmethod<def_stmt>_is_numeric_type type:Type<arrow>bool<block_start><return>isinstance(type (IcebergTypes.DateType IcebergTypes.DecimalType IcebergTypes.DoubleType IcebergTypes.FloatType IcebergTypes.IntegerType IcebergTypes.LongType IcebergTypes.TimestampType IcebergTypes.TimeType ) )<block_end><block_end>
|
<import_stmt>copy<as>copy_module<import_from_stmt>collections abc<class_stmt>Params(abc.MutableMapping)<block_start>"""
Read-only mapping to represent params passed in Task constructor. It
initializes with a copy of the passed dictionary. It verifies that the
dictionary does not have a key "upstream" nor "product" because they'd
clash with the ones added upon Task rendering
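Example (illustrative, inferred from the methods below): `Params({'a': 1})['a']`
returns `1`, while item assignment such as `params['a'] = 2` raises `RuntimeError`
because the mapping is read-only; use `to_dict()` to obtain a mutable shallow copy.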
"""<def_stmt>__init__ self params=<none><block_start><if_stmt>params<is><none><block_start>self._dict={}<block_end><else_stmt><block_start><if_stmt><not>isinstance(params abc.Mapping)<block_start><raise>TypeError('Params must be initialized '<concat>f'with a mapping, got: {params!r} '<concat>f'({type(params).__name__!r})')<block_end><if_stmt>'upstream'<in>params<block_start><raise>ValueError('Task params cannot be initialized with an '<concat>'"upstream" key as it automatically added '<concat>'upon rendering')<block_end><if_stmt>'product'<in>params<block_start><raise>ValueError('Task params cannot be initialized with an '<concat>'"product" key as it automatically added '<concat>'upon rendering')<block_end>self._dict=copy_module.copy(params)<block_end><block_end>@classmethod<def_stmt>_from_dict cls params copy=<true><block_start>"""
Private API for initializing Params objects with an arbitrary dictionary
"""<line_sep>obj=cls(params=<none>)<if_stmt>copy<block_start>obj._dict=copy_module.copy(params)<block_end><else_stmt><block_start>obj._dict=params<block_end><return>obj<block_end><def_stmt>_setitem self key value<block_start>"""Private method for updating the underlying data
"""<line_sep>self._dict[key]=value<block_end><def_stmt>to_dict self# NOTE: do we need this?
<block_start><return>copy_module.copy(self._dict)<block_end><def_stmt>to_json_serializable self params_only=<false><block_start>"""
Converts params into a dictionary
Parameters
----------
params_only : bool, default=False
If True, it only returns user params, excluding 'upstream' and
'product'
"""<line_sep>out=self.to_dict()<if_stmt>params_only<block_start>out.pop('product' <none>)<line_sep>out.pop('upstream' <none>)<block_end><elif_stmt>'upstream'<in>out<block_start>out['upstream']=out['upstream'].to_json_serializable()<block_end><return>out<block_end><def_stmt>__getitem__ self key<block_start><try_stmt><block_start><return>self._dict[key]<block_end><except_stmt>KeyError<block_start><raise>KeyError('Cannot obtain Task param named '<concat>'"{}", declared params are: {}'.format(key list(self._dict.keys())))<block_end><block_end><def_stmt>__setitem__ self key value<block_start><raise>RuntimeError('Task params are read-only, if you need a copy'<concat>' use Params.to_dict() (returns a shallow copy)'<concat>' of the underlying dictionary')<block_end><def_stmt>__iter__ self<block_start><for_stmt>name self._dict.keys()<block_start><yield>name<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self._dict)<block_end><def_stmt>__str__ self<block_start><return>str(self._dict)<block_end><def_stmt>__repr__ self<block_start><return>'Params({})'.format(repr(self._dict))<block_end><def_stmt>get self key<block_start><return>self._dict.get(key)<block_end><def_stmt>__delitem__ self key<block_start><del_stmt>self._dict[key]<block_end><block_end>
|
# Time: O(4^n / n^(3/2)) ~= Catalan numbers
# Space: O(n)
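# Illustrative sizes (not in the original file): the number of results is the Catalan number C_n,
# e.g. n=3 -> C_3 = 5 strings: ((())), (()()), (())(), ()(()) and ()()()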
# iterative solution
<class_stmt>Solution(object)<block_start><def_stmt>generateParenthesis self n<block_start>"""
:type n: int
:rtype: List[str]
"""<line_sep>result,curr=[] []<line_sep>stk=[(1 (n n))]<while_stmt>stk<block_start>step,args=stk.pop()<if_stmt>step<eq>1<block_start>left,right=args<if_stmt>left<eq>0<and>right<eq>0<block_start>result.append("".join(curr))<block_end><if_stmt>left<l>right<block_start>stk.append((3 tuple()))<line_sep>stk.append((1 (left right-1)))<line_sep>stk.append((2 (')')))<block_end><if_stmt>left<g>0<block_start>stk.append((3 tuple()))<line_sep>stk.append((1 (left-1 right)))<line_sep>stk.append((2 ('(')))<block_end><block_end><elif_stmt>step<eq>2<block_start>curr.append(args[0])<block_end><elif_stmt>step<eq>3<block_start>curr.pop()<block_end><block_end><return>result<block_end><block_end># Time: O(4^n / n^(3/2)) ~= Catalan numbers
# Space: O(n)
# recursive solution
<class_stmt>Solution2(object)<block_start><def_stmt>generateParenthesis self n<block_start>"""
:type n: int
:rtype: List[str]
"""<def_stmt>generateParenthesisRecu left right curr result<block_start><if_stmt>left<eq>0<and>right<eq>0<block_start>result.append("".join(curr))<block_end><if_stmt>left<g>0<block_start>curr.append('(')<line_sep>generateParenthesisRecu(left-1 right curr result)<line_sep>curr.pop()<block_end><if_stmt>left<l>right<block_start>curr.append(')')<line_sep>generateParenthesisRecu(left right-1 curr result)<line_sep>curr.pop()<block_end><block_end>result=[]<line_sep>generateParenthesisRecu(n n [] result)<line_sep><return>result<block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_stmt>click<import_stmt>platform<import_stmt>tcfcli.common.base_infor<as>infor<import_from_stmt>tcfcli.help.message ConfigureHelp<as>help<import_from_stmt>tcfcli.common.user_config UserConfig<import_from_stmt>tcfcli.common.operation_msg Operation<line_sep>version=platform.python_version()<if_stmt>version<ge>'3'<block_start><import_from_stmt>functools reduce<block_end><def_stmt>report_info <block_start><pass><block_end>REGIONS=infor.REGIONS<line_sep>@click.command(short_help=help.GET_SHORT_HELP)@click.option('--secret-id' '-si' is_flag=<true> help=help.GET_SECRET_ID)@click.option('--secret-key' '-sk' is_flag=<true> help=help.GET_SECRET_KEY)@click.option('--region' '-r' is_flag=<true> help=help.GET_REGION)@click.option('--appid' '-a' is_flag=<true> help=help.GET_APPID)@click.option('--using-cos' '-uc' is_flag=<true> help=help.GET_USING_COS)@click.option('--python2-path' '-p2p' is_flag=<true> help=help.GET_PATHON_PATH)@click.option('--python3-path' '-p3p' is_flag=<true> help=help.GET_PATHON_PATH)@click.option('--no-color' '-nc' is_flag=<true> default=<false> help=help.NOCOLOR)<def_stmt>get **kwargs<block_start>'''
\b
Get your account parameters.
\b
Common usage:
\b
* Get the configured information
$ scf configure get
'''<line_sep>uc=UserConfig()<def_stmt>set_true k<block_start>kwargs[k]=<true><block_end>Operation(uc._get_curr_user_section()).process()<line_sep>bools=[v<for>k,v kwargs.items()]<if_stmt><not>reduce(<lambda>x y:bool(x<or>y) bools)<block_start>list(map(set_true kwargs))<block_end>attrs=uc.get_attrs(kwargs)<line_sep>#msg = "Config" #"{} config:".format(UserConfig.API)
# for section in uc.SECTION_LIST:
# for attr in attrs:
# if attr.replace("-", "_") in list(uc.section_map[section].keys()):
# attr_value = attrs[attr]
# if attr == "secret-id":
# attr_value = "*" * 32 + attr_value[32:]
# elif attr == "secret-key":
# attr_value = "*" * 28 + attr_value[28:]
# Operation("{} = {}".format(attr, attr_value), fg="cyan").process()
<for_stmt>section uc.SECTION_LIST<block_start><for_stmt>key sorted(list(uc.section_map[section].keys()))<block_start><if_stmt>key<in>list(attrs.keys())<block_start>attr_value=attrs[key]<if_stmt>key<eq>"secret_id"<block_start>attr_value="*"<times>32+attr_value[32:]<block_end><elif_stmt>key<eq>"secret_key"<block_start>attr_value="*"<times>28+attr_value[28:]<block_end>Operation("{} = {}".format(key.replace('_' '-') attr_value) fg="cyan").process()<block_end><block_end><block_end># for attr in sorted(attrs):
# attr_value = attrs[attr]
# if attr == "secret-id":
# attr_value = "*" * 32 + attr_value[32:]
# elif attr == "secret-key":
# attr_value = "*" * 28 + attr_value[28:]
# msg += Operation("\n[-] ", fg="cyan").style() + Operation("{} = {}".format(attr, attr_value), fg="cyan").style()
# Operation(msg.strip()).process()
<block_end>
|
<import_from_stmt>pip._vendor.certifi where<line_sep>print(where())<line_sep>
|
<import_stmt>re<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy_utils TSVectorType<import_from_stmt>sqlalchemy_searchable search<import_from_stmt>tests SchemaTestCase TestCase<class_stmt>WeightedBase(object)<block_start><def_stmt>create_models self<block_start><class_stmt>WeightedTextItem(self.Base)<block_start>__tablename__='textitem'<line_sep>id=sa.Column(sa.Integer primary_key=<true> autoincrement=<true>)<line_sep>name=sa.Column(sa.Unicode(255))<line_sep>content=sa.Column(sa.UnicodeText)<line_sep>search_vector=sa.Column(TSVectorType('name' 'content' weights={'name':'A' 'content':'B'}))<block_end>self.WeightedTextItem=WeightedTextItem<block_end><block_end><class_stmt>TestCreateWeightedSearchVector(WeightedBase SchemaTestCase)<block_start>should_create_indexes=[u'ix_textitem_search_vector']<line_sep>should_create_triggers=[u'textitem_search_vector_trigger']<def_stmt>test_search_function_weights self<block_start>func_name='textitem_search_vector_update'<line_sep>sql="""SELECT proname,prosrc FROM pg_proc
WHERE proname='{name}';"""<line_sep>name,src=self.session.execute(sql.format(name=func_name)).fetchone()<line_sep>pattern=(r"setweight\(to_tsvector\(.+?"<concat>r"coalesce\(NEW.(\w+).+?"<concat>r"\)\), '([A-D])'\)")<line_sep>first,second=(match.groups()<for>match re.finditer(pattern src))<assert_stmt>first<eq>('name' 'A')<assert_stmt>second<eq>('content' 'B')<block_end><block_end><class_stmt>TestWeightedSearchFunction(WeightedBase TestCase)<block_start><def_stmt>setup_method self method<block_start>TestCase.setup_method(self method)<line_sep>self.session.add(self.WeightedTextItem(name=u'Gort' content=u'Klaatu barada nikto'))<line_sep>self.session.add(self.WeightedTextItem(name=u'Klaatu' content=u'barada nikto'))<line_sep>self.session.commit()<block_end><def_stmt>test_weighted_search_results self<block_start>query=self.session.query(self.WeightedTextItem)<line_sep>first,second=search(query 'klaatu' sort=<true>).all()<assert_stmt>first.search_vector<eq>"'barada':2B 'klaatu':1A 'nikto':3B"<assert_stmt>(second.search_vector<eq>"'barada':3B 'gort':1A 'klaatu':2B 'nikto':4B")<block_end><block_end>
|
expected_output={"hsrp_common_process_state":"not running" "hsrp_ha_state":"capable" "hsrp_ipv4_process_state":"not running" "hsrp_ipv6_process_state":"not running" "hsrp_timer_wheel_state":"running" "mac_address_table":{166:{"group":10 "interface":"gi2/0/3" "mac_address":"0000.0cff.b311"} 169:{"group":5 "interface":"gi1/0/1" "mac_address":"0000.0cff.b30c"} 172:{"group":0 "interface":"gi2/0/3" "mac_address":"0000.0cff.b307"} 173:{"group":1 "interface":"gi2/0/3" "mac_address":"0000.0cff.b308"} } "msgQ_max_size":0 "msgQ_size":0 "v3_to_v4_transform":"disabled" "virtual_ip_hash_table":{"ipv6":{78:{"group":20 "interface":"gi1" "ip":"2001:DB8:10:1:1::254"}} "ipv4":{103:{"group":0 "interface":"gi1/0/1" "ip":"192.168.1.254"} 106:{"group":10 "interface":"gi1/0/2" "ip":"192.168.2.254"} } } }<line_sep>
|
<import_stmt>sys<import_stmt>time<import_from_stmt>pysys.basetest BaseTest<line_sep>"""
Validate that changing the mqtt port with the tedge command, without restarting the mqtt server, causes mqtt connections to fail
Given a configured system, with a certificate created and registered in a cloud
When `sudo tedge config set mqtt.port` sets a new port
When `sudo tedge mqtt sub` tries to subscribe to a topic and fails to connect to the mqtt server
When `sudo tedge mqtt pub` tries to publish a message and fails to connect to the mqtt server
"""<class_stmt>MqttPortChangeConnectionFails(BaseTest)<block_start><def_stmt>setup self<block_start>self.tedge="/usr/bin/tedge"<line_sep>self.sudo="/usr/bin/sudo"<line_sep>self.addCleanupFunction(self.mqtt_cleanup)<block_end><def_stmt>execute self# set a new mqtt port for local communication
<block_start>mqtt_port=self.startProcess(command=self.sudo arguments=[self.tedge "config" "set" "mqtt.port" "8880"] stdouterr="mqtt_port_set" )<line_sep># publish a message
mqtt_pub=self.startProcess(command=self.sudo arguments=[self.tedge "mqtt" "pub" "tedge/measurements" '{ "temperature": 25 }' ] stdouterr="mqtt_pub" # don't exit the test if exit status is 1, as the error messages are needed for validation
expectedExitStatus="==1" )<block_end><def_stmt>validate self<block_start>self.assertGrep("mqtt_pub.err" "ERROR: the message has not been published" contains=<true>)<line_sep>self.assertGrep("mqtt_pub.err" "Error: failed to publish the message" contains=<true>)<block_end><def_stmt>mqtt_cleanup self# unset a new mqtt port, falls back to default port (1883)
<block_start>mqtt_port=self.startProcess(command=self.sudo arguments=[self.tedge "config" "unset" "mqtt.port"] stdouterr="mqtt_port_unset" )<block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_from_stmt>dart_fss.fs.extract extract<import_from_stmt>dart_fss.fs.fs FinancialStatement<line_sep>__all__=['extract' 'FinancialStatement']<line_sep>
|
# Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>torch.nn.utils parameters_to_vector<import_from_stmt>qucumber.utils cplx auto_unsqueeze_args<import_from_stmt>qucumber _warn_on_missing_gpu<class_stmt>PurificationRBM(nn.Module)<block_start>r"""An RBM with a hidden and "auxiliary" layer, each separately connected to the visible units
:param num_visible: The number of visible units, i.e. the size of the system
:type num_visible: int
:param num_hidden: The number of units in the hidden layer
:type num_hidden: int
:param num_aux: The number of units in the auxiliary purification layer
:type num_aux: int
:param zero_weights: Whether or not to initialize the weights to zero
:type zero_weights: bool
:param gpu: Whether to perform computations on the default gpu.
:type gpu: bool
"""<def_stmt>__init__ self num_visible num_hidden=<none> num_aux=<none> zero_weights=<false> gpu=<false><block_start>super().__init__()<line_sep>self.num_visible=int(num_visible)<line_sep>self.num_hidden=(int(num_hidden)<if>num_hidden<is><not><none><else>self.num_visible)<line_sep>self.num_aux=int(num_aux)<if>num_aux<is><not><none><else>self.num_visible<line_sep># Parameters are:
# W: The weights of the visible-hidden edges
# U: The weights of the visible-auxiliary edges
# b: The biases of the visible nodes
# c: The biases of the hidden nodes
# d: The biases of the auxiliary nodes
# The auxiliary bias of the phase RBM is always zero
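# Illustrative count (not in the original file): with num_visible=4, num_hidden=4 and num_aux=4,
# the formula below gives 4*4 + 4*4 + 4 + 4 + 4 = 44 parameters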
self.num_pars=((self.num_visible<times>self.num_hidden)+(self.num_visible<times>self.num_aux)+self.num_visible+self.num_hidden+self.num_aux)<line_sep>_warn_on_missing_gpu(gpu)<line_sep>self.gpu=gpu<and>torch.cuda.is_available()<line_sep>self.device=torch.device("cuda")<if>self.gpu<else>torch.device("cpu")<line_sep>self.initialize_parameters(zero_weights=zero_weights)<block_end><def_stmt>__repr__ self<block_start><return>(f"PurificationBinaryRBM(num_visible={self.num_visible}, "<concat>f"num_hidden={self.num_hidden}, num_aux={self.num_aux}, gpu={self.gpu})")<block_end><def_stmt>initialize_parameters self zero_weights=<false><block_start>r"""Initialize the parameters of the RBM
:param zero_weights: Whether or not to initialize the weights to zero
:type zero_weights: bool
"""<line_sep>gen_tensor=torch.zeros<if>zero_weights<else>torch.randn<line_sep>self.weights_W=nn.Parameter((gen_tensor(self.num_hidden self.num_visible dtype=torch.double device=self.device )/np.sqrt(self.num_visible)) requires_grad=<false> )<line_sep>self.weights_U=nn.Parameter((gen_tensor(self.num_aux self.num_visible dtype=torch.double device=self.device )/np.sqrt(self.num_visible)) requires_grad=<false> )<line_sep>self.visible_bias=nn.Parameter(torch.zeros(self.num_visible dtype=torch.double device=self.device) requires_grad=<false> )<line_sep>self.hidden_bias=nn.Parameter(torch.zeros(self.num_hidden dtype=torch.double device=self.device) requires_grad=<false> )<line_sep>self.aux_bias=nn.Parameter(torch.zeros(self.num_aux dtype=torch.double device=self.device) requires_grad=<false> )<block_end>@auto_unsqueeze_args()<def_stmt>effective_energy self v a=<none><block_start>r"""Computes the equivalent of the "effective energy" for the RBM. If
`a` is `None`, will analytically trace out the auxiliary units.
:param v: The current state of the visible units. Shape (b, n_v) or (n_v,).
:type v: torch.Tensor
:param a: The current state of the auxiliary units. Shape (b, n_a) or (n_a,).
:type a: torch.Tensor or None
:returns: The "effective energy" of the RBM. Shape (b,) or (1,).
:rtype: torch.Tensor
"""<line_sep>v=v.to(self.weights_W)<line_sep>vis_term=torch.matmul(v self.visible_bias)+F.softplus(F.linear(v self.weights_W self.hidden_bias)).sum(-1)<if_stmt>a<is><not><none><block_start>a=(a.unsqueeze(0)<if>a.dim()<l>2<else>a).to(self.weights_W)<line_sep>aux_term=torch.matmul(a self.aux_bias)<line_sep>mix_term=torch.einsum("...v,av,...a->..." v self.weights_U.data a)<line_sep><return>-(vis_term+aux_term+mix_term)<block_end><else_stmt><block_start>aux_term=F.softplus(F.linear(v self.weights_U self.aux_bias)).sum(-1)<line_sep><return>-(vis_term+aux_term)<block_end><block_end><def_stmt>effective_energy_gradient self v reduce=<true><block_start>"""The gradients of the effective energies for the given visible states.
:param v: The visible states.
:type v: torch.Tensor
:param reduce: If `True`, will sum over the gradients resulting from
each visible state. Otherwise will return a batch of
gradient vectors.
:type reduce: bool
:returns: Will return a vector (or matrix if `reduce=False` and multiple
visible states were given as a matrix) containing the gradients
for all parameters (computed on the given visible states v).
:rtype: torch.Tensor
"""<line_sep>v=(v.unsqueeze(0)<if>v.dim()<l>2<else>v).to(self.weights_W)<line_sep>ph=self.prob_h_given_v(v)<line_sep>pa=self.prob_a_given_v(v)<if_stmt>reduce<block_start>W_grad=-torch.matmul(ph.transpose(0 -1) v)<line_sep>U_grad=-torch.matmul(pa.transpose(0 -1) v)<line_sep>vb_grad=-torch.sum(v 0)<line_sep>hb_grad=-torch.sum(ph 0)<line_sep>ab_grad=-torch.sum(pa 0)<line_sep><return>parameters_to_vector([W_grad U_grad vb_grad hb_grad ab_grad])<block_end><else_stmt><block_start>W_grad=-torch.einsum("...j,...k->...jk" ph v).view(*v.shape[:-1] -1)<line_sep>U_grad=-torch.einsum("...j,...k->...jk" pa v).view(*v.shape[:-1] -1)<line_sep>vb_grad=-v<line_sep>hb_grad=-ph<line_sep>ab_grad=-pa<line_sep>vec=[W_grad U_grad vb_grad hb_grad ab_grad]<line_sep><return>torch.cat(vec dim=-1)<block_end><block_end>@auto_unsqueeze_args()<def_stmt>prob_h_given_v self v out=<none><block_start>r"""Given a visible unit configuration, compute the probability
vector of the hidden units being on
:param v: The visible units
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the hidden units being active
given the visible state
:rtype: torch.Tensor
"""<line_sep><return>(torch.matmul(v self.weights_W.data.t() out=out).add_(self.hidden_bias.data).sigmoid_().clamp_(min=0 max=1))<block_end>@auto_unsqueeze_args()<def_stmt>prob_a_given_v self v out=<none><block_start>r"""Given a visible unit configuration, compute the probability
vector of the auxiliary units being on
:param v: The visible units
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the auxiliary units being active
given the visible state
:rtype: torch.Tensor
"""<line_sep><return>(torch.matmul(v self.weights_U.data.t() out=out).add_(self.aux_bias.data).sigmoid_().clamp_(min=0 max=1))<block_end>@auto_unsqueeze_args(1 2)<def_stmt>prob_v_given_ha self h a out=<none><block_start>r"""Given a hidden and auxiliary unit configuration, compute
the probability vector of the visible units being on
:param h: The hidden units
:type h: torch.Tensor
:param a: The auxiliary units
:type a: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The probability of the visible units being
active given the hidden and auxiliary states
:rtype: torch.Tensor
"""<line_sep><return>(torch.matmul(h self.weights_W.data out=out).add_(self.visible_bias.data).add_(torch.matmul(a self.weights_U.data)).sigmoid_().clamp_(min=0 max=1))<block_end><def_stmt>sample_a_given_v self v out=<none><block_start>r"""Sample/generate an auxiliary state given a visible state
:param v: The visible state
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled auxiliary state
:rtype: torch.Tensor
"""<line_sep>a=self.prob_a_given_v(v out=out)<line_sep>a=torch.bernoulli(a out=out)<line_sep><return>a<block_end><def_stmt>sample_h_given_v self v out=<none><block_start>r"""Sample/generate a hidden state given a visible state
:param v: The visible state
:type v: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled hidden state
:rtype: torch.Tensor
"""<line_sep>h=self.prob_h_given_v(v out=out)<line_sep>h=torch.bernoulli(h out=out)<line_sep><return>h<block_end><def_stmt>sample_v_given_ha self h a out=<none><block_start>r"""Sample/generate a visible state given the
hidden and auxiliary states
:param h: The hidden state
:type h: torch.Tensor
:param a: The auxiliary state
:type a: torch.Tensor
:param out: The output tensor to write to
:type out: torch.Tensor
:returns: The sampled visible state
:rtype: torch.Tensor
"""<line_sep>v=self.prob_v_given_ha(h a out=out)<line_sep>v=torch.bernoulli(v out=out)<line_sep><return>v<block_end><def_stmt>gibbs_steps self k initial_state overwrite=<false><block_start>r"""Perform k steps of Block Gibbs sampling. One step consists of
sampling the hidden and auxiliary states from the visible state, and
then sampling the visible state from the hidden and auxiliary states
:param k: The number of Block Gibbs steps
:type k: int
:param initial_state: The initial visible state
:type initial_state: torch.Tensor
:param overwrite: Whether to overwrite the initial_state tensor.
Exception: If initial_state is not on the same device
as the RBM, it will NOT be overwritten.
:type overwrite: bool
:returns: Returns the visible states after k steps of
Block Gibbs sampling
:rtype: torch.Tensor
"""<line_sep>v=(initial_state<if>overwrite<else>initial_state.clone()).to(self.weights_W)<line_sep>h=torch.zeros(*v.shape[:-1] self.num_hidden).to(self.weights_W)<line_sep>a=torch.zeros(*v.shape[:-1] self.num_aux).to(self.weights_W)<for_stmt>_ range(k)<block_start>self.sample_h_given_v(v out=h)<line_sep>self.sample_a_given_v(v out=a)<line_sep>self.sample_v_given_ha(h a out=v)<block_end><return>v<block_end>@auto_unsqueeze_args()<def_stmt>mixing_term self v<block_start>r"""Describes the extent of mixing in the system,
:math:`V_\theta = \frac{1}{2}U_\theta \bm{\sigma} + d_\theta`
:param v: The visible state of the system
:type v: torch.Tensor
:returns: The term describing the mixing of the system
:rtype: torch.Tensor
"""<line_sep><return>F.linear(v 0.5<times>self.weights_U self.aux_bias)<block_end><def_stmt>gamma self v vp eta=1 expand=<true><block_start>r"""Calculates elements of the :math:`\Gamma^{(\eta)}` matrix,
where :math:`\eta = \pm`.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\Gamma^{(\eta)}|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\Gamma^{(\eta)}|\sigma'_i\rangle`.
:param v: A batch of visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other batch of visible states, :math:`\sigma'`.
:type vp: torch.Tensor
:param eta: Determines which gamma matrix elements to compute.
:type eta: int
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
Ignored if both inputs are vectors, in which case, a
scalar is returned.
:type expand: bool
:returns: The matrix element given by
:math:`\langle\sigma|\Gamma^{(\eta)}|\sigma'\rangle`
:rtype: torch.Tensor
"""<line_sep>sign=np.sign(eta)<if_stmt>v.dim()<l>2<and>vp.dim()<l>2<block_start>temp=torch.dot(v+sign<times>vp self.visible_bias)<line_sep>temp<augadd>F.softplus(F.linear(v self.weights_W self.hidden_bias)).sum()<line_sep>temp<augadd>(sign<times>F.softplus(F.linear(vp self.weights_W self.hidden_bias)).sum())<block_end><else_stmt><block_start>temp1=torch.matmul(v self.visible_bias)+(F.softplus(F.linear(v self.weights_W self.hidden_bias)).sum(-1))<line_sep>temp2=torch.matmul(vp self.visible_bias)+(F.softplus(F.linear(vp self.weights_W self.hidden_bias)).sum(-1))<if_stmt>expand<block_start>temp=temp1.unsqueeze_(1)+(sign<times>temp2.unsqueeze_(0))<block_end><else_stmt><block_start>temp=temp1+(sign<times>temp2)<block_end><block_end><return>0.5<times>temp<block_end><def_stmt>gamma_grad self v vp eta=1 expand=<false><block_start>r"""Calculates elements of the gradient of
the :math:`\Gamma^{(\eta)}` matrix, where :math:`\eta = \pm`.
:param v: A batch of visible states, :math:`\sigma`
:type v: torch.Tensor
:param vp: The other batch of visible states, :math:`\sigma'`
:type vp: torch.Tensor
:param eta: Determines which gamma matrix elements to compute.
:type eta: int
:param expand: Whether to return a rank-3 tensor (`True`) or a matrix (`False`).
:type expand: bool
:returns: The matrix element given by
:math:`\langle\sigma|\nabla_\lambda\Gamma^{(\eta)}|\sigma'\rangle`
:rtype: torch.Tensor
"""<line_sep>sign=np.sign(eta)<line_sep>unsqueezed=v.dim()<l>2<or>vp.dim()<l>2<line_sep>v=(v.unsqueeze(0)<if>v.dim()<l>2<else>v).to(self.weights_W)<line_sep>vp=(vp.unsqueeze(0)<if>vp.dim()<l>2<else>vp).to(self.weights_W)<line_sep>prob_h=self.prob_h_given_v(v)<line_sep>prob_hp=self.prob_h_given_v(vp)<line_sep>W_grad_=torch.einsum("...j,...k->...jk" prob_h v)<line_sep>W_grad_p=torch.einsum("...j,...k->...jk" prob_hp vp)<if_stmt>expand<block_start>W_grad=0.5<times>(W_grad_.unsqueeze_(1)+sign<times>W_grad_p.unsqueeze_(0))<line_sep>vb_grad=0.5<times>(v.unsqueeze(1)+sign<times>vp.unsqueeze(0))<line_sep>hb_grad=0.5<times>(prob_h.unsqueeze_(1)+sign<times>prob_hp.unsqueeze_(0))<block_end><else_stmt><block_start>W_grad=0.5<times>(W_grad_+sign<times>W_grad_p)<line_sep>vb_grad=0.5<times>(v+sign<times>vp)<line_sep>hb_grad=0.5<times>(prob_h+sign<times>prob_hp)<block_end>batch_sizes=((v.shape[0] vp.shape[0] *v.shape[1:-1])<if>expand<else>(*v.shape[:-1] ))<line_sep>U_grad=torch.zeros_like(self.weights_U).expand(*batch_sizes -1 -1)<line_sep>ab_grad=torch.zeros_like(self.aux_bias).expand(*batch_sizes -1)<line_sep>vec=[W_grad.view(*batch_sizes -1) U_grad.view(*batch_sizes -1) vb_grad hb_grad ab_grad ]<if_stmt>unsqueezed<and><not>expand<block_start>vec=[grad.squeeze_(0)<for>grad vec]<block_end><return>cplx.make_complex(torch.cat(vec dim=-1))<block_end><def_stmt>partition self space<block_start>r"""Computes the partition function
:param space: The Hilbert space of the visible units
:type space: torch.Tensor
:returns: The partition function
:rtype: torch.Tensor
"""<line_sep>logZ=(-self.effective_energy(space)).logsumexp(0)<line_sep><return>logZ.exp()<block_end><block_end>
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
<def_stmt>f_gold n<block_start>table=[0<for>i range(n+1)]<line_sep>table[0]=1<for_stmt>i range(3 n+1)<block_start>table[i]<augadd>table[i-3]<block_end><for_stmt>i range(5 n+1)<block_start>table[i]<augadd>table[i-5]<block_end><for_stmt>i range(10 n+1)<block_start>table[i]<augadd>table[i-10]<block_end><return>table[n]<block_end>#TOFILL
<if_stmt>__name__<eq>'__main__'<block_start>param=[(83 ) (29 ) (17 ) (12 ) (93 ) (55 ) (97 ) (75 ) (22 ) (52 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interaction with Hartree-Fock solver in pyscf."""<line_sep># Abbreviations used:
# SCF: self-consistent field (method). Another name for Hartree-Fock
# HF: Hartree-Fock method.
# RHF: restricted Hartree-Fock. Requires the molecular orbitals for the i-th alpha-spin
# and i-th beta-spin electrons to have the same spatial component.
# ROHF: restricted open-shell Hartree-Fock. Same as RHF except allows the number
# of alpha and beta electrons to differ.
# UHF: unrestricted Hartree-Fock. Permits breaking of spin symmetry and hence
# alpha and beta electrons to have different spatial components.
# AO: Atomic orbital. Underlying basis set (typically Gaussian-type orbitals and
# built into pyscf).
# MO: molecular orbitals/Hartree-Fock orbitals. Single-particle orbitals which
# are solutions to the Hartree-Fock equations.
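# Illustrative usage of the Scf helper defined below (not part of the original module; the
# exact Atom constructor arguments are an assumption based on ferminet.utils.system):
#   scf = Scf(molecule=[system.Atom(symbol='H', coords=(0., 0., 0.)),
#             system.Atom(symbol='H', coords=(0., 0., 1.4))],
#             nelectrons=(1, 1), basis='sto-3g', restricted=True)
#   mean_field = scf.run()
#   alpha_mos, beta_mos = scf.eval_mos(np.zeros((10, 3)))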
<import_from_stmt>typing Sequence Tuple Optional<import_from_stmt>absl logging<import_from_stmt>ferminet.utils system<import_stmt>numpy<as>np<import_stmt>pyscf<class_stmt>Scf<block_start>"""Helper class for running Hartree-Fock (self-consistent field) with pyscf.
Attributes:
molecule: list of system.Atom objects giving the atoms in the
molecule and their positions.
nelectrons: Tuple with number of alpha electrons and beta
electrons.
basis: Basis set to use, best specified with the relevant string
for a built-in basis set in pyscf. A user-defined basis set can be used
(advanced). See https://sunqm.github.io/pyscf/gto.html#input-basis for
more details.
pyscf_mol: the PySCF 'Molecule'. If this is passed to the init,
the molecule, nelectrons, and basis will not be used, and the
calculations will be performed on the existing pyscf_mol
restricted: If True, use the restricted Hartree-Fock method, otherwise use
the unrestricted Hartree-Fock method.
"""<def_stmt>__init__ self molecule:Optional[Sequence[system.Atom]]=<none> nelectrons:Optional[Tuple[int int]]=<none> basis:Optional[str]='cc-pVTZ' pyscf_mol:Optional[pyscf.gto.Mole]=<none> restricted:bool=<true><block_start><if_stmt>pyscf_mol<block_start>self._mol=pyscf_mol<block_end><else_stmt><block_start>self.molecule=molecule<line_sep>self.nelectrons=nelectrons<line_sep>self.basis=basis<line_sep>self._spin=nelectrons[0]-nelectrons[1]<line_sep>self._mol=<none><block_end>self.restricted=restricted<line_sep>self._mean_field=<none><line_sep>pyscf.lib.param.TMPDIR=<none><block_end><def_stmt>run self<block_start>"""Runs the Hartree-Fock calculation.
Returns:
A pyscf scf object (i.e. pyscf.scf.rhf.RHF, pyscf.scf.uhf.UHF or
pyscf.scf.rohf.ROHF depending on the spin and restricted settings).
Raises:
RuntimeError: If the number of electrons in the PySCF molecule is not
consistent with self.nelectrons.
"""<line_sep># If not passed a pyscf molecule, create one
<if_stmt><not>self._mol<block_start><if_stmt>any(atom.atomic_number-atom.charge<g>1.e-8<for>atom self.molecule)<block_start>logging.info('Fractional nuclear charge detected. '<concat>'Running SCF on atoms with integer charge.')<block_end>nuclear_charge=sum(atom.atomic_number<for>atom self.molecule)<line_sep>charge=nuclear_charge-sum(self.nelectrons)<line_sep>self._mol=pyscf.gto.Mole(atom=[[atom.symbol atom.coords]<for>atom self.molecule] unit='bohr')<line_sep>self._mol.basis=self.basis<line_sep>self._mol.spin=self._spin<line_sep>self._mol.charge=charge<line_sep>self._mol.build()<if_stmt>self._mol.nelectron<ne>sum(self.nelectrons)<block_start><raise>RuntimeError('PySCF molecule not consistent with QMC molecule.')<block_end><block_end><if_stmt>self.restricted<block_start>self._mean_field=pyscf.scf.RHF(self._mol)<block_end><else_stmt><block_start>self._mean_field=pyscf.scf.UHF(self._mol)<block_end>self._mean_field.init_guess='atom'<line_sep>self._mean_field.kernel()<line_sep><return>self._mean_field<block_end><def_stmt>eval_mos self positions:np.ndarray deriv:bool=<false><arrow>Tuple[np.ndarray np.ndarray]<block_start>"""Evaluates the Hartree-Fock single-particle orbitals at a set of points.
Args:
positions: numpy array of shape (N, 3) of the positions in space at which
to evaluate the Hartree-Fock orbitals.
deriv: If True, also calculate the first derivatives of the
single-particle orbitals.
Returns:
Pair of numpy float64 arrays of shape (N, M) (deriv=False) or (4, N, M)
(deriv=True), where 2M is the number of Hartree-Fock orbitals. The (i-th,
j-th) element in the first (second) array gives the value of the j-th
alpha (beta) Hartree-Fock orbital at the i-th electron position in
positions. For restricted (RHF, ROHF) calculations, the two arrays will be
identical.
If deriv=True, the first index contains [value, x derivative, y
derivative, z derivative].
Raises:
RuntimeError: If Hartree-Fock calculation has not been performed using
`run`.
NotImplementedError: If Hartree-Fock calculation used Cartesian
Gaussian-type orbitals as the underlying basis set.
"""<if_stmt>self._mean_field<is><none><block_start><raise>RuntimeError('Mean-field calculation has not been run.')<block_end><if_stmt>self.restricted<block_start>coeffs=(self._mean_field.mo_coeff )<block_end><else_stmt><block_start>coeffs=self._mean_field.mo_coeff<block_end># Assumes self._mol.cart (use of Cartesian Gaussian-type orbitals and
# integrals) is False (default behaviour of pyscf).
<if_stmt>self._mol.cart<block_start><raise>NotImplementedError('Evaluation of molecular orbitals using cartesian GTOs.')<block_end># Note sph refers to the use of spherical GTO basis sets rather than
# Cartesian GTO basis sets. The coordinate system used for the electron
# positions is Cartesian in both cases.
gto_op='GTOval_sph_deriv1'<if>deriv<else>'GTOval_sph'<line_sep>ao_values=self._mol.eval_gto(gto_op positions)<line_sep>mo_values=tuple(np.matmul(ao_values coeff)<for>coeff coeffs)<if_stmt>self.restricted# duplicate for beta electrons.
<block_start>mo_values<augmul>2<block_end><return>mo_values<block_end><block_end>
|
"""Random dot motion task."""<import_stmt>numpy<as>np<import_stmt>neurogym<as>ngym<import_from_stmt>neurogym spaces<class_stmt>ProbabilisticReasoning(ngym.TrialEnv)<block_start>"""Probabilistic reasoning.
The agent is shown a sequence of stimuli. Each stimulus is associated
with a certain log-likelihood of the correct response being one choice
versus the other. The final log-likelihood of the target response being,
for example, option 1, is the sum of all log-likelihood associated with
the presented stimuli. A delay period separates each stimulus, so the
agent is encouraged to lean the log-likelihood association and integrate
these values over time within a trial.
Args:
shape_weight: array-like, evidence weight of each shape
n_loc: int, number of location of show shapes
"""<line_sep>metadata={'paper_link':'https://www.nature.com/articles/nature05852' 'paper_name':'Probabilistic reasoning by neurons' 'tags':['perceptual' 'two-alternative' 'supervised']}<def_stmt>__init__ self dt=100 rewards=<none> timing=<none> shape_weight=<none> n_loc=4<block_start>super().__init__(dt=dt)<line_sep># The evidence weight of each stimulus
<if_stmt>shape_weight<is><not><none><block_start>self.shape_weight=shape_weight<block_end><else_stmt><block_start>self.shape_weight=[-10 -0.9 -0.7 -0.5 -0.3 0.3 0.5 0.7 0.9 10]<block_end>self.n_shape=len(self.shape_weight)<line_sep>dim_shape=self.n_shape<line_sep># Shape representation needs to be fixed cross-platform
self.shapes=np.eye(self.n_shape dim_shape)<line_sep>self.n_loc=n_loc<line_sep># Rewards
self.rewards={'abort':-0.1 'correct':+1. 'fail':0.}<if_stmt>rewards<block_start>self.rewards.update(rewards)<block_end>self.timing={'fixation':500 'delay':<lambda>:self.rng.uniform(450 550) 'decision':500}<for_stmt>i_loc range(n_loc)<block_start>self.timing['stimulus'+str(i_loc)]=500<block_end><if_stmt>timing<block_start>self.timing.update(timing)<block_end>self.abort=<false><line_sep>name={'fixation':0}<line_sep>start=1<for_stmt>i_loc range(n_loc)<block_start>name['loc'+str(i_loc)]=range(start start+dim_shape)<line_sep>start<augadd>dim_shape<block_end>self.observation_space=spaces.Box(-np.inf np.inf shape=(1+dim_shape<times>n_loc ) dtype=np.float32 name=name)<line_sep>name={'fixation':0 'choice':[1 2]}<line_sep>self.action_space=spaces.Discrete(3 name=name)<block_end><def_stmt>_new_trial self **kwargs# Trial info
<block_start>trial={'locs':self.rng.choice(range(self.n_loc) size=self.n_loc replace=<false>) 'shapes':self.rng.choice(range(self.n_shape) size=self.n_loc replace=<true>) }<line_sep>trial.update(kwargs)<line_sep>locs=trial['locs']<line_sep>shapes=trial['shapes']<line_sep>log_odd=sum([self.shape_weight[shape]<for>shape shapes])<line_sep>p=1./(10<power>(-log_odd)+1.)<line_sep>ground_truth=int(self.rng.rand()<l>p)<line_sep>trial['log_odd']=log_odd<line_sep>trial['ground_truth']=ground_truth<line_sep># Periods
periods=['fixation']<line_sep>periods<augadd>['stimulus'+str(i)<for>i range(self.n_loc)]<line_sep>periods<augadd>['delay' 'decision']<line_sep>self.add_period(periods)<line_sep># Observations
self.add_ob(1 where='fixation')<line_sep>self.set_ob(0 'decision' where='fixation')<for_stmt>i_loc range(self.n_loc)<block_start>loc=locs[i_loc]<line_sep>shape=shapes[i_loc]<line_sep>periods=['stimulus'+str(j)<for>j range(i_loc self.n_loc)]<line_sep>self.add_ob(self.shapes[shape] periods where='loc'+str(loc))<block_end># Ground truth
self.set_groundtruth(ground_truth period='decision' where='choice')<line_sep><return>trial<block_end><def_stmt>_step self action<block_start>new_trial=<false><line_sep># rewards
reward=0<line_sep>gt=self.gt_now<line_sep># observations
<if_stmt>self.in_period('decision')<block_start><if_stmt>action<ne>0<block_start>new_trial=<true><if_stmt>action<eq>gt<block_start>reward<augadd>self.rewards['correct']<line_sep>self.performance=1<block_end><else_stmt><block_start>reward<augadd>self.rewards['fail']<block_end><block_end><block_end><else_stmt><block_start><if_stmt>action<ne>0# action = 0 means fixating
<block_start>new_trial=self.abort<line_sep>reward<augadd>self.rewards['abort']<block_end><block_end><return>self.ob_now reward <false> {'new_trial':new_trial 'gt':gt}<block_end><block_end>
|
<import_stmt>torch<class_stmt>ResponseMapTrackingPostProcessing<block_start><def_stmt>__init__ self enable_gaussian_score_map_penalty search_feat_size window_penalty_ratio=<none><block_start>self.enable_gaussian_score_map_penalty=enable_gaussian_score_map_penalty<line_sep>self.search_feat_size=search_feat_size<if_stmt>enable_gaussian_score_map_penalty<block_start>self.window=torch.flatten(torch.outer(torch.hann_window(search_feat_size[1] periodic=<false>) torch.hann_window(search_feat_size[0] periodic=<false>)))<line_sep>self.window_penalty_ratio=window_penalty_ratio<block_end><block_end><def_stmt>__call__ self network_output<block_start><if_stmt>network_output<is><none><block_start><return><none><block_end>class_score_map,predicted_bbox=network_output['class_score'] network_output['bbox']# shape: (N, 1, H, W), (N, H, W, 4)
N,C,H,W=class_score_map.shape<assert_stmt>C<eq>1<line_sep>class_score_map=class_score_map.view(N H<times>W)<if_stmt>self.enable_gaussian_score_map_penalty# window penalty
<block_start>class_score_map=class_score_map<times>(1-self.window_penalty_ratio)+self.window.view(1 H<times>W)<times>self.window_penalty_ratio<block_end>confidence_score,best_idx=torch.max(class_score_map 1)<line_sep>predicted_bbox=predicted_bbox.view(N H<times>W 4)<line_sep>bounding_box=predicted_bbox[torch.arange(len(predicted_bbox)) best_idx :]<line_sep>processor_outputs={'bbox':bounding_box 'conf':confidence_score}<for_stmt>k,v network_output.items()<block_start><if_stmt>k<not><in>('class_score' 'bbox')<block_start>processor_outputs[k]=v<block_end><block_end><return>processor_outputs<block_end><def_stmt>to self device<block_start><if_stmt>self.enable_gaussian_score_map_penalty<block_start>self.window=self.window.to(device)<block_end><block_end><block_end>
|
# Time: O(n * iter), iter is the number of iterations
# Space: O(1)
# see reference:
# - https://en.wikipedia.org/wiki/Geometric_median
# - https://wikimedia.org/api/rest_v1/media/math/render/svg/b3fb215363358f12687100710caff0e86cd9d26b
# Weiszfeld's algorithm
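# Illustrative form of one Weiszfeld update (not in the original file): given points p_i and a
# current estimate m, the next estimate is
#     m' = (sum_i p_i / |m - p_i|) / (sum_i 1 / |m - p_i|)
# which is what geometry_median() below computes; iteration stops once the estimate
# moves by less than EPS / len(positions)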
<class_stmt>Solution(object)<block_start><def_stmt>getMinDistSum self positions<block_start>"""
:type positions: List[List[int]]
:rtype: float
"""<line_sep>EPS=1e-6<def_stmt>norm p1 p2<block_start><return>((p1[0]-p2[0])<power>2+(p1[1]-p2[1])<power>2)<power>0.5<block_end><def_stmt>geometry_median positions median<block_start>numerator,denominator=[0.0 0.0] 0.0<for_stmt>p positions<block_start>l=norm(median p)<if_stmt><not>l<block_start><continue><block_end>numerator[0]<augadd>p[0]/l<line_sep>numerator[1]<augadd>p[1]/l<line_sep>denominator<augadd>1/l<block_end><if_stmt>denominator<eq>0.0<block_start><return><true> <none><block_end><return><false> [numerator[0]/denominator numerator[1]/denominator]<block_end>median=[float(sum(p[0]<for>p positions))/len(positions) float(sum(p[1]<for>p positions))/len(positions)]<line_sep>prev_median=[float("-inf") float("-inf")]<while_stmt>norm(median prev_median)<times>len(positions)<g>EPS<block_start>stopped,new_median=geometry_median(positions median)<if_stmt>stopped<block_start><break><block_end>median,prev_median=new_median median<block_end><return>sum(norm(median p)<for>p positions)<block_end><block_end># Time: O(n * iter), iter is the number of iterations
# Space: O(1)
<class_stmt>Solution2(object)<block_start><def_stmt>getMinDistSum self positions<block_start>"""
:type positions: List[List[int]]
:rtype: float
"""<line_sep>DIRECTIONS=[(0 1) (1 0) (0 -1) (-1 0)]<line_sep>EPS=1e-6<def_stmt>dist positions p<block_start><return>sum(((p[0]-x)<power>2+(p[1]-y)<power>2)<power>0.5<for>x,y positions)<block_end>median=[0.0 0.0]<line_sep>median[0]=float(sum(x<for>x,_ positions))/len(positions)<line_sep>median[1]=float(sum(y<for>_,y positions))/len(positions)<line_sep>result=dist(positions median)<line_sep>delta=float(max(max(positions key=<lambda>x:x[0])[0] max(positions key=<lambda>x:x[1])[1]))-float(min(min(positions key=<lambda>x:x[0])[0] min(positions key=<lambda>x:x[1])[1]))<while_stmt>delta<g>EPS<block_start><for_stmt>dx,dy DIRECTIONS<block_start>new_median=[median[0]+delta<times>dx median[1]+delta<times>dy]<line_sep>nd=dist(positions new_median)<if_stmt>nd<l>result<block_start>result=nd<line_sep>median=new_median<line_sep><break><block_end><block_end><else_stmt><block_start>delta<augdiv>2.0<block_end><block_end><return>result<block_end><block_end>
|
<import_stmt>os<import_stmt>pytest<import_from_stmt>napari_plugin_engine PluginCallError<import_from_stmt>napari.plugins _builtins<line_sep># test_plugin_manager fixture is provided by napari_plugin_engine._testsupport
<def_stmt>test_get_writer_succeeds napari_plugin_manager tmpdir layer_data_and_types<block_start>"""Test writing layers data."""<line_sep>_,layer_data,layer_types,filenames=layer_data_and_types<line_sep>path=os.path.join(tmpdir 'layers_folder')<line_sep>writer=napari_plugin_manager.hook.napari_get_writer(path=path layer_types=layer_types)<line_sep># Write data
<assert_stmt>writer<eq>_builtins.write_layer_data_with_plugins<assert_stmt>writer(path layer_data plugin_name=<none>)<line_sep># Check folder and files exist
<assert_stmt>os.path.isdir(path)<for_stmt>f filenames<block_start><assert_stmt>os.path.isfile(os.path.join(path f))<block_end><assert_stmt>set(os.listdir(path))<eq>set(filenames)<assert_stmt>set(os.listdir(tmpdir))<eq>{'layers_folder'}<block_end># the layer_data_and_types fixture is defined in napari/conftest.py
# test_plugin_manager fixture is provided by napari_plugin_engine._testsupport
<def_stmt>test_get_writer_bad_plugin napari_plugin_manager tmpdir layer_data_and_types<block_start>"""Test cleanup when get_writer has an exception."""<import_from_stmt>napari_plugin_engine napari_hook_implementation<class_stmt>bad_plugin<block_start>@napari_hook_implementation<def_stmt>napari_write_points path data meta<block_start><raise>ValueError("shoot!")<block_end><block_end>_,layer_data,layer_types,filenames=layer_data_and_types<line_sep>napari_plugin_manager.register(bad_plugin)<line_sep># this time we try writing directly to the tmpdir (which already exists)
writer=_builtins.napari_get_writer(tmpdir layer_types)<line_sep># call writer with a bad hook implementation inserted
<with_stmt>pytest.raises(PluginCallError)<block_start>writer(tmpdir layer_data plugin_name=<none>)<block_end># should have deleted all new files, but not the tmpdir
<assert_stmt>os.path.isdir(tmpdir)<for_stmt>f filenames<block_start><assert_stmt><not>os.path.isfile(os.path.join(tmpdir f))<block_end># now try writing to a nested folder inside of tmpdir
path=os.path.join(tmpdir 'layers_folder')<line_sep>writer=_builtins.napari_get_writer(path layer_types)<line_sep># call writer with a bad hook implementation inserted
<with_stmt>pytest.raises(PluginCallError)<block_start>writer(tmpdir layer_data plugin_name=<none>)<block_end># should have deleted the new nested folder, but not the tmpdir
<assert_stmt>os.path.isdir(tmpdir)<assert_stmt><not>os.path.exists(path)<block_end>
|
# Generated by Django 2.2.10 on 2020-05-28 23:49
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('clist' '0038_problem_division') ]<line_sep>operations=[migrations.RenameField(model_name='problem' old_name='division' new_name='divisions' ) ]<block_end>
|
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
<import_stmt>bpy<line_sep>pass_keys=['NUMPAD_0' 'NUMPAD_1' 'NUMPAD_3' 'NUMPAD_4' 'NUMPAD_5' 'NUMPAD_6' 'NUMPAD_7' 'NUMPAD_8' 'NUMPAD_9' 'MIDDLEMOUSE' 'WHEELUPMOUSE' 'WHEELDOWNMOUSE' 'MOUSEMOVE']<def_stmt>get_input_pass pass_keys key_inputs event<block_start><if_stmt>event.type<in>pass_keys<block_start><return><true><block_end><if_stmt>key_inputs<eq>'Maya'<block_start><if_stmt>event.type<in>{'RIGHTMOUSE' 'LEFTMOUSE'}<and>event.alt<and><not>event.shift<and><not>event.ctrl<block_start><return><true><block_end><block_end><return><false><block_end>
|
<import_from_stmt>adapters.tuya.TS0012 TS0012<import_from_stmt>devices.switch.on_off_switch OnOffSwitch<class_stmt>TS0013(TS0012)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.devices.append(OnOffSwitch('center' 'state_center'))<block_end><block_end>
|
<import_from_stmt>torch.hub load_state_dict_from_url<import_stmt>torch.nn<as>nn<import_from_stmt>.utils.tokenizer ConvTokenizer<import_from_stmt>.utils.modules ConvStage BasicStage<line_sep>__all__=['ConvMLP' 'convmlp_s' 'convmlp_m' 'convmlp_l']<line_sep>model_urls={'convmlp_s':'http://ix.cs.uoregon.edu/~alih/conv-mlp/checkpoints/convmlp_s_imagenet.pth' 'convmlp_m':'http://ix.cs.uoregon.edu/~alih/conv-mlp/checkpoints/convmlp_m_imagenet.pth' 'convmlp_l':'http://ix.cs.uoregon.edu/~alih/conv-mlp/checkpoints/convmlp_l_imagenet.pth' }<class_stmt>ConvMLP(nn.Module)<block_start><def_stmt>__init__ self blocks dims mlp_ratios channels=64 n_conv_blocks=3 classifier_head=<true> num_classes=1000 *args **kwargs<block_start>super(ConvMLP self).__init__()<assert_stmt>len(blocks)<eq>len(dims)<eq>len(mlp_ratios) f"blocks, dims and mlp_ratios must agree in size, {len(blocks)}, {len(dims)} and {len(mlp_ratios)} passed."<line_sep>self.tokenizer=ConvTokenizer(embedding_dim=channels)<line_sep>self.conv_stages=ConvStage(n_conv_blocks embedding_dim_in=channels hidden_dim=dims[0] embedding_dim_out=dims[0])<line_sep>self.stages=nn.ModuleList()<for_stmt>i range(0 len(blocks))<block_start>stage=BasicStage(num_blocks=blocks[i] embedding_dims=dims[i:i+2] mlp_ratio=mlp_ratios[i] stochastic_depth_rate=0.1 downsample=(i+1<l>len(blocks)))<line_sep>self.stages.append(stage)<block_end><if_stmt>classifier_head<block_start>self.norm=nn.LayerNorm(dims[-1])<line_sep>self.head=nn.Linear(dims[-1] num_classes)<block_end><else_stmt><block_start>self.head=<none><block_end>self.apply(self.init_weight)<block_end><def_stmt>forward self x<block_start>x=self.tokenizer(x)<line_sep>x=self.conv_stages(x)<line_sep>x=x.permute(0 2 3 1)<for_stmt>stage self.stages<block_start>x=stage(x)<block_end><if_stmt>self.head<is><none><block_start><return>x<block_end>B,_,_,C=x.shape<line_sep>x=x.reshape(B -1 C)<line_sep>x=self.norm(x)<line_sep>x=x.mean(dim=1)<line_sep>x=self.head(x)<line_sep><return>x<block_end>@staticmethod<def_stmt>init_weight m<block_start><if_stmt>isinstance(m (nn.Linear nn.Conv1d))<block_start>nn.init.trunc_normal_(m.weight std=.02)<if_stmt>isinstance(m (nn.Linear nn.Conv1d))<and>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0)<block_end><block_end><elif_stmt>isinstance(m nn.LayerNorm)<block_start>nn.init.constant_(m.bias 0)<line_sep>nn.init.constant_(m.weight 1.0)<block_end><elif_stmt>isinstance(m nn.Conv2d)<block_start>nn.init.kaiming_normal_(m.weight mode='fan_out' nonlinearity='relu')<block_end><elif_stmt>isinstance(m nn.BatchNorm2d)<block_start>nn.init.constant_(m.weight 1.)<line_sep>nn.init.constant_(m.bias 0.)<block_end><block_end><block_end><def_stmt>_convmlp arch pretrained progress classifier_head blocks dims mlp_ratios *args **kwargs<block_start>model=ConvMLP(blocks=blocks dims=dims mlp_ratios=mlp_ratios classifier_head=classifier_head *args **kwargs)<if_stmt>pretrained<and>arch<in>model_urls<block_start>state_dict=load_state_dict_from_url(model_urls[arch] progress=progress)<line_sep>model.load_state_dict(state_dict)<block_end><return>model<block_end><def_stmt>convmlp_s pretrained=<false> progress=<false> classifier_head=<true> *args **kwargs<block_start><return>_convmlp('convmlp_s' pretrained=pretrained progress=progress blocks=[2 4 2] mlp_ratios=[2 2 2] dims=[128 256 512] channels=64 n_conv_blocks=2 classifier_head=classifier_head *args **kwargs)<block_end><def_stmt>convmlp_m pretrained=<false> progress=<false> classifier_head=<true> *args **kwargs<block_start><return>_convmlp('convmlp_m' 
pretrained=pretrained progress=progress blocks=[3 6 3] mlp_ratios=[3 3 3] dims=[128 256 512] channels=64 n_conv_blocks=3 classifier_head=classifier_head *args **kwargs)<block_end><def_stmt>convmlp_l pretrained=<false> progress=<false> classifier_head=<true> *args **kwargs<block_start><return>_convmlp('convmlp_l' pretrained=pretrained progress=progress blocks=[4 8 3] mlp_ratios=[3 3 3] dims=[192 384 768] channels=96 n_conv_blocks=3 classifier_head=classifier_head *args **kwargs)<block_end>
|
<import_stmt>re<import_stmt>sys<import_stmt>numpy<as>np<line_sep>logName=sys.argv[1]<line_sep>reSched=re.compile(r"^==== sched:\s+(\S+)")<line_sep>reFN=re.compile(r"^(\S+)\.")<line_sep>reNQs=re.compile(r"nQs=(\d+) .*range=(\d+).*prb=(\d+)")<line_sep>reSim=re.compile(' (Generic|AVX|AVX2|AVX512)$')<line_sep>rePars=re.compile(r'OMP_NUM_THREADS=(\d+) fusedSpan=(\d) fusedDepth=(\d+) wfnCapacity=(\d+)')<line_sep>reInfo=re.compile(r'sz=([.\d]+) nQs=([.\d]+) nCs=([.\d]+) flsh= *([.\de+-]+).*gts= *([.\de+-]+).*elap= *(\d+).*(.)gps= *([.\de+-]+).*fus= *([.\d]+).*ker= *([.\d]+)')<line_sep>found=reFN.search(logName)<line_sep>env=found.group(1)<line_sep>fp=open(logName 'r')<line_sep>gpss=[]<line_sep>print(f'"env","test","typ","sim","qs","threads","span","sz","gps"')<line_sep>sim=""<line_sep>totalQs=-1<line_sep>threads=-1<line_sep>span=-1<line_sep>sz=-1<line_sep>rng=1<line_sep>prb=-1<line_sep>sched="???"<line_sep>prbs=["ladder" "ladder" "shor_4" "shor_6" "shor_8" "shor_10" "shor_12" "suprem_44" "suprem_55" "suprem_56" "qulacs_5" "qulacs_10" "qulacs_15" "qulacs_20" "qulacs_25"]<def_stmt>dumpGpss <block_start><global>gpss env sim totalQs threads span sz rng prb sched<if_stmt>len(gpss)<g>0<block_start>gpsMed=np.median(gpss)<line_sep>cnt=0.0<line_sep>tot=0.0<for_stmt>gps gpss<block_start><if_stmt>gps<g>gpsMed/2.0<and>gps<l>gpsMed<times>1.5<block_start>cnt<augadd>1.0<line_sep>tot<augadd>gps<block_end><block_end><if_stmt>cnt<g>0<block_start>gps=tot/cnt<block_end><else_stmt><block_start>gps=gpsAvg<block_end>nam=prbs[prb]<if_stmt>rng<eq>0<block_start>nam=f'{env},{nam}L'<block_end><elif_stmt>rng<eq>2<block_start>nam=f'{env},{nam}H'<block_end><else_stmt><block_start>nam=f'{env},{nam}'<block_end>print(f"{nam},{sched},{sim},{totalQs},{threads},{span},{sz},{gps:.1f}")<line_sep>gpss=[]<block_end><block_end><while_stmt><true><block_start>inp=fp.readline()<if_stmt>inp<eq>""<block_start>dumpGpss()<line_sep><break><block_end>found=reSched.search(inp)<if_stmt>found<block_start>dumpGpss()<line_sep>sched=found.group(1)<line_sep><continue><block_end>found=reNQs.search(inp)<if_stmt>found<block_start>dumpGpss()<line_sep>totalQs=found.group(1)<line_sep>rng=int(found.group(2))<line_sep>prb=int(found.group(3))<line_sep><continue><block_end>found=reSim.search(inp)<if_stmt>found<block_start>dumpGpss()<line_sep>sim=found.group(1)<line_sep><continue><block_end>found=rePars.search(inp)<if_stmt>found<block_start>threads=found.group(1)<line_sep>span=found.group(2)<line_sep>limit=found.group(3)<line_sep>wfnSiz=found.group(4)<line_sep><continue><block_end>found=reInfo.search(inp)<if_stmt>found<block_start>sz=found.group(1)<line_sep>nQs=float(found.group(2))<line_sep>nCs=float(found.group(3))<line_sep>flushes=found.group(4)<line_sep>gates=found.group(5)<line_sep>elap=found.group(6)<if_stmt>(found.group(7)<eq>'k')<block_start>mul=1000.0<block_end><else_stmt><block_start>mul=1.0<block_end>gps=float(found.group(8))<times>mul<line_sep>fusions=found.group(9)<line_sep>kernel=found.group(10)<line_sep>gpss.append(gps)<line_sep><continue><block_end><block_end>fp.close()<line_sep>
|
<import_from_stmt>collections OrderedDict<class_stmt>SpyTestDict(OrderedDict)<block_start>"""
todo: Update Documentation
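Illustrative behaviour (inferred from the methods below): attribute access maps to
item access, so `d.vlan = 100` is equivalent to `d["vlan"] = 100`, `d.vlan` reads
the value back, and `del d.vlan` removes the key.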
"""<def_stmt>__getattr__ self name<block_start><try_stmt><block_start><return>self[name]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(name)<block_end><block_end><def_stmt>__setattr__ self name value<block_start><if_stmt><not>name.startswith('_OrderedDict__')<block_start>self[name]=value<block_end><else_stmt><block_start>OrderedDict.__setattr__(self name value)<block_end><block_end><def_stmt>__delattr__ self name<block_start><try_stmt><block_start>self.pop(name)<block_end><except_stmt>KeyError<block_start>OrderedDict.__delattr__(self name)<block_end><block_end># compare
<def_stmt>__eq__ self other<block_start><return>dict.__eq__(self other)<block_end># stringify
<def_stmt>__str__ self<block_start><return>'{%s}'%', '.join('%r: %r'%item<for>item self.items())<block_end># for PrettyPrinter
__repr__=OrderedDict.__repr__<block_end>
|
<import_stmt>inspect<import_from_stmt>collections namedtuple<import_from_stmt>..base TributaryException<line_sep># from boltons.funcutils import wraps
<import_from_stmt>..utils _compare _either_type _ismethod<import_from_stmt>.dd3 _DagreD3Mixin<class_stmt>Node(_DagreD3Mixin)<block_start>"""Class to represent an operation that is lazy"""<line_sep>_id_ref=0<def_stmt>__init__ self value=<none> name="?" derived=<false> callable=<none> callable_args=<none> callable_kwargs=<none> dynamic=<false> **kwargs <block_start>"""Construct a new lazy node, wrapping a callable or a value
Args:
name (str): name to use to represent the node
derived (bool): node is not instantiated directly,
e.g. via n + 10 where n is a preexisting node.
These default to dirty state
value (any): initial value of the node
callable (callable): function or other callable that the node is wrapping
callable_args (tuple): args for the wrapped callable
callable_kwargs (dict): kwargs for the wrapped callable
dynamic (bool): node should not be lazy - always access underlying value
"""<line_sep># Instances get an id but one id tracker for all nodes so we can
# uniquely identify them
# TODO different scheme
self._id=Node._id_ref<line_sep>Node._id_ref<augadd>1<line_sep># Every node gets a name so it can be uniquely identified in the graph
self._name="{}#{}".format(name<or>(callable.__name__<if>callable<else><none>)<or>self.__class__.__name__ self._id )<if_stmt>isinstance(value Node)<block_start><raise>TributaryException("Cannot set value to be itself a node")<block_end># Graphviz shape
self._graphvizshape=kwargs.get("graphvizshape" "box")<line_sep># default is box instead of ellipse
# because all lazy nodes are i/o nodes
# by default
# if using dagre-d3, this will be set
self._dd3g=<none><line_sep># starting value
self._values=[]<line_sep># use dual number operators
self._use_dual=kwargs.get("use_dual" <false>)<line_sep># threshold for calculating difference
self._compare=_compare<line_sep># callable and args
self._callable_is_method=_ismethod(callable)<line_sep>self._callable=callable<line_sep># map arguments of callable to nodes
self._callable_args=callable_args<or>[]<line_sep>self._callable_kwargs=callable_kwargs<or>{}<line_sep># callable_args_mapping maps the wrapped functions'
# arguments to nodes. It does so in 2 ways, either
# via the argument node's name, or the wrapped
# function's argument name
#
# e.g. if i have lambda x, y: x + y
# where x is set to a Node(name="One")
# and y is set to a Node(name="Two"),
# callable_args_mapping looks like:
# {0: {"node": "One", "arg": "x"},
# 1: {"node": "Two", "arg": "y"}}
#
# this way i can pass (x=5) or (One=5)
# to modify the node's value
self._callable_args_mapping={}<line_sep># map positional to kw
<if_stmt>callable<is><not><none><and><not>inspect.isgeneratorfunction(callable)# wrap args and kwargs of function to node
<block_start><try_stmt><block_start>signature=inspect.signature(callable)<block_end><except_stmt>ValueError# https://bugs.python.org/issue20189
<block_start>signature=namedtuple("Signature" ["parameters"])({})<block_end>parameters=[p<for>p signature.parameters.values()<if>p.kind<not><in>(inspect._ParameterKind.VAR_POSITIONAL inspect._ParameterKind.VAR_KEYWORD )]<line_sep># map argument index to name of argument, for later use
self._callable_args_mapping={i:{"arg":arg.name}<for>i,arg enumerate(parameters)}<line_sep># first, iterate through callable_args and callable_kwargs and convert to nodes
<for_stmt>i,arg enumerate(self._callable_args)# promote all args to nodes
<block_start><if_stmt><not>isinstance(arg Node)# see if arg in argspec
<block_start><if_stmt>i<l>len(parameters)<block_start>name=parameters[i].name<block_end><else_stmt><block_start>name="vararg"<block_end>self._callable_args[i]=Node(name=name value=arg)<block_end># ensure arg can be passed by either node name, or arg name
<if_stmt>i<not><in>self._callable_args_mapping# varargs, disallow by arg
<block_start>self._callable_args_mapping[i]={}<block_end>self._callable_args_mapping[i]["node"]=self._callable_args[i]._name_no_id()<block_end># first, iterate through callable_args and callable_kwargs and convert to nodes
<for_stmt>name,kwarg self._callable_kwargs.items()<block_start><if_stmt><not>isinstance(kwarg Node)<block_start>self._callable_kwargs[name]=Node(name=name value=kwarg)<block_end><block_end># now iterate through callable's args and ensure
# everything is matched up
<for_stmt>i,arg enumerate(parameters)<block_start><if_stmt>arg.name<eq>"self"# skip
<block_start><continue><block_end># passed in as arg
<if_stmt>i<l>len(self._callable_args)<or>arg.name<in>self._callable_kwargs# arg is passed in args/kwargs, continue
<block_start><continue><block_end># arg not accounted for, see if it has a default in the callable
# convert to node
node=Node(name=arg.name derived=<true> value=arg.default)<line_sep># set in kwargs
self._callable_kwargs[arg.name]=node<block_end># compare filtered parameters to original
<if_stmt>len(parameters)<ne>len(signature.parameters)# if varargs, can have more callable_args + callable_kwargs than listed arguments
<block_start>failif=len([arg<for>arg parameters<if>arg.name<ne>"self"])<g>(len(self._callable_args)+len(self._callable_kwargs))<block_end><else_stmt># should be exactly equal
<block_start>failif=len([arg<for>arg parameters<if>arg.name<ne>"self"])<ne>(len(self._callable_args)+len(self._callable_kwargs))<block_end><if_stmt>failif# something bad happened trying to align
# callable's args/kwargs with the provided
# callable_args and callable_kwargs, and we're
# now in an unrecoverable state.
<block_start><raise>TributaryException("Missing args (call or preprocessing error has occurred)")<block_end><try_stmt><block_start>self._callable._node_wrapper=<none># not known until program start
<block_end><except_stmt>AttributeError# can't set attributes on certain functions, so wrap with lambda
<block_start><if_stmt>self._callable_is_method<block_start>self._callable=<lambda>self *args **kwargs:callable(*args **kwargs)<block_end><else_stmt><block_start>self._callable=<lambda>*args **kwargs:callable(*args **kwargs)<block_end>self._callable._node_wrapper=<none><block_end><block_end># not known until program start
<elif_stmt>callable<is><not><none><block_start>self._callable_args=callable_args<or>[]<line_sep>self._callable_kwargs=callable_kwargs<or>{}<line_sep># FIXME this wont work for attribute inputs
<def_stmt>_callable gen=callable(*self._callable_args **self._callable_kwargs)<block_start><try_stmt><block_start>ret=next(gen)<line_sep><return>ret<block_end><except_stmt>StopIteration<block_start>self._dynamic=<false><line_sep>self._dirty=<false><line_sep><return>self.value()<block_end><block_end>self._callable=_callable<block_end># list out all upstream nodes
self._upstream=list(self._callable_args)+list(self._callable_kwargs.values())<line_sep># if always dirty, always reevaluate
# self._dynamic = dynamic # or self._callable is not None
self._dynamic=(dynamic<or>(self._callable<and>inspect.isgeneratorfunction(callable))<or><false>)<line_sep># parent nodes in graph
self._parents=[]<line_sep># self reference for method calls
self._self_reference=self<line_sep># cache node operations that have already been done
self._node_op_cache={}<line_sep># tweaks
self._tweaks=<none><line_sep># dependencies can be nodes
<if_stmt>self._callable<block_start>self._dependencies={self._callable:(self._callable_args self._callable_kwargs)}<block_end><else_stmt><block_start>self._dependencies={}<line_sep># insert initial value
self._setValue(value)<block_end># use this variable when manually overriding
# a callable to have a fixed value
self._dependencies_stashed={}<line_sep># if derived node, default to dirty to start
<if_stmt>derived<or>self._callable<is><not><none><block_start>self._dirty=<true><block_end><else_stmt><block_start>self._dirty=<false><block_end><block_end><def_stmt>inputs self name=""<block_start>"""get node inputs, optionally by name"""<line_sep>dat={n._name_no_id():n<for>n self._upstream}<line_sep><return>dat<if><not>name<else>dat.get(name)<block_end><def_stmt>_get_dirty self<block_start><return>self._is_dirty<block_end><def_stmt>_set_dirty self val<block_start>self._reddd3g()<if>val<else>self._whited3g()<line_sep>self._is_dirty=val<block_end>_dirty=property(_get_dirty _set_dirty)<def_stmt>_name_no_id self<block_start><return>self._name.rsplit("#" 1)[0]<block_end><def_stmt>_install_args self *args<block_start>"""set arguments' values to those given. this is a permanent operation"""<line_sep>kwargs=[]<for_stmt>i,arg enumerate(args)<block_start><if_stmt>(i<l>len(self._callable_args)<and>self._callable_args[i]._name_no_id()<in>self._callable_args_mapping[i].values())<block_start>self._callable_args[i].setValue(arg)<block_end><else_stmt><block_start>kwargs.append((self._callable_args_mapping[i]["node"] arg))<block_end><block_end><for_stmt>k,v kwargs<block_start>self._callable_kwargs[k].setValue(v)<block_end><block_end><def_stmt>_install_kwargs self **kwargs<block_start>"""set arguments' values to those given. this is a permanent operation"""<for_stmt>k,v kwargs.items()<block_start>self._callable_kwargs[k].setValue(v)<block_end><block_end><def_stmt>_get_arg self i<block_start><return>self._callable_args[i]<block_end><def_stmt>_get_kwarg self keyword<block_start>print(self._callable_args)<line_sep><return>self._callable_kwargs[keyword]<block_end><def_stmt>_bind self other_self=<none> *args **kwargs<block_start><if_stmt>other_self<is><not><none><block_start>self._self_reference=other_self<block_end>self._install_args(*args)<line_sep>self._install_kwargs(**kwargs)<line_sep><return>self<block_end><def_stmt>_tweak self node_tweaks# TODO context manager
<block_start>self._tweaks=node_tweaks<block_end><def_stmt>_untweak self# TODO context manager
<block_start>self._tweaks=<none><line_sep># mark myself as dirty for tweak side-effects
# TODO another way of doing this?
self._dirty=<true><block_end><def_stmt>_compute_from_dependencies self node_tweaks<block_start>"""recompute node's value from its dependencies, applying any temporary tweaks as necessary"""<line_sep># if i'm the one being tweaked, just return tweaked value
<if_stmt>self<in>node_tweaks<block_start><return>node_tweaks[self]<block_end># if i have upstream dependencies
<if_stmt>self._dependencies# mark graph as calculating
<block_start>self._greendd3g()<line_sep># iterate through upstream deps
<for_stmt>deps self._dependencies.values()# recompute args
<block_start><for_stmt>arg deps[0]# recompute
<block_start>arg._recompute(node_tweaks)<line_sep># Set yourself as parent if not set
<if_stmt>self<not><in>arg._parents<block_start>arg._parents.append(self)<block_end># mark as tweaking
<if_stmt>node_tweaks<block_start>arg._tweak(node_tweaks)<block_end><block_end># recompute kwargs
<for_stmt>kwarg deps[1].values()# recompute
<block_start>kwarg._recompute(node_tweaks)<line_sep># Set yourself as parent if not set
<if_stmt>self<not><in>kwarg._parents<block_start>kwarg._parents.append(self)<block_end># mark as tweaking
<if_stmt>node_tweaks<block_start>kwarg._tweak(node_tweaks)<block_end><block_end><block_end># fetch the callable
kallable=list(self._dependencies.keys())[0]<if_stmt>self._callable_is_method# if the callable is a method,
# pass this node as self
<block_start>new_value=kallable(self._self_reference *self._dependencies[kallable][0] **self._dependencies[kallable][1] )<block_end><else_stmt># else just call on deps
<block_start>new_value=kallable(*self._dependencies[kallable][0] **self._dependencies[kallable][1])<block_end><if_stmt>isinstance(new_value Node)# extract numerical value from node, if it is a node
<block_start>kallable._node_wrapper=new_value<line_sep>new_value=new_value()<block_end># get value
<if_stmt>isinstance(new_value Node)<block_start><raise>TributaryException("Value should not itself be a node!")<block_end># set my value as new value if not tweaking
<if_stmt><not>node_tweaks<block_start>self._setValue(new_value)<block_end><else_stmt># set value in tweak dict
<block_start>node_tweaks[self]=new_value<line_sep># iterate through upstream deps and unset tweak
<for_stmt>deps self._dependencies.values()<block_start><for_stmt>arg deps[0]<block_start>arg._untweak()<block_end><for_stmt>kwarg deps[1].values()<block_start>kwarg._untweak()<block_end><block_end><block_end><block_end><else_stmt># if i don't have upstream dependencies, my value is fixed
<block_start>new_value=self.value()<block_end># mark calculation complete
self._whited3g()<line_sep># return my value
<return>self.value()<block_end><def_stmt>_subtree_dirty self node_tweaks<block_start><for_stmt>call,deps self._dependencies.items()# callable node
<block_start><if_stmt>hasattr(call "_node_wrapper")<and>call._node_wrapper<is><not><none><block_start><if_stmt>call._node_wrapper.isDirty(node_tweaks)# CRITICAL
# always set self to be dirty if subtree is dirty
<block_start>self._dirty=<true><line_sep><return><true><block_end><block_end># check args
<for_stmt>arg deps[0]<block_start><if_stmt>arg.isDirty(node_tweaks)# CRITICAL
# always set self to be dirty if subtree is dirty
<block_start>self._dirty=<true><line_sep><return><true><block_end><block_end># check kwargs
<for_stmt>kwarg deps[1].values()<block_start><if_stmt>kwarg.isDirty(node_tweaks)# CRITICAL
# always set self to be dirty if subtree is dirty
<block_start>self._dirty=<true><line_sep><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>isDirty self node_tweaks=<none><block_start>"""Node needs to be re-evaluated, either because its value has changed
or because its value *could* change
Note that in evaluating if a node is dirty, you will have a side effect
of updating that node's status to be dirty or not.
"""<line_sep>node_tweaks=node_tweaks<or>{}<if_stmt>self<in>node_tweaks# return dirty but don't set
<block_start><return>_compare(node_tweaks[self] self.value())<block_end>self._dirty=self._dirty<or>self._subtree_dirty(node_tweaks)<or>self._dynamic<line_sep><return>self._dirty<block_end><def_stmt>isDynamic self<block_start>"""Node isn't necessarily dirty, but needs to be re-evaluated"""<line_sep><return>self._dynamic<block_end><def_stmt>_recompute self node_tweaks<block_start>"""returns result of computation"""<line_sep># check if self or upstream dirty
self.isDirty(node_tweaks)<line_sep># if i'm dirty, recompute my value
<if_stmt>self._dirty# compute upstream and then apply to self
<block_start>new_value=self._compute_from_dependencies(node_tweaks)<line_sep># if my new value is not equal to my old value,
# make sure to indicate that i was really dirty
<if_stmt>self._compare(new_value self.value())# mark my parents as dirty
<block_start><if_stmt>self._parents<block_start><for_stmt>parent self._parents# let your parents know you were dirty!
<block_start>parent._dirty=<true><block_end><block_end># set my value if not tweaking
<if_stmt><not>node_tweaks<block_start>self._setValue(new_value)<block_end><block_end><block_end><else_stmt><block_start>new_value=self.value()<block_end># mark myself as no longer dirty
self._dirty=<false><line_sep># return result of computation
<return>new_value<block_end><def_stmt>_gennode self name foo foo_args **kwargs<block_start><if_stmt>name<not><in>self._node_op_cache<block_start>self._node_op_cache[name]=Node(name=name derived=<true> callable=foo callable_args=foo_args override_callable_dirty=<true> **kwargs )<block_end><return>self._node_op_cache[name]<block_end><def_stmt>_tonode self other<block_start><if_stmt>isinstance(other Node)<block_start><return>other<block_end><if_stmt>str(other)<not><in>self._node_op_cache<block_start>self._node_op_cache[str(other)]=Node(name="var("+str(other)[:5]+")" derived=<true> value=other)<block_end><return>self._node_op_cache[str(other)]<block_end><def_stmt>setValue self value<block_start>"""set the node's value, marking it as dirty as appropriate.
this operation is permanent"""<if_stmt>self._compare(value self.value())# if callable, stash and force a fixed value
<block_start><if_stmt>self._dependencies# stash dependency tree for later
<block_start>self._dependencies_stashed=self._dependencies<line_sep># reset to empty
self._dependencies={}<line_sep># mark as not dynamic anymore
self._dynamic=<false><block_end># set the value
self._setValue(value)# leave for dagre
# mark as dirty
self._dirty=<true><block_end><block_end><def_stmt>unlock self<block_start>"""if node has been set to a fixed value, reset to callable"""<line_sep># no-op if not previously stashed
<if_stmt>self._dependencies_stashed# restore dependency tree
<block_start>self._dependencies=self._dependencies_stashed<line_sep># clear out stashed
self._dependencies_stashed={}<line_sep># mark as dynamic again
self._dynamic=<true><block_end><block_end><def_stmt>_setValue self value<block_start>"""internal method to set value. this is a permanent operation"""<line_sep># if value != self.value():
self._values.append(value)<block_end><def_stmt>append self value# TODO is this better or worse than
# lst = []
# n = Node(value=lst)
# lst.append(x)
# n._dirty = True
<block_start>iter(self.value())<line_sep>self.value().append(value)<line_sep>self._dirty=<true><block_end><def_stmt>get self **kwargs<block_start><for_stmt>k,v kwargs.items()<block_start><for_stmt>deps self._dependencies.values()# try to set args
<block_start><for_stmt>i,arg enumerate(deps[0])<block_start><if_stmt>arg._name_no_id()<eq>k<block_start><return>arg<block_end><block_end># try to set kwargs
<for_stmt>key,kwarg deps[1].items()<block_start><if_stmt>kwarg._name_no_id()<eq>k<block_start><return>kwarg<block_end><block_end><block_end><block_end><block_end><def_stmt>set self **kwargs<block_start>"""this method sets upstream dependencys' values to those given"""<for_stmt>k,v kwargs.items()<block_start>_set=<false><for_stmt>deps self._dependencies.values()# try to set args
<block_start><for_stmt>i,arg enumerate(deps[0])<block_start><if_stmt>arg._name_no_id()<eq>k<block_start><if_stmt>isinstance(v Node)# overwrite node
<block_start>deps[0][i]=v<block_end><else_stmt><block_start>arg._dirty=arg.value()<ne>v<line_sep>arg.setValue(v)<line_sep>_set=<true><line_sep><break><block_end><block_end><block_end><if_stmt>_set<block_start><continue><block_end># try to set kwargs
<for_stmt>key,kwarg deps[1].items()<block_start><if_stmt>kwarg._name_no_id()<eq>k<block_start><if_stmt>isinstance(v Node)# overwrite node
<block_start>deps[1][key]=v<block_end><else_stmt><block_start>kwarg._dirty=kwarg.value()<ne>v<line_sep>kwarg._setValue(v)<line_sep># _set = True
<break><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>getValue self<block_start><return>self.value()<block_end><def_stmt>value self# if tweaking, return my tweaked value
<block_start><if_stmt>self._tweaks<and>self<in>self._tweaks<block_start><return>self._tweaks[self]<block_end># otherwise return my latest value
<return>self._values[-1]<if>self._values<else><none><block_end><def_stmt>__call__ self node_tweaks=<none> *positional_tweaks **keyword_tweaks<block_start>"""Lazily re-evaluate the node
Args:
node_tweaks (dict): A dict mapping node to tweaked value
positional_tweaks (VAR_POSITIONAL): A tuple of positional tweaks to apply
keyword_tweaks (VAR_KEYWORD): A dict of keyword tweaks to apply
How it works: The "original caller" is the node being evaluated w/ tweaks.
It will consume the `positional_tweaks` and `keyword_tweaks`, which look like:
(1, 2,) , {"a": 5, "b": 10}
and join them with `node_tweaks` in a dict mapping node->tweaked value, e.g.
{Node1: 1, Node2: 2, NodeA: 5, NodeB: 10}
and pass this dict up the call tree in `node_tweaks`.
This dict is carried through all node operations through the entire call tree.
If a node is being evaluated and is in `node_tweaks`, it ignores recalculation
and returns the tweaked value.
Returns:
Any: the value, either via re-evaluation (if self or upstream dirty),
or the previously computed value
"""<line_sep>node_tweaks=node_tweaks<or>{}<if_stmt><not>isinstance(node_tweaks dict)# treat node_tweak argument as positional tweak
<block_start>positional_tweaks=list(positional_tweaks)+[node_tweaks]<line_sep>node_tweaks={}<block_end># instantiate tweaks
tweaks={}<line_sep># update with provided
tweaks.update(node_tweaks)<for_stmt>i,positional_tweak enumerate(positional_tweaks)<block_start>tweaks[self._get_arg(i)]=positional_tweak<block_end><for_stmt>k,keyword_tweak keyword_tweaks.items()<block_start>tweaks[self._get_kwarg(k)]=keyword_tweak<block_end># tweak self
<if_stmt>tweaks<block_start>self._tweak(tweaks)<block_end># calculate new value
computed=self._recompute(tweaks)<if_stmt>tweaks# untweak self
<block_start>self._untweak()<line_sep># return the calculation result, not my current value
<return>computed<block_end># otherwise return my permanent value, should equal computed
# assert self.value() == computed
<return>self.value()<block_end><def_stmt>evaluate self node_tweaks=<none> *positional_tweaks **keyword_tweaks<block_start><return>self(node_tweaks *positional_tweaks **keyword_tweaks)<block_end><def_stmt>eval self node_tweaks=<none> *positional_tweaks **keyword_tweaks<block_start><return>self(node_tweaks *positional_tweaks **keyword_tweaks)<block_end><def_stmt>__repr__ self<block_start><return>self._name<block_end><block_end>@_either_type<def_stmt>node meth dynamic=<true> **default_attrs<block_start>"""Convert a method into a lazy node
Since `self` is not defined at the point of method creation, you can pass in
extra kwargs which represent attributes of the future `self`. These will be
converted to node args during instantiation
The format is:
@node(my_existing_attr_as_an_arg="_attribute_name")
def my_method(self):
pass
this will be converted into a graph of the form:
self._attribute_name -> my_method
e.g. as if self._attribute_name was passed as an argument to my_method, and converted to a node in the usual manner
"""<line_sep>signature=inspect.signature(meth)<line_sep>parameters=[p<for>p signature.parameters.values()<if>p.kind<not><in>(inspect._ParameterKind.VAR_POSITIONAL inspect._ParameterKind.VAR_KEYWORD )]<line_sep># don't handle varargs yet
<if_stmt>len(parameters)<ne>len(signature.parameters)<block_start><raise>TributaryException("varargs not supported yet!")<block_end><if_stmt>inspect.isgeneratorfunction(meth)<and>default_attrs<block_start><raise>TributaryException("Not a supported pattern yet!")<block_end>node_args=[]<line_sep>node_kwargs={}<line_sep>is_method=_ismethod(meth)<line_sep># iterate through method's args and convert them to nodes
<for_stmt>i,arg enumerate(parameters)<block_start><if_stmt>arg.name<eq>"self"<block_start><continue><block_end>node_kwargs[arg.name]=Node(name=arg.name derived=<true> value=arg.default)<block_end># add all attribute args to the argspec
# see the docstring for more details
# argspec.args.extend(list(default_attrs.keys()))
node_kwargs.update(default_attrs)<if_stmt>(len(parameters)-1<if>is_method<else>len(parameters))<ne>(len(node_args)+len(node_kwargs))<block_start><raise>TributaryException("Missing args (call or preprocessing error has occurred)")<block_end><def_stmt>meth_wrapper self *args **kwargs<block_start><if_stmt>is_method# val = meth(self, *(arg.value() if isinstance(arg, Node) else getattr(self, arg).value() for arg in args if arg not in default_attrs), **
# {k: v.value() if isinstance(v, Node) else getattr(self, v).value() for k, v in kwargs.items() if k not in default_attrs})
<block_start>val=meth(self *(arg.value()<if>isinstance(arg Node)<else>getattr(self arg).value()<for>arg args) **{k:v.value()<if>isinstance(v Node)<else>getattr(self v).value()<for>k,v kwargs.items()} )<block_end><else_stmt><block_start>val=meth(*(arg.value()<if>isinstance(arg Node)<else>getattr(self arg).value()<for>arg args) **{k:v.value()<if>isinstance(v Node)<else>getattr(self v).value()<for>k,v kwargs.items()} )<block_end><return>val<block_end>new_node=Node(name=meth.__name__ derived=<true> callable=meth_wrapper callable_args=node_args callable_kwargs=node_kwargs dynamic=dynamic )<if_stmt>is_method<block_start>ret=<lambda>self *args **kwargs:new_node._bind(# noqa: E731
self *args **kwargs)<block_end><else_stmt><block_start>ret=<lambda>*args **kwargs:new_node._bind(# noqa: E731
<none> *args **kwargs)<block_end>ret._node_wrapper=new_node<line_sep># ret = wraps(meth)(ret)
<return>ret<block_end>
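# Illustrative sketch of the lazy evaluation and tweak semantics documented in
# Node.__call__ above. Only behaviour visible in this class is used; the
# expected values assume _compare() reports a change whenever values differ.
x = Node(name="x", value=1)
y = Node(name="y", value=2)
total = Node(name="total", callable=lambda a, b: a() + b(), callable_args=[x, y])

print(total())          # 3   - derived nodes start dirty, so this recomputes
x.setValue(10)          # permanent change; dependents become dirty
print(total())          # 12  - lazily re-evaluated on access
print(total({x: 100}))  # 102 - temporary tweak, evaluated without persisting
print(total())          # 12  - x still holds its permanent value of 10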
|
#
# BSD 3-Clause License
#
# This file is part of the RootBA project.
# https://github.com/NikolausDemmel/rootba
#
# Copyright (c) 2021, <NAME>.
# All rights reserved.
#
<import_stmt>os<import_stmt>re<import_from_stmt>collections Mapping<import_from_stmt>.log load_ba_log<import_from_stmt>.util load_toml_if_exists<import_from_stmt>.util load_text_if_exists<class_stmt>Run<block_start>"""Loads files from a single run of an experiment from a folder (config, status, output, log, ...)
A single run is one invocation of a solver with a specific config on a specific problem.
This is meant to be used on directories created with the 'generate-batch-configs' and 'run-all-in' scripts.
It's best-effort, loading as many of the files as are present.
"""<def_stmt>__init__ self dirpath seq_name_mapping<block_start>self.dirpath=dirpath<line_sep>self.config=load_toml_if_exists(os.path.join(dirpath 'rootba_config.toml'))<line_sep>self.status=load_text_if_exists(os.path.join(dirpath 'status.log'))<line_sep>self.output=load_text_if_exists(os.path.join(dirpath 'slurm-output.log'))<line_sep># if we have slurm output, it already contains the program output, so loading it would be redundant
<if_stmt>self.output<is><none><block_start>self.output=load_text_if_exists(os.path.join(dirpath 'output.log'))<block_end># backwards compatibility to older runs that had rootba-output.log instead of output.log
<if_stmt>self.output<is><none><block_start>self.output=load_text_if_exists(os.path.join(dirpath 'rootba-output.log'))<block_end>self.log=load_ba_log(dirpath)<line_sep>self.seq_name=self._infer_sequence_name(self.log dirpath seq_name_mapping)<line_sep>print("loaded {} from '{}'".format(self.seq_name dirpath))<block_end><def_stmt>is_ceres self<block_start><return>self.log.is_ceres()<block_end><def_stmt>is_failed self<block_start><if_stmt>self.log<is><none><block_start><return><true><block_end><if_stmt>"Completed"<not><in>self.status<block_start><return><true><block_end><return><false><block_end><def_stmt>failure_str self<block_start><if_stmt><not>self.is_failed()<block_start><return>""<block_end><if_stmt>self.output<block_start><if_stmt>"Some of your processes may have been killed by the cgroup out-of-memory handler"<in>self.output<block_start><return>"OOM"<block_end><if_stmt>"DUE TO TIME LIMIT"<in>self.output<block_start><return>"OOT"<block_end><block_end><return>"x"<block_end>@staticmethod<def_stmt>_infer_sequence_name log dirpath name_mapping<block_start>"""Tries to infer the problem name from the log, or falls back to the parent folder name"""<line_sep>seq_name=""<try_stmt><block_start>path=log._static.problem_info.input_path<line_sep>m=re.match(r".*/bal/([^/]+)/problem-([0-9]+)-[^/]+.txt" path)<if_stmt>m<block_start>seq_name="{}{}".format(m.group(1) m.group(2))<block_end><block_end><except_stmt><block_start><pass><block_end># Fallback to detecting the sequence name base on the last component of the parent folder. This is intended
# to work for run folders created with the 'generate-batch-configs' script, assuming the sequence is the
# last component in '_batch.combinations'.
<if_stmt>seq_name<eq>""<block_start>seq_name=os.path.basename(dirpath).split("_")[-1]<block_end># optionally remap the sequence name to something else as defined in the experiments config
<if_stmt>isinstance(name_mapping Mapping)<and>seq_name<in>name_mapping<block_start>seq_name=name_mapping[seq_name]<block_end><return>seq_name<block_end>@staticmethod<def_stmt>is_run_dir dirpath<block_start>"""Returns True if the folder may be a run directory, based on the present files
This is intended to be used for auto-detecting run directories in a file tree.
"""<line_sep>files=['status.log' 'slurm-output.log' 'output.log' 'ba_log.ubjson' 'ba_log.json']<for_stmt>f files<block_start><if_stmt>os.path.isfile(os.path.join(dirpath f))<block_start><return><true><block_end><block_end><return><false><block_end><block_end>
|
<import_from_stmt>pymoo.util.misc to_numpy<import_from_stmt>pymoo.util.termination.sliding_window_termination SlidingWindowTermination<class_stmt>SingleObjectiveSpaceToleranceTermination(SlidingWindowTermination)<block_start><def_stmt>__init__ self tol=1e-6 n_last=20 nth_gen=1 n_max_gen=<none> n_max_evals=<none> **kwargs<arrow><none><block_start>super().__init__(metric_window_size=n_last data_window_size=2 min_data_for_metric=2 nth_gen=nth_gen n_max_gen=n_max_gen n_max_evals=n_max_evals **kwargs)<line_sep>self.tol=tol<block_end><def_stmt>_store self algorithm<block_start><return>algorithm.opt.get("F").min()<block_end><def_stmt>_metric self data<block_start>last,current=data[-2] data[-1]<line_sep><return>last-current<block_end><def_stmt>_decide self metrics<block_start>delta_f=to_numpy(metrics)<line_sep><return>delta_f.max()<g>self.tol<block_end><block_end>
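# Minimal numeric sketch of the stopping rule coded above: _store keeps the best
# objective per generation, _metric is the improvement between consecutive
# generations (last - current), and the run continues only while the largest
# improvement in the sliding window still exceeds tol (i.e. _decide is True).
# This standalone helper mirrors that logic outside of pymoo.
import numpy as np

def should_stop(best_f_history, tol=1e-6, n_last=20):
    window = np.asarray(best_f_history[-n_last:])
    if window.size < 2:
        return False
    improvements = window[:-1] - window[1:]   # previous - current, as in _metric
    return improvements.max() <= tol          # negation of _decide's condition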
|
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>librosa<import_from_stmt>audiomate tracks<import_from_stmt>audiomate.utils audio<def_stmt>test_read_blocks tmpdir<block_start>wav_path=os.path.join(tmpdir.strpath 'file.wav')<line_sep>wav_content=np.random.random(10000)<line_sep>librosa.output.write_wav(wav_path wav_content 16000)<line_sep>blocks=list(audio.read_blocks(wav_path buffer_size=1000))<assert_stmt>np.allclose(np.concatenate(blocks) wav_content atol=0.0001)<assert_stmt>np.concatenate(blocks).dtype<eq>np.float32<block_end><def_stmt>test_read_blocks_with_start_end tmpdir<block_start>wav_path=os.path.join(tmpdir.strpath 'file.wav')<line_sep>wav_content=np.random.random(10000)<line_sep>librosa.output.write_wav(wav_path wav_content 16000)<line_sep>blocks=list(audio.read_blocks(wav_path start=0.1 end=0.3 buffer_size=1000))<assert_stmt>np.concatenate(blocks).dtype<eq>np.float32<assert_stmt>np.allclose(np.concatenate(blocks) wav_content[1600:4800] atol=0.0001)<block_end><def_stmt>test_read_frames tmpdir<block_start>wav_path=os.path.join(tmpdir.strpath 'file.wav')<line_sep>wav_content=np.random.random(10044)<line_sep>librosa.output.write_wav(wav_path wav_content 16000)<line_sep>data=list(audio.read_frames(wav_path frame_size=400 hop_size=160))<line_sep>frames=np.array([x[0]<for>x data])<line_sep>last=[x[1]<for>x data]<assert_stmt>frames.shape<eq>(62 400)<assert_stmt>frames.dtype<eq>np.float32<assert_stmt>np.allclose(frames[0] wav_content[:400] atol=0.0001)<assert_stmt>np.allclose(frames[61] np.pad(wav_content[9760:] (0 116) mode='constant') atol=0.0001)<assert_stmt>last[:-1]<eq>[<false>]<times>(len(data)-1)<assert_stmt>last[-1]<block_end><def_stmt>test_read_frames_matches_length tmpdir<block_start>wav_path=os.path.join(tmpdir.strpath 'file.wav')<line_sep>wav_content=np.random.random(10000)<line_sep>librosa.output.write_wav(wav_path wav_content 16000)<line_sep>data=list(audio.read_frames(wav_path frame_size=400 hop_size=160))<line_sep>frames=np.array([x[0]<for>x data])<line_sep>last=[x[1]<for>x data]<assert_stmt>frames.shape<eq>(61 400)<assert_stmt>frames.dtype<eq>np.float32<assert_stmt>np.allclose(frames[0] wav_content[:400] atol=0.0001)<assert_stmt>np.allclose(frames[60] wav_content[9600:] atol=0.0001)<assert_stmt>last[:-1]<eq>[<false>]<times>(len(data)-1)<assert_stmt>last[-1]<block_end><def_stmt>test_write_wav tmpdir<block_start>samples=np.random.random(50000)<line_sep>sr=16000<line_sep>path=os.path.join(tmpdir.strpath 'audio.wav')<line_sep>audio.write_wav(path samples sr=sr)<assert_stmt>os.path.isfile(path)<line_sep>track=tracks.FileTrack('idx' path)<assert_stmt>np.allclose(samples track.read_samples() atol=1.e-04)<block_end>
|
<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>tempfile<import_from_stmt>unittest TestCase<import_from_stmt>parameterized parameterized<import_from_stmt>aws_lambda_builders.actions CopyDependenciesAction MoveDependenciesAction<import_from_stmt>aws_lambda_builders.utils copytree<class_stmt>TestCopyDependenciesAction(TestCase)<block_start>@parameterized.expand([("single_file" ) ("multiple_files" ) ("empty_subfolders" ) ])<def_stmt>test_copy_dependencies_action self source_folder<block_start>curr_dir=Path(__file__).resolve().parent<line_sep>test_folder=os.path.join(curr_dir "testdata" source_folder)<with_stmt>tempfile.TemporaryDirectory()<as>tmpdir<block_start>empty_source=os.path.join(tmpdir "empty_source")<line_sep>target=os.path.join(tmpdir "target")<line_sep>os.mkdir(empty_source)<line_sep>copy_dependencies_action=CopyDependenciesAction(empty_source test_folder target)<line_sep>copy_dependencies_action.execute()<line_sep>self.assertEqual(os.listdir(test_folder) os.listdir(target))<block_end><block_end><block_end><class_stmt>TestMoveDependenciesAction(TestCase)<block_start>@parameterized.expand([("single_file" ) ("multiple_files" ) ("empty_subfolders" ) ])<def_stmt>test_move_dependencies_action self source_folder<block_start>curr_dir=Path(__file__).resolve().parent<line_sep>test_folder=os.path.join(curr_dir "testdata" source_folder)<with_stmt>tempfile.TemporaryDirectory()<as>tmpdir<block_start>test_source=os.path.join(tmpdir "test_source")<line_sep>empty_source=os.path.join(tmpdir "empty_source")<line_sep>target=os.path.join(tmpdir "target")<line_sep>os.mkdir(test_source)<line_sep>os.mkdir(empty_source)<line_sep>copytree(test_folder test_source)<line_sep>move_dependencies_action=MoveDependenciesAction(empty_source test_source target)<line_sep>move_dependencies_action.execute()<line_sep>self.assertEqual(os.listdir(test_folder) os.listdir(target))<block_end><block_end><block_end>
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/funnel_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iPLM5TwcMcTe"
# In this notebook, we explore the "funnel of hell". This refers to a posterior in which
# the mean and variance of a variable are highly correlated, and have a funnel
# shape. (The term "funnel of hell" is from [this blog post](https://twiecki.io/blog/2014/03/17/bayesian-glms-3/) by <NAME>.)
#
# We illustrate this using a hierarchical Bayesian model for inferring Gaussian means, fit to synthetic data, similar to 8 schools (except we vary the sample size and fix the variance). This code is based on [this notebook](http://bebi103.caltech.edu.s3-website-us-east-1.amazonaws.com/2017/tutorials/aux8_mcmc_tips.html) from <NAME>.
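# + [markdown]
# Concretely, the two parameterizations compared below are the centered form
# $\theta_i \sim \mathcal{N}(\mu, \tau)$ and the non-centered form
# $\theta_i = \mu + \tau \, \tilde{\theta}_i$ with $\tilde{\theta}_i \sim \mathcal{N}(0, 1)$
# (the `var_theta` variable in the second model). Writing $\theta_i$ as a scaled
# standard normal removes the direct dependence of its prior scale on $\tau$,
# which is what flattens the funnel geometry for the sampler.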
# + id="-sWa3BStE4ov"
# %matplotlib inline
<import_stmt>sklearn<import_stmt>scipy.stats<as>stats<import_stmt>scipy.optimize<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<import_stmt>time<import_stmt>numpy<as>np<import_stmt>os<import_stmt>pandas<as>pd<line_sep># + id="1UEFiUi-qZA1" colab={"base_uri": "https://localhost:8080/"} outputId="1a20ff5d-68e6-4f60-81e0-1456bfa83b5f"
# !pip install -U pymc3>=3.8
<import_stmt>pymc3<as>pm<line_sep>print(pm.__version__)<import_stmt>arviz<as>az<line_sep>print(az.__version__)<line_sep># + id="SS-lUcY9ovUd"
<import_stmt>math<import_stmt>pickle<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>scipy.stats<as>st<import_stmt>theano.tensor<as>tt<import_stmt>theano<line_sep># + id="H4iJ8eTAr3yF" colab={"base_uri": "https://localhost:8080/"} outputId="23291ee5-7822-41fb-d3ca-c829cd0891f5"
np.random.seed(0)<line_sep># Specify parameters for random data
mu_val=8<line_sep>tau_val=3<line_sep>sigma_val=10<line_sep>n_groups=10<line_sep># Generate number of replicates for each repeat
n=np.random.randint(low=3 high=10 size=n_groups dtype=int)<line_sep>print(n)<line_sep>print(sum(n))<line_sep># + id="oyyDYNGfsmUa" colab={"base_uri": "https://localhost:8080/"} outputId="f8d2cf60-fbbd-4a29-fcd6-747cd2e18870"
# Generate data set
mus=np.zeros(n_groups)<line_sep>x=np.array([])<for_stmt>i range(n_groups)<block_start>mus[i]=np.random.normal(mu_val tau_val)<line_sep>samples=np.random.normal(mus[i] sigma_val size=n[i])<line_sep>x=np.append(x samples)<block_end>print(x.shape)<line_sep>group_ind=np.concatenate([[i]<times>n_val<for>i,n_val enumerate(n)])<line_sep># + id="Vz-gdn-zuCcx" colab={"base_uri": "https://localhost:8080/", "height": 692} outputId="19b32b08-cffc-4800-9667-5ff22df6f387"
<with_stmt>pm.Model()<as>centered_model# Hyperpriors
<block_start>mu=pm.Normal('mu' mu=0 sd=5)<line_sep>tau=pm.HalfCauchy('tau' beta=2.5)<line_sep>log_tau=pm.Deterministic('log_tau' tt.log(tau))<line_sep># Prior on theta
theta=pm.Normal('theta' mu=mu sd=tau shape=n_groups)<line_sep># Likelihood
x_obs=pm.Normal('x_obs' mu=theta[group_ind] sd=sigma_val observed=x)<block_end>np.random.seed(0)<with_stmt>centered_model<block_start>centered_trace=pm.sample(10000 chains=2)<block_end>pm.summary(centered_trace).round(2)<line_sep># + id="UMLPIRMPsgej" colab={"base_uri": "https://localhost:8080/", "height": 963} outputId="3227aaef-1030-490f-8605-5744d27f269c"
<with_stmt>pm.Model()<as>noncentered_model# Hyperpriors
<block_start>mu=pm.Normal('mu' mu=0 sd=5)<line_sep>tau=pm.HalfCauchy('tau' beta=2.5)<line_sep>log_tau=pm.Deterministic('log_tau' tt.log(tau))<line_sep># Prior on theta
#theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_trials)
var_theta=pm.Normal('var_theta' mu=0 sd=1 shape=n_groups)<line_sep>theta=pm.Deterministic('theta' mu+var_theta<times>tau)<line_sep># Likelihood
x_obs=pm.Normal('x_obs' mu=theta[group_ind] sd=sigma_val observed=x)<block_end>np.random.seed(0)<with_stmt>noncentered_model<block_start>noncentered_trace=pm.sample(1000 chains=2)<block_end>pm.summary(noncentered_trace).round(2)<line_sep># + id="XqQQUavXvFWT" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="88b33782-8b68-4057-e1c9-b582e6db8cc1"
fig,axs=plt.subplots(ncols=2 sharex=<true> sharey=<true>)<line_sep>x=pd.Series(centered_trace['mu'] name='mu')<line_sep>y=pd.Series(centered_trace['tau'] name='tau')<line_sep>axs[0].plot(x y '.')<line_sep>axs[0].set(title='Centered' xlabel='µ' ylabel='τ')<line_sep>axs[0].axhline(0.01)<line_sep>x=pd.Series(noncentered_trace['mu'] name='mu')<line_sep>y=pd.Series(noncentered_trace['tau'] name='tau')<line_sep>axs[1].plot(x y '.')<line_sep>axs[1].set(title='NonCentered' xlabel='µ' ylabel='τ')<line_sep>axs[1].axhline(0.01)<line_sep>xlim=axs[0].get_xlim()<line_sep>ylim=axs[0].get_ylim()<line_sep># + id="--jgSNVBLadC" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="6cf32ae5-ee7b-4abe-bf8f-b51450bb02d1"
x=pd.Series(centered_trace['mu'] name='mu')<line_sep>y=pd.Series(centered_trace['tau'] name='tau')<line_sep>g=sns.jointplot(x y xlim=xlim ylim=ylim)<line_sep>plt.suptitle('centered')<line_sep>plt.show()<line_sep># + id="tEfEJ8JuLX43" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="4869fb30-3d07-4e0c-a6da-03c1014923b3"
x=pd.Series(noncentered_trace['mu'] name='mu')<line_sep>y=pd.Series(noncentered_trace['tau'] name='tau')<line_sep>g=sns.jointplot(x y xlim=xlim ylim=ylim)<line_sep>plt.suptitle('noncentered')<line_sep>plt.show()<line_sep># + id="1-FQqDkTFEqy" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="b9804230-dc6c-4586-9a5a-1ad38a9cab82"
fig,axs=plt.subplots(ncols=2 sharex=<true> sharey=<true>)<line_sep>x=pd.Series(centered_trace['mu'] name='mu')<line_sep>y=pd.Series(centered_trace['log_tau'] name='log_tau')<line_sep>axs[0].plot(x y '.')<line_sep>axs[0].set(title='Centered' xlabel='µ' ylabel='log(τ)')<line_sep>x=pd.Series(noncentered_trace['mu'] name='mu')<line_sep>y=pd.Series(noncentered_trace['log_tau'] name='log_tau')<line_sep>axs[1].plot(x y '.')<line_sep>axs[1].set(title='NonCentered' xlabel='µ' ylabel='log(τ)')<line_sep>xlim=axs[0].get_xlim()<line_sep>ylim=axs[0].get_ylim()<line_sep># + id="5QqP9pOLHJR5" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="34dfd8db-fc63-44bb-c203-5b2c64cf9d3c"
#https://seaborn.pydata.org/generated/seaborn.jointplot.html
x=pd.Series(centered_trace['mu'] name='mu')<line_sep>y=pd.Series(centered_trace['log_tau'] name='log_tau')<line_sep>g=sns.jointplot(x y xlim=xlim ylim=ylim)<line_sep>plt.suptitle('centered')<line_sep>plt.show()<line_sep># + id="7jK4o4idIw_u" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="784cde75-c370-457f-e4df-5bb51595246a"
x=pd.Series(noncentered_trace['mu'] name='mu')<line_sep>y=pd.Series(noncentered_trace['log_tau'] name='log_tau')<line_sep>g=sns.jointplot(x y xlim=xlim ylim=ylim)<line_sep>plt.suptitle('noncentered')<line_sep>plt.show()<line_sep># + id="KNam0ZuYYhxw" colab={"base_uri": "https://localhost:8080/", "height": 581} outputId="6a73f609-35a5-433f-bb22-09509881998e"
az.plot_forest([centered_trace noncentered_trace] model_names=['centered' 'noncentered'] var_names="theta" combined=<true> hdi_prob=0.95)<line_sep># + id="sizu9bNdT4K0"
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Grab n Go environment specific constants.
Each one of the constants defined below is an environment specific constant.
A unique value will be required for every unique Google Cloud Project. These
will be stored in Google Cloud Storage in the bucket configured in the
loaner/deployments/config.yaml file for this project.
When adding a configurable project level constant the following procedure must
be used:
1. Add the name of the constant below, the value must be the name that is used
for the flag.
2. Create the flag with a default, no flag should be marked as required using
the `flags` package.
3. Add the name of the constant to the loaner/web_app/constants.py file.
4. (Optional) add a `Parser` object for the name in the `_PARSERS` dictionary.
The `parse` method on the `Parser` object will be used to validate the
current value of the constant, whether the default or a user provided
value. If the value is invalid, a ValueError is raised and the flag message
is used to prompt the user, only ever accepting a value that passes through
the `parse` method. If the manager is run in scripted mode an invalid value
for any constant defined below will cause an error and the script will
exit.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>sys<import_from_stmt>absl flags<import_from_stmt>loaner.deployments.lib utils<line_sep>FLAGS=flags.FLAGS<line_sep># Constant Names #
# These are the configurable constants, the name of the constant matches the
# name in the constants.py file and the value is used as the name of the flag
# and used as the key for getting the respective `utils.Parser` object in the
# `_PARSERS` dictionary below.
APP_DOMAINS='app_domains'<line_sep>CHROME_CLIENT_ID='chrome_client_id'<line_sep>WEB_CLIENT_ID='web_client_id'<line_sep>ADMIN_EMAIL='admin_email'<line_sep>SEND_EMAIL_AS='send_email_as'<line_sep>SUPERADMINS_GROUP='superadmins_group'<line_sep>CUSTOMER_ID='customer_id'<line_sep># Required to be provided either by flag or by prompt.
flags.DEFINE_list(APP_DOMAINS [] 'A comma separated list of second-level domains that will be authorized to '<concat>'access the application. Only add domain names that you want to have access'<concat>' to the web application. Domains should be in the following format: '<concat>"'example.com'")<line_sep>flags.DEFINE_string(CHROME_CLIENT_ID '' 'The Chrome App OAuth2 Client ID.\n'<concat>'In order for the Chrome companion application to be able to make API calls'<concat>' to the management backend an OAuth2 Client ID must be provided. This can '<concat>'be created in the Google Cloud Console at: '<concat>"https://console.cloud.google.com/apis/credentials. The 'Application Type' "<concat>"for this credential is 'Chrome App'.\n"<concat>'Further instructions can be found here: https://support.google.com'<concat>'/cloud/answer/6158849?hl=en#installedapplications&chrome')<line_sep>flags.DEFINE_string(WEB_CLIENT_ID '' 'The Web App OAuth2 Client ID.\n'<concat>'In order for the Web application to be able to make API calls to the '<concat>'management backend an OAuth2 Client ID must be provided. This can '<concat>'be created in the Google Cloud Console at: '<concat>"https://console.cloud.google.com/apis/credentials. The 'Application Type' "<concat>"for this credential is 'Web Application'.\n"<concat>'Further instructions can be found here: https://support.google.com'<concat>'/cloud/answer/6158849?hl=en')<line_sep>flags.DEFINE_string(ADMIN_EMAIL '' 'The email address to use to access the Google Admin SDK Directory API.\n'<concat>'If this address does not exist we will attempt to create it with a strong '<concat>'password, which we will provide you. In order to create this account '<concat>'programmatically you will need to be a Super Admin in the G Suite domain '<concat>'this account is being created in.\nTo create this manually see the '<concat>'setup_guide in the Grab n Go documentation: '<concat>'https://github.com/google/loaner/blob/master/docs/setup_guide.md')<line_sep>flags.DEFINE_string(SEND_EMAIL_AS '' 'The email address from which application related emails will come from. '<concat>'Often a noreply address is used, e.g. <EMAIL>')<line_sep>flags.DEFINE_string(SUPERADMINS_GROUP '' 'The name of the group for whom to grant super admin privileges to. '<concat>'This should include anyone you want to be able to administer Grab n Go '<concat>'from the web application. This gives access to all in app data.')<line_sep># Not required to be provided either by flag or by prompt.
flags.DEFINE_string(CUSTOMER_ID 'my_customer' 'The G Suite customer ID.\nIf you are an administrator of the organization '<concat>'this application is running in leave the default. If you are a reseller '<concat>'you can get the customer ID by making a get user request: '<concat>'https://developers.google.com/admin-sdk/directory/v1/guides/manage-users'<concat>'.html#get_user')<line_sep># Dictionary where the flag name is the key and the value is a parser, an object
# that has `parse` as a public instance method. A parser is not required,
# without one any value will be accepted.
_PARSERS={APP_DOMAINS:utils.ListParser(allow_empty_list=<false>) CHROME_CLIENT_ID:utils.ClientIDParser() WEB_CLIENT_ID:utils.ClientIDParser() ADMIN_EMAIL:utils.EmailParser() SEND_EMAIL_AS:utils.EmailParser() SUPERADMINS_GROUP:utils.StringParser(allow_empty_string=<false>) CUSTOMER_ID:utils.StringParser(allow_empty_string=<false>) }<def_stmt>get_constants_from_flags module=__name__<block_start>"""Returns a dictionary of all constants from flags.
This should only be used when skipping user validation (e.g. scripting) since
it does not validate the provided values with the custom parsers until the
value is requested. If the flag provided does not meet the `Parser`
requirements an error will be raised when attempting to retrieve the value.
Args:
module: str, the name of the module to get the constants from.
Returns:
A dictionary of all constants with the flag value as the constant value.
The key for each constant is the name of the constant.
Raises:
ValueError: when any of the flag values does not meet the parsing
requirements.
"""<def_stmt>_from_flag name<block_start>"""Gets the value of a flag given the name.
If flags have not been parsed, the default value will be used.
Args:
name: str, the name of the flag.
Returns:
The value of the flag.
"""<if_stmt>FLAGS.is_parsed()<block_start><return>getattr(FLAGS name)<block_end><return>FLAGS[name].default<block_end><return>_get_all_constants(module=module func=_from_flag)<block_end><def_stmt>get_default_constants module=__name__<block_start>"""Returns a dictionary of all constants with the default flag value.
This is used to initialize project level constants for a new project from
user prompts.
Args:
module: str, the name of the module to get the constants from.
Returns:
A dictionary of all constants with the default flag value as the constant
value. The key for each constant is the name of the constant.
"""<line_sep><return>_get_all_constants(module=module func=<none>)<block_end><def_stmt>_get_all_constants module=__name__ func=<none><block_start>"""Returns a dictionary of all constants.
This function will return all of the flags configured above as `Constant`
objects. By default, the default value of the flag will be used.
Args:
module: str, the name of the module to get the constants from.
func: Callable, a function that returns the value of each constant given the
name of the flag.
Returns:
A dictionary of all key flags in this module represented as Constants,
keyed by the name of the constant.
"""<line_sep>constants={}<for_stmt>flag FLAGS.get_key_flags_for_module(sys.modules[module])<block_start>value=FLAGS[flag.name].default<if_stmt>func<block_start>value=func(flag.name)<block_end>constants[flag.name]=Constant(flag.name flag.help value _PARSERS.get(flag.name))<block_end><return>constants<block_end><class_stmt>Constant(object)<block_start>"""Grab n Go project level constant.
Attributes:
name: str, the unique key to reference this constant by (this is identical
to the name of the flag above).
message: str, the message shown to the user when they are being prompted
to provide the value of this constant (this is identical to the help
message for the flag).
valid: bool, whether or not the current value is valid.
value: Any, the value of this constant.
"""<def_stmt>__init__ self name message default parser=<none><block_start>"""Initializes the constant.
Args:
name: str, the unique key to reference this constant by (this should be
identical to the name of the flag above).
message: str, the message shown to the user when they are being prompted
to provide the value of this constant (this is identical to the help
message for the flag).
default: Any, the default value of this constant.
parser: Callable, an object to validate and parse the provided input.
A parser must meet the following requirements:
1) The object must have a parse() method that accepts a single
string as input and returns the parsed output.
2) Any error that occurs during parse() should raise a ValueError to
indicate bad user input with a helpful error message.
An example can be found in the utils module in this package.
"""<line_sep>self._name=name<line_sep>self._message=message<line_sep>self._value=default<line_sep>self._parser=parser<block_end><def_stmt>__str__ self<block_start><return>'{}: {}'.format(self.name self._value)<block_end><def_stmt>__repr__ self<block_start><return>'<{0}({1!r}, {2!r}, {3!r}, {4!r})>'.format(self.__class__.__name__ self.name self.message self._value self._parser)<block_end><def_stmt>__eq__ self other<block_start><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self.__eq__(other)<block_end>@property<def_stmt>name self<block_start>"""Getter for the name."""<line_sep><return>self._name<block_end>@property<def_stmt>message self<block_start>"""Getter for the user message."""<line_sep><return>self._message<block_end>@property<def_stmt>value self<block_start>"""Getter for the current value."""<line_sep><return>self._value<block_end>@value.setter<def_stmt>value self value<block_start>"""Setter for the current value."""<line_sep>self._value=value<if>self._parser<is><none><else>self._parser.parse(value)<block_end>@property<def_stmt>valid self<block_start>"""Getter for whether or not the current value is valid."""<if_stmt>self._parser<is><none><block_start><return><true><block_end><try_stmt><block_start>self._parser.parse(self.value)<block_end><except_stmt>ValueError<block_start><return><false><block_end><return><true><block_end><def_stmt>prompt self<block_start>"""Prompts the user for a new value."""<line_sep>self.value=utils.prompt(self.message default=self.value parser=self._parser)<block_end><block_end>
|
"""!
@brief Unit-tests for Oscillatory Neural Network based on Kuramoto model and Landau-Stuart.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""<import_stmt>unittest<line_sep># Generate images without having a window appear.
<import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_from_stmt>pyclustering.nnet conn_type conn_represent<import_from_stmt>pyclustering.nnet.fsync fsync_network fsync_visualizer<class_stmt>FsyncUnitTest(unittest.TestCase)<block_start><def_stmt>templateCreateNetwork self network_size<block_start>oscillatory_network=fsync_network(network_size)<assert_stmt>len(oscillatory_network)<eq>network_size<line_sep><block_end><def_stmt>testCreateNetworkSize1 self<block_start>self.templateCreateNetwork(1)<line_sep><block_end><def_stmt>testCreateNetworkSize20 self<block_start>self.templateCreateNetwork(20)<line_sep><block_end><def_stmt>testCreateNetworkSize100 self<block_start>self.templateCreateNetwork(100)<line_sep><block_end><def_stmt>templateSimulateStaticOutputDynamic self num_osc steps time collect_dynamic<block_start>oscillatory_network=fsync_network(num_osc)<line_sep>output_dynamic=oscillatory_network.simulate(steps time collect_dynamic)<if_stmt>(collect_dynamic<is><true>)<block_start><assert_stmt>len(output_dynamic)<eq>steps+1<assert_stmt>output_dynamic.time[0]<eq>0<block_end><else_stmt><block_start><assert_stmt>len(output_dynamic)<eq>1<line_sep><block_end><assert_stmt>output_dynamic.time[len(output_dynamic)-1]<eq>time<line_sep><block_end><def_stmt>testSimulateStatic10StepsTime10 self<block_start>self.templateSimulateStaticOutputDynamic(10 10 10 <true>)<line_sep><block_end><def_stmt>testSimulateStatic100StepsTime10 self<block_start>self.templateSimulateStaticOutputDynamic(3 100 10 <true>)<line_sep><block_end><def_stmt>testSimulateStatic100StepsTime1 self<block_start>self.templateSimulateStaticOutputDynamic(3 100 1 <true>)<line_sep><block_end><def_stmt>testSimulateStatic50StepsTime10WithoutCollecting self<block_start>self.templateSimulateStaticOutputDynamic(3 50 10 <false>)<line_sep><block_end><def_stmt>testSimulateStatic100StepsTime10WithoutCollecting self<block_start>self.templateSimulateStaticOutputDynamic(1 100 10 <false>)<line_sep><block_end><def_stmt>templateGlobalSynchronization self size steps time frequency radius coupling amplitude_threshold connections representation<block_start>oscillatory_network=fsync_network(size frequency radius coupling connections representation)<line_sep>output_dynamic=oscillatory_network.simulate(steps time <true>)<for_stmt>index_oscillator range(len(oscillatory_network))<block_start><assert_stmt>output_dynamic.extract_number_oscillations(index_oscillator amplitude_threshold)<g>0<line_sep><block_end>sync_ensembles=output_dynamic.allocate_sync_ensembles(amplitude_threshold)<assert_stmt>len(sync_ensembles)<eq>1<assert_stmt>len(sync_ensembles[0])<eq>size<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorAllToAll self<block_start>self.templateGlobalSynchronization(1 50 10 1.0 1.0 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorAllToAll self<block_start>self.templateGlobalSynchronization(5 50 10 1.0 1.0 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorGridFour self<block_start>self.templateGlobalSynchronization(1 50 10 1.0 1.0 1.0 0.8 conn_type.GRID_FOUR conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorGridFour self<block_start>self.templateGlobalSynchronization(9 50 10 1.0 1.0 1.0 0.8 conn_type.GRID_FOUR conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorGridEight self<block_start>self.templateGlobalSynchronization(1 50 10 1.0 1.0 1.0 0.8 conn_type.GRID_EIGHT 
conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorGridEight self<block_start>self.templateGlobalSynchronization(9 50 10 1.0 1.0 1.0 0.8 conn_type.GRID_EIGHT conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorBidir self<block_start>self.templateGlobalSynchronization(1 50 10 1.0 1.0 1.0 0.8 conn_type.LIST_BIDIR conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorBidir self<block_start>self.templateGlobalSynchronization(5 50 10 1.0 1.0 1.0 0.8 conn_type.LIST_BIDIR conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorDifferentFrequency self<block_start>self.templateGlobalSynchronization(1 50 10 [1.0] 1.0 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorDifferentFrequency self<block_start>self.templateGlobalSynchronization(5 100 20 [1.0 1.1 1.1 1.2 1.15] 1.0 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorDifferentRadius self<block_start>self.templateGlobalSynchronization(1 50 10 1.0 [1.0] 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorDifferentRadius self<block_start>self.templateGlobalSynchronization(5 50 10 1.0 [1.0 2.0 3.0 4.0 5.0] 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncOneOscillatorDifferentProperty self<block_start>self.templateGlobalSynchronization(1 50 10 [1.0] [1.0] 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>testGlobalSyncGroupOscillatorDifferentProperty self<block_start>self.templateGlobalSynchronization(5 100 20 [1.0 1.1 1.1 1.2 1.15] [1.0 2.0 3.0 4.0 5.0] 1.0 0.8 conn_type.ALL_TO_ALL conn_represent.MATRIX)<line_sep><block_end><def_stmt>templateNoOscillations self size steps time frequency radius amplitude_threshold<block_start>oscillatory_network=fsync_network(size frequency radius)<line_sep>output_dynamic=oscillatory_network.simulate(steps time <true>)<for_stmt>index_oscillator range(len(oscillatory_network))<block_start><assert_stmt>output_dynamic.extract_number_oscillations(index_oscillator amplitude_threshold)<eq>0<line_sep><block_end><block_end><def_stmt>testNoOscillationsZeroFrequency self<block_start>self.templateNoOscillations(5 50 10 0.0 1.0 0.5)<block_end><def_stmt>testNoOscillationsZeroRadius self<block_start>self.templateNoOscillations(5 50 10 1.0 0.0 0.5)<block_end><def_stmt>testLackCrashGraphics self<block_start>oscillatory_network=fsync_network(5)<line_sep>output_dynamic=oscillatory_network.simulate(50 10 <true>)<line_sep>fsync_visualizer.show_output_dynamic(output_dynamic)<line_sep>fsync_visualizer.show_output_dynamics([output_dynamic])<block_end><def_stmt>testLackCrashGraphicsDynamicSet self<block_start>oscillatory_network_1=fsync_network(2)<line_sep>oscillatory_network_2=fsync_network(3)<line_sep>output_dynamic_1=oscillatory_network_1.simulate(50 10 <true>)<line_sep>output_dynamic_2=oscillatory_network_2.simulate(50 10 <true>)<line_sep>fsync_visualizer.show_output_dynamics([output_dynamic_1 output_dynamic_2])<block_end><block_end>
|
<import_stmt>os numpy<as>np<import_from_stmt>os.path dirname exists join splitext<import_stmt>json scipy<class_stmt>Dataset(object)<block_start><def_stmt>__init__ self dataset_name<block_start>self.work_dir=dirname(os.path.realpath('__file__'))<line_sep>info_path=join(self.work_dir 'datasets' dataset_name+'.json')<with_stmt>open(info_path 'r')<as>fp<block_start>info=json.load(fp)<block_end>self.palette=np.array(info['palette'] dtype=np.uint8)<block_end><block_end><def_stmt>get_semantic_map path<block_start>dataset=Dataset('cityscapes')<line_sep>semantic=scipy.misc.imread(path)<line_sep>tmp=np.zeros((semantic.shape[0] semantic.shape[1] dataset.palette.shape[0]) dtype=np.float32)<for_stmt>k range(dataset.palette.shape[0])<block_start>tmp[: : k]=np.float32((semantic[: : 0]<eq>dataset.palette[k 0])&(semantic[: : 1]<eq>dataset.palette[k 1])&(semantic[: : 2]<eq>dataset.palette[k 2]))<block_end><return>tmp.reshape((1 )+tmp.shape)<block_end><def_stmt>print_semantic_map semantic path<block_start>dataset=Dataset('cityscapes')<line_sep>semantic=semantic.transpose([1 2 3 0])<line_sep>prediction=np.argmax(semantic axis=2)<line_sep>color_image=dataset.palette[prediction.ravel()].reshape((prediction.shape[0] prediction.shape[1] 3))<line_sep>row,col,dump=np.where(np.sum(semantic axis=2)<eq>0)<line_sep>color_image[row col :]=0<line_sep>scipy.misc.imsave(path color_image)<block_end>
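A minimal usage sketch for the two helpers above. Assumptions (not in the original row): the file is importable as a module, a `datasets/cityscapes.json` with a `palette` entry exists under the working directory, an older SciPy where `scipy.misc.imread`/`imsave` still exist (with Pillow installed) is in use, and the label-image filename below is a placeholder.
# Hedged sketch -- the filenames are placeholders, not taken from the original code.
one_hot = get_semantic_map('gtFine_color_example.png')
print(one_hot.shape)  # (1, H, W, num_classes): one channel per palette color
# Round-trip check: feed the one-hot map back and write a color image.
print_semantic_map(one_hot, 'roundtrip.png')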
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>mindspore.dataset<as>ds<import_stmt>mindspore.dataset.audio.transforms<as>audio<import_from_stmt>mindspore log<as>logger<def_stmt>count_unequal_element data_expected data_me rtol atol<block_start><assert_stmt>data_expected.shape<eq>data_me.shape<line_sep>total_count=len(data_expected.flatten())<line_sep>error=np.abs(data_expected-data_me)<line_sep>greater=np.greater(error atol+np.abs(data_expected)<times>rtol)<line_sep>loss_count=np.count_nonzero(greater)<assert_stmt>(loss_count/total_count)<l>rtol "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(data_expected[greater] data_me[greater] error[greater])<block_end><def_stmt>test_func_biquad_eager <block_start>""" mindspore eager mode normal testcase:biquad op"""<line_sep># Original waveform
waveform=np.array([[1 2 3] [4 5 6]] dtype=np.float64)<line_sep># Expected waveform
expect_waveform=np.array([[0.0100 0.0388 0.1923] [0.0400 0.1252 0.6530]] dtype=np.float64)<line_sep>biquad_op=audio.Biquad(0.01 0.02 0.13 1 0.12 0.3)<line_sep># Filtered waveform by biquad
output=biquad_op(waveform)<line_sep>count_unequal_element(expect_waveform output 0.0001 0.0001)<block_end><def_stmt>test_func_biquad_pipeline <block_start>""" mindspore pipeline mode normal testcase:biquad op"""<line_sep># Original waveform
waveform=np.array([[3.2 2.1 1.3] [6.2 5.3 6]] dtype=np.float64)<line_sep># Expected waveform
expect_waveform=np.array([[1.0000 1.0000 0.5844] [1.0000 1.0000 1.0000]] dtype=np.float64)<line_sep>dataset=ds.NumpySlicesDataset(waveform ["audio"] shuffle=<false>)<line_sep>biquad_op=audio.Biquad(1 0.02 0.13 1 0.12 0.3)<line_sep># Filtered waveform by biquad
dataset=dataset.map(input_columns=["audio"] operations=biquad_op num_parallel_workers=8)<line_sep>i=0<for_stmt>item dataset.create_dict_iterator(num_epochs=1 output_numpy=<true>)<block_start>count_unequal_element(expect_waveform[i :] item['audio'] 0.0001 0.0001)<line_sep>i<augadd>1<block_end><block_end><def_stmt>test_biquad_invalid_input <block_start><def_stmt>test_invalid_input test_name b0 b1 b2 a0 a1 a2 error error_msg<block_start>logger.info("Test Biquad with bad input: {0}".format(test_name))<with_stmt>pytest.raises(error)<as>error_info<block_start>audio.Biquad(b0 b1 b2 a0 a1 a2)<block_end><assert_stmt>error_msg<in>str(error_info.value)<block_end>test_invalid_input("invalid b0 parameter type as a String" "0.01" 0.02 0.13 1 0.12 0.3 TypeError "Argument b0 with value 0.01 is not of type [<class 'float'>, <class 'int'>],"<concat>" but got <class 'str'>.")<line_sep>test_invalid_input("invalid b0 parameter value" 441324343243242342345300 0.02 0.13 1 0.12 0.3 ValueError "Input b0 is not within the required interval of [-16777216, 16777216].")<line_sep>test_invalid_input("invalid b1 parameter type as a String" 0.01 "0.02" 0.13 0 0.12 0.3 TypeError "Argument b1 with value 0.02 is not of type [<class 'float'>, <class 'int'>],"<concat>" but got <class 'str'>.")<line_sep>test_invalid_input("invalid b1 parameter value" 0.01 441324343243242342345300 0.13 1 0.12 0.3 ValueError "Input b1 is not within the required interval of [-16777216, 16777216].")<line_sep>test_invalid_input("invalid b2 parameter type as a String" 0.01 0.02 "0.13" 0 0.12 0.3 TypeError "Argument b2 with value 0.13 is not of type [<class 'float'>, <class 'int'>],"<concat>" but got <class 'str'>.")<line_sep>test_invalid_input("invalid b2 parameter value" 0.01 0.02 441324343243242342345300 1 0.12 0.3 ValueError "Input b2 is not within the required interval of [-16777216, 16777216].")<line_sep>test_invalid_input("invalid a0 parameter type as a String" 0.01 0.02 0.13 '1' 0.12 0.3 TypeError "Argument a0 with value 1 is not of type [<class 'float'>, <class 'int'>],"<concat>" but got <class 'str'>.")<line_sep>test_invalid_input("invalid a0 parameter value" 0.01 0.02 0.13 0 0.12 0.3 ValueError "Input a0 is not within the required interval of [-16777216, 0) and (0, 16777216].")<line_sep>test_invalid_input("invalid a0 parameter value" 0.01 0.02 0.13 441324343243242342345300 0.12 0.3 ValueError "Input a0 is not within the required interval of [-16777216, 0) and (0, 16777216].")<line_sep>test_invalid_input("invalid a1 parameter type as a String" 0.01 0.02 0.13 1 '0.12' 0.3 TypeError "Argument a1 with value 0.12 is not of type [<class 'float'>, <class 'int'>],"<concat>" but got <class 'str'>.")<line_sep>test_invalid_input("invalid a1 parameter value" 0.01 0.02 0.13 1 441324343243242342345300 0.3 ValueError "Input a1 is not within the required interval of [-16777216, 16777216].")<line_sep>test_invalid_input("invalid a2 parameter type as a String" 0.01 0.02 0.13 1 0.12 '0.3' TypeError "Argument a2 with value 0.3 is not of type [<class 'float'>, <class 'int'>],"<concat>" but got <class 'str'>.")<line_sep>test_invalid_input("invalid a1 parameter value" 0.01 0.02 0.13 1 0.12 441324343243242342345300 ValueError "Input a2 is not within the required interval of [-16777216, 16777216].")<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_func_biquad_eager()<line_sep>test_func_biquad_pipeline()<line_sep>test_biquad_invalid_input()<block_end>
|
<import_stmt>os<import_stmt>argparse<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>torchvision utils<import_from_stmt>model Generator<import_from_stmt>tqdm tqdm<def_stmt>generate args g_ema device mean_latent<block_start><with_stmt>torch.no_grad()<block_start>g_ema.eval()<line_sep>count=0<for_stmt>i tqdm(range(args.pics))<block_start>sample_z=torch.randn(args.sample args.latent device=device)<line_sep>sample_w=g_ema.style_forward(sample_z)<line_sep>sample,_=g_ema([sample_w] truncation=args.truncation truncation_latent=mean_latent input_is_w=<true>)<line_sep>sample_w=mean_latent+args.truncation<times>(sample_w-mean_latent)<for_stmt>j range(args.sample)<block_start>utils.save_image(sample[j] f'{args.save_path}/{str(count).zfill(6)}.png' nrow=1 normalize=<true> range=(-1 1) )<line_sep>torch.save(sample_w[j] f'{args.save_path}/latents/{str(count).zfill(6)}.pt')<line_sep>count<augadd>1<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>device='cuda'<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--size' type=int default=1024)<line_sep>parser.add_argument('--sample' type=int default=1)<line_sep>parser.add_argument('--pics' type=int default=20)<line_sep>parser.add_argument('--truncation' type=float default=0.7)<line_sep>parser.add_argument('--truncation_mean' type=int default=4096)<line_sep>parser.add_argument('--ckpt' type=str default="stylegan2-ffhq-config-f.pt")<line_sep>parser.add_argument('--channel_multiplier' type=int default=2)<line_sep>parser.add_argument('--seed' type=int default=0)<line_sep>parser.add_argument('--gpu' type=int default=0)<line_sep>parser.add_argument('--save_path' type=str default='sample')<line_sep>args=parser.parse_args()<line_sep>args.latent=512<line_sep>args.n_mlp=8<line_sep>torch.manual_seed(args.seed)# also sets cuda seeds
<if_stmt><not>os.path.exists(args.save_path)<block_start>os.makedirs(args.save_path)<line_sep>os.makedirs(args.save_path+'/latents')<block_end>g_ema=Generator(args.size args.latent args.n_mlp channel_multiplier=args.channel_multiplier).to(device)<line_sep>checkpoint=torch.load(args.ckpt)<line_sep>g_ema.load_state_dict(checkpoint['g_ema'] strict=<false>)<if_stmt>args.truncation<l>1<block_start><with_stmt>torch.no_grad()<block_start>mean_latent=g_ema.mean_latent(args.truncation_mean)<block_end><block_end><else_stmt><block_start>mean_latent=<none><block_end>generate(args g_ema device mean_latent)<block_end>
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>rplidar RPLidar<as>RPLidarAPI<import_stmt>numpy<as>np<import_stmt>math<import_from_stmt>opendr.engine.data PointCloud<import_stmt>threading<import_stmt>atexit<def_stmt>create_point_cloud scan z=0<block_start>points=np.empty((len(scan) 4) dtype=np.float32)<for_stmt>i,s enumerate(scan)<block_start>r,angle_degrees,distance_mm=s<line_sep>angle_rad=angle_degrees<times>math.pi/180<line_sep>y=math.sin(angle_rad)<times>distance_mm/1000<line_sep>x=math.cos(angle_rad)<times>distance_mm/1000<line_sep>points[i]=[x y z r/16]<block_end><return>points<block_end><class_stmt>RPLidar<block_start><def_stmt>__init__ self port baudrate=115200 timeout=1<block_start>lidar=RPLidarAPI(port=port baudrate=baudrate timeout=timeout)<line_sep>self.lidar=lidar<line_sep>lidar.clean_input()<line_sep>info=lidar.get_info()<line_sep>print(info)<line_sep>health=lidar.get_health()<line_sep>print(health)<line_sep>self.iterate_thread=threading.Thread(target=self.__itereate_scans)<line_sep>self.iterate_thread.start()<line_sep>self.lock=threading.Lock()<line_sep>self.last_point_cloud=np.zeros((0 3) dtype=np.float32)<line_sep>self.running=<true><line_sep>atexit.register(self.stop)<block_end><def_stmt>__itereate_scans self<block_start><for_stmt>scan self.lidar.iter_scans(min_len=1)<block_start>pc=create_point_cloud(scan)<with_stmt>self.lock<block_start>self.last_point_cloud=pc<block_end><if_stmt><not>self.running<block_start><return><block_end><block_end><block_end><def_stmt>next self<block_start><with_stmt>self.lock<block_start><return>PointCloud(self.last_point_cloud)<block_end><block_end><def_stmt>stop self<block_start>self.running=<false><line_sep>self.iterate_thread.join()<line_sep>self.lidar.stop()<line_sep>self.lidar.stop_motor()<line_sep>self.lidar.disconnect()<block_end><block_end>
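A minimal usage sketch for the wrapper above. The serial port string is a placeholder and a connected RPLIDAR device is assumed; neither appears in the original file.
# Hedged sketch -- '/dev/ttyUSB0' is a placeholder port.
lidar = RPLidar('/dev/ttyUSB0')
pc = lidar.next()   # latest scan as an opendr PointCloud with (x, y, z, quality/16) rows
lidar.stop()        # stops the background scan thread, the motor, and disconnects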
|
"""
144. Binary Tree Preorder Traversal
Given a binary tree, return the preorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,2,3]
Follow up: Recursive solution is trivial, could you do it iteratively?
"""<class_stmt>Solution<block_start><def_stmt>preorderTraversal self root<block_start>"""
:type root: TreeNode
:rtype: List[int]
"""<line_sep>res=[]<line_sep>cur=root<while_stmt>cur<block_start><if_stmt><not>cur.left<block_start>res.append(cur.val)<line_sep>cur=cur.right<block_end><else_stmt><block_start>pre=cur.left<while_stmt>pre.right<and>pre.right<ne>cur<block_start>pre=pre.right<block_end><if_stmt><not>pre.right<block_start>res.append(cur.val)#第一次的pass 就加上
pre.right=cur<line_sep>cur=cur.left<block_end><else_stmt><block_start>pre.right=<none>#重复pass parent,不加
cur=cur.right<block_end><block_end><block_end><return>res<block_end><block_end><class_stmt>Solution<block_start><def_stmt>preorderTraversal self root<block_start>res=[]<line_sep>stack=[]<line_sep>cur=root<while_stmt>cur<or>stack<block_start><if_stmt>cur<block_start>res.append(cur.val)<line_sep>stack.append(cur)<line_sep>cur=cur.left<block_end><else_stmt><block_start>cur=stack.pop()<line_sep>cur=cur.right<block_end><block_end><return>res<block_end><block_end><class_stmt>Solution<block_start><def_stmt>preorderTraversal self root<block_start>res=[]<line_sep>stack=[(root <false>)]<line_sep>cur=root<while_stmt>stack<block_start>node,visited=stack.pop()<if_stmt>node<block_start><if_stmt>visited<block_start>res.append(node.val)<block_end><else_stmt><block_start>stack.append((node.right <false>))<line_sep>stack.append((node.left <false>))<line_sep>stack.append((node <true>))<block_end><block_end><block_end><return>res<block_end><block_end><class_stmt>Solution<block_start><def_stmt>preorderTraversal self root<block_start>ret=[]<line_sep>stack=[root]<line_sep># iteratively through the stack
<while_stmt>stack<block_start>node=stack.pop()<if_stmt>node<block_start>ret.append(node.val)<line_sep>stack.append(node.right)<line_sep>stack.append(node.left)<block_end><block_end><return>ret<block_end><block_end><class_stmt>Solution<block_start><def_stmt>preorderTraversal self root<block_start><return>[]<if>root<is><none><else>[root.val]+self.preorderTraversal(root.left)+self.preorderTraversal(root.right)<block_end><block_end>
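A quick self-check of the solutions above on the example input [1,null,2,3]. The TreeNode class here is the usual LeetCode definition and is an assumption; the original snippet does not define it.
# Assumed standard LeetCode node definition (not part of the original snippet).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# Example tree from the docstring: 1 with right child 2, whose left child is 3.
root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
assert Solution().preorderTraversal(root) == [1, 2, 3]  # uses the last (recursive) Solution defined above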
|