# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>json<import_from_stmt>tencentcloud.common.exception.tencent_cloud_sdk_exception TencentCloudSDKException<import_from_stmt>tencentcloud.common.abstract_client AbstractClient<import_from_stmt>tencentcloud.facefusion.v20181201 models<class_stmt>FacefusionClient(AbstractClient)<block_start>_apiVersion='2018-12-01'<line_sep>_endpoint='facefusion.tencentcloudapi.com'<line_sep>_service='facefusion'<def_stmt>DescribeMaterialList self request<block_start>"""通常通过腾讯云人脸融合的控制台可以查看到素材相关的参数数据,可以满足使用。本接口返回活动的素材数据,包括素材状态等。用于用户通过Api查看素材相关数据,方便使用。
:param request: Request instance for DescribeMaterialList.
:type request: :class:`tencentcloud.facefusion.v20181201.models.DescribeMaterialListRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.DescribeMaterialListResponse`
"""<try_stmt><block_start>params=request._serialize()<line_sep>body=self.call("DescribeMaterialList" params)<line_sep>response=json.loads(body)<if_stmt>"Error"<not><in>response["Response"]<block_start>model=models.DescribeMaterialListResponse()<line_sep>model._deserialize(response["Response"])<line_sep><return>model<block_end><else_stmt><block_start>code=response["Response"]["Error"]["Code"]<line_sep>message=response["Response"]["Error"]["Message"]<line_sep>reqid=response["Response"]["RequestId"]<line_sep><raise>TencentCloudSDKException(code message reqid)<block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt>isinstance(e TencentCloudSDKException)<block_start><raise><block_end><else_stmt><block_start><raise>TencentCloudSDKException(e.message e.message)<block_end><block_end><block_end><def_stmt>FaceFusion self request<block_start>"""本接口用于人脸融合,用户上传人脸图片,获取与模板融合后的人脸图片。未发布的活动请求频率限制为1次/秒,已发布的活动请求频率限制50次/秒。如有需要提高活动的请求频率限制,请在控制台中申请。
>
- The signature method in the common parameters must be set to the V3 version, i.e. configure the SignatureMethod parameter as TC3-HMAC-SHA256.
:param request: Request instance for FaceFusion.
:type request: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionResponse`
"""<try_stmt><block_start>params=request._serialize()<line_sep>body=self.call("FaceFusion" params)<line_sep>response=json.loads(body)<if_stmt>"Error"<not><in>response["Response"]<block_start>model=models.FaceFusionResponse()<line_sep>model._deserialize(response["Response"])<line_sep><return>model<block_end><else_stmt><block_start>code=response["Response"]["Error"]["Code"]<line_sep>message=response["Response"]["Error"]["Message"]<line_sep>reqid=response["Response"]["RequestId"]<line_sep><raise>TencentCloudSDKException(code message reqid)<block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt>isinstance(e TencentCloudSDKException)<block_start><raise><block_end><else_stmt><block_start><raise>TencentCloudSDKException(e.message e.message)<block_end><block_end><block_end><def_stmt>FaceFusionLite self request<block_start>"""人脸融合活动专用版,不推荐使用。人脸融合接口建议使用[人脸融合](https://cloud.tencent.com/document/product/670/31061)或[选脸融合](https://cloud.tencent.com/document/product/670/37736)接口
:param request: Request instance for FaceFusionLite.
:type request: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionLiteRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.FaceFusionLiteResponse`
"""<try_stmt><block_start>params=request._serialize()<line_sep>body=self.call("FaceFusionLite" params)<line_sep>response=json.loads(body)<if_stmt>"Error"<not><in>response["Response"]<block_start>model=models.FaceFusionLiteResponse()<line_sep>model._deserialize(response["Response"])<line_sep><return>model<block_end><else_stmt><block_start>code=response["Response"]["Error"]["Code"]<line_sep>message=response["Response"]["Error"]["Message"]<line_sep>reqid=response["Response"]["RequestId"]<line_sep><raise>TencentCloudSDKException(code message reqid)<block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt>isinstance(e TencentCloudSDKException)<block_start><raise><block_end><else_stmt><block_start><raise>TencentCloudSDKException(e.message e.message)<block_end><block_end><block_end><def_stmt>FuseFace self request<block_start>"""本接口用于单脸、多脸融合,用户上传人脸图片,获取与模板融合后的人脸图片。查看 <a href="https://cloud.tencent.com/document/product/670/38247" target="_blank">选脸融合接入指引</a>。
The request rate limit is 1 request/second for unpublished activities and 50 requests/second for published activities. To raise the rate limit for an activity, apply in the console.
>
- The signature method in the common parameters must be set to the V3 version, i.e. configure the SignatureMethod parameter as TC3-HMAC-SHA256.
:param request: Request instance for FuseFace.
:type request: :class:`tencentcloud.facefusion.v20181201.models.FuseFaceRequest`
:rtype: :class:`tencentcloud.facefusion.v20181201.models.FuseFaceResponse`
"""<try_stmt><block_start>params=request._serialize()<line_sep>body=self.call("FuseFace" params)<line_sep>response=json.loads(body)<if_stmt>"Error"<not><in>response["Response"]<block_start>model=models.FuseFaceResponse()<line_sep>model._deserialize(response["Response"])<line_sep><return>model<block_end><else_stmt><block_start>code=response["Response"]["Error"]["Code"]<line_sep>message=response["Response"]["Error"]["Message"]<line_sep>reqid=response["Response"]["RequestId"]<line_sep><raise>TencentCloudSDKException(code message reqid)<block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt>isinstance(e TencentCloudSDKException)<block_start><raise><block_end><else_stmt><block_start><raise>TencentCloudSDKException(e.message e.message)<block_end><block_end><block_end><block_end> |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>random Random<import_from_stmt>robotide.lib.robot.model SuiteVisitor<class_stmt>Randomizer(SuiteVisitor)<block_start><def_stmt>__init__ self randomize_suites=<true> randomize_tests=<true> seed=<none><block_start>self.randomize_suites=randomize_suites<line_sep>self.randomize_tests=randomize_tests<line_sep>self.seed=seed<line_sep># Cannot use just Random(seed) due to
# https://ironpython.codeplex.com/workitem/35155
args=(seed )<if>seed<is><not><none><else>()<line_sep>self._shuffle=Random(*args).shuffle<block_end><def_stmt>start_suite self suite<block_start><if_stmt><not>self.randomize_suites<and><not>self.randomize_tests<block_start><return><false><block_end><if_stmt>self.randomize_suites<block_start>self._shuffle(suite.suites)<block_end><if_stmt>self.randomize_tests<block_start>self._shuffle(suite.tests)<block_end><if_stmt><not>suite.parent<block_start>suite.metadata['Randomized']=self._get_message()<block_end><block_end><def_stmt>_get_message self<block_start>possibilities={(<true> <true>):'Suites and tests' (<true> <false>):'Suites' (<false> <true>):'Tests'}<line_sep>randomized=(self.randomize_suites self.randomize_tests)<line_sep><return>'%s (seed %s)'%(possibilities[randomized] self.seed)<block_end><def_stmt>visit_test self test<block_start><pass><block_end><def_stmt>visit_keyword self kw<block_start><pass><block_end><block_end> |
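A brief usage sketch of the visitor above; it assumes the standard Robot Framework `TestSuiteBuilder` API and a hypothetical `tests/` directory:

```python
from robot.api import TestSuiteBuilder

suite = TestSuiteBuilder().build("tests/")   # hypothetical test directory
suite.visit(Randomizer(randomize_suites=True, randomize_tests=True, seed=1234))
print(suite.metadata.get("Randomized"))      # e.g. "Suites and tests (seed 1234)"
```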
<import_stmt>os<import_stmt>cv2<import_stmt>dlib<try_stmt><block_start><import_stmt>urllib.request<as>request_file<block_end><except_stmt>BaseException<block_start><import_stmt>urllib<as>request_file<block_end><import_from_stmt>..core FaceDetector<import_from_stmt>...utils appdata_dir<class_stmt>DlibDetector(FaceDetector)<block_start><def_stmt>__init__ self device path_to_detector=<none> verbose=<false><block_start>super().__init__(device verbose)<line_sep>base_path=os.path.join(appdata_dir('face_alignment') "data")<line_sep># Initialise the face detector
<if_stmt>'cuda'<in>device<block_start><if_stmt>path_to_detector<is><none><block_start>path_to_detector=os.path.join(base_path "mmod_human_face_detector.dat")<if_stmt><not>os.path.isfile(path_to_detector)<block_start>print("Downloading the face detection CNN. Please wait...")<line_sep>path_to_temp_detector=os.path.join(base_path "mmod_human_face_detector.dat.download")<if_stmt>os.path.isfile(path_to_temp_detector)<block_start>os.remove(os.path.join(path_to_temp_detector))<block_end>request_file.urlretrieve("https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat" os.path.join(path_to_temp_detector))<line_sep>os.rename(os.path.join(path_to_temp_detector) os.path.join(path_to_detector))<block_end><block_end>self.face_detector=dlib.cnn_face_detection_model_v1(path_to_detector)<block_end><else_stmt><block_start>self.face_detector=dlib.get_frontal_face_detector()<block_end><block_end><def_stmt>detect_from_image self tensor_or_path<block_start>image=self.tensor_or_path_to_ndarray(tensor_or_path rgb=<false>)<line_sep>detected_faces=self.face_detector(cv2.cvtColor(image cv2.COLOR_BGR2GRAY))<if_stmt>'cuda'<not><in>self.device<block_start>detected_faces=[[d.left() d.top() d.right() d.bottom()]<for>d detected_faces]<block_end><else_stmt><block_start>detected_faces=[[d.rect.left() d.rect.top() d.rect.right() d.rect.bottom()]<for>d detected_faces]<block_end><return>detected_faces<block_end>@property<def_stmt>reference_scale self<block_start><return>195<block_end>@property<def_stmt>reference_x_shift self<block_start><return>0<block_end>@property<def_stmt>reference_y_shift self<block_start><return>0<block_end><block_end> |
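A minimal usage sketch of the detector above. The image path is hypothetical, and handling of path/array/tensor inputs is assumed to come from the `FaceDetector` base class (`tensor_or_path_to_ndarray`):

```python
detector = DlibDetector("cpu")                     # "cuda" switches to the CNN model (downloaded on first use)
boxes = detector.detect_from_image("selfie.jpg")   # hypothetical image path
for left, top, right, bottom in boxes:
    print(left, top, right, bottom)
```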
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>l1GtPatternGenerator=cms.EDAnalyzer("L1GtPatternGenerator" # input tags for various records
GtInputTag=cms.InputTag("gtDigis") GmtInputTag=cms.InputTag("gmtDigis") GctInputTag=cms.InputTag("gctDigis") CscInputTag=cms.InputTag("gtDigis" "CSC") DtInputTag=cms.InputTag("gtDigis" "DT") RpcbInputTag=cms.InputTag("gtDigis" "RPCb") RpcfInputTag=cms.InputTag("gtDigis" "RPCf") # file name
PatternFileName=cms.string("GT_GMT_patterns.txt") # bunch crossing numbers to write
bx=cms.vint32(0) # header
PatternFileHeader=cms.string("""#GT_GMT_patterns_VD
#
# editors - HB 220606
#
# remarks:
# values in this template are for version VD (same as VB) for the cond-chips of GTL9U (from IVAN)
#
# syntax:
# character "#" indicates a comment line
# header line 1 => hardware of sim- and spy-memories
# header line 2 => hardware location (FPGA-chip) of sim-memories
# header line 3 => channel number of sim-memories (PSB)
# header line 4 => hardware location (FPGA-chip) of spy-memories
# header line 5 => name of patterns
# header line 6 => number of objects (calos, muons) or other declarations
# (header line 7 => only graphics)
# (header line 8 => only text and graphics)
# header line 9 => number of columns, starting with 0
#
# patterns:
# values in column 0 are event numbers (decimal), starting with 0 (synchronisation data)
# patterns for 1024 events (memories of cond-chips on GTL9U can contain only 1024 events) are in this file
# values in columns 1-119 are the hexadecimal patterns, the rightmost digit in a string is LSB
#
# header:
# e |<--------------------------------------------------------------------------PSB/GTL9U(REC)------------------------------------------------------------------------------------------------------------->|<--------------------------------------------------------------------------PSB/GMT(AUF,AUB)--------------------------------------------------------------------------------------------------------------------------------------------------->|<----------------------------------------------------------------GMT REGIONAL MUONs----------------------------------------------------------->|<----GMT(SORT)/GTL9U(REC)----->|<--------------GTL9U(COND)/FDL(ALGO)---------------->|<-----------FDL----------->|
# v |PSB slot13/ch6+7 |PSB slot13/ch4+5 |PSB slot13/ch2+3 |PSB slot13/ch0+1 |PSB slot14/ch6+7 |PSB slot14/ch4+5 |PSB slot14/ch2+3 |PSB slot14/ch0+1 |PSB slot15/ch2+3 |PSB slot15/ch0+1 |PSB slot19/ch6+7 |PSB slot19/ch4+5 |PSB slot19/ch2+3 |PSB slot19/ch0+1 |PSB slot20/ch6+7 |PSB slot20/ch4+5 |PSB slot20/ch2+3 |PSB slot20/ch0+1 |PSB slot21/ch6+7 |PSB slot21/ch4+5 |PSB slot21/ch2+3 |PSB slot21/ch0+1 |GMT INF |GMT INC |GMT IND |GMT INB |GMT SORT |COND1 |COND2 |PSB slot9/ch0+1 |FINOR |
# e |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 |ch6 ch7 ch6 ch7 |ch4 ch5 ch4 ch5 |ch2 ch3 ch2 ch3 |ch0 ch1 ch0 ch1 | | | | | | | |ch0 ch1 ch0 ch1 | |
# n |GTL9U REC1 |GTL9U REC1 |GTL9U REC2 |GTL9U REC2 |GTL9U REC2 |GTL9U REC2 |GTL9U REC3 |GTL9U REC3 |GTL9U REC3 |GTL9U REC3 |GMT AUF |GMT AUF |GMT AUB |GMT AUB |GMT AUF |GMT AUF |GMT AUB |GMT AUB |GMT AUF |GMT AUF |GMT AUB |GMT AUB | | | | |GTL9U REC1 |FDL ALGO |FDL ALGO |FDL ALGO | |
# t |calo1 (ieg) |calo2 (eg) |calo3 (jet) |calo4 (fwdjet) |calo5 (tau) |calo6 (esums) |calo7 (hfbc/etsums)|calo8 (free) |calo9 (totem) |calo10 (free) |MQF4 |MQF3 |MQB2 |MQB1 |MQF8 |MQF7 |MQB6 |MQB5 |MQF12 |MQF11 |MQB10 |MQB9 |RPC forward |CSC |DT |RPC barrel |muon (sorted four) |algo |algo |techtrigger | |
# | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 |45M 45Q 6M 6Q |45M 45Q 6M 6Q |01M 01Q 23M 23Q |01M 01Q 23M 23Q |45M 45Q 6M 6Q |45M 45Q 6M 6Q |01M 01Q 23M 23Q |01M 01Q 23M 23Q |45M 45Q 6M 6Q |45M 45Q 6M 6Q |01M 01Q 23M 23Q |01M 01Q 23M 23Q | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 | 1 2 3 4 |191--160 159--128 127---96|95----64 63----32 31-----0|15-0 47-32 31-16 63-48| |
# | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
# columns: | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
# 0 | 1 2 3 4 | 5 6 7 8 | 9 10 11 12 | 13 14 15 16 | 17 18 19 20 | 21 22 23 24 | 25 26 27 28 | 29 30 31 32 | 33 34 35 36 | 37 38 39 40 | 41 42 43 44 | 45 46 47 48 | 49 50 51 52 | 53 54 55 56 | 57 58 59 60 | 61 62 63 64 | 65 66 67 68 | 69 70 71 72 | 73 74 75 76 | 77 78 79 80 | 81 82 83 84 | 85 86 87 88 | 89 90 91 92 | 93 94 95 96 | 97 98 99 100 | 101 102 103 104 | 105 106 107 108 | 109 110 111 | 112 113 114 | 115 116 117 118|119 |
""") # footer
PatternFileFooter=cms.string("") # A vector of column names to be written for each pattern file line
PatternFileColumns=cms.vstring() # A vector of the lengths (in bits!) of each column
PatternFileLengths=cms.vuint32() # A vector of default values for each column
PatternFileDefaultValues=cms.vuint32() # By default, do not add comments with detailed information
DebugOutput=cms.bool(<false>))<def_stmt>addBlock analyzer name count length default<block_start><for_stmt>i range(1 count+1)<block_start>analyzer.PatternFileColumns.append("%s%d"%(name i))<line_sep>analyzer.PatternFileLengths.append(length)<line_sep>analyzer.PatternFileDefaultValues.append(default)<block_end><block_end><def_stmt>addPSB analyzer name<block_start>addBlock(analyzer name 4 16 0)<block_end><def_stmt>addRegionalMuons analyzer name# regional muons are different - they need to have a default of 0x0000ff00 when
# empty to make input cable disconnects recognizable
<block_start>addBlock(analyzer name 4 32 0x0000ff00)<block_end><def_stmt>addGMTMuons analyzer name<block_start>addBlock(analyzer name 4 26 0)<block_end># set up format:
fields=l1GtPatternGenerator.PatternFileColumns<line_sep>lengths=l1GtPatternGenerator.PatternFileLengths<line_sep>defaults=l1GtPatternGenerator.PatternFileDefaultValues<line_sep># column 1..20: some fairly standard PSBs (calo1 - calo5)
<for_stmt>name ["gctIsoEm" "gctEm" "cenJet" "forJet" "tauJet"]<block_start>addPSB(l1GtPatternGenerator name)<block_end># then the energy sums, which are slightly more complicated
# (calo6)
fields<augadd>["etTotal1" "etMiss1" "etHad1" "etMissPhi1"]<line_sep>lengths<augadd>[16 16 16 16]<line_sep>defaults<augadd>[0 0 0 0]<line_sep># HF bit counts / etsums (which are mangled in the C++ code)
# (calo7)
fields<augadd>["hfPsbValue1_l" "htMiss1" "hfPsbValue1_h" "unknown"]<line_sep>lengths<augadd>[16 16 16 16]<line_sep>defaults<augadd>[0 0 0 0]<line_sep># calo8 - free
addPSB(l1GtPatternGenerator "unknown")<line_sep># calo9 - "totem", currently
addPSB(l1GtPatternGenerator "unknown")<line_sep># calo 10
# BPTX/Castor and TBD data - default to 0xffff to get BPTX triggers matching GT emulator
addBlock(l1GtPatternGenerator "unknown" 4 16 0xffff)<line_sep># 12 more PSBs we don't fill
<for_stmt>i range(12)<block_start>addPSB(l1GtPatternGenerator "unknown")<block_end># regional muons
addRegionalMuons(l1GtPatternGenerator "fwdMuon")<line_sep>addRegionalMuons(l1GtPatternGenerator "cscMuon")<line_sep>addRegionalMuons(l1GtPatternGenerator "dtMuon")<line_sep>addRegionalMuons(l1GtPatternGenerator "brlMuon")<line_sep># global muons
addGMTMuons(l1GtPatternGenerator "gmtMuon")<line_sep># GT stuff
addBlock(l1GtPatternGenerator "gtDecisionExt" 2 32 0)<line_sep>addBlock(l1GtPatternGenerator "gtDecision" 4 32 0)<line_sep># tech triggers: a bit complicated, since we like to mix up
# half-words (see header)
fields<augadd>["gtTechTrigger1_l" "gtTechTrigger2_l" "gtTechTrigger1_h" "gtTechTrigger2_h"]<line_sep>lengths<augadd>[16 16 16 16]<line_sep>defaults<augadd>[0 0 0 0]<line_sep>fields<augadd>["gtFinalOr"]<line_sep>lengths<augadd>[9]<line_sep>defaults<augadd>[0]<line_sep># just to make sure the python magic adds up to the proper output format
<if_stmt>len(fields)<ne>119<block_start><raise>ValueError("Expecting 119 data fields (120 - event number) in pattern file format, got %d!"%len(fields))<block_end># For debugging: Get an overview of your pattern file format
# print(fields)
# print(lengths)
# print(defaults)
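For reference, a standalone sketch (plain Python, independent of CMSSW) of what the `addBlock`/`addPSB` helpers append to the three column vectors:

```python
cols, lens, defs = [], [], []

def add_block(name, count, length, default):
    # mirrors addBlock(): one column per object, e.g. "gctEm1" .. "gctEm4"
    for i in range(1, count + 1):
        cols.append("%s%d" % (name, i))
        lens.append(length)
        defs.append(default)

add_block("gctEm", 4, 16, 0)   # what addPSB(l1GtPatternGenerator, "gctEm") does
print(cols)                    # ['gctEm1', 'gctEm2', 'gctEm3', 'gctEm4']
print(lens, defs)              # [16, 16, 16, 16] [0, 0, 0, 0]
```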
|
<import_stmt>argparse<import_stmt>math<import_from_stmt>typing List Mapping Optional Tuple Any<import_stmt>os<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>time<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.optim.optimizer Optimizer<import_from_stmt>torch.optim.lr_scheduler _LRScheduler<import_from_stmt>torch.nn.modules.loss _Loss<import_from_stmt>torch.utils.data DataLoader<import_stmt>torchvision<import_stmt>torchvision.transforms<as>transforms<import_stmt>yaml<import_from_stmt>archai.common utils<import_from_stmt>archai cifar10_models<def_stmt>train epochs train_dl val_dal net device crit optim sched sched_on_epoch half quiet grad_clip:float<arrow>List[Mapping]<block_start>train_acc,test_acc=0.0 0.0<line_sep>metrics=[]<for_stmt>epoch range(epochs)<block_start>lr=optim.param_groups[0]['lr']<line_sep>train_acc,loss=train_epoch(epoch net train_dl device crit optim sched sched_on_epoch half grad_clip)<line_sep>val_acc=test(net val_dal device half)<if>val_dal<is><not><none><else>math.nan<line_sep>metrics.append({'val_top1':val_acc 'train_top1':train_acc 'lr':lr 'epoch':epoch 'train_loss':loss})<if_stmt><not>quiet<block_start>logging.info(f'train_epoch={epoch}, val_top1={val_acc},'<concat>f' train_top1={train_acc}, lr={lr:.4g}')<block_end><block_end><return>metrics<block_end><def_stmt>optim_sched_orig net epochs<block_start>lr,momentum,weight_decay=0.1 0.9 1.0e-4<line_sep>optim=torch.optim.SGD(net.parameters() lr momentum=momentum weight_decay=weight_decay)<line_sep>logging.info(f'lr={lr}, momentum={momentum}, weight_decay={weight_decay}')<line_sep>sched=torch.optim.lr_scheduler.MultiStepLR(optim milestones=[100 150 200 400 600])<line_sep># resnet original paper
sched_on_epoch=<true><line_sep>logging.info(f'sched_on_epoch={sched_on_epoch}, sched={str(sched)}')<line_sep><return>optim sched sched_on_epoch<block_end><def_stmt>optim_sched_cosine net epochs<block_start>lr,momentum,weight_decay=0.025 0.9 1.0e-4<line_sep>optim=torch.optim.SGD(net.parameters() lr momentum=momentum weight_decay=weight_decay)<line_sep>logging.info(f'lr={lr}, momentum={momentum}, weight_decay={weight_decay}')<line_sep>sched=torch.optim.lr_scheduler.CosineAnnealingLR(optim epochs)<line_sep>sched_on_epoch=<true><line_sep>logging.info(f'sched_on_epoch={sched_on_epoch}, sched={str(sched)}')<line_sep><return>optim sched sched_on_epoch<block_end><def_stmt>get_data datadir:str train_batch_size=128 test_batch_size=4096 cutout=0 train_num_workers=-1 test_num_workers=-1 val_percent=10.0<arrow>Tuple[DataLoader Optional[DataLoader] DataLoader]<block_start><if_stmt>utils.is_debugging()<block_start>train_num_workers=test_num_workers=0<line_sep>logging.info('debugger=true, num_workers=0')<block_end><if_stmt>train_num_workers<le>-1<block_start>train_num_workers=torch.cuda.device_count()<times>4<block_end><if_stmt>test_num_workers<le>-1<block_start>test_num_workers=torch.cuda.device_count()<times>4<block_end>train_transform=cifar10_transform(aug=<true> cutout=cutout)<line_sep>trainset=torchvision.datasets.CIFAR10(root=datadir train=<true> download=<true> transform=train_transform)<line_sep>val_len=int(len(trainset)<times>val_percent/100.0)<line_sep>train_len=len(trainset)-val_len<line_sep>valset=<none><if_stmt>val_len<block_start>trainset,valset=torch.utils.data.random_split(trainset [train_len val_len])<block_end>train_dl=torch.utils.data.DataLoader(trainset batch_size=train_batch_size shuffle=<true> num_workers=train_num_workers pin_memory=<true>)<if_stmt>valset<is><not><none><block_start>val_dl=torch.utils.data.DataLoader(valset batch_size=test_batch_size shuffle=<false> num_workers=test_num_workers pin_memory=<true>)<block_end><else_stmt><block_start>val_dl=<none><block_end>test_transform=cifar10_transform(aug=<false> cutout=0)<line_sep>testset=torchvision.datasets.CIFAR10(root=datadir train=<false> download=<true> transform=test_transform)<line_sep>test_dl=torch.utils.data.DataLoader(testset batch_size=test_batch_size shuffle=<false> num_workers=test_num_workers pin_memory=<true>)<line_sep>logging.info(f'train_len={train_len}, val_len={val_len}, test_len={len(testset)}')<line_sep><return>train_dl val_dl test_dl<block_end><def_stmt>train_epoch epoch net train_dl device crit optim sched sched_on_epoch half grad_clip:float<arrow>Tuple[float float]<block_start>correct,total,loss_total=0 0 0.0<line_sep>net.train()<for_stmt>batch_idx,(inputs targets) enumerate(train_dl)<block_start>inputs=inputs.to(device non_blocking=<true>)<line_sep>targets=targets.to(device non_blocking=<true>)<if_stmt>half<block_start>inputs=inputs.half()<block_end>outputs,loss=train_step(net crit optim sched sched_on_epoch inputs targets grad_clip)<line_sep>loss_total<augadd>loss<line_sep>_,predicted=outputs.max(1)<line_sep>total<augadd>targets.size(0)<line_sep>correct<augadd>predicted.eq(targets).sum().item()<block_end><if_stmt>sched<and>sched_on_epoch<block_start>sched.step()<block_end><return>100.0<times>correct/total loss_total<block_end><def_stmt>train_step net:nn.Module crit:_Loss optim:Optimizer sched:_LRScheduler sched_on_epoch:bool inputs:torch.Tensor targets:torch.Tensor grad_clip:float<arrow>Tuple[torch.Tensor float]<block_start>outputs=net(inputs)<line_sep>loss=crit(outputs 
targets)<line_sep>optim.zero_grad()<line_sep>loss.backward()<line_sep>nn.utils.clip_grad_norm_(net.parameters() grad_clip)<line_sep>optim.step()<if_stmt>sched<and><not>sched_on_epoch<block_start>sched.step()<block_end><return>outputs loss.item()<block_end><def_stmt>test net test_dl device half<arrow>float<block_start>correct,total=0 0<line_sep>net.eval()<with_stmt>torch.no_grad()<block_start><for_stmt>batch_idx,(inputs targets) enumerate(test_dl)<block_start>inputs=inputs.to(device non_blocking=<false>)<line_sep>targets=targets.to(device)<if_stmt>half<block_start>inputs=inputs.half()<block_end>outputs=net(inputs)<line_sep>_,predicted=outputs.max(1)<line_sep>total<augadd>targets.size(0)<line_sep>correct<augadd>predicted.eq(targets).sum().item()<block_end><block_end><return>100.0<times>correct/total<block_end><def_stmt>param_size model:torch.nn.Module<arrow>int<block_start>"""count all parameters excluding auxiliary"""<line_sep><return>sum(v.numel()<for>name,v model.named_parameters()<if>"auxiliary"<not><in>name)<block_end><def_stmt>cifar10_transform aug:bool cutout=0<block_start>MEAN=[0.49139968 0.48215827 0.44653124]<line_sep>STD=[0.24703233 0.24348505 0.26158768]<line_sep>transf=[transforms.ToTensor() transforms.Normalize(MEAN STD)]<if_stmt>aug<block_start>aug_transf=[transforms.RandomCrop(32 padding=4) transforms.RandomHorizontalFlip()]<line_sep>transf=aug_transf+transf<block_end><if_stmt>cutout<g>0# must be after normalization
<block_start>transf<augadd>[CutoutDefault(cutout)]<block_end><return>transforms.Compose(transf)<block_end><class_stmt>CutoutDefault<block_start>"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""<def_stmt>__init__ self length<block_start>self.length=length<block_end><def_stmt>__call__ self img<block_start>h,w=img.size(1) img.size(2)<line_sep>mask=np.ones((h w) np.float32)<line_sep>y=np.random.randint(h)<line_sep>x=np.random.randint(w)<line_sep>y1=np.clip(y-self.length<floordiv>2 0 h)<line_sep>y2=np.clip(y+self.length<floordiv>2 0 h)<line_sep>x1=np.clip(x-self.length<floordiv>2 0 w)<line_sep>x2=np.clip(x+self.length<floordiv>2 0 w)<line_sep>mask[y1:y2 x1:x2]=0.<line_sep>mask=torch.from_numpy(mask)<line_sep>mask=mask.expand_as(img)<line_sep>img<augmul>mask<line_sep><return>img<block_end><block_end><def_stmt>log_metrics expdir:str filename:str metrics test_acc:float args<arrow><none><block_start>print('filename:' f'test_acc: {test_acc}' metrics[-1])<line_sep>results=[('test_acc' test_acc) ('val_acc' metrics[-1]['val_top1']) ('epochs' args.epochs) ('train_batch_size' args.train_batch_size) ('test_batch_size' args.test_batch_size) ('model_name' args.model_name) ('exp_name' args.experiment_name) ('exp_desc' args.experiment_description) ('seed' args.seed) ('devices' utils.cuda_device_names()) ('half' args.half) ('cutout' args.cutout) ('train_acc' metrics[-1]['train_top1']) ('loader_workers' args.loader_workers) ('date' str(time.time())) ]<line_sep>utils.append_csv_file(os.path.join(expdir f'{filename}.tsv') results)<with_stmt>open(os.path.join(expdir f'{filename}.yaml') 'w')<as>f<block_start>yaml.dump(metrics f)<block_end><block_end><def_stmt>create_crit device half<block_start>crit=nn.CrossEntropyLoss().to(device)<if_stmt>half<block_start>crit.half()<block_end><return>crit<block_end><def_stmt>create_model model_name device half<arrow>nn.Module<block_start>model_class=getattr(cifar10_models model_name)<line_sep>net=model_class()<line_sep>logging.info(f'param_size_m={param_size(net):.1e}')<line_sep>net=net.to(device)<if_stmt>half<block_start>net.half()<block_end><return>net<block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser(description='Pytorch cifar training')<line_sep>parser.add_argument('--experiment-name' '-n' default='train_pytorch')<line_sep>parser.add_argument('--experiment-description' '-d' default='Train cifar usin pure PyTorch code')<line_sep>parser.add_argument('--epochs' '-e' type=int default=1)<line_sep>parser.add_argument('--model-name' '-m' default='resnet34')<line_sep>parser.add_argument('--device' default='' help='"cuda" or "cpu" or "" in which case use cuda if available')<line_sep>parser.add_argument('--train-batch-size' '-b' type=int default=128)<line_sep>parser.add_argument('--test-batch-size' type=int default=4096)<line_sep>parser.add_argument('--seed' '-s' type=float default=42)<line_sep>parser.add_argument('--half' type=<lambda>x:x.lower()<eq>'true' nargs='?' 
const=<true> default=<false>)<line_sep>parser.add_argument('--cutout' type=int default=0)<line_sep>parser.add_argument('--grad-clip' type=float default=5.0)<line_sep>parser.add_argument('--datadir' default='' help='where to find dataset files, default is ~/torchvision_data_dir')<line_sep>parser.add_argument('--outdir' default='' help='where to put results, default is ~/logdir')<line_sep>parser.add_argument('--loader-workers' type=int default=-1 help='number of thread/workers for data loader (-1 means auto)')<line_sep>args=parser.parse_args()<if_stmt><not>args.datadir<block_start>args.datadir=os.environ.get('PT_DATA_DIR' '')<or>'~/dataroot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir=os.environ.get('PT_OUTPUT_DIR' '')<if_stmt><not>args.outdir<block_start>args.outdir=os.path.join('~/logdir' 'cifar_testbed' args.experiment_name)<block_end><block_end>expdir=utils.full_path(args.outdir)<line_sep>os.makedirs(expdir exist_ok=<true>)<line_sep>utils.setup_cuda(args.seed)<line_sep>datadir=utils.full_path(args.datadir)<line_sep>os.makedirs(datadir exist_ok=<true>)<line_sep>utils.create_logger(filepath=os.path.join(expdir 'logs.log'))<line_sep># log config for reference
logging.info(f'exp_name="{args.experiment_name}", exp_desc="{args.experiment_description}"')<line_sep>logging.info(f'model_name="{args.model_name}", seed={args.seed}, epochs={args.epochs}')<line_sep>logging.info(f'half={args.half}, cutout={args.cutout}')<line_sep>logging.info(f'datadir="{datadir}"')<line_sep>logging.info(f'expdir="{expdir}"')<line_sep>logging.info(f'train_batch_size={args.train_batch_size}')<if_stmt>args.device<block_start>device=torch.device(args.device)<block_end><else_stmt><block_start>device=torch.device('cuda'<if>torch.cuda.is_available()<else>'cpu')<block_end># load data just before train start so any errors so far is not delayed
train_dl,val_dl,test_dl=get_data(datadir=datadir train_batch_size=args.train_batch_size test_batch_size=args.test_batch_size train_num_workers=args.loader_workers test_num_workers=args.loader_workers cutout=args.cutout)<line_sep>epochs=args.epochs<line_sep>net=create_model(args.model_name device args.half)<line_sep>crit=create_crit(device args.half)<line_sep>optim,sched,sched_on_epoch=optim_sched_orig(net epochs)<line_sep>train_metrics=train(epochs train_dl val_dl net device crit optim sched sched_on_epoch args.half <false> grad_clip=args.grad_clip)<line_sep>test_acc=test(net test_dl device args.half)<line_sep>log_metrics(expdir 'train_metrics' train_metrics test_acc args)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<import_stmt>argparse<import_stmt>os<import_stmt>time<import_stmt>numpy<as>np<import_stmt>megengine<as>mge<import_stmt>megengine.distributed<as>dist<import_stmt>megengine.functional<as>F<import_from_stmt>megengine.autodiff GradManager<import_from_stmt>megengine.data DataLoader Infinite RandomSampler dataset<import_from_stmt>megengine.data transform<as>T<import_from_stmt>megengine.optimizer SGD<import_from_stmt>official.vision.segmentation.tools.utils AverageMeter get_config_info import_from_file<line_sep>logger=mge.get_logger(__name__)<line_sep>logger.setLevel("INFO")<line_sep>mge.device.set_prealloc_config(1024 1024 256<times>1024<times>1024 4.0)<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("-f" "--file" default="net.py" type=str help="net description file")<line_sep>parser.add_argument("-n" "--devices" type=int default=8 help="batch size for training")<line_sep>parser.add_argument("-d" "--dataset_dir" type=str default="/data/datasets" )<line_sep>parser.add_argument("-r" "--resume" type=str default=<none> help="resume model file")<line_sep>args=parser.parse_args()<line_sep># ------------------------ begin training -------------------------- #
logger.info("Device Count = %d" args.devices)<line_sep>log_dir="log-of-{}".format(os.path.basename(args.file).split(".")[0])<if_stmt><not>os.path.isdir(log_dir)<block_start>os.makedirs(log_dir)<block_end><if_stmt>args.devices<g>1<block_start>trainer=dist.launcher(worker n_gpus=args.devices)<line_sep>trainer(args)<block_end><else_stmt><block_start>worker(args)<block_end><block_end># pylint: disable=too-many-branches
<def_stmt>worker args<block_start>current_network=import_from_file(args.file)<line_sep>model=current_network.Net(current_network.Cfg())<line_sep>model.train()<if_stmt>dist.get_rank()<eq>0<block_start>logger.info(get_config_info(model.cfg))<line_sep>logger.info(repr(model))<block_end>backbone_params=[]<line_sep>head_params=[]<for_stmt>name,param model.named_parameters()<block_start><if_stmt>"backbone"<in>name<block_start>backbone_params.append(param)<block_end><else_stmt><block_start>head_params.append(param)<block_end><block_end>opt=SGD([{"params":backbone_params "lr":model.cfg.learning_rate<times>dist.get_world_size()<times>0.1 } {"params":head_params} ] lr=model.cfg.learning_rate<times>dist.get_world_size() momentum=model.cfg.momentum weight_decay=model.cfg.weight_decay )<line_sep>gm=GradManager()<if_stmt>dist.get_world_size()<g>1<block_start>gm.attach(model.parameters() callbacks=[dist.make_allreduce_cb("mean" dist.WORLD)])<block_end><else_stmt><block_start>gm.attach(model.parameters())<block_end>cur_epoch=0<if_stmt>args.resume<is><not><none><block_start>pretrained=mge.load(args.resume)<line_sep>cur_epoch=pretrained["epoch"]+1<line_sep>model.load_state_dict(pretrained["state_dict"])<line_sep>opt.load_state_dict(pretrained["opt"])<if_stmt>dist.get_rank()<eq>0<block_start>logger.info("load success: epoch %d" cur_epoch)<block_end><block_end><if_stmt>dist.get_world_size()<g>1<block_start>dist.bcast_list_(model.parameters())# sync parameters
dist.bcast_list_(model.buffers())<block_end># sync buffers
<if_stmt>dist.get_rank()<eq>0<block_start>logger.info("Prepare dataset")<block_end>train_loader=iter(build_dataloader(model.cfg.batch_size args.dataset_dir model.cfg))<for_stmt>epoch range(cur_epoch model.cfg.max_epoch)<block_start>train_one_epoch(model train_loader opt gm epoch)<if_stmt>dist.get_rank()<eq>0<block_start>save_path="log-of-{}/epoch_{}.pkl".format(os.path.basename(args.file).split(".")[0] epoch)<line_sep>mge.save({"epoch":epoch "state_dict":model.state_dict() "opt":opt.state_dict()} save_path)<line_sep>logger.info("dump weights to %s" save_path)<block_end><block_end><block_end><def_stmt>train_one_epoch model data_queue opt gm epoch<block_start><def_stmt>train_func data label<block_start><with_stmt>gm<block_start>pred=model(data)<line_sep>loss=cross_entropy(pred label ignore_label=model.cfg.ignore_label)<line_sep>gm.backward(loss)<block_end>opt.step().clear_grad()<line_sep><return>loss<block_end>meter=AverageMeter(record_len=1)<line_sep>time_meter=AverageMeter(record_len=2)<line_sep>log_interval=model.cfg.log_interval<line_sep>tot_step=model.cfg.nr_images_epoch<floordiv>(model.cfg.batch_size<times>dist.get_world_size())<for_stmt>step range(tot_step)<block_start>adjust_learning_rate(opt epoch step tot_step model.cfg)<line_sep>data_tik=time.time()<line_sep>inputs,labels=next(data_queue)<line_sep>labels=np.squeeze(labels axis=1).astype(np.int32)<line_sep>data_tok=time.time()<line_sep>tik=time.time()<line_sep>loss=train_func(mge.tensor(inputs) mge.tensor(labels))<line_sep>tok=time.time()<line_sep>time_meter.update([tok-tik data_tok-data_tik])<if_stmt>dist.get_rank()<eq>0<block_start>info_str="e%d, %d/%d, lr:%f, "<line_sep>loss_str=", ".join(["{}:%f".format(loss)<for>loss ["loss"]])<line_sep>time_str=", train_time:%.3fs, data_time:%.3fs"<line_sep>log_info_str=info_str+loss_str+time_str<line_sep>meter.update([loss.numpy()<for>loss [loss]])<if_stmt>step%log_interval<eq>0<block_start>logger.info(log_info_str epoch step tot_step opt.param_groups[1]["lr"] *meter.average() *time_meter.average())<line_sep>meter.reset()<line_sep>time_meter.reset()<block_end><block_end><block_end><block_end><def_stmt>adjust_learning_rate optimizer epoch step tot_step cfg<block_start>max_iter=cfg.max_epoch<times>tot_step<line_sep>cur_iter=epoch<times>tot_step+step<line_sep>cur_lr=cfg.learning_rate<times>(1-cur_iter/(max_iter+1))<power>0.9<line_sep>optimizer.param_groups[0]["lr"]=cur_lr<times>0.1<line_sep>optimizer.param_groups[1]["lr"]=cur_lr<block_end><def_stmt>cross_entropy pred label axis=1 ignore_label=255<block_start>mask=label<ne>ignore_label<line_sep>pred=pred.transpose(0 2 3 1)<line_sep><return>F.loss.cross_entropy(pred[mask] label[mask] axis)<block_end><def_stmt>build_dataloader batch_size dataset_dir cfg<block_start><if_stmt>cfg.dataset<eq>"VOC2012"<block_start>train_dataset=dataset.PascalVOC(dataset_dir cfg.data_type order=["image" "mask"])<block_end><elif_stmt>cfg.dataset<eq>"Cityscapes"<block_start>train_dataset=dataset.Cityscapes(dataset_dir "train" mode='gtFine' order=["image" "mask"])<block_end><else_stmt><block_start><raise>ValueError("Unsupported dataset {}".format(cfg.dataset))<block_end>train_sampler=Infinite(RandomSampler(train_dataset batch_size drop_last=<true>))<line_sep>train_dataloader=DataLoader(train_dataset sampler=train_sampler transform=T.Compose(transforms=[T.RandomHorizontalFlip(0.5) T.RandomResize(scale_range=(0.5 2)) T.RandomCrop(output_size=(cfg.img_height cfg.img_width) padding_value=[0 0 0] padding_maskvalue=255 ) T.Normalize(mean=cfg.img_mean std=cfg.img_std) T.ToMode() 
] order=["image" "mask"] ) num_workers=2 )<line_sep><return>train_dataloader<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_from_stmt>typing Dict Tuple<def_stmt>get_colormap <arrow>Dict[str Tuple[int int int]]<block_start>"""
Get the defined colormap.
:return: A mapping from the class names to the respective RGB values.
"""<line_sep>classname_to_color={# RGB.
"noise":(0 0 0) # Black.
"animal":(70 130 180) # Steelblue
"human.pedestrian.adult":(0 0 230) # Blue
"human.pedestrian.child":(135 206 235) # Skyblue,
"human.pedestrian.construction_worker":(100 149 237) # Cornflowerblue
"human.pedestrian.personal_mobility":(219 112 147) # Palevioletred
"human.pedestrian.police_officer":(0 0 128) # Navy,
"human.pedestrian.stroller":(240 128 128) # Lightcoral
"human.pedestrian.wheelchair":(138 43 226) # Blueviolet
"movable_object.barrier":(112 128 144) # Slategrey
"movable_object.debris":(210 105 30) # Chocolate
"movable_object.pushable_pullable":(105 105 105) # Dimgrey
"movable_object.trafficcone":(47 79 79) # Darkslategrey
"static_object.bicycle_rack":(188 143 143) # Rosybrown
"vehicle.bicycle":(220 20 60) # Crimson
"vehicle.bus.bendy":(255 127 80) # Coral
"vehicle.bus.rigid":(255 69 0) # Orangered
"vehicle.car":(255 158 0) # Orange
"vehicle.construction":(233 150 70) # Darksalmon
"vehicle.emergency.ambulance":(255 83 0) "vehicle.emergency.police":(255 215 0) # Gold
"vehicle.motorcycle":(255 61 99) # Red
"vehicle.trailer":(255 140 0) # Darkorange
"vehicle.truck":(255 99 71) # Tomato
"flat.driveable_surface":(0 207 191) # nuTonomy green
"flat.other":(175 0 75) "flat.sidewalk":(75 0 75) "flat.terrain":(112 180 60) "static.manmade":(222 184 135) # Burlywood
"static.other":(255 228 196) # Bisque
"static.vegetation":(0 175 0) # Green
"vehicle.ego":(255 240 245)}<line_sep><return>classname_to_color<block_end> |
<import_stmt>sys<import_stmt>builtins<import_from_stmt>dataclasses MISSING<line_sep>## STUFF FROM python/lib/dataclasses.py
<def_stmt>_set_new_attribute cls name value# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
<block_start><if_stmt>name<in>cls.__dict__<block_start><return><true><block_end>setattr(cls name value)<line_sep><return><false><block_end><def_stmt>_create_fn name args body * globals=<none> locals=<none> return_type=MISSING doc=<none># Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
<block_start><if_stmt>locals<is><none><block_start>locals={}<block_end><if_stmt>"BUILTINS"<not><in>locals<block_start>locals["BUILTINS"]=builtins<block_end>return_annotation=""<if_stmt>return_type<is><not>MISSING<block_start>locals["_return_type"]=return_type<line_sep>return_annotation="->_return_type"<block_end>args=",".join(args)<line_sep>body="\n".join(f"  {b}"<for>b body)<line_sep># Compute the text of the entire function.
txt=f" def {name}({args}){return_annotation}:\n{body}"<line_sep>local_vars=", ".join(locals.keys())<line_sep>txt=f"def __create_fn__({local_vars}):\n{txt}\n return {name}"<line_sep>ns={}<line_sep>exec(txt globals ns)# noqa: W0122
fn=ns["__create_fn__"](**locals)<if_stmt>doc<is><not><none><block_start>fn.__doc__=doc<block_end><return>fn<block_end><def_stmt>get_class_globals clz<block_start><if_stmt>clz.__module__<in>sys.modules<block_start>globals=sys.modules[clz.__module__].__dict__<block_end><else_stmt><block_start>globals={}<block_end><return>globals<block_end> |
<import_from_future_stmt> print_function<import_stmt>sys<line_sep>sys.path.insert(1 "../../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<import_from_stmt>h2o.estimators.psvm H2OSupportVectorMachineEstimator<def_stmt>svm_svmguide3 <block_start>svmguide3=h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale.svm"))<line_sep>svmguide3_test=h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale_test.svm"))<line_sep># parameters taken from libsvm guide
svm_tuned=H2OSupportVectorMachineEstimator(hyper_param=128 gamma=0.125 disable_training_metrics=<false>)<line_sep>svm_tuned.train(y="C1" training_frame=svmguide3 validation_frame=svmguide3_test)<line_sep>accuracy=svm_tuned.model_performance(valid=<true>).accuracy()[0][1]<assert_stmt>accuracy<ge>0.80<block_end># guide has 87% - this just shows it is not completely off
<if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(svm_svmguide3)<block_end><else_stmt><block_start>svm_svmguide3()<block_end> |
<import_from_stmt>util *<line_sep>send_gdb('c')<line_sep>expect_rr('EXIT-SUCCESS')<line_sep>expect_gdb('SIGSEGV')<line_sep>send_gdb('reverse-stepi')<line_sep>expect_gdb('SIGSEGV')<line_sep>send_gdb('reverse-stepi')<line_sep>expect_gdb('start_thread')<line_sep>ok()<line_sep> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_stmt>torch<import_from_stmt>torch.utils.data Dataset<import_from_stmt>.builder DATASETS<line_sep>@DATASETS.register_module()<class_stmt>QuickTestImageDataset(Dataset)<block_start>"""Dataset for quickly testing the correctness.
Args:
size (tuple[int]): The size of the images. Defaults to `None`.
"""<def_stmt>__init__ self *args size=<none> **kwargs<block_start>super().__init__()<line_sep>self.size=size<line_sep>self.img_tensor=torch.randn(3 self.size[0] self.size[1])<block_end><def_stmt>__len__ self<block_start><return>10000<block_end><def_stmt>__getitem__ self idx<block_start><return>dict(real_img=self.img_tensor)<block_end><block_end> |
<import_stmt>http.server<import_stmt>sys<class_stmt>RequestHandler(http.server.BaseHTTPRequestHandler)<block_start><def_stmt>do_HEAD self<block_start><if_stmt>self.path.startswith("/get-my-path/")<block_start>self.send_response(200)<line_sep>self.send_header("Content-Type" "text/plain")<line_sep>self.end_headers()<block_end><elif_stmt>self.path<eq>"/"<block_start>self.send_response(200)<line_sep>self.send_header("Content-Type" "text/plain")<line_sep>self.end_headers()<block_end><else_stmt><block_start>self.send_response(404)<line_sep>self.send_header("Content-Type" "text/plain")<line_sep>self.end_headers()<block_end><block_end><def_stmt>do_GET self<block_start>self.do_HEAD()<if_stmt>self.path.startswith("/get-my-path/")<block_start>self.wfile.write(b'/'+self.path.split('/' maxsplit=2)[2].encode())<block_end><elif_stmt>self.path<eq>"/"<block_start>self.wfile.write(b"OK")<block_end><block_end><block_end>httpd=http.server.HTTPServer(("0.0.0.0" int(sys.argv[1])) RequestHandler)<line_sep>httpd.serve_forever()<line_sep> |
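A minimal client-side sketch against the server above, assuming it was started on port 8000 (e.g. `python server.py 8000`; the filename is hypothetical):

```python
import urllib.request

print(urllib.request.urlopen("http://127.0.0.1:8000/").read())                      # b'OK'
print(urllib.request.urlopen("http://127.0.0.1:8000/get-my-path/foo/bar").read())   # b'/foo/bar'
```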
<import_from_stmt>._predictions AirportOnTime<line_sep>__all__=['AirportOnTime']<line_sep> |
"""Platform for sensor integration."""<import_from_future_stmt> annotations<import_from_stmt>collections.abc Callable<import_from_stmt>dataclasses dataclass<import_from_stmt>typing cast<import_from_stmt>geocachingapi.models GeocachingStatus<import_from_stmt>homeassistant.components.sensor SensorEntity SensorEntityDescription<import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.helpers.device_registry DeviceEntryType<import_from_stmt>homeassistant.helpers.entity DeviceInfo<import_from_stmt>homeassistant.helpers.entity_platform AddEntitiesCallback<import_from_stmt>homeassistant.helpers.update_coordinator CoordinatorEntity<import_from_stmt>.const DOMAIN<import_from_stmt>.coordinator GeocachingDataUpdateCoordinator<line_sep>@dataclass<class_stmt>GeocachingRequiredKeysMixin<block_start>"""Mixin for required keys."""<line_sep>value_fn:Callable[[GeocachingStatus] str|int|<none>]<block_end>@dataclass<class_stmt>GeocachingSensorEntityDescription(SensorEntityDescription GeocachingRequiredKeysMixin)<block_start>"""Define Sensor entity description class."""<block_end>SENSORS:tuple[GeocachingSensorEntityDescription <ellipsis>]=(GeocachingSensorEntityDescription(key="find_count" name="Total finds" icon="mdi:notebook-edit-outline" native_unit_of_measurement="caches" value_fn=<lambda>status:status.user.find_count ) GeocachingSensorEntityDescription(key="hide_count" name="Total hides" icon="mdi:eye-off-outline" native_unit_of_measurement="caches" entity_registry_visible_default=<false> value_fn=<lambda>status:status.user.hide_count ) GeocachingSensorEntityDescription(key="favorite_points" name="Favorite points" icon="mdi:heart-outline" native_unit_of_measurement="points" entity_registry_visible_default=<false> value_fn=<lambda>status:status.user.favorite_points ) GeocachingSensorEntityDescription(key="souvenir_count" name="Total souvenirs" icon="mdi:license" native_unit_of_measurement="souvenirs" value_fn=<lambda>status:status.user.souvenir_count ) GeocachingSensorEntityDescription(key="awarded_favorite_points" name="Awarded favorite points" icon="mdi:heart" native_unit_of_measurement="points" entity_registry_visible_default=<false> value_fn=<lambda>status:status.user.awarded_favorite_points ) )<async_keyword><def_stmt>async_setup_entry hass:HomeAssistant entry:ConfigEntry async_add_entities:AddEntitiesCallback<arrow><none><block_start>"""Set up a Geocaching sensor entry."""<line_sep>coordinator=hass.data[DOMAIN][entry.entry_id]<line_sep>async_add_entities(GeocachingSensor(coordinator description)<for>description SENSORS)<block_end><class_stmt>GeocachingSensor(CoordinatorEntity[GeocachingDataUpdateCoordinator] SensorEntity)<block_start>"""Representation of a Sensor."""<line_sep>entity_description:GeocachingSensorEntityDescription<def_stmt>__init__ self coordinator:GeocachingDataUpdateCoordinator description:GeocachingSensorEntityDescription <arrow><none><block_start>"""Initialize the Geocaching sensor."""<line_sep>super().__init__(coordinator)<line_sep>self.entity_description=description<line_sep>self._attr_name=(f"Geocaching {coordinator.data.user.username} {description.name}")<line_sep>self._attr_unique_id=(f"{coordinator.data.user.reference_code}_{description.key}")<line_sep>self._attr_device_info=DeviceInfo(name=f"Geocaching {coordinator.data.user.username}" identifiers={(DOMAIN cast(str coordinator.data.user.reference_code))} entry_type=DeviceEntryType.SERVICE manufacturer="Groundspeak, Inc." 
)<block_end>@property<def_stmt>native_value self<arrow>str|int|<none><block_start>"""Return the state of the sensor."""<line_sep><return>self.entity_description.value_fn(self.coordinator.data)<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('kannel' '0001_initial') ]<line_sep>operations=[migrations.AlterField(model_name='deliveryreport' name='message_id' field=models.CharField(max_length=255 verbose_name='Message ID') preserve_default=<true> ) migrations.AlterField(model_name='deliveryreport' name='sms_id' field=models.CharField(max_length=36 verbose_name='SMS ID') preserve_default=<true> ) migrations.AlterField(model_name='deliveryreport' name='smsc' field=models.CharField(max_length=255 verbose_name='SMSC') preserve_default=<true> ) migrations.AlterField(model_name='deliveryreport' name='status' field=models.SmallIntegerField(choices=[(1 'Delivery Success') (2 'Delivery Failure') (4 'Message Buffered') (8 'SMSC Submit') (16 'SMSC Reject')]) preserve_default=<true> ) ]<block_end> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>concurrent futures<import_stmt>os<import_stmt>pickle<import_from_stmt>absl flags<import_from_stmt>absl logging<import_stmt>gym<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>tf_agents.environments suite_mujoco<import_from_stmt>tf_agents.specs array_spec<line_sep>flags.DEFINE_integer('checkpoint_iterations' 50 'Periodicity of checkpoints.')<line_sep>flags.DEFINE_integer('eval_iterations' 50 'Periodicity of evaluations.')<line_sep>flags.DEFINE_integer('num_evals' 10 'Number of evaluations.')<line_sep>FLAGS=flags.FLAGS<line_sep>_CHECKPOINT_FILENAME='model.ckpt'<def_stmt>get_state_and_action_specs gym_env action_bounds=<none><block_start>"""Returns state and action specs for a Gym environment.
Args:
gym_env: gym.core.Env. A Gym environment.
action_bounds: list of strings. Min and max values in string for action
variables.
Returns:
(BoundedArraySpec, BoundedArraySpec). The first is a state spec and the
second is a action spec.
"""<if_stmt>isinstance(gym_env.observation_space gym.spaces.Box)<block_start>state_spec=array_spec.BoundedArraySpec(shape=gym_env.observation_space.shape dtype=gym_env.observation_space.dtype minimum=gym_env.observation_space.low maximum=gym_env.observation_space.high)<block_end><else_stmt><block_start><raise>NotImplementedError(type(gym_env.observation_space))<block_end><if_stmt>action_bounds<block_start><assert_stmt>len(action_bounds)<eq>2<line_sep>action_min=np.tile(float(action_bounds[0]) gym_env.action_space.shape)<line_sep>action_max=np.tile(float(action_bounds[1]) gym_env.action_space.shape)<block_end><else_stmt><block_start>action_min=gym_env.action_space.low<line_sep>action_max=gym_env.action_space.high<block_end><if_stmt>isinstance(gym_env.action_space gym.spaces.Box)<block_start>action_spec=array_spec.BoundedArraySpec(shape=gym_env.action_space.shape dtype=gym_env.action_space.dtype minimum=action_min maximum=action_max)<block_end><else_stmt><block_start><raise>NotImplementedError(type(gym_env.action_space))<block_end><return>state_spec action_spec<block_end><def_stmt>create_env env_name<block_start>"""Creates Environment."""<if_stmt>env_name<eq>'Pendulum'<block_start>env=gym.make('Pendulum-v0')<block_end><elif_stmt>env_name<eq>'Hopper'<block_start>env=suite_mujoco.load('Hopper-v2')<block_end><elif_stmt>env_name<eq>'Walker2D'<block_start>env=suite_mujoco.load('Walker2d-v2')<block_end><elif_stmt>env_name<eq>'HalfCheetah'<block_start>env=suite_mujoco.load('HalfCheetah-v2')<block_end><elif_stmt>env_name<eq>'Ant'<block_start>env=suite_mujoco.load('Ant-v2')<block_end><elif_stmt>env_name<eq>'Humanoid'<block_start>env=suite_mujoco.load('Humanoid-v2')<block_end><else_stmt><block_start><raise>ValueError('Unsupported environment: %s'%env_name)<block_end><return>env<block_end><def_stmt>_env_reset env<block_start><if_stmt>hasattr(env 'time_step_spec')<block_start><return>env.reset().observation<block_end><else_stmt><block_start><return>env.reset()<block_end><block_end><def_stmt>_env_step env action<block_start><if_stmt>hasattr(env 'time_step_spec')<block_start>ts=env.step(action)<line_sep><return>ts.observation ts.reward env.done env.get_info()<block_end><else_stmt><block_start><return>env.step(action)<block_end><block_end><def_stmt>warm_up_replay_memory session behavior_policy time_out discount_factor replay_memory# The number of events in an epsidoe could be less than the maximum episode
# length (i.e., time_out) when the environment has a termination state.
<block_start>min_replay_memory_size=FLAGS.batch_size<times>FLAGS.train_steps_per_iteration<while_stmt>replay_memory.size<l>min_replay_memory_size<block_start>num_events=min_replay_memory_size-replay_memory.size<line_sep>num_episodes=int(num_events/time_out)+1<line_sep>collect_experience_parallel(num_episodes session behavior_policy time_out discount_factor replay_memory)<block_end><block_end><def_stmt>collect_experience_parallel num_episodes session behavior_policy time_out discount_factor replay_memory collect_init_state_step=<false><block_start>"""Executes threads for data collection."""<line_sep>old_size=replay_memory.size<if_stmt>num_episodes<g>1<block_start><with_stmt>futures.ThreadPoolExecutor(max_workers=FLAGS.collect_experience_parallelism)<as>executor<block_start><for_stmt>_ range(num_episodes)<block_start>executor.submit(collect_experience session behavior_policy time_out discount_factor replay_memory collect_init_state_step)<block_end><block_end><block_end><else_stmt><block_start>collect_experience(session behavior_policy time_out discount_factor replay_memory collect_init_state_step)<block_end><return>replay_memory.size-old_size<block_end><def_stmt>collect_experience session behavior_policy time_out discount_factor replay_memory collect_init_state_step=<false><block_start>"""Adds experiences into replay memory.
Generates an episode, computes Q targets for state and action pairs in the
episode, and adds them into the replay memory.
"""<with_stmt>session.as_default()<block_start><with_stmt>session.graph.as_default()<block_start>env=create_env(FLAGS.env_name)<line_sep>episode,_,_=_collect_episode(env time_out discount_factor behavior_policy collect_init_state_step)<line_sep>replay_memory.extend(episode)<if_stmt>hasattr(env 'close')<block_start>env.close()<block_end><block_end><block_end><block_end><def_stmt>_collect_episode env time_out discount_factor behavior_policy collect_init_state_step=<false><block_start>"""Collects episodes of trajectories by following a behavior policy."""<line_sep>episode=[]<line_sep>episode_lengths=[]<line_sep>episode_rewards=[]<line_sep>state=_env_reset(env)<line_sep>init_state=_env_reset(env)<line_sep>done=<false><line_sep>episode_step_count=0<line_sep>e_reward=0<for_stmt>_ range(time_out)# First, sample an action
<block_start>action=behavior_policy.action(state use_action_function=<true>)<if_stmt>action<is><none><block_start><break><block_end>next_state,reward,done,info=_env_step(env action)<line_sep>reward=reward<if><not>done<else>0.0<line_sep># Save the experience to our buffer
<if_stmt>collect_init_state_step<block_start>episode.append([init_state state action reward next_state episode_step_count done info])<block_end><else_stmt><block_start>episode.append([state action reward next_state done info])<block_end># update state, e_reward and step count
state=next_state<if_stmt>discount_factor<l>1<block_start>e_reward<augadd>(discount_factor<power>episode_step_count)<times>reward<block_end><else_stmt><block_start>e_reward<augadd>reward<block_end>episode_step_count<augadd>1<if_stmt>done<block_start><break><block_end><block_end><if_stmt>episode_step_count<g>0<block_start>episode_lengths.append(episode_step_count)<line_sep>episode_rewards.append(e_reward)<block_end><return>(episode episode_lengths episode_rewards)<block_end><def_stmt>periodic_updates iteration train_step replay_memories greedy_policy saver sess time_out use_action_function=<true> tf_summary=<none><block_start>"""Evaluates the algorithm."""<if_stmt>(FLAGS.checkpoint_dir<and>FLAGS.checkpoint_iterations<and>iteration%FLAGS.checkpoint_iterations<eq>0)<block_start>logging.info('Iteration: %d, writing checkpoints..' iteration)<if_stmt><not>tf.gfile.Exists(FLAGS.checkpoint_dir)<block_start>tf.gfile.MakeDirs(FLAGS.checkpoint_dir)<block_end>checkpoint_file=os.path.join(FLAGS.checkpoint_dir _CHECKPOINT_FILENAME)<line_sep>saver.save(sess checkpoint_file global_step=train_step write_meta_graph=<false>)<for_stmt>replay_memory replay_memories<block_start>replay_memory.save(FLAGS.checkpoint_dir delete_old=<true>)<block_end>logging.info('Iteration: %d, completed writing checkpoints.' iteration)<block_end><if_stmt>FLAGS.eval_iterations<and>iteration%FLAGS.eval_iterations<eq>0<block_start>logging.info('Iteration: %d, evaluating the model..' iteration)<line_sep>scores=[]<line_sep>action_magnitudes=[]<line_sep>episode_lens=[]<line_sep>future_list=[]<with_stmt>futures.ThreadPoolExecutor(max_workers=FLAGS.num_evals)<as>executor<block_start><for_stmt>_ range(FLAGS.num_evals)<block_start>future_list.append(executor.submit(_evaluate_model time_out greedy_policy use_action_function=use_action_function render=<false>))<block_end><for_stmt>future futures.as_completed(future_list)<block_start>score,action_magnitude,episode_len=future.result()<line_sep>scores.append(score)<line_sep>action_magnitudes.append(action_magnitude)<line_sep>episode_lens.append(episode_len)<block_end><block_end>avg_score=np.mean(scores)<line_sep>avg_action_magitude=np.mean(action_magnitudes)<line_sep>avg_episode_len=np.mean(episode_lens)<line_sep>logging.info('Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, '<concat>'avg_action_magnitude: %.3f' iteration avg_score avg_episode_len avg_action_magitude)<if_stmt>tf_summary<block_start>tf_summary.value.extend([tf.Summary.Value(tag='avg_score' simple_value=avg_score) tf.Summary.Value(tag='avg_action_magnitude' simple_value=avg_action_magitude) tf.Summary.Value(tag='avg_episode_len' simple_value=avg_episode_len)])<block_end><block_end><block_end><def_stmt>_evaluate_model time_out greedy_policy use_action_function=<false> render=<false><block_start>"""Evaluates the model."""<line_sep>env=create_env(FLAGS.env_name)<line_sep>state=_env_reset(env)<line_sep>total_reward=0.0<line_sep>total_action_magnitude=0.0<line_sep>episode_len=0<for_stmt>_ range(time_out)<block_start><if_stmt>render<block_start>env.render()<block_end>action=greedy_policy.action(np.reshape(state [1 -1]) use_action_function)<if_stmt>action<is><none><block_start><break><block_end>next_state,reward,done,_=_env_step(env action)<line_sep>state=next_state<line_sep>total_reward<augadd>reward<if_stmt>greedy_policy.continuous_action<block_start>total_action_magnitude<augadd>np.linalg.norm(action np.inf)<block_end>episode_len<augadd>1<if_stmt>done<block_start><break><block_end><block_end><return>total_reward 
total_action_magnitude/episode_len episode_len<block_end><def_stmt>save_hparam_config dict_to_save config_dir<block_start>"""Saves config file of hparam."""<line_sep>filename=os.path.join(config_dir 'hparam.pickle')<line_sep>print('Saving results to %s'%filename)<if_stmt><not>tf.gfile.Exists(config_dir)<block_start>tf.gfile.MakeDirs(config_dir)<block_end><with_stmt>tf.gfile.GFile(filename 'w')<as>f<block_start>pickle.dump(dict_to_save f protocol=2)<block_end><block_end><def_stmt>action_projection action action_spec softmax=<false><block_start>"""Projects action tensor onto a bound."""<if_stmt>isinstance(action np.ndarray)<block_start><if_stmt>softmax<block_start>e_x=np.exp(action-np.max(action axis=1))<line_sep><return>e_x/np.sum(e_x axis=1)<block_end><else_stmt><block_start><return>np.minimum(action_spec.maximum np.maximum(action_spec.minimum action))<block_end><block_end><else_stmt># TF version
<block_start><if_stmt>softmax<block_start><return>tf.nn.softmax(action axis=1)<block_end><else_stmt><block_start><return>tf.minimum(action_spec.maximum tf.maximum(action_spec.minimum action))<block_end><block_end><block_end><def_stmt>create_placeholders_for_q_net tf_vars<block_start>"""Creates placeholders for feeding values to TF variables.
Args:
tf_vars: list. A list of TF variables. These are variables for a neural
network approximating a Q function.
Returns:
dict. A dictionary mapping a string to a tf.placeholder.
"""<line_sep>ph_dict={}<for_stmt>var tf_vars<block_start>ph_dict['{}_ph'.format(var.name)]=tf.placeholder(dtype=var.dtype shape=var.shape)<block_end><return>ph_dict<block_end><def_stmt>build_dummy_q_net state action ph_dict q_net_vars<block_start>"""Builds a dummy Q network.
This function builds a neural network where parameters are given by
placeholders.
Args:
state: TF Tensor. State tensor.
action: TF Tensor. Action tensor.
ph_dict: dict. A dictionary mapping a TF variable's name to a
tf.placeholder. There is one placeholder for each variable in
`q_net_vars`.
q_net_vars: list. A list of TF variables. The list should have even number
of variables. One for weights and other for bias for each layer of a
neural network.
Returns:
TF Tensor. Output tensor of a Q network.
"""<assert_stmt>bool(q_net_vars)<and>len(q_net_vars)%2<eq>0<line_sep>net=tf.concat([state action] axis=1)<line_sep># Specific for MLP
<for_stmt>itr,var enumerate(q_net_vars)<block_start><if_stmt>itr%2<eq>0# even itr, multiplicative weights
<block_start>net=tf.einsum('ij,jk->ik' net ph_dict['{}_ph'.format(var.name)])<block_end><else_stmt># odd itr, additive weights
<block_start>net=tf.nn.bias_add(net ph_dict['{}_ph'.format(var.name)])<line_sep># Output layer doesn't have an activation function.
<if_stmt>itr<l>len(q_net_vars)-1<block_start>net=tf.nn.relu(net)<block_end><block_end><block_end><return>net<block_end><def_stmt>make_tf_summary_histogram values num_bins=10<block_start>"""Constructs a tf Summary of type histogram from a np array of values.
Args:
values: list or np.array.
num_bins: int. Number of histogram bins.
Returns:
tf.HistogramProto.
"""<line_sep>values=np.reshape(values [-1])<line_sep>counts,limits=np.histogram(values bins=num_bins)<line_sep><return>tf.HistogramProto(min=np.amin(values) max=np.amax(values) num=values.size sum=np.sum(values) sum_squares=np.sum(values<power>2) bucket_limit=limits.tolist()[1:] bucket=counts.tolist())<block_end> |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions used by multiple scripts."""<import_stmt>os<def_stmt>GetFontList path exts negate=<false><block_start>"""Recursively gets the list of files that from path such that."""<line_sep># negate = False: files that match one of the extensions in exts.
# negate = True: files that match no extension in exts.
paths=[]<line_sep># for root, dirs, files in os.walk(path): makes the lint tool unhappy
# because of dirs being unused :(
<for_stmt>entry os.walk(path)<block_start>root=entry[0]<line_sep>files=entry[2]<for_stmt>path files<block_start>has_ext_list=map(<lambda>ext:path[-len(ext):]<eq>ext exts)<line_sep>result=reduce(<lambda>a h:a<or>h has_ext_list <false>)<line_sep># normal: we want to include a file that matches at least one extension
# negated: we want to include a file that matches no extension
<if_stmt>negate<ne>result<block_start>paths.append(os.path.join(root path))<block_end><block_end><block_end><return>paths<block_end><def_stmt>GetLevelList path max_level=1 negate=<false><block_start>"""Recursively gets the list of files under path that are at most (or, with negate, more than) max_level levels deep."""<line_sep># negate = False: files that are at most |max_level|s deep.
# negate = True: files that are more than |max_level|s deep.
paths=[]<for_stmt>entry os.walk(path)<block_start>root=entry[0]<line_sep>files=entry[2]<for_stmt>path files<block_start>root_path=os.path.join(root path)<line_sep>level=path.count(os.path.sep)<if_stmt>(<not>negate<and>level<le>max_level)<or>(negate<and>level<g>max_level)<block_start>paths.append(root_path)<block_end><block_end><block_end><return>paths<block_end><def_stmt>FixPath path<block_start><if_stmt>path[-1]<ne>'/'<block_start><return>path+'/'<block_end><return>path<block_end> |
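A self-contained Python 3 sketch (not from the file above) of the extension test that GetFontList's comments describe; the helper name is illustrative.

import os

def list_files_by_extension(path, exts, negate=False):
    # Keep files whose names end with one of exts; with negate=True keep the rest.
    matches = []
    for root, _dirs, files in os.walk(path):
        for name in files:
            has_ext = any(name.endswith(ext) for ext in exts)
            if has_ext != negate:
                matches.append(os.path.join(root, name))
    return matches

# e.g. list_files_by_extension('fonts/', ['.ttf', '.otf'])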
# -*- coding: utf-8 -*-
<import_stmt>datetime<import_stmt>logging<import_from_stmt>dateutil.relativedelta relativedelta<import_from_stmt>django.conf settings<import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>django.core.validators MinValueValidator<import_from_stmt>django.db models<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>mptt.models MPTTModel TreeForeignKey<import_from_stmt>ralph.accounts.models Team<import_from_stmt>ralph.admin.autocomplete AutocompleteTooltipMixin<import_from_stmt>ralph.assets.models.base BaseObject<import_from_stmt>ralph.assets.models.choices ModelVisualizationLayout ObjectModelType <import_from_stmt>ralph.lib.custom_fields.models CustomFieldMeta WithCustomFieldsMixin <import_from_stmt>ralph.lib.mixins.fields NullableCharField<import_from_stmt>ralph.lib.mixins.models AdminAbsoluteUrlMixin NamedMixin PriceMixin TimeStampMixin <import_from_stmt>ralph.lib.permissions PermByFieldMixin<import_from_stmt>ralph.lib.permissions.models PermissionsBase<line_sep>logger=logging.getLogger(__name__)<class_stmt>AssetHolder(AdminAbsoluteUrlMixin NamedMixin.NonUnique TimeStampMixin models.Model)<block_start><pass><block_end><class_stmt>BusinessSegment(AdminAbsoluteUrlMixin NamedMixin models.Model)<block_start><pass><block_end><class_stmt>ProfitCenter(AdminAbsoluteUrlMixin NamedMixin models.Model)<block_start>description=models.TextField(blank=<true>)<block_end><class_stmt>Environment(AdminAbsoluteUrlMixin NamedMixin TimeStampMixin models.Model)<block_start><pass><block_end><class_stmt>Service(PermByFieldMixin AdminAbsoluteUrlMixin NamedMixin TimeStampMixin models.Model)# Fixme: let's do service catalog replacement from that
<block_start>_allow_in_dashboard=<true><line_sep>active=models.BooleanField(default=<true>)<line_sep>uid=NullableCharField(max_length=40 unique=<true> blank=<true> null=<true>)<line_sep>profit_center=models.ForeignKey(ProfitCenter null=<true> blank=<true>)<line_sep>business_segment=models.ForeignKey(BusinessSegment null=<true> blank=<true>)<line_sep>cost_center=models.CharField(max_length=100 blank=<true>)<line_sep>environments=models.ManyToManyField('Environment' through='ServiceEnvironment')<line_sep>business_owners=models.ManyToManyField(settings.AUTH_USER_MODEL related_name='services_business_owner' blank=<true> )<line_sep>technical_owners=models.ManyToManyField(settings.AUTH_USER_MODEL related_name='services_technical_owner' blank=<true> )<line_sep>support_team=models.ForeignKey(Team null=<true> blank=<true> related_name='services' )<def_stmt>__str__ self<block_start><return>'{}'.format(self.name)<block_end>@classmethod<def_stmt>get_autocomplete_queryset cls<block_start><return>cls._default_manager.filter(active=<true>)<block_end><block_end><class_stmt>ServiceEnvironment(AdminAbsoluteUrlMixin AutocompleteTooltipMixin BaseObject)<block_start>_allow_in_dashboard=<true><line_sep>service=models.ForeignKey(Service)<line_sep>environment=models.ForeignKey(Environment)<line_sep>autocomplete_tooltip_fields=['service__business_owners' 'service__technical_owners' 'service__support_team' ]<def_stmt>__str__ self<block_start><return>'{} - {}'.format(self.service.name self.environment.name)<block_end><class_stmt>Meta<block_start>unique_together=('service' 'environment')<line_sep>ordering=('service__name' 'environment__name')<block_end>@property<def_stmt>service_name self<block_start><return>self.service.name<block_end>@property<def_stmt>service_uid self<block_start><return>self.service.uid<block_end>@property<def_stmt>environment_name self<block_start><return>self.environment.name<block_end>@classmethod<def_stmt>get_autocomplete_queryset cls<block_start><return>cls._default_manager.filter(service__active=<true>)<block_end><block_end><class_stmt>ManufacturerKind(AdminAbsoluteUrlMixin NamedMixin models.Model)<block_start><pass><block_end><class_stmt>Manufacturer(AdminAbsoluteUrlMixin NamedMixin TimeStampMixin models.Model)<block_start>_allow_in_dashboard=<true><line_sep>manufacturer_kind=models.ForeignKey(ManufacturerKind verbose_name=_('manufacturer kind') null=<true> blank=<true> on_delete=models.SET_NULL )<block_end>AssetModelMeta=type('AssetModelMeta' (CustomFieldMeta PermissionsBase) {})<class_stmt>AssetModel(PermByFieldMixin NamedMixin.NonUnique TimeStampMixin AdminAbsoluteUrlMixin WithCustomFieldsMixin models.Model metaclass=AssetModelMeta)# TODO: should type be determined based on category?
<block_start>_allow_in_dashboard=<true><line_sep>type=models.PositiveIntegerField(verbose_name=_('type') choices=ObjectModelType() )<line_sep>manufacturer=models.ForeignKey(Manufacturer on_delete=models.PROTECT blank=<true> null=<true>)<line_sep>category=TreeForeignKey('Category' null=<true> related_name='models')<line_sep>power_consumption=models.PositiveIntegerField(verbose_name=_("Power consumption") default=0 )<line_sep>height_of_device=models.FloatField(verbose_name=_("Height of device") default=0 validators=[MinValueValidator(0)] )<line_sep>cores_count=models.PositiveIntegerField(verbose_name=_("Cores count") default=0 )<line_sep>visualization_layout_front=models.PositiveIntegerField(verbose_name=_("visualization layout of front side") choices=ModelVisualizationLayout() default=ModelVisualizationLayout().na.id blank=<true> )<line_sep>visualization_layout_back=models.PositiveIntegerField(verbose_name=_("visualization layout of back side") choices=ModelVisualizationLayout() default=ModelVisualizationLayout().na.id blank=<true> )<line_sep># Used in the visualization Data Center as is_blade
has_parent=models.BooleanField(default=<false>)<class_stmt>Meta<block_start>verbose_name=_('model')<line_sep>verbose_name_plural=_('models')<block_end><def_stmt>__str__ self<block_start><if_stmt>self.category_id<block_start><return>'[{}] {} {}'.format(self.category self.manufacturer self.name)<block_end><else_stmt><block_start><return>'{} {}'.format(self.manufacturer self.name)<block_end><block_end><def_stmt>_get_layout_class self field<block_start>item=ModelVisualizationLayout.from_id(field)<line_sep><return>getattr(item 'css_class' '')<block_end><def_stmt>get_front_layout_class self<block_start><return>self._get_layout_class(self.visualization_layout_front)<block_end><def_stmt>get_back_layout_class self<block_start><return>self._get_layout_class(self.visualization_layout_back)<block_end><block_end><class_stmt>Category(AdminAbsoluteUrlMixin MPTTModel NamedMixin.NonUnique TimeStampMixin models.Model)<block_start>_allow_in_dashboard=<true><line_sep>code=models.CharField(max_length=4 blank=<true> default='')<line_sep>parent=TreeForeignKey('self' null=<true> blank=<true> related_name='children' db_index=<true>)<line_sep>imei_required=models.BooleanField(default=<false>)<line_sep>allow_deployment=models.BooleanField(default=<false>)<line_sep>show_buyout_date=models.BooleanField(default=<false>)<line_sep>default_depreciation_rate=models.DecimalField(blank=<true> decimal_places=2 default=settings.DEFAULT_DEPRECIATION_RATE help_text=_('This value is in percentage.'<concat>' For example value: "100" means it depreciates during a year.'<concat>' Value: "25" means it depreciates during 4 years, and so on... .') max_digits=5 )<class_stmt>Meta<block_start>verbose_name=_('category')<line_sep>verbose_name_plural=_('categories')<block_end><class_stmt>MPTTMeta<block_start>order_insertion_by=['name']<block_end><def_stmt>__str__ self<block_start><return>self.name<block_end><def_stmt>get_default_depreciation_rate self category=<none><block_start><if_stmt>category<is><none><block_start>category=self<block_end><if_stmt>category.default_depreciation_rate<block_start><return>category.default_depreciation_rate<block_end><elif_stmt>category.parent<block_start><return>self.get_default_depreciation_rate(category.parent)<block_end><return>0<block_end><block_end><class_stmt>AssetLastHostname(models.Model)<block_start>prefix=models.CharField(max_length=30 db_index=<true>)<line_sep>counter=models.PositiveIntegerField(default=1)<line_sep>postfix=models.CharField(max_length=30 db_index=<true>)<class_stmt>Meta<block_start>unique_together=('prefix' 'postfix')<block_end><def_stmt>formatted_hostname self fill=5<block_start><return>'{prefix}{counter:0{fill}}{postfix}'.format(prefix=self.prefix counter=int(self.counter) fill=fill postfix=self.postfix )<block_end>@classmethod# TODO: select_for_update
<def_stmt>increment_hostname cls prefix postfix=''<block_start>obj,created=cls.objects.get_or_create(prefix=prefix postfix=postfix )<if_stmt><not>created# F() avoid race condition problem
<block_start>obj.counter=models.F('counter')+1<line_sep>obj.save()<line_sep><return>cls.objects.get(pk=obj.pk)<block_end><else_stmt><block_start><return>obj<block_end><block_end>@classmethod<def_stmt>get_next_free_hostname cls prefix postfix fill=5 availability_checker=<none> _counter=1<block_start><try_stmt><block_start>last_hostname=cls.objects.get(prefix=prefix postfix=postfix)<block_end><except_stmt>cls.DoesNotExist<block_start>last_hostname=cls(prefix=prefix postfix=postfix counter=0)<block_end>last_hostname.counter<augadd>_counter<line_sep>hostname=last_hostname.formatted_hostname(fill=fill)<if_stmt>availability_checker<is><none><or>availability_checker(hostname)<block_start><return>hostname<block_end><else_stmt><block_start><return>cls.get_next_free_hostname(prefix postfix fill availability_checker _counter+1)<block_end><block_end><def_stmt>__str__ self<block_start><return>self.formatted_hostname()<block_end><block_end><class_stmt>BudgetInfo(AdminAbsoluteUrlMixin NamedMixin TimeStampMixin models.Model)<block_start><class_stmt>Meta<block_start>verbose_name=_('Budget info')<line_sep>verbose_name_plural=_('Budgets info')<block_end><def_stmt>__str__ self<block_start><return>self.name<block_end><block_end><class_stmt>Asset(AdminAbsoluteUrlMixin PriceMixin BaseObject)<block_start>model=models.ForeignKey(AssetModel related_name='assets' on_delete=models.PROTECT)<line_sep># TODO: unify hostname for DCA, VirtualServer, Cluster and CloudHost
# (use another model?)
hostname=NullableCharField(blank=<true> default=<none> max_length=255 null=<true> verbose_name=_('hostname') # TODO: unique
)<line_sep>sn=NullableCharField(blank=<true> max_length=200 null=<true> verbose_name=_('SN') unique=<true> )<line_sep>barcode=NullableCharField(blank=<true> default=<none> max_length=200 null=<true> unique=<true> verbose_name=_('barcode'))<line_sep>niw=NullableCharField(blank=<true> default=<none> max_length=200 null=<true> verbose_name=_('inventory number') )<line_sep>required_support=models.BooleanField(default=<false>)<line_sep>order_no=models.CharField(verbose_name=_('order number') blank=<true> max_length=50 null=<true> )<line_sep>invoice_no=models.CharField(verbose_name=_('invoice number') blank=<true> db_index=<true> max_length=128 null=<true> )<line_sep>invoice_date=models.DateField(blank=<true> null=<true>)<line_sep># to discuss: foreign key?
provider=models.CharField(blank=<true> max_length=100 null=<true> )<line_sep>depreciation_rate=models.DecimalField(blank=<true> decimal_places=2 default=settings.DEFAULT_DEPRECIATION_RATE help_text=_('This value is in percentage.'<concat>' For example value: "100" means it depreciates during a year.'<concat>' Value: "25" means it depreciates during 4 years, and so on... .') max_digits=5 )<line_sep>force_depreciation=models.BooleanField(help_text=('Check if you no longer want to bill for this asset') default=<false> )<line_sep>depreciation_end_date=models.DateField(blank=<true> null=<true>)<line_sep>buyout_date=models.DateField(blank=<true> null=<true> db_index=<true>)<line_sep>task_url=models.URLField(blank=<true> help_text=('External workflow system URL') max_length=2048 null=<true> )<line_sep>budget_info=models.ForeignKey(BudgetInfo blank=<true> default=<none> null=<true> on_delete=models.PROTECT )<line_sep>property_of=models.ForeignKey(AssetHolder on_delete=models.PROTECT null=<true> blank=<true> )<line_sep>start_usage=models.DateField(blank=<true> null=<true> help_text=('Fill it if date of first usage is different then date of creation'))<def_stmt>__str__ self<block_start><return>self.hostname<or>''<block_end><def_stmt>calculate_buyout_date self<block_start>"""
Get buyout date.
Calculate buyout date:
invoice_date + depreciation_rate months + custom buyout date delay
Returns:
Buyout date
"""<if_stmt>self.depreciation_end_date<block_start><return>self.depreciation_end_date<block_end><elif_stmt>self.invoice_date<block_start>months=self.get_depreciation_months()+1+settings.ASSET_BUYOUT_DELAY_MONTHS<line_sep><return>self.invoice_date+relativedelta(months=months)<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>get_depreciation_months self<block_start><return>int((1/(self.depreciation_rate/100)<times>12)<if>self.depreciation_rate<else>0)<block_end><def_stmt>is_depreciated self date=<none><block_start>date=date<or>datetime.date.today()<if_stmt>self.force_depreciation<or><not>self.invoice_date<block_start><return><true><block_end><if_stmt>self.depreciation_end_date<block_start>deprecation_date=self.deprecation_end_date<block_end><else_stmt><block_start>deprecation_date=self.invoice_date+relativedelta(months=self.get_depreciation_months() )<block_end><return>deprecation_date<l>date<block_end><def_stmt>get_depreciated_months self# DEPRECATED
# BACKWARD_COMPATIBILITY
<block_start><return>self.get_depreciation_months()<block_end><def_stmt>is_deprecated self date=<none># DEPRECATED
# BACKWARD_COMPATIBILITY
<block_start><return>self.is_depreciated()<block_end><def_stmt>_liquidated_at self date<block_start>liquidated_history=self.get_history().filter(new_value='liquidated' field_name='status' ).order_by('-date')[:1]<line_sep><return>liquidated_history<and>liquidated_history[0].date.date()<le>date<block_end><def_stmt>clean self<block_start><if_stmt><not>self.sn<and><not>self.barcode<block_start>error_message=[_('SN or BARCODE field is required')]<line_sep><raise>ValidationError({'sn':error_message 'barcode':error_message})<block_end><block_end><def_stmt>save self *args **kwargs# if you save barcode as empty string (instead of None) you could have
# only one asset with empty barcode (because of `unique` constraint)
# if you save barcode as None you could have many assets with empty
# barcode (because `unique` constraint is skipped)
<block_start><for_stmt>unique_field ['barcode' 'sn']<block_start>value=getattr(self unique_field <none>)<if_stmt>value<eq>''<block_start>value=<none><block_end>setattr(self unique_field value)<block_end><if_stmt><not>self.buyout_date<block_start>self.buyout_date=self.calculate_buyout_date()<block_end><return>super(Asset self).save(*args **kwargs)<block_end><block_end> |
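A worked sketch (not part of the model code above) of the arithmetic behind get_depreciation_months() and calculate_buyout_date(); the ASSET_BUYOUT_DELAY_MONTHS value is an assumption, as the real one comes from Django settings.

import datetime
from dateutil.relativedelta import relativedelta

depreciation_rate = 25                      # percent: depreciates over 4 years
invoice_date = datetime.date(2020, 1, 15)
ASSET_BUYOUT_DELAY_MONTHS = 3               # assumed; normally read from settings

depreciation_months = int(1 / (depreciation_rate / 100) * 12)           # -> 48
buyout_date = invoice_date + relativedelta(
    months=depreciation_months + 1 + ASSET_BUYOUT_DELAY_MONTHS)         # -> 2024-05-15
print(depreciation_months, buyout_date)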
<import_from_stmt>django.db models<import_from_stmt>django.db.models Q Max<import_stmt>logging<import_from_stmt>wouso.core.config.models IntegerSetting<import_from_stmt>wouso.core.game.models Game<import_from_stmt>wouso.core.user.models Player<import_from_stmt>wouso.games.challenge.models Challenge ChallengeUser<class_stmt>GrandChallengeUser(Player)<block_start>""" Extension of the user profile for GrandChallenge """<line_sep>lost=models.IntegerField(default=0)<line_sep>last_round=models.IntegerField(default=0)<def_stmt>get_challenges self<block_start>"""
Return a queryset of grandchallenges for this player
"""<line_sep><return>Challenge.objects.filter(id__in=GrandChallenge.objects.filter(Q(challenge__user_from__user__id=self.id)|Q(challenge__user_to__user__id=self.id)).order_by('round').values('challenge'))<block_end><def_stmt>get_active self<block_start>"""
Return a list of active GrandChallenges for this user
"""<line_sep><return>self.get_challenges().filter(status='A')<block_end><def_stmt>get_played self<block_start>"""
Return a list of played GrandChallenges, ordered by round
"""<line_sep><return>self.get_challenges().filter(status__in=('D' 'P'))<block_end><def_stmt>increase_lost self<block_start>self.lost<augadd>1<line_sep>self.save()<block_end><def_stmt>set_last_round self round_number<block_start>self.last_round=round_number<line_sep>self.save()<block_end><block_end><class_stmt>GrandChallenge(models.Model)<block_start>challenge=models.ForeignKey(Challenge blank=<true> null=<true>)<line_sep>round=models.IntegerField(blank=<true> null=<true>)<line_sep>ALL=[]<line_sep>OUT_PLAY=[]<line_sep>CHALLENGES=[]<def_stmt>__oldinit__ self user_from user_to# TODO: change this constructor to a classmethod
<block_start><if_stmt><not>GrandChallengeGame.is_final()<and><not>GrandChallengeGame.is_winner()<block_start>self.branch=max(user_from.lost user_to.lost)<block_end><else_stmt><block_start>self.branch=min(user_from.lost user_to.lost)<block_end>self.user_from=user_from<line_sep>self.user_to=user_to<line_sep>self.__class__.ALL.append(self)<line_sep>self.won,self.lost=<none> <none><line_sep>self.active=<true><line_sep>self.round_number=<none><line_sep>challenge_user_to=user_to.user.get_profile().get_extension(ChallengeUser)<line_sep>challenge_user_from=user_from.user.get_profile().get_extension(ChallengeUser)<line_sep>chall=Challenge.create(challenge_user_from challenge_user_to)<line_sep>chall.accept()<line_sep>self.challenge_id=chall.id<line_sep>self.__class__.CHALLENGES.append(chall.id)<block_end>@classmethod<def_stmt>create cls user_from user_to round<block_start>""" Create a new Challenge and automatically accept it.
"""<line_sep>grand_challenge=cls.objects.create(round=round)<line_sep>user_from=user_from.user.get_profile()<line_sep>user_to=user_to.user.get_profile()<line_sep>grand_challenge.challenge=Challenge.create(user_from.get_extension(ChallengeUser) user_to.get_extension(ChallengeUser))<line_sep>grand_challenge.challenge.accept()<line_sep>grand_challenge.save()<line_sep><return>grand_challenge<block_end>@classmethod<def_stmt>get_challenges cls<block_start><return>cls.ALL<block_end>@classmethod<def_stmt>active cls<block_start><return>filter(<lambda>c:c.active cls.ALL)<block_end>@classmethod<def_stmt>all_done cls<block_start><for_stmt>i cls.CHALLENGES<block_start>x=Challenge.objects.get(id=i)<if_stmt>x.status<ne>"P"<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>play self round_number<block_start>winner=Challenge.objects.get(id=self.challenge_id).winner#trebuie generat de joc
<if_stmt>winner.user<eq>self.user_from.user<block_start>self.won=self.user_from<line_sep>self.lost=self.user_to<line_sep>self.user_to.lost<augadd>1<block_end><else_stmt><block_start>self.won=self.user_to<line_sep>self.lost=self.user_from<line_sep>self.user_from.lost<augadd>1<block_end>self.active=<false><line_sep>self.round_number=round_number<block_end>@classmethod<def_stmt>played_with cls user<block_start>ret=[]<for_stmt>c [c<for>c cls.ALL<if><not>c.active]<block_start><if_stmt>c.user_from<eq>user<block_start>ret.append(c.user_to)<block_end><elif_stmt>c.user_to<eq>user<block_start>ret.append(c.user_from)<block_end><block_end><return>ret<block_end>@classmethod<def_stmt>joaca cls round_number<block_start><for_stmt>c GrandChallenge.active()# the round number...
<block_start>c.play(round_number)<if_stmt>(c.lost.lost<eq>2)<block_start>cls.OUT_PLAY.append(c.lost)<line_sep>#print c.lost
<block_end><block_end><block_end>@classmethod<def_stmt>clasament cls<block_start>arb_win=GrandChallengeGame.eligible(0)<line_sep>arb_lose=GrandChallengeGame.eligible(1)<if_stmt>(len(arb_win)<eq>1)<block_start>cls.OUT_PLAY.append(arb_win[0])<block_end><if_stmt>(len(arb_lose)<eq>1)<block_start>cls.OUT_PLAY.append(arb_lose[0])<block_end>results=cls.OUT_PLAY<line_sep>results.reverse()<line_sep><return>results<block_end><block_end><class_stmt>Round(object)<block_start><def_stmt>__init__ self round_number<block_start>self.round_number=int(round_number)<block_end><def_stmt>challenges self<block_start>"""
Return a list of challenges in this round, ordered by status
"""<line_sep><return>[gc.challenge<for>gc GrandChallenge.objects.filter(round=self.round_number).order_by('challenge__status')]<block_end><def_stmt>info self<block_start>"""
Return a dictionary with information about this round
"""<line_sep><return>{}<block_end><def_stmt>participants self<block_start>ps=set([c.user_from.user<for>c self.challenges()]+[c.user_to.user<for>c self.challenges()])<line_sep>ps=map(<lambda>a:a.get_extension(GrandChallengeUser) ps)<line_sep><return>ps<block_end><def_stmt>rounds self<block_start>"""
Return a list of previous rounds, as an iterator
"""<if_stmt>self.round_number<g>0<block_start><for_stmt>i range(self.round_number)<block_start><yield>Round(i+1)<block_end><block_end><block_end><def_stmt>__repr__ self<block_start><return>'<'+'Round '+unicode(self.round_number)+'>'<block_end><block_end><class_stmt>GrandChallengeGame(Game)<block_start>ALL=[]<line_sep>round_number=0<def_stmt>__init__ self *args **kwargs# Set parent's fields
<block_start>self._meta.get_field('verbose_name').default="GrandChallenges"<line_sep>self._meta.get_field('short_name').default=""<line_sep># the url field takes as value only a named url from module's urls.py
self._meta.get_field('url').default="grandchallenge_index_view"<line_sep>super(GrandChallengeGame self).__init__(*args **kwargs)<block_end>@classmethod<def_stmt>base_query cls<block_start><return>GrandChallengeUser.objects.exclude(user__is_superuser=<true>).exclude(race__can_play=<false>)<block_end>@classmethod<def_stmt>is_started cls<block_start>setting_round=IntegerSetting.get('gc_round')<line_sep><return>setting_round.get_value()<g>0<block_end>@classmethod<def_stmt>reset cls<block_start>"""
Reset a GC game, set every user lost to 0
"""<line_sep>GrandChallenge.objects.all().delete()<line_sep>GrandChallengeUser.objects.update(lost=0 last_round=0)<line_sep>cls.set_current_round(0)<block_end>@classmethod<def_stmt>create_users cls<block_start>"""
Create GrandChallengeUser extensions for all eligibile players.
"""<for_stmt>p Player.objects.exclude(race__can_play=<false>)<block_start>p.get_extension(GrandChallengeUser)<block_end><block_end>@classmethod<def_stmt>start cls<block_start>"""
Create challenges for each consecutive players. Return a list of created challenges.
"""<line_sep>cls.create_users()<line_sep>challenges=[]<line_sep>round=1<line_sep>last=<none><for_stmt>user cls.base_query()<block_start>u=user.user.get_profile()<if_stmt>last<is><none><block_start>last=u<block_end><else_stmt><block_start>c=GrandChallenge.create(u last round)<line_sep>challenges.append(c)<line_sep>last=<none><block_end><block_end>setting_round=IntegerSetting.get('gc_round')<line_sep>setting_round.set_value(round)<line_sep><return>challenges<block_end>@classmethod<def_stmt>eligible cls lost_count<block_start>""" Return a queryset with players of lost_count
"""<line_sep><return>cls.base_query().filter(lost=lost_count)<block_end>@classmethod<def_stmt>is_final cls<block_start>arb_win=cls.eligible(0)<line_sep>arb_lose=cls.eligible(1)<if_stmt>(len(arb_win)<eq>1)<and>(len(arb_lose)<eq>1)<block_start><return><true><block_end><return><false><block_end>@classmethod<def_stmt>final_round cls<block_start>arb_win=cls.eligible(0)<line_sep>arb_lose=cls.eligible(1)<line_sep>GrandChallenge(arb_win[0] arb_lose[0])<block_end>@classmethod<def_stmt>final_second_round cls<block_start>GrandChallengeGame.play_round(1)<block_end>@classmethod<def_stmt>is_winner cls<block_start>arb_win=cls.eligible(0)<line_sep>arb_lose=cls.eligible(1)<if_stmt>(len(arb_win)<eq>0)<and>(len(arb_lose)<eq>2)<block_start><return><false><block_end><return><true><block_end>@classmethod<def_stmt>is_finished cls<block_start>arb_win=cls.eligible(0)<line_sep>arb_lose=cls.eligible(1)<if_stmt>len(arb_win)<eq>0<or>(len(arb_win)<eq>1<and>len(arb_lose)<ne>1)<block_start><return><true><block_end><return><false><block_end>@classmethod<def_stmt>play_round cls lost_count round_number<block_start>"""
Create new challenges.
"""<if_stmt>lost_count<eq>0<block_start>all=GrandChallengeGame.eligible(0)<block_end><elif_stmt>lost_count<eq>1<block_start>all=GrandChallengeGame.eligible(1)<block_end>all=list(all)<line_sep>challenges=[]<while_stmt>len(all)<block_start>u=all[0]<line_sep>played_with=GrandChallenge.played_with(u)<line_sep>adversari=[eu<for>eu all<if>((eu.lost<eq>u.lost)<and>(eu<ne>u)<and>((eu<not><in>played_with)<or>(eu<eq>all[-1])))]<if_stmt><not>len(adversari)<block_start><break><block_end><try_stmt><block_start>adversar=adversari[0]<line_sep>all.remove(adversar)<line_sep>all.remove(u)<line_sep>c=GrandChallenge.create(u adversar round_number)<line_sep>challenges.append(c)<block_end><except_stmt>Exception<as>e<block_start>logging.exception(e)<block_end><block_end><return>challenges<block_end>@classmethod<def_stmt>set_current_round cls number<block_start>setting_round=IntegerSetting.get('gc_round')<line_sep>setting_round.set_value(number)<block_end>@classmethod<def_stmt>get_current_round cls<block_start>setting_round=IntegerSetting.get('gc_round')<line_sep>round=setting_round.get_value()<if_stmt>round<eq>0<block_start><return><none><block_end><return>cls.get_round(round)<block_end>@classmethod<def_stmt>get_round cls round<block_start><return>Round(round_number=round)<block_end>@classmethod<def_stmt>get_winner cls<block_start>"""
Return gc winner
"""<if_stmt>cls.is_finished()<block_start>final_gc=GrandChallenge.objects.filter(round=cls.get_current_round().round_number)[0]<line_sep><return>final_gc.challenge.winner.user.get_profile()<block_end><return><none><block_end>@classmethod<def_stmt>force_round_close cls round<block_start>"""
Finish every challenge in the round
"""<for_stmt>c round.challenges()<block_start><if_stmt>c.is_runnable()<block_start>c.set_expired()<block_end><if_stmt>c.is_draw()# Temporary hack FIXME
<block_start><if_stmt>c.user_from.seconds_took<l>c.user_to.seconds_took<block_start>c.set_won_by_player(c.user_from.user)<block_end><else_stmt><block_start>c.set_won_by_player(c.user_to.user)<block_end><block_end>gc_user_from=c.user_from.user.get_extension(GrandChallengeUser)<line_sep>gc_user_to=c.user_to.user.get_extension(GrandChallengeUser)<line_sep># Upgrade lost count
<if_stmt>c.user_from.user<eq>c.winner<block_start><if_stmt>gc_user_to.last_round<l>round.round_number<block_start>gc_user_to.increase_lost()<block_end><block_end><elif_stmt>c.user_to.user<eq>c.winner<block_start><if_stmt>gc_user_from.last_round<l>round.round_number<block_start>gc_user_from.increase_lost()<block_end><block_end>gc_user_from.set_last_round(round.round_number)<line_sep>gc_user_to.set_last_round(round.round_number)<block_end><block_end>@classmethod<def_stmt>round_next cls<block_start>"""
Progress to next round
"""<if_stmt>cls.is_finished()<block_start>logging.error('Grand challenge finished.')<line_sep><return><none><block_end>round=cls.get_current_round()<line_sep>cls.force_round_close(round)<line_sep>challenges=[]<if_stmt>cls.is_final()# Only two players left in the game
<block_start>arb_win=cls.eligible(0)<line_sep>arb_lose=cls.eligible(1)<line_sep>challenges.append(GrandChallenge.create(arb_win[0] arb_lose[0] round.round_number+1))<block_end><else_stmt># More than two players, create new challenges
<block_start><if_stmt>round.round_number%2<eq>1<block_start>challenges<augadd>cls.play_round(1 round.round_number+1)<line_sep>challenges<augadd>cls.play_round(0 round.round_number+1)<block_end><else_stmt><block_start>challenges<augadd>cls.play_round(1 round.round_number+1)<block_end><block_end><if_stmt>challenges# Update round number
<block_start>round.round_number<augadd>1<line_sep>cls.set_current_round(round.round_number)<block_end>logging.debug('Played round %s'%round.round_number)<line_sep><return>round<block_end><block_end> |
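A plain-Python sketch (not from the module above) of the pairing rule that GrandChallengeGame.play_round implements: players with the same loss count are matched, preferring opponents they have not met yet, with the last remaining player as a fallback.

def pair_players(players, already_played):
    # players: ids with an equal `lost` count; already_played: set of frozenset pairs.
    pool = list(players)
    pairs = []
    while len(pool) >= 2:
        current = pool.pop(0)
        opponent = next(
            (other for other in pool if frozenset((current, other)) not in already_played),
            pool[-1])
        pool.remove(opponent)
        pairs.append((current, opponent))
    return pairs

# e.g. pair_players(['a', 'b', 'c', 'd'], {frozenset(('a', 'b'))}) -> [('a', 'c'), ('b', 'd')]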
""" Filtering Module Tests. """<import_stmt>pytest<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>rdtools csi_filter poa_filter tcell_filter clip_filter quantile_clip_filter normalized_filter logic_clip_filter xgboost_clip_filter <import_stmt>warnings<def_stmt>test_csi_filter <block_start>''' Unit tests for clear sky index filter.'''<line_sep>measured_poa=np.array([1 1 0 1.15 0.85])<line_sep>clearsky_poa=np.array([1 2 1 1.00 1.00])<line_sep>filtered=csi_filter(measured_poa clearsky_poa threshold=0.15)<line_sep># Expect clearsky index is filtered with threshold of +/- 0.15.
expected_result=np.array([<true> <false> <false> <true> <true>])<assert_stmt>filtered.tolist()<eq>expected_result.tolist()<block_end><def_stmt>test_poa_filter <block_start>''' Unit tests for plane of array insolation filter.'''<line_sep>measured_poa=np.array([201 1199 500 200 1200])<line_sep>filtered=poa_filter(measured_poa poa_global_low=200 poa_global_high=1200)<line_sep># Expect high and low POA cutoffs to be non-inclusive.
expected_result=np.array([<true> <true> <true> <false> <false>])<assert_stmt>filtered.tolist()<eq>expected_result.tolist()<block_end><def_stmt>test_tcell_filter <block_start>''' Unit tests for cell temperature filter.'''<line_sep>tcell=np.array([-50 -49 0 109 110])<line_sep>filtered=tcell_filter(tcell temperature_cell_low=-50 temperature_cell_high=110)<line_sep># Expected high and low tcell cutoffs to be non-inclusive.
expected_result=np.array([<false> <true> <true> <true> <false>])<assert_stmt>filtered.tolist()<eq>expected_result.tolist()<block_end>@pytest.fixture<def_stmt>generate_power_time_series_no_clipping <block_start>power_no_datetime_index=pd.Series(np.arange(1 101))<line_sep>power_datetime_index=pd.Series(np.arange(1 101))<line_sep># Add datetime index to second series
time_range=pd.date_range('2016-12-02T11:00:00.000Z' '2017-06-06T07:00:00.000Z' freq='H')<line_sep>power_datetime_index.index=pd.to_datetime(time_range[:100])<line_sep># Create a series that is tz-naive to test on
power_datetime_index_tz_naive=power_datetime_index.copy()<line_sep>power_datetime_index_tz_naive.index=power_datetime_index_tz_naive.index.tz_localize(<none>)<line_sep># Note: Power is expected to be Series object with a datetime index.
<return>power_no_datetime_index power_datetime_index power_datetime_index_tz_naive<block_end>@pytest.fixture<def_stmt>generate_power_time_series_irregular_intervals <block_start>power_datetime_index=pd.Series(np.arange(1 62))<line_sep># Add datetime index to second series
time_range_1=pd.date_range('2016-12-02T11:00:00.000Z' '2017-06-06T07:00:00.000Z' freq='1T')<line_sep>power_datetime_index.index=pd.to_datetime(time_range_1[:61])<line_sep>power_datetime_index_2=pd.Series(np.arange(100 200))<line_sep>time_range_2=pd.date_range(power_datetime_index.index.max() '2017-06-06T07:00:00.000Z' freq='15T')<line_sep>power_datetime_index_2.index=pd.to_datetime(time_range_2[:100])<line_sep>power_datetime_index_2=power_datetime_index_2.iloc[1:]<line_sep>power_datetime_index=pd.concat([power_datetime_index power_datetime_index_2])<line_sep>power_datetime_index_3=pd.Series(list(reversed(np.arange(100 200))))<line_sep>time_range_3=pd.date_range(power_datetime_index.index.max() '2017-06-06T07:00:00.000Z' freq='5T')<line_sep>power_datetime_index_3.index=pd.to_datetime(time_range_3[:100])<line_sep>power_datetime_index_3=power_datetime_index_3.iloc[1:]<line_sep>power_datetime_index=pd.concat([power_datetime_index power_datetime_index_3])<line_sep>power_datetime_index.sort_index()<line_sep># Note: Power is expected to be Series object with a datetime index.
<return>power_datetime_index<block_end>@pytest.fixture<def_stmt>generate_power_time_series_one_min_intervals <block_start>power_datetime_index=pd.Series(np.arange(1 51))<line_sep>power_datetime_index=pd.concat([power_datetime_index power_datetime_index[::-1]])<line_sep># Add datetime index to second series
time_range=pd.date_range('2016-12-02T11:00:00.000Z' '2017-06-06T07:00:00.000Z' freq='1T')<line_sep>power_datetime_index.index=pd.to_datetime(time_range[:100])<line_sep># Note: Power is expected to be Series object with a datetime index.
<return>power_datetime_index<block_end>@pytest.fixture<def_stmt>generate_power_time_series_clipping <block_start>power_no_datetime_index=pd.Series(np.arange(2 101 2))<line_sep>power_no_datetime_index=pd.concat([power_no_datetime_index power_no_datetime_index[::-1]])<line_sep>power_no_datetime_index[48:52]=110<line_sep>power_no_datetime_index=power_no_datetime_index.reset_index(drop=<true>)<line_sep>power_datetime_index=power_no_datetime_index.copy()<line_sep># Add datetime index to second series
time_range=pd.date_range('2016-12-02T11:00:00.000Z' '2017-06-06T07:00:00.000Z' freq='H')<line_sep>power_datetime_index.index=pd.to_datetime(time_range[:100])<line_sep># Note: Power is expected to be Series object with a datetime index.
<return>power_no_datetime_index power_datetime_index<block_end><def_stmt>test_quantile_clip_filter <block_start>''' Unit tests for inverter clipping filter.'''<line_sep>power=pd.Series(np.arange(1 101))<line_sep># Note: Power is expected to be Series object because clip_filter makes
# use of the Series.quantile() method.
filtered=quantile_clip_filter(power quantile=0.98)<line_sep># Expect 99% of the 98th quantile to be filtered
expected_result=power<l>(98<times>0.99)<assert_stmt>((expected_result<eq>filtered).all())<block_end><def_stmt>test_logic_clip_filter generate_power_time_series_no_clipping generate_power_time_series_clipping generate_power_time_series_one_min_intervals generate_power_time_series_irregular_intervals<block_start>''' Unit tests for logic clipping filter.'''<line_sep>power_no_datetime_index_nc,power_datetime_index_nc,power_nc_tz_naive=generate_power_time_series_no_clipping<line_sep># Test that a Type Error is raised when a pandas series
# without a datetime index is used.
pytest.raises(TypeError logic_clip_filter power_no_datetime_index_nc)<line_sep># Test that an error is thrown when we don't include the correct
# mounting configuration input
pytest.raises(ValueError logic_clip_filter power_datetime_index_nc 'not_fixed')<line_sep># Test that an error is thrown when there are 10 or fewer readings
# in the time series
pytest.raises(Exception logic_clip_filter power_datetime_index_nc[:9])<line_sep># Test that a warning is thrown when the time series is tz-naive
warnings.simplefilter("always")<with_stmt>warnings.catch_warnings(record=<true>)<as>w<block_start>logic_clip_filter(power_nc_tz_naive)<line_sep># Warning thrown for it being an experimental filter + tz-naive
<assert_stmt>len(w)<eq>2<block_end># Scramble the index and run through the filter. This should throw
# an IndexError.
power_datetime_index_nc_shuffled=power_datetime_index_nc.sample(frac=1)<line_sep>pytest.raises(IndexError logic_clip_filter power_datetime_index_nc_shuffled 'fixed')<line_sep># Generate 1-minute interval data, run it through the function, and
# check that the associated data returned is 1-minute
power_datetime_index_one_min_intervals=generate_power_time_series_one_min_intervals<line_sep>mask_one_min=logic_clip_filter(power_datetime_index_one_min_intervals)<line_sep># Generate irregular interval data, and run it through the XGBoost model
power_datetime_index_irregular=generate_power_time_series_irregular_intervals<line_sep># Make sure that the routine throws a warning when the data sampling
# frequency is less than 95% consistent
warnings.simplefilter("always")<with_stmt>warnings.catch_warnings(record=<true>)<as>w<block_start>logic_clip_filter(power_datetime_index_irregular)<line_sep># Warning thrown for it being an experimental filter + irregular
# sampling frequency.
<assert_stmt>len(w)<eq>2<block_end># Check that the returned time series index for the logic filter is
# the same as the passed time series index
mask_irregular=logic_clip_filter(power_datetime_index_irregular)<line_sep># Expect none of the sequence to be clipped (as it's
# constantly increasing)
mask_nc=logic_clip_filter(power_datetime_index_nc)<line_sep># Test the time series where the data is clipped
power_no_datetime_index_c,power_datetime_index_c=generate_power_time_series_clipping<line_sep># Expect 4 values in middle of sequence to be clipped (when x=50)
mask_c=logic_clip_filter(power_datetime_index_c)<line_sep>filtered_c=power_datetime_index_c[mask_c]<assert_stmt>bool(mask_nc.all(axis=<none>))<assert_stmt>(len(filtered_c)<eq>96)<assert_stmt>bool((mask_one_min.index.to_series().diff()[1:]<eq>np.timedelta64(60 's')).all(axis=<none>))<assert_stmt>bool((mask_irregular.index<eq>power_datetime_index_irregular.index).all(axis=<none>))<block_end><def_stmt>test_xgboost_clip_filter generate_power_time_series_no_clipping generate_power_time_series_clipping generate_power_time_series_one_min_intervals generate_power_time_series_irregular_intervals<block_start>''' Unit tests for XGBoost clipping filter.'''<line_sep># Test the time series where the data isn't clipped
power_no_datetime_index_nc,power_datetime_index_nc,power_nc_tz_naive=generate_power_time_series_no_clipping<line_sep># Test that a Type Error is raised when a pandas series
# without a datetime index is used.
pytest.raises(TypeError xgboost_clip_filter power_no_datetime_index_nc)<line_sep># Test that an error is thrown when we don't include the correct
# mounting configuration input
pytest.raises(ValueError xgboost_clip_filter power_datetime_index_nc 'not_fixed')<line_sep># Test that an error is thrown when there are 10 or fewer readings
# in the time series
pytest.raises(Exception xgboost_clip_filter power_datetime_index_nc[:9])<line_sep># Test that a warning is thrown when the time series is tz-naive
warnings.simplefilter("always")<with_stmt>warnings.catch_warnings(record=<true>)<as>w<block_start>xgboost_clip_filter(power_nc_tz_naive)<line_sep># Warning thrown for it being an experimental filter + tz-naive
<assert_stmt>len(w)<eq>2<block_end># Scramble the index and run through the filter. This should throw
# an IndexError.
power_datetime_index_nc_shuffled=power_datetime_index_nc.sample(frac=1)<line_sep>pytest.raises(IndexError xgboost_clip_filter power_datetime_index_nc_shuffled 'fixed')<line_sep># Generate 1-minute interval data, run it through the function, and
# check that the associated data returned is 1-minute
power_datetime_index_one_min_intervals=generate_power_time_series_one_min_intervals<line_sep>mask_one_min=xgboost_clip_filter(power_datetime_index_one_min_intervals)<line_sep># Generate irregular interval data, and run it through the XGBoost model
power_datetime_index_irregular=generate_power_time_series_irregular_intervals<line_sep># Check that the returned time series index for XGBoost is the same
# as the passed time series index
mask_irregular=xgboost_clip_filter(power_datetime_index_irregular)<line_sep># Expect none of the sequence to be clipped (as it's
# constantly increasing)
mask_nc=xgboost_clip_filter(power_datetime_index_nc)<line_sep># Test the time series where the data is clipped
power_no_datetime_index_c,power_datetime_index_c=generate_power_time_series_clipping<line_sep># Expect 4 values in middle of sequence to be clipped (when x=50)
mask_c=xgboost_clip_filter(power_datetime_index_c)<line_sep>filtered_c=power_datetime_index_c[mask_c]<assert_stmt>bool(mask_nc.all(axis=<none>))<assert_stmt>(len(filtered_c)<eq>96)<assert_stmt>bool((mask_one_min.index.to_series().diff()[1:]<eq>np.timedelta64(60 's')).all(axis=<none>))<assert_stmt>bool((mask_irregular.index<eq>power_datetime_index_irregular.index).all(axis=<none>))<block_end><def_stmt>test_clip_filter generate_power_time_series_no_clipping<block_start>''' Unit tests for inverter clipping filter.'''<line_sep># Create a time series to test
power_no_datetime_index_nc,power_datetime_index_nc,power_nc_tz_naive=generate_power_time_series_no_clipping<line_sep># Check that the master wrapper defaults to the
# quantile_clip_filter_function.
# Note: Power is expected to be Series object because clip_filter makes
# use of the Series.quantile() method.
filtered_quantile=clip_filter(power_no_datetime_index_nc quantile=0.98)<line_sep># Expect 99% of the 98th quantile to be filtered
expected_result_quantile=power_no_datetime_index_nc<l>(98<times>0.99)<line_sep># Check that the clip filter defaults to quantile clip filter when
# deprecated params are passed
warnings.simplefilter("always")<with_stmt>warnings.catch_warnings(record=<true>)<as>w<block_start>clip_filter(power_datetime_index_nc 0.98)<assert_stmt>len(w)<eq>1<block_end># Check that a ValueError is thrown when a model is passed that
# is not in the acceptable list.
pytest.raises(ValueError clip_filter power_datetime_index_nc 'random_forest')<line_sep># Check that the wrapper handles the xgboost clipping
# function with kwargs.
filtered_xgboost=clip_filter(power_datetime_index_nc 'xgboost' mounting_type="fixed")<line_sep># Check that the wrapper handles the logic clipping
# function with kwargs.
filtered_logic=clip_filter(power_datetime_index_nc 'logic' mounting_type="fixed" rolling_range_max_cutoff=0.3)<line_sep># Check that the function returns a Typr Error if a wrong keyword
# arg is passed in the kwarg arguments.
pytest.raises(TypeError clip_filter power_datetime_index_nc 'xgboost' rolling_range_max_cutoff=0.3)<assert_stmt>bool((expected_result_quantile<eq>filtered_quantile).all(axis=<none>))<assert_stmt>bool(filtered_xgboost.all(axis=<none>))<assert_stmt>bool(filtered_logic.all(axis=<none>))<block_end><def_stmt>test_normalized_filter_default <block_start>pd.testing.assert_series_equal(normalized_filter(pd.Series([-5 5])) pd.Series([<false> <true>]))<line_sep>pd.testing.assert_series_equal(normalized_filter(pd.Series([-1e6 1e6]) energy_normalized_low=<none> energy_normalized_high=<none>) pd.Series([<true> <true>]))<line_sep>pd.testing.assert_series_equal(normalized_filter(pd.Series([-2 2]) energy_normalized_low=-1 energy_normalized_high=1) pd.Series([<false> <false>]))<line_sep>eps=1e-16<line_sep>pd.testing.assert_series_equal(normalized_filter(pd.Series([0.01-eps 0.01+eps 1e308])) pd.Series([<false> <true> <true>]))<block_end> |
"""
Quick smoke test that our implementation of salsa20 does the right thing.
"""<import_from_stmt>hypothesis given<import_stmt>hypothesis.strategies<as>st<import_from_stmt>Crypto.Cipher Salsa20<import_from_stmt>umash C FFI<line_sep>@given(length=st.integers(min_value=1 max_value=512) nonce=st.binary(min_size=8 max_size=8) key=st.binary(min_size=32 max_size=32) )<def_stmt>test_salsa20 length nonce key<block_start>expected=Salsa20.new(key nonce).encrypt(b"\x00"<times>length)<line_sep>buf=FFI.new("char[]" length)<line_sep>C.salsa20_stream(buf length nonce key)<assert_stmt>bytes(FFI.buffer(buf length))<eq>expected<block_end> |
<def_stmt>extractPenguTaichou item<block_start>"""
<NAME>
"""<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol<or>frag)<or>'preview'<in>item['title'].lower()<block_start><return><none><block_end><if_stmt>item['title'].lower().startswith('sword shisho chapter')<block_start><return>buildReleaseMessageWithType(item 'I was a Sword when I Reincarnated!' vol chp frag=frag postfix=postfix)<block_end><return><false><block_end> |
"""CrowdStrike Falcon Host Groups API interface class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""<import_from_stmt>._util generate_error_result force_default<import_from_stmt>._util handle_single_argument process_service_request<import_from_stmt>._payload host_group_create_payload host_group_update_payload<import_from_stmt>._payload generic_payload_list<import_from_stmt>._service_class ServiceClass<import_from_stmt>._endpoint._host_group _host_group_endpoints<as>Endpoints<class_stmt>HostGroup(ServiceClass)<block_start>"""The only requirement to instantiate an instance of this class is one of the following:
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (OAuth2.token())
"""<line_sep>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>query_combined_group_members self:object parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Search for members of a Host Group in your environment by providing an FQL filter
and paging details. Returns a set of host details which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
id -- The ID of the Host Group to search for members of. String
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. name|asc).
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryCombinedGroupMembers
"""<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="queryCombinedGroupMembers" keywords=kwargs params=parameters)<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>query_combined_host_groups self:object parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Search for Host Groups in your environment by providing an FQL filter and
paging details. Returns a set of Host Groups which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
Available filter fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from. Integer.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. created_timestamp|asc).
Available sort fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryCombinedHostGroups
"""<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="queryCombinedHostGroups" keywords=kwargs params=parameters)<block_end>@force_default(defaults=["body" "parameters"] default_types=["dict" "dict"])<def_stmt>perform_group_action self:object body:dict=<none> parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Perform the specified action on the Host Groups specified in the request.
Keyword arguments:
action_name -- Action to perform on the host group. String.
Allowed values: 'add-hosts' or 'remove-hosts'.
action_parameters - List of dictionaries containing action specific parameter settings.
body -- full body payload, not required when using other keywords.
{
"action_parameters": [
{
"name": "string",
"value": "string"
}
],
"ids": [
"string"
]
}
ids -- List of host group IDs to perform an action against. String or list of strings.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/performGroupAction
"""<if_stmt><not>body<block_start>body=generic_payload_list(submitted_keywords=kwargs payload_value="ids")<if_stmt>kwargs.get("action_parameters" <none>)<block_start>body["action_parameters"]=kwargs.get("action_parameters" <none>)<block_end><block_end># _allowed_actions = ['add-hosts', 'remove-hosts']
# operation_id = "performGroupAction"
# parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
# action_name = parameter_payload.get("action_name", "Not Specified")
# act = kwargs.get("action_name", "Not Specified")
<if_stmt>kwargs.get("action_name" "Not Specified").lower()<in>['add-hosts' 'remove-hosts']<block_start>returned=process_service_request(calling_object=self endpoints=Endpoints operation_id="performGroupAction" body=body keywords=kwargs params=parameters)<block_end><else_stmt><block_start>returned=generate_error_result("Invalid value specified for action_name parameter.")<block_end><return>returned<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>get_host_groups self:object *args parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Retrieve a set of Host Groups by specifying their IDs.
Keyword arguments:
ids -- List of host group IDs to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/getHostGroups
"""<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="getHostGroups" keywords=kwargs params=handle_single_argument(args parameters "ids"))<block_end>@force_default(defaults=["body"] default_types=["dict"])<def_stmt>create_host_groups self:object body:dict=<none> **kwargs<arrow>dict<block_start>"""Create Host Groups by specifying details about the group to create.
Keyword arguments:
assignment_rule -- Assignment rule to apply. String.
body -- full body payload, not required when using other keywords.
{
"resources": [
{
"assignment_rule": "string",
"description": "string",
"group_type": "static",
"name": "string"
}
]
}
description -- Description of the host group. String.
group_type -- Type of Host Group to create. String.
name -- The Host Group name. String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/createHostGroups
"""<if_stmt><not>body<block_start>body=host_group_create_payload(passed_keywords=kwargs)<block_end><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="createHostGroups" body=body)<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>delete_host_groups self:object *args parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Delete a set of Host Groups by specifying their IDs.
Keyword arguments:
ids -- List of host group IDs to delete. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: DELETE
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/deleteHostGroups
"""<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="deleteHostGroups" keywords=kwargs params=handle_single_argument(args parameters "ids"))<block_end>@force_default(defaults=["body"] default_types=["dict"])<def_stmt>update_host_groups self:object body:dict=<none> **kwargs<arrow>dict<block_start>"""
Update Host Groups by specifying the ID of the group and details to update.
Keyword arguments:
assignment_rule -- Assignment rule to apply. String.
body -- full body payload, not required when using other keywords.
{
"resources": [
{
"assignment_rule": "string",
"description": "string",
"id": "string",
"name": "string"
}
]
}
description -- Description of the host group. String.
id -- Host Group ID to be updated. String.
name -- The Host Group name. String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: PATCH
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/updateHostGroups
"""<if_stmt><not>body<block_start>body=host_group_update_payload(passed_keywords=kwargs)<block_end><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="updateHostGroups" body=body)<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>query_group_members self:object parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Search for members of a Host Group in your environment by providing an FQL filter
and paging details. Returns a set of Agent IDs which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
id -- The ID of the Host Group to search for members of. String.
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters -- full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. name|asc).
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryGroupMembers
"""<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="queryGroupMembers" keywords=kwargs params=parameters)<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>query_host_groups self:object parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Search for Host Groups in your environment by providing an FQL filter and
paging details. Returns a set of Host Group IDs which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
Available filter fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters -- full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. created_timestamp|asc).
Available sort fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryHostGroups
"""<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="queryHostGroups" keywords=kwargs params=parameters)<block_end># These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8 and are defined here for
# backwards compatibility / ease of use purposes
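# e.g. HostGroup.queryHostGroups(...) resolves to the same bound method as HostGroup.query_host_groups(...).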
queryCombinedGroupMembers=query_combined_group_members<line_sep>queryCombinedHostGroups=query_combined_host_groups<line_sep>performGroupAction=perform_group_action<line_sep>getHostGroups=get_host_groups<line_sep>createHostGroups=create_host_groups<line_sep>deleteHostGroups=delete_host_groups<line_sep>updateHostGroups=update_host_groups<line_sep>queryGroupMembers=query_group_members<line_sep>queryHostGroups=query_host_groups<block_end># The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Host_Group=HostGroup# pylint: disable=C0103
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
<import_stmt>os<import_stmt>sys<import_stmt>logging<import_stmt>multiprocessing<import_stmt>time<import_from_stmt>functools partial<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>preprocessing<line_sep># Allow import of top level python files
<import_stmt>inspect<line_sep>currentdir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))<line_sep>parentdir=os.path.dirname(currentdir)<line_sep>sys.path.insert(0 parentdir)<import_from_stmt>benchmark_args BaseCommandLineAPI<import_from_stmt>benchmark_runner BaseBenchmarkRunner<class_stmt>CommandLineAPI(BaseCommandLineAPI)<block_start>SAMPLES_IN_VALIDATION_SET=50000<def_stmt>__init__ self<block_start>super(CommandLineAPI self).__init__()<line_sep>self._parser.add_argument('--input_size' type=int default=224 help='Size of input images expected by the '<concat>'model')<line_sep>self._parser.add_argument('--num_classes' type=int default=1001 help='Number of classes used when training '<concat>'the model')<line_sep>self._parser.add_argument('--preprocess_method' type=str choices=['vgg' 'inception' 'resnet50_v1_5_tf1_ngc_preprocess'] default='vgg' help='The image preprocessing method used in '<concat>'dataloading.')<block_end><block_end><class_stmt>BenchmarkRunner(BaseBenchmarkRunner)<block_start>ACCURACY_METRIC_NAME="accuracy"<def_stmt>before_benchmark self **kwargs<block_start>self._labels_shift=1<if>kwargs["num_classes"]<eq>1001<else>0<block_end><def_stmt>compute_accuracy_metric self predictions expected **kwargs<block_start><return>np.mean(np.equal(predictions["outputs"] expected))<block_end><def_stmt>process_model_output self outputs **kwargs<block_start>outputs=outputs.numpy()<if_stmt>(len(outputs.shape)<ne>1)<block_start>outputs=np.argmax(outputs axis=1).reshape(-1)<block_end><return>{"outputs":outputs-self._labels_shift}<block_end><block_end><def_stmt>get_dataset data_files batch_size use_synthetic_data preprocess_method input_size<block_start><def_stmt>deserialize_image_record record<block_start>feature_map={'image/encoded':tf.io.FixedLenFeature([] tf.string '') 'image/class/label':tf.io.FixedLenFeature([1] tf.int64 -1) 'image/class/text':tf.io.FixedLenFeature([] tf.string '') 'image/object/bbox/xmin':tf.io.VarLenFeature(dtype=tf.float32) 'image/object/bbox/ymin':tf.io.VarLenFeature(dtype=tf.float32) 'image/object/bbox/xmax':tf.io.VarLenFeature(dtype=tf.float32) 'image/object/bbox/ymax':tf.io.VarLenFeature(dtype=tf.float32)}<with_stmt>tf.compat.v1.name_scope('deserialize_image_record')<block_start>obj=tf.io.parse_single_example(serialized=record features=feature_map)<line_sep>imgdata=obj['image/encoded']<line_sep>label=tf.cast(obj['image/class/label'] tf.int32)<block_end><return>imgdata label<block_end><def_stmt>get_preprocess_fn preprocess_method input_size<block_start>"""Creates a function to parse and process a TFRecord
preprocess_method: string
input_size: int
returns: function, the preprocessing function for a record
"""<if_stmt>preprocess_method<eq>'vgg'<block_start>preprocess_fn=preprocessing.vgg_preprocess<block_end><elif_stmt>preprocess_method<eq>'inception'<block_start>preprocess_fn=preprocessing.inception_preprocess<block_end><elif_stmt>preprocess_method<eq>'resnet50_v1_5_tf1_ngc_preprocess'<block_start>preprocess_fn=preprocessing.resnet50_v1_5_tf1_ngc_preprocess<block_end><else_stmt><block_start><raise>ValueError('Invalid preprocessing method {}'.format(preprocess_method))<block_end><def_stmt>preprocess_sample_fn record# Parse TFRecord
<block_start>imgdata,label=deserialize_image_record(record)<line_sep>label<augsub>1# Change to 0-based (don't use background class)
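# Try JPEG decoding first and fall back to PNG decoding if that raises.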
<try_stmt><block_start>image=tf.image.decode_jpeg(imgdata channels=3 fancy_upscaling=<false> dct_method='INTEGER_FAST')<block_end><except_stmt><block_start>image=tf.image.decode_png(imgdata channels=3)<block_end># Use model's preprocessing function
image=preprocess_fn(image input_size input_size)<line_sep><return>image label<block_end><return>preprocess_sample_fn<block_end>dataset=tf.data.Dataset.from_tensor_slices(data_files)<line_sep>dataset=dataset.interleave(tf.data.TFRecordDataset cycle_length=min(8 multiprocessing.cpu_count()) block_length=max(batch_size 32))<line_sep># preprocess function for input data
preprocess_fn=get_preprocess_fn(preprocess_method=preprocess_method input_size=input_size)<line_sep>dataset=dataset.map(map_func=preprocess_fn num_parallel_calls=min(8 multiprocessing.cpu_count()))<line_sep>dataset=dataset.batch(batch_size=batch_size drop_remainder=<true>)<if_stmt>use_synthetic_data<block_start>dataset=dataset.take(count=1)# loop over 1 batch
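# Cache the single batch in memory and repeat it so the synthetic-data path never re-reads the TFRecords.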
dataset=dataset.cache()<line_sep>dataset=dataset.repeat()<block_end>dataset=dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)<line_sep><return>dataset<block_end><if_stmt>__name__<eq>'__main__'<block_start>cmdline_api=CommandLineAPI()<line_sep>args=cmdline_api.parse_args()<def_stmt>get_files data_dir filename_pattern<block_start><if_stmt>data_dir<is><none><block_start><return>[]<block_end>files=tf.io.gfile.glob(os.path.join(data_dir filename_pattern))<if_stmt><not>files<block_start><raise>ValueError('Can not find any files in {} with '<concat>'pattern "{}"'.format(data_dir filename_pattern))<block_end><return>files<block_end>data_files=get_files(args.data_dir 'validation*')<line_sep>calib_files=([]<if>args.precision<ne>'INT8'<else>get_files(args.calib_data_dir 'train*'))<def_stmt>_input_fn input_files build_steps model_phase<block_start>dataset=get_dataset(data_files=input_files batch_size=args.batch_size # even when using synthetic data, we need to
# build and/or calibrate using real training data
# to be in a realistic scenario
use_synthetic_data=<false> preprocess_method=args.preprocess_method input_size=args.input_size)<for_stmt>i,(batch_images _) enumerate(dataset)<block_start><if_stmt>i<ge>build_steps<block_start><break><block_end>print("* [%s] - step %04d/%04d"%(model_phase i+1 build_steps))<line_sep><yield>batch_images <block_end><block_end>calibration_input_fn=partial(_input_fn input_files=calib_files build_steps=args.num_calib_inputs<floordiv>args.batch_size model_phase="Calibration")<line_sep>optimize_offline_input_fn=partial(_input_fn input_files=data_files build_steps=1 model_phase="Building")<line_sep>runner=BenchmarkRunner(input_saved_model_dir=args.input_saved_model_dir output_saved_model_dir=args.output_saved_model_dir allow_build_at_runtime=args.allow_build_at_runtime calibration_input_fn=calibration_input_fn debug=args.debug gpu_mem_cap=args.gpu_mem_cap input_signature_key=args.input_signature_key max_workspace_size_bytes=args.max_workspace_size minimum_segment_size=args.minimum_segment_size num_calib_inputs=args.num_calib_inputs optimize_offline=args.optimize_offline optimize_offline_input_fn=optimize_offline_input_fn output_tensor_indices=args.output_tensor_indices output_tensor_names=args.output_tensor_names precision_mode=args.precision use_dynamic_shape=args.use_dynamic_shape use_tftrt=args.use_tftrt)<line_sep>get_benchmark_input_fn=partial(get_dataset data_files=data_files input_size=args.input_size preprocess_method=args.preprocess_method)<line_sep>runner.execute_benchmark(batch_size=args.batch_size display_every=args.display_every get_benchmark_input_fn=get_benchmark_input_fn num_iterations=args.num_iterations num_warmup_iterations=args.num_warmup_iterations skip_accuracy_testing=(args.use_synthetic_data<or>args.skip_accuracy_testing) use_synthetic_data=args.use_synthetic_data use_xla=args.use_xla ########### Additional Settings ############
num_classes=args.num_classes )<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>tests.test_utils run_clip_tests<import_from_stmt>soundata annotations<import_from_stmt>soundata.datasets eigenscape_raw<line_sep>TEST_DATA_HOME="tests/resources/sound_datasets/eigenscape_raw"<def_stmt>test_clip <block_start>default_clipid="Beach-01-Raw"<line_sep>dataset=eigenscape_raw.Dataset(TEST_DATA_HOME)<line_sep>clip=dataset.clip(default_clipid)<line_sep>expected_attributes={"audio_path":("tests/resources/sound_datasets/eigenscape_raw/Beach-01-Raw.wav") "clip_id":"Beach-01-Raw" }<line_sep>expected_property_types={"audio":tuple "tags":annotations.Tags "location":str "date":str "time":str "additional_information":str }<line_sep>run_clip_tests(clip expected_attributes expected_property_types)<block_end><def_stmt>test_load_audio <block_start>default_clipid="Beach-01-Raw"<line_sep>dataset=eigenscape_raw.Dataset(TEST_DATA_HOME)<line_sep>clip=dataset.clip(default_clipid)<line_sep>audio_path=clip.audio_path<line_sep>audio,sr=eigenscape_raw.load_audio(audio_path)<assert_stmt>sr<eq>48000<assert_stmt>type(audio)<is>np.ndarray<assert_stmt>len(audio.shape)<eq>2# check audio is loaded correctly
<assert_stmt>audio.shape[0]<eq>32# check audio is 32ch (HOA 4th order)
<assert_stmt>audio.shape[1]<eq>48000<times>1.0<block_end># Check audio duration is as expected
<def_stmt>test_load_tags # dataset
<block_start>default_clipid="Beach-01-Raw"<line_sep>dataset=eigenscape_raw.Dataset(TEST_DATA_HOME)<line_sep>clip=dataset.clip(default_clipid)<assert_stmt>len(clip.tags.labels)<eq>1<assert_stmt>clip.tags.labels[0]<eq>"Beach"<assert_stmt>np.allclose([1.0] clip.tags.confidence)<block_end><def_stmt>test_load_metadata # dataset
<block_start>default_clipid="Beach-01-Raw"<line_sep>dataset=eigenscape_raw.Dataset(TEST_DATA_HOME)<line_sep>clip=dataset.clip(default_clipid)<assert_stmt>clip.location<eq>"Bridlington Beach"<assert_stmt>clip.time<eq>"10:42"<assert_stmt>clip.date<eq>"09/05/2017"<assert_stmt>clip.additional_information<eq>""<block_end><def_stmt>test_to_jams <block_start>default_clipid="Beach-01-Raw"<line_sep>dataset=eigenscape_raw.Dataset(TEST_DATA_HOME)<line_sep>clip=dataset.clip(default_clipid)<line_sep>jam=clip.to_jams()<assert_stmt>jam.validate()<line_sep># Validate Tags
tags=jam.search(namespace="tag_open")[0]["data"]<assert_stmt>len(tags)<eq>1<assert_stmt>tags[0].time<eq>0<assert_stmt>tags[0].duration<eq>1.0<assert_stmt>tags[0].value<eq>"Beach"<assert_stmt>tags[0].confidence<eq>1<line_sep># validate metadata
<assert_stmt>jam.file_metadata.duration<eq>1.0<assert_stmt>jam.sandbox.location<eq>"Bridlington Beach"<assert_stmt>jam.sandbox.time<eq>"10:42"<assert_stmt>jam.sandbox.date<eq>"09/05/2017"<assert_stmt>jam.annotations[0].annotation_metadata.data_source<eq>"soundata"<block_end> |
# MIT license
#
# Copyright (C) 2015-2021 by <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>difflib<import_stmt>os.path<import_stmt>re<import_from_stmt>builtins object<import_stmt>openpyxl<import_from_stmt>.py_2_3 *<line_sep>COLUMN_NAMES={"pin":"num" "num":"num" "name":"name" "type":"type" "style":"style" "side":"side" "unit":"unit" "bank":"unit" "hidden":"hidden" "":"" # Blank column names stay blank.
}<line_sep># This is just a vanilla object class for device pins.
# We'll add attributes to it as needed.
<class_stmt>Pin(object)<block_start><pass><block_end>DEFAULT_PIN=Pin()<line_sep>DEFAULT_PIN.num=<none><line_sep>DEFAULT_PIN.name=""<line_sep>DEFAULT_PIN.type="io"<line_sep>DEFAULT_PIN.style="line"<line_sep>DEFAULT_PIN.unit=1<line_sep>DEFAULT_PIN.side="left"<line_sep>DEFAULT_PIN.hidden="no"<def_stmt>num_row_elements row<block_start>"""Get number of elements in CSV row."""<try_stmt><block_start>rowset=set(row)<line_sep>rowset.discard("")<line_sep><return>len(rowset)<block_end><except_stmt>TypeError<block_start><return>0<block_end><block_end><def_stmt>get_nonblank_row csv_reader<block_start>"""Return the first non-blank row encountered from the current point in a CSV file."""<for_stmt>row csv_reader<block_start><if_stmt>num_row_elements(row)<g>0<block_start><return>row<block_end><block_end><return>[]<block_end><def_stmt>get_part_info csv_reader<block_start>"""Get the part number, ref prefix, footprint, MPN, datasheet link, and description from a row of the CSV file."""<line_sep># Read the first, nonblank row and pad it with None's to make sure it's long enough.
(part_num part_ref_prefix part_footprint part_manf_num part_datasheet part_desc )=list(get_nonblank_row(csv_reader)+[<none>]<times>6)[:6]<line_sep># Put in the default part reference identifier if it isn't present.
<if_stmt>part_ref_prefix<in>(<none> "" " ")<block_start>part_ref_prefix="U"<block_end># Check to see if the row with the part identifier is missing.
<if_stmt>part_num<and>part_num.lower()<in>list(COLUMN_NAMES.keys())<block_start>issue("Row with part number is missing in CSV file." "error")<block_end><return>(part_num part_ref_prefix part_footprint part_manf_num part_datasheet part_desc )<block_end><def_stmt>find_closest_match name name_dict fuzzy_match threshold=0.0<block_start>"""Approximate matching subroutine"""<line_sep># Scrub non-alphanumerics from name and lowercase it.
scrubber=re.compile("[\W.]+")<line_sep>name=scrubber.sub("" name).lower()<line_sep># Return regular dictionary lookup if fuzzy matching is not enabled.
<if_stmt>fuzzy_match<eq><false><block_start><return>name_dict[name]<block_end># Find the closest fuzzy match to the given name in the scrubbed list.
# Set the matching threshold to 0 so it always gives some result.
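# (threshold defaults to 0.0 in the signature above, so by default the best available match is always returned)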
match=difflib.get_close_matches(name list(name_dict.keys()) 1 threshold)[0]<line_sep><return>name_dict[match]<block_end><def_stmt>clean_headers headers<block_start>"""Return a list of the closest valid column headers for the headers found in the file."""<line_sep><return>[find_closest_match(h COLUMN_NAMES <true>)<for>h headers]<block_end><def_stmt>issue msg level="warning"<block_start><if_stmt>level<eq>"warning"<block_start>print("Warning: {}".format(msg))<block_end><elif_stmt>level<eq>"error"<block_start>print("ERROR: {}".format(msg))<line_sep><raise>Exception("Unrecoverable error")<block_end><else_stmt><block_start>print(msg)<block_end><block_end><def_stmt>fix_pin_data pin_data part_num<block_start>"""Fix common errors in pin data."""<line_sep>fixed_pin_data=pin_data.strip()# Remove leading/trailing spaces.
<if_stmt>re.search("\s" fixed_pin_data)<is><not><none><block_start>fixed_pin_data=re.sub("\s" "_" fixed_pin_data)<line_sep>issue("Replaced whitespace with '_' in pin '{pin_data}' of part {part_num}.".format(**locals()))<block_end><return>fixed_pin_data<block_end><def_stmt>is_xlsx filename<block_start><return>os.path.splitext(filename)[1]<eq>".xlsx"<block_end><def_stmt>convert_xlsx_to_csv xlsx_file sheetname=<none><block_start>"""
Convert sheet of an Excel workbook into a CSV file in the same directory
and return the read handle of the CSV file.
"""<line_sep>wb=openpyxl.load_workbook(xlsx_file)<if_stmt>sheetname<block_start>sh=wb[sheetname]<block_end><else_stmt><block_start>sh=wb.active<block_end><if_stmt>USING_PYTHON2# Python 2 doesn't accept newline parameter when opening file.
<block_start>newline={}<block_end><else_stmt># kipart fails on Python 3 unless file is opened with this newline.
<block_start>newline={"newline":""}<block_end>csv_filename="xlsx_to_csv_file.csv"<with_stmt>open(csv_filename "w" **newline)<as>f<block_start>col=csv.writer(f)<for_stmt>row sh.rows<block_start><try_stmt><block_start>col.writerow([cell.value<for>cell row])<block_end><except_stmt>UnicodeEncodeError<block_start><for_stmt>cell row<block_start><if_stmt>cell.value<block_start>cell.value="".join([c<for>c cell.value<if>ord(c)<l>128])<block_end><block_end>col.writerow([cell.value<for>cell row])<block_end><block_end><block_end><return>open(csv_filename "r")<block_end> |
<import_stmt>os<line_sep># SECURITY WARNING: don't run with debug turned on in production!
DEBUG=<false><line_sep>BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))<line_sep>SECRET_KEY=os.getenv('SECRET_KEY' 'sekrit')<line_sep>YOUTUBE_API_KEY=os.getenv('YOUTUBE_API_KEY' '')<line_sep>CPU_SEPARATION=bool(int(os.getenv('CPU_SEPARATION' '1')))<line_sep>ALLOWED_HOSTS=[os.getenv('APP_HOST') '0.0.0.0' '127.0.0.1' 'localhost']<line_sep>DEFAULT_FILE_STORAGE='api.storage.AzureStorage'<line_sep># DEFAULT_FILE_STORAGE = 'api.storage.S3Boto3Storage'
# DEFAULT_FILE_STORAGE = 'api.storage.FileSystemStorage'
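# AzureStorage is the active default; the two commented lines above are the S3 and local-filesystem alternatives.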
STATICFILES_STORAGE='whitenoise.storage.CompressedManifestStaticFilesStorage'<line_sep>##################################
# Azure storage backend settings #
##################################
AZURE_ACCOUNT_KEY=os.getenv('AZURE_ACCOUNT_KEY' '')<line_sep>AZURE_ACCOUNT_NAME=os.getenv('AZURE_ACCOUNT_NAME' '')<line_sep>AZURE_CONTAINER=os.getenv('AZURE_CONTAINER' '')<line_sep>AZURE_CUSTOM_DOMAIN=os.getenv('AZURE_CUSTOM_DOMAIN')<line_sep>AZURE_OBJECT_PARAMETERS={'content_disposition':'attachment'}<line_sep>################################
# AWS storage backend settings #
################################
AWS_ACCESS_KEY_ID=os.getenv('AWS_ACCESS_KEY_ID' '')<line_sep>AWS_SECRET_ACCESS_KEY=os.getenv('AWS_SECRET_ACCESS_KEY' '')<line_sep>AWS_STORAGE_BUCKET_NAME=os.getenv('AWS_STORAGE_BUCKET_NAME' '')<line_sep>AWS_S3_CUSTOM_DOMAIN=os.getenv('AWS_S3_CUSTOM_DOMAIN')<line_sep># A path prefix that will be prepended to all uploads
AWS_LOCATION='media'<line_sep># Disable query parameter authentication (for public reads)
AWS_QUERYSTRING_AUTH=<false><line_sep># Make uploaded files publicly accessible and downloadable
AWS_S3_OBJECT_PARAMETERS={'ACL':'public-read' 'ContentDisposition':'attachment'}<line_sep># S3 region
AWS_S3_REGION_NAME='us-east-1'<line_sep>CELERY_BROKER_URL=os.getenv('CELERY_BROKER_URL' 'redis://localhost:6379/0')<line_sep>CELERY_RESULT_BACKEND=os.getenv('CELERY_RESULT_BACKEND' 'redis://localhost:6379/0')<line_sep>CELERY_TASK_ROUTES={'api.tasks.create_static_mix':{'queue':'slow_queue'} 'api.tasks.create_dynamic_mix':{'queue':'slow_queue'} 'api.tasks.fetch_youtube_audio':{'queue':'fast_queue'} }<line_sep># Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES={'default':{'ENGINE':'django.db.backends.sqlite3' 'NAME':'spleeter-web.sqlite3' }}<line_sep>MEDIA_ROOT='media'<line_sep>MEDIA_URL='/media/'<line_sep>SEPARATE_DIR='separate'<line_sep>UPLOAD_DIR='uploads'<line_sep>VALID_MIME_TYPES=['audio/aac' 'audio/aiff' 'audio/x-aiff' 'audio/ogg' 'video/ogg' 'application/ogg' 'audio/opus' 'audio/vorbis' 'audio/mpeg' 'audio/mp3' 'audio/mpeg3' 'audio/x-mpeg-3' 'video/mpeg' 'audio/m4a' 'audio/x-m4a' 'audio/x-hx-aac-adts' 'audio/mp4' 'video/x-mpeg' 'audio/flac' 'audio/x-flac' 'audio/wav' 'audio/x-wav' 'audio/webm' 'video/webm']<line_sep>VALID_FILE_EXT=[# Lossless
'.aif' '.aifc' '.aiff' '.flac' '.wav' # Lossy
'.aac' '.m4a' '.mp3' '.opus' '.weba' '.webm' # Ogg (Lossy)
'.ogg' '.oga' '.mogg']<line_sep>UPLOAD_FILE_SIZE_LIMIT=100<times>1024<times>1024<line_sep>YOUTUBE_LENGTH_LIMIT=30<times>60<line_sep>YOUTUBE_MAX_RETRIES=3<line_sep># Application definition
INSTALLED_APPS=['whitenoise.runserver_nostatic' 'django.contrib.admin' 'django.contrib.auth' 'django.contrib.contenttypes' 'django.contrib.sessions' 'django.contrib.messages' 'django.contrib.staticfiles' 'api.apps.ApiConfig' 'frontend.apps.FrontendConfig' 'rest_framework' 'webpack_loader']<line_sep>WEBPACK_LOADER={'DEFAULT':{'BUNDLE_DIR_NAME':'dist/' 'STATS_FILE':os.path.join(BASE_DIR 'frontend' 'assets' 'webpack-stats.json')}}<line_sep>REST_FRAMEWORK={'DEFAULT_RENDERER_CLASSES':('rest_framework.renderers.JSONRenderer' )}<line_sep>MIDDLEWARE=['django.middleware.security.SecurityMiddleware' 'whitenoise.middleware.WhiteNoiseMiddleware' 'django.contrib.sessions.middleware.SessionMiddleware' 'django.middleware.common.CommonMiddleware' 'django.middleware.csrf.CsrfViewMiddleware' 'django.contrib.auth.middleware.AuthenticationMiddleware' 'django.contrib.messages.middleware.MessageMiddleware' 'django.middleware.clickjacking.XFrameOptionsMiddleware']<line_sep>ROOT_URLCONF='django_react.urls'<line_sep>TEMPLATES=[{'BACKEND':'django.template.backends.django.DjangoTemplates' 'DIRS':[os.path.join(BASE_DIR 'frontend' 'templates')] 'APP_DIRS':<true> 'OPTIONS':{'context_processors':['frontend.context_processors.debug' 'django.template.context_processors.debug' 'django.template.context_processors.request' 'django.contrib.auth.context_processors.auth' 'django.contrib.messages.context_processors.messages' ] } } ]<line_sep>WSGI_APPLICATION='django_react.wsgi.application'<line_sep># Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE='en-us'<line_sep>TIME_ZONE='UTC'<line_sep>USE_I18N=<true><line_sep>USE_L10N=<true><line_sep>USE_TZ=<true><line_sep># Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL='/static/'<line_sep>STATIC_ROOT=os.path.join(BASE_DIR 'staticfiles')<line_sep>STATICFILES_DIRS=(os.path.join(BASE_DIR 'frontend' 'assets') )<line_sep># Override production variables if DJANGO_DEVELOPMENT env variable is set
<if_stmt>os.getenv('DJANGO_DEVELOPMENT')<block_start><import_from_stmt>.settings_dev *<block_end> |
<import_stmt>unittest<import_from_stmt>teamcity.unittestpy TeamcityTestRunner<import_from_stmt>teamcity is_running_under_teamcity<class_stmt>TestXXX(unittest.TestCase)<block_start><def_stmt>runTest self<block_start><assert_stmt>1<eq>1<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>is_running_under_teamcity()<block_start>runner=TeamcityTestRunner()<block_end><else_stmt><block_start>runner=unittest.TextTestRunner()<block_end>nested_suite=unittest.TestSuite()<line_sep>nested_suite.addTest(TestXXX())<line_sep>suite=unittest.TestSuite()<line_sep>suite.addTest(nested_suite)<line_sep>runner.run(suite)<block_end> |
<import_from_stmt>pyecharts options<as>opts<import_from_stmt>pyecharts.charts Bar<import_from_stmt>pyecharts.commons.utils JsCode<import_from_stmt>pyecharts.faker Faker<line_sep>c=(Bar().add_xaxis(Faker.choose()).add_yaxis("商家A" Faker.values() category_gap="60%").set_series_opts(itemstyle_opts={"normal":{"color":JsCode("""new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: 'rgba(0, 244, 255, 1)'
}, {
offset: 1,
color: 'rgba(0, 77, 167, 1)'
}], false)""") "barBorderRadius":[30 30 30 30] "shadowColor":"rgb(0, 160, 221)" }}).set_global_opts(title_opts=opts.TitleOpts(title="Bar-渐变圆柱")).render("bar_border_radius.html"))<line_sep> |
"""Bootstrap the 'hidden_extensions' setting for the XML syntax.
The XML package includes an `XML.sublime-settings` file
that sets `hidden_extensions` to include some of the extensions
we want to highlight with our package.
There is currently no other way to override this,
so we manually override this extension list
in a User settings file with a plugin.
See also:
https://github.com/sublimehq/Packages/issues/823
https://github.com/SublimeTextIssues/Core/issues/1326
"""<import_stmt>sublime<import_from_stmt>sublime_lib ResourcePath<line_sep>__all__=["plugin_loaded" ]<line_sep>DEFAULT_VALUE=["rss" "sublime-snippet" "vcproj" "tmLanguage" "tmTheme" "tmSnippet" "tmPreferences" "dae"]<line_sep>MODIFIED_VALUE=["rss" "vcproj" "tmLanguage" "tmTheme" "tmSnippet" "dae"]<line_sep># Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
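# Presumably [ST build, year, month, day of last change].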
BOOTSTRAP_VALUE=[3126 2017 3 13]<def_stmt>override_extensions expected modified<block_start>settings=sublime.load_settings("XML.sublime-settings")<if_stmt>settings.get('hidden_extensions')<eq>expected<block_start>settings.set('hidden_extensions' modified)<line_sep>settings.set('package_dev.bootstrapped' BOOTSTRAP_VALUE)<line_sep>sublime.save_settings("XML.sublime-settings")<line_sep>print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")<block_end><block_end><def_stmt>remove_override <block_start>settings=sublime.load_settings("XML.sublime-settings")<if_stmt>settings.get('package_dev.bootstrapped')<block_start>settings.erase('package_dev.bootstrapped')<if_stmt>settings.get('hidden_extensions')<eq>MODIFIED_VALUE<block_start>settings.erase('hidden_extensions')<block_end>print("[PackageDev] Unbootstrapped XML's `hidden_extensions` setting")<line_sep>sublime.save_settings("XML.sublime-settings")<line_sep>sublime.set_timeout(remove_file_if_empty 2000)<block_end><block_end># Give ST time to write the file
<def_stmt>remove_file_if_empty <block_start>path=ResourcePath("Packages/User/XML.sublime-settings").file_path()<try_stmt><block_start><with_stmt>path.open()<as>f<block_start>data=sublime.decode_value(f.read())<block_end><block_end><except_stmt>(FileNotFoundError ValueError)<block_start><pass><block_end><else_stmt><block_start><if_stmt><not>data<or>len(data)<eq>1<and>'extensions'<in>data<and><not>data['extensions']<block_start>path.unlink()<line_sep>print("[PackageDev] Removed now-empty XML.sublime-settings")<block_end><block_end><block_end><def_stmt>plugin_loaded <block_start>version=int(sublime.version())<if_stmt>version<l>3153<block_start>override_extensions(DEFAULT_VALUE MODIFIED_VALUE)<block_end># "csproj" was added for 3153.
# https://github.com/sublimehq/Packages/commit/4a3712b7e236f8c4b443282d97bad17f68df318c
# Technically there was a change in 4050, but nobody should be using that anymore.
# https://github.com/sublimehq/Packages/commit/7866273af18398bce324408ff23c7a22f30486c8
<elif_stmt>version<l>4075<block_start>override_extensions(DEFAULT_VALUE+["csproj"] MODIFIED_VALUE+["csproj"])<block_end><elif_stmt>version<ge>4075# The settings were moved to the syntax file
# https://github.com/sublimehq/Packages/commit/73b16ff196d3cbaf7df2cf5807fda6ab68a2434e
<block_start>remove_override()<block_end><block_end> |
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<class_stmt>Equation(object)<block_start>"""Base class for defining PDE related function."""<def_stmt>__init__ self eqn_config<block_start>self.dim=eqn_config.dim<line_sep>self.total_time=eqn_config.total_time<line_sep>self.num_time_interval=eqn_config.num_time_interval<line_sep>self.delta_t=self.total_time/self.num_time_interval<line_sep>self.sqrt_delta_t=np.sqrt(self.delta_t)<line_sep>self.y_init=<none><block_end><def_stmt>sample self num_sample<block_start>"""Sample forward SDE."""<line_sep><raise>NotImplementedError<block_end><def_stmt>f_tf self t x y z<block_start>"""Generator function in the PDE."""<line_sep><raise>NotImplementedError<block_end><def_stmt>g_tf self t x<block_start>"""Terminal condition of the PDE."""<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>HJBLQ(Equation)<block_start>"""HJB equation in PNAS paper doi.org/10.1073/pnas.1718942115"""<def_stmt>__init__ self eqn_config<block_start>super(HJBLQ self).__init__(eqn_config)<line_sep>self.x_init=np.zeros(self.dim)<line_sep>self.sigma=np.sqrt(2.0)<line_sep>self.lambd=1.0<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=x_sample[: : i]+self.sigma<times>dw_sample[: : i]<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start><return>-self.lambd<times>tf.reduce_sum(tf.square(z) 1 keepdims=<true>)<block_end><def_stmt>g_tf self t x<block_start><return>tf.math.log((1+tf.reduce_sum(tf.square(x) 1 keepdims=<true>))/2)<block_end><block_end><class_stmt>AllenCahn(Equation)<block_start>"""Allen-Cahn equation in PNAS paper doi.org/10.1073/pnas.1718942115"""<def_stmt>__init__ self eqn_config<block_start>super(AllenCahn self).__init__(eqn_config)<line_sep>self.x_init=np.zeros(self.dim)<line_sep>self.sigma=np.sqrt(2.0)<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=x_sample[: : i]+self.sigma<times>dw_sample[: : i]<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start><return>y-tf.pow(y 3)<block_end><def_stmt>g_tf self t x<block_start><return>0.5/(1+0.2<times>tf.reduce_sum(tf.square(x) 1 keepdims=<true>))<block_end><block_end><class_stmt>PricingDefaultRisk(Equation)<block_start>"""
Nonlinear Black-Scholes equation with default risk in PNAS paper
doi.org/10.1073/pnas.1718942115
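The default intensity used in f_tf is piecewise linear in y: gammah for y <= vh, decreasing to gammal at y = vl and constant beyond.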
"""<def_stmt>__init__ self eqn_config<block_start>super(PricingDefaultRisk self).__init__(eqn_config)<line_sep>self.x_init=np.ones(self.dim)<times>100.0<line_sep>self.sigma=0.2<line_sep>self.rate=0.02# interest rate R
self.delta=2.0/3<line_sep>self.gammah=0.2<line_sep>self.gammal=0.02<line_sep>self.mu_bar=0.02<line_sep>self.vh=50.0<line_sep>self.vl=70.0<line_sep>self.slope=(self.gammah-self.gammal)/(self.vh-self.vl)<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=(1+self.mu_bar<times>self.delta_t)<times>x_sample[: : i]+(self.sigma<times>x_sample[: : i]<times>dw_sample[: : i])<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start>piecewise_linear=tf.nn.relu(tf.nn.relu(y-self.vh)<times>self.slope+self.gammah-self.gammal)+self.gammal<line_sep><return>(-(1-self.delta)<times>piecewise_linear-self.rate)<times>y<block_end><def_stmt>g_tf self t x<block_start><return>tf.reduce_min(x 1 keepdims=<true>)<block_end><block_end><class_stmt>PricingDiffRate(Equation)<block_start>"""
Nonlinear Black-Scholes equation with different interest rates for borrowing and lending
in Section 4.4 of Comm. Math. Stat. paper doi.org/10.1007/s40304-017-0117-6
"""<def_stmt>__init__ self eqn_config<block_start>super(PricingDiffRate self).__init__(eqn_config)<line_sep>self.x_init=np.ones(self.dim)<times>100<line_sep>self.sigma=0.2<line_sep>self.mu_bar=0.06<line_sep>self.rl=0.04<line_sep>self.rb=0.06<line_sep>self.alpha=1.0/self.dim<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<line_sep>factor=np.exp((self.mu_bar-(self.sigma<power>2)/2)<times>self.delta_t)<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=(factor<times>np.exp(self.sigma<times>dw_sample[: : i]))<times>x_sample[: : i]<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start>temp=tf.reduce_sum(z 1 keepdims=<true>)/self.sigma<line_sep><return>-self.rl<times>y-(self.mu_bar-self.rl)<times>temp+((self.rb-self.rl)<times>tf.maximum(temp-y 0))<block_end><def_stmt>g_tf self t x<block_start>temp=tf.reduce_max(x 1 keepdims=<true>)<line_sep><return>tf.maximum(temp-120 0)-2<times>tf.maximum(temp-150 0)<block_end><block_end><class_stmt>BurgersType(Equation)<block_start>"""
Multidimensional Burgers-type PDE in Section 4.5 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""<def_stmt>__init__ self eqn_config<block_start>super(BurgersType self).__init__(eqn_config)<line_sep>self.x_init=np.zeros(self.dim)<line_sep>self.y_init=1-1.0/(1+np.exp(0+np.sum(self.x_init)/self.dim))<line_sep>self.sigma=self.dim+0.0<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=x_sample[: : i]+self.sigma<times>dw_sample[: : i]<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start><return>(y-(2+self.dim)/2.0/self.dim)<times>tf.reduce_sum(z 1 keepdims=<true>)<block_end><def_stmt>g_tf self t x<block_start><return>1-1.0/(1+tf.exp(t+tf.reduce_sum(x 1 keepdims=<true>)/self.dim))<block_end><block_end><class_stmt>QuadraticGradient(Equation)<block_start>"""
An example PDE with quadratically growing derivatives in Section 4.6 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""<def_stmt>__init__ self eqn_config<block_start>super(QuadraticGradient self).__init__(eqn_config)<line_sep>self.alpha=0.4<line_sep>self.x_init=np.zeros(self.dim)<line_sep>base=self.total_time+np.sum(np.square(self.x_init)/self.dim)<line_sep>self.y_init=np.sin(np.power(base self.alpha))<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=x_sample[: : i]+dw_sample[: : i]<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start>x_square=tf.reduce_sum(tf.square(x) 1 keepdims=<true>)<line_sep>base=self.total_time-t+x_square/self.dim<line_sep>base_alpha=tf.pow(base self.alpha)<line_sep>derivative=self.alpha<times>tf.pow(base self.alpha-1)<times>tf.cos(base_alpha)<line_sep>term1=tf.reduce_sum(tf.square(z) 1 keepdims=<true>)<line_sep>term2=-4.0<times>(derivative<power>2)<times>x_square/(self.dim<power>2)<line_sep>term3=derivative<line_sep>term4=-0.5<times>(2.0<times>derivative+4.0/(self.dim<power>2)<times>x_square<times>self.alpha<times>((self.alpha-1)<times>tf.pow(base self.alpha-2)<times>tf.cos(base_alpha)-(self.alpha<times>tf.pow(base 2<times>self.alpha-2)<times>tf.sin(base_alpha))))<line_sep><return>term1+term2+term3+term4<block_end><def_stmt>g_tf self t x<block_start><return>tf.sin(tf.pow(tf.reduce_sum(tf.square(x) 1 keepdims=<true>)/self.dim self.alpha))<block_end><block_end><class_stmt>ReactionDiffusion(Equation)<block_start>"""
Time-dependent reaction-diffusion-type example PDE in Section 4.7 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""<def_stmt>__init__ self eqn_config<block_start>super(ReactionDiffusion self).__init__(eqn_config)<line_sep>self._kappa=0.6<line_sep>self.lambd=1/np.sqrt(self.dim)<line_sep>self.x_init=np.zeros(self.dim)<line_sep>self.y_init=1+self._kappa+np.sin(self.lambd<times>np.sum(self.x_init))<times>np.exp(-self.lambd<times>self.lambd<times>self.dim<times>self.total_time/2)<block_end><def_stmt>sample self num_sample<block_start>dw_sample=np.random.normal(size=[num_sample self.dim self.num_time_interval])<times>self.sqrt_delta_t<line_sep>x_sample=np.zeros([num_sample self.dim self.num_time_interval+1])<line_sep>x_sample[: : 0]=np.ones([num_sample self.dim])<times>self.x_init<for_stmt>i range(self.num_time_interval)<block_start>x_sample[: : i+1]=x_sample[: : i]+dw_sample[: : i]<block_end><return>dw_sample x_sample<block_end><def_stmt>f_tf self t x y z<block_start>exp_term=tf.exp((self.lambd<power>2)<times>self.dim<times>(t-self.total_time)/2)<line_sep>sin_term=tf.sin(self.lambd<times>tf.reduce_sum(x 1 keepdims=<true>))<line_sep>temp=y-self._kappa-1-sin_term<times>exp_term<line_sep><return>tf.minimum(tf.constant(1.0 dtype=tf.float64) tf.square(temp))<block_end><def_stmt>g_tf self t x<block_start><return>1+self._kappa+tf.sin(self.lambd<times>tf.reduce_sum(x 1 keepdims=<true>))<block_end><block_end> |
<import_stmt>argparse<import_stmt>gc<import_from_stmt>functools partial<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>numpy<as>np<import_from_stmt>megatron.utils average_losses_across_data_parallel_group<import_from_stmt>megatron.model BertModel GPTModel<import_from_stmt>megatron.model ModelType<import_from_stmt>megatron mpu initialize_megatron get_args get_timers<import_from_stmt>megatron.training train_step setup_model_and_optimizer<import_stmt>torch<import_from_stmt>util write_tsv benchmark_func compute_gpt_tflops compute_gpt_parameter_count<line_sep>GB=1024<power>3<def_stmt>get_gpt_functions <block_start>args=get_args()<line_sep>micro_batch_size=args.micro_batch_size<line_sep>seq_len=args.encoder_seq_length<def_stmt>model_provider pre_process=<true> post_process=<true><block_start>model=GPTModel(num_tokentypes=0 parallel_output=<true> pre_process=pre_process post_process=post_process)<line_sep><return>model<block_end><def_stmt>loss_func loss_mask output_tensor<block_start>losses=output_tensor.float()<line_sep>loss_mask=loss_mask.view(-1).float()<line_sep>loss=torch.sum(losses.view(-1)<times>loss_mask)/loss_mask.sum()<line_sep># Reduce loss for logging.
#averaged_loss = average_losses_across_data_parallel_group([loss])
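# Cross-replica loss averaging is skipped in this benchmark; a dummy value is logged instead.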
averaged_loss=[0]<line_sep><return>loss {'lm loss':averaged_loss[0]}<block_end>tokens=torch.ones((micro_batch_size seq_len)).cuda().long()<line_sep>labels=torch.ones((micro_batch_size seq_len)).cuda().long()<line_sep>loss_mask=torch.ones((micro_batch_size seq_len)).cuda().int()<line_sep>attention_mask=torch.ones(micro_batch_size 1 seq_len seq_len).cuda().bool()<line_sep>position_ids=torch.ones((micro_batch_size seq_len)).cuda().long()<def_stmt>forward_step data_iterator model<block_start>output_tensor=model(tokens position_ids attention_mask labels=labels)<line_sep><return>output_tensor partial(loss_func loss_mask)<block_end><return>model_provider loss_func forward_step<block_end><def_stmt>get_bert_functions <block_start>args=get_args()<line_sep>micro_batch_size=args.micro_batch_size<line_sep>seq_len=args.encoder_seq_length<def_stmt>model_provider pre_process=<true> post_process=<true><block_start>num_tokentypes=2<if>args.bert_binary_head<else>0<line_sep>model=BertModel(num_tokentypes=num_tokentypes add_binary_head=args.bert_binary_head parallel_output=<true> pre_process=pre_process post_process=post_process)<line_sep><return>model<block_end><def_stmt>loss_func loss_mask sentence_order output_tensor<block_start>lm_loss_,sop_logits=output_tensor<line_sep>lm_loss_=lm_loss_.float()<line_sep>loss_mask=loss_mask.float()<line_sep>lm_loss=torch.sum(lm_loss_.view(-1)<times>loss_mask.reshape(-1))/loss_mask.sum()<if_stmt>sop_logits<is><not><none><block_start>sop_loss=F.cross_entropy(sop_logits.view(-1 2).float() sentence_order.view(-1) ignore_index=-1)<line_sep>sop_loss=sop_loss.float()<line_sep>loss=lm_loss+sop_loss<line_sep>#averaged_losses = average_losses_across_data_parallel_group(
# [lm_loss, sop_loss])
averaged_losses=[0 0]<line_sep><return>loss {'lm loss':averaged_losses[0] 'sop loss':averaged_losses[1]}<block_end><else_stmt><block_start>loss=lm_loss<line_sep>#averaged_losses = average_losses_across_data_parallel_group(
# [lm_loss])
averaged_losses=[0]<line_sep><return>loss {'lm loss':averaged_losses[0]}<block_end><block_end>tokens=torch.ones((micro_batch_size seq_len)).cuda().long()<line_sep>padding_mask=torch.ones(micro_batch_size seq_len).cuda().bool()<line_sep>types=torch.ones((micro_batch_size seq_len)).cuda().long()<line_sep>lm_labels=torch.ones((micro_batch_size seq_len)).cuda().long()<line_sep>loss_mask=torch.ones((micro_batch_size seq_len)).cuda().int()<line_sep>sentence_order=<none><def_stmt>forward_step data_iterator model<block_start><if_stmt><not>args.bert_binary_head<block_start>types=<none><block_end>output_tensor=model(tokens padding_mask tokentype_ids=types lm_labels=lm_labels)<line_sep><return>output_tensor partial(loss_func loss_mask sentence_order)<block_end><return>model_provider loss_func forward_step<block_end><def_stmt>benchmark_gpt_bert_one_case benchmark_case output_file_name# Model configs
<block_start>(model_type global_batch_size seq_len hidden_size num_layers num_heads vocab_size num_micro_batches parallel_mode parallel_args)=benchmark_case<assert_stmt>parallel_mode<eq>"manual"<line_sep>(prefer_reduce_scatter use_remat (dp op pp) force_batch_dim_mapping)=parallel_args<line_sep>dp_size,tensor_mp_size,pipeline_mp_size=dp op pp<line_sep>checkpoint_activations=use_remat<line_sep>num_gpus=dp_size<times>tensor_mp_size<times>pipeline_mp_size<assert_stmt>global_batch_size%(dp_size<times>num_micro_batches)<eq>0<line_sep>micro_batch_size=global_batch_size<floordiv>dp_size<floordiv>num_micro_batches<line_sep># always use local DDP
ddp_impl=<true><line_sep># Parallel configs
# Initialize megatron
sys.argv<augadd>["--micro-batch-size" str(micro_batch_size)]<line_sep>sys.argv<augadd>["--tensor-model-parallel-size" str(tensor_mp_size)]<line_sep>sys.argv<augadd>["--pipeline-model-parallel-size" str(pipeline_mp_size)]<line_sep>sys.argv<augadd>["--global-batch-size" str(global_batch_size)]<line_sep>sys.argv<augadd>["--num-layers" str(num_layers)]<line_sep>sys.argv<augadd>["--hidden-size" str(hidden_size)]<line_sep>sys.argv<augadd>["--num-attention-heads" str(num_heads)]<line_sep>sys.argv<augadd>["--seq-length" str(seq_len)]<line_sep>sys.argv<augadd>["--max-position-embeddings" str(seq_len)]<line_sep>sys.argv<augadd>["--optimizer" "adam"]<line_sep>sys.argv<augadd>["--train-iters" "100"]<line_sep>sys.argv<augadd>["--lr" "0.00015"]<line_sep>sys.argv<augadd>["--bert-no-binary-head"]<line_sep>sys.argv<augadd>["--DDP-impl" "local"<if>ddp_impl<else>"torch"]<line_sep>sys.argv<augadd>["--fp16"]<line_sep>sys.argv<augadd>["--loss-scale" "8"]<if_stmt>checkpoint_activations<block_start>sys.argv<augadd>["--checkpoint-activations"]<block_end># sys.argv += ["--no-masked-softmax-fusion"]
# sys.argv += ["--no-async-tensor-model-parallel-allreduce"]
# sys.argv += ["--no-scatter-gather-tensors-in-pipeline"]
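# These commented-out flags would disable specific Megatron fusions/optimizations; they are left off for the default benchmark configuration.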
initialize_megatron()<line_sep>args=get_args()<line_sep>args.padded_vocab_size=vocab_size<line_sep>rank=torch.distributed.get_rank()<line_sep># Check initialization
<assert_stmt>dp_size<eq>mpu.get_data_parallel_world_size()<assert_stmt>tensor_mp_size<eq>mpu.get_tensor_model_parallel_world_size()<assert_stmt>pipeline_mp_size<eq>mpu.get_pipeline_model_parallel_world_size()<line_sep># Build model
<if_stmt>model_type<eq>"gpt"<block_start>model_provider,loss_func,forward_step=get_gpt_functions()<block_end><elif_stmt>model_type<eq>"bert"<block_start>model_provider,loss_func,forward_step=get_bert_functions()<block_end>model,optimizer,lr_scheduler=setup_model_and_optimizer(model_provider model_type=ModelType.encoder_or_decoder)<line_sep>parameter_count=compute_gpt_parameter_count(num_layers hidden_size vocab_size)<def_stmt>run_func <block_start>train_step(forward_step <none> model optimizer lr_scheduler)<block_end># Warmup and reset timers
run_func()<line_sep>timers=get_timers()<line_sep>names=list(timers.timers.keys())<for_stmt>name names<block_start>timers(name).reset()<block_end># Benchmark step time
repeat=2<line_sep>number=1<line_sep>costs=benchmark_func(run_func sync_func=<none> warmup=0 repeat=repeat number=number)<line_sep>timers.log(names normalizer=repeat<times>number)<line_sep># Print results
<if_stmt>rank<eq>0<block_start>peak_mem=torch.cuda.max_memory_allocated(0)<line_sep>tflops=compute_gpt_tflops(global_batch_size seq_len num_layers hidden_size vocab_size torch.distributed.get_world_size() np.mean(costs))<line_sep>tflops_ckpt=compute_gpt_tflops(global_batch_size seq_len num_layers hidden_size vocab_size torch.distributed.get_world_size() np.mean(costs) <true>)<line_sep>heads=["Type" "Model Config" "Parallel Config" "P-mesh shape" "#Microbatch" "Force DP" "Remat" "Mean Time" "Std Time" "#Params" "TFLOPs" "TFLOPs (ckpt)" "Peak Mem"]<line_sep>values=[model_type str(benchmark_case[1:6]) str((dp_size tensor_mp_size pipeline_mp_size)) "N/A" str(num_micro_batches) "N/A" str(checkpoint_activations) f"{np.mean(costs):.3f}" f"{np.std(costs):.3f}" f"{parameter_count/1e9:.3f}" f"{tflops:.2f}" f"{tflops_ckpt:.2f}" f"{peak_mem/GB:5.3f}"]<line_sep>write_tsv(heads values f"{model_type}_megatron_{output_file_name}_rank{rank}.tsv")<line_sep>print("Sleeping for 30 seconds before starting the next case. ")<line_sep>time.sleep(30)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>case=eval(sys.argv[-2])<line_sep>output_file_name=sys.argv[-1]<del_stmt>sys.argv[-1]<del_stmt>sys.argv[-1]<line_sep>benchmark_gpt_bert_one_case(case output_file_name)<block_end> |
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiAuto dataset for Text Simplification"""<import_stmt>json<import_stmt>datasets<line_sep>_CITATION="""\
@inproceedings{acl/JiangMLZX20,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
editor = {<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Neural {CRF} Model for Sentence Alignment in Text Simplification},
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational
Linguistics, {ACL} 2020, Online, July 5-10, 2020},
pages = {7943--7960},
publisher = {Association for Computational Linguistics},
year = {2020},
url = {https://www.aclweb.org/anthology/2020.acl-main.709/}
}
"""<line_sep># TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION="""\
WikiAuto provides a set of aligned sentences from English Wikipedia and Simple English Wikipedia
as a resource to train sentence simplification systems. The authors first crowd-sourced a set of manual alignments
between sentences in a subset of the Simple English Wikipedia and their corresponding versions in English Wikipedia
(this corresponds to the `manual` config), then trained a neural CRF system to predict these alignments.
The trained model was then applied to the other articles in Simple English Wikipedia with an English counterpart to
create a larger corpus of aligned sentences (corresponding to the `auto`, `auto_acl`, `auto_full_no_split`, and `auto_full_with_split` configs here).
"""<line_sep># TODO: Add the licence for the dataset here if you can find it
_LICENSE="CC-BY-SA 3.0"<line_sep># TODO: Add link to the official dataset URLs here
# The HuggingFace dataset library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs={"manual":{"train":"https://www.dropbox.com/sh/ohqaw41v48c7e5p/AACdl4UPKtu7CMMa-CJhz4G7a/wiki-manual/train.tsv?dl=1" "dev":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv" "test":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/test.tsv" } "auto_acl":{"normal":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/ACL2020/train.src" "simple":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/ACL2020/train.dst" } "auto_full_no_split":{"normal":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_no_split/train.src" "simple":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_no_split/train.dst" } "auto_full_with_split":{"normal":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.src" "simple":"https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.dst" } "auto":{"part_1":"https://www.dropbox.com/sh/ohqaw41v48c7e5p/AAATBDhU1zpdcT5x5WgO8DMaa/wiki-auto-all-data/wiki-auto-part-1-data.json?dl=1" "part_2":"https://www.dropbox.com/sh/ohqaw41v48c7e5p/AAATgPkjo_tPt9z12vZxJ3MRa/wiki-auto-all-data/wiki-auto-part-2-data.json?dl=1" } }<line_sep># TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
<class_stmt>WikiAuto(datasets.GeneratorBasedBuilder)<block_start>"""WikiAuto dataset for sentence simplification"""<line_sep>VERSION=datasets.Version("1.0.0")<line_sep>BUILDER_CONFIGS=[datasets.BuilderConfig(name="manual" version=VERSION description="A set of 10K Wikipedia sentence pairs aligned by crowd workers." ) datasets.BuilderConfig(name="auto_acl" version=VERSION description="Automatically aligned and filtered sentence pairs used to train the ACL2020 system." ) datasets.BuilderConfig(name="auto_full_no_split" version=VERSION description="All automatically aligned sentence pairs without sentence splitting." ) datasets.BuilderConfig(name="auto_full_with_split" version=VERSION description="All automatically aligned sentence pairs with sentence splitting." ) datasets.BuilderConfig(name="auto" version=VERSION description="A large set of automatically aligned sentence pairs.") ]<line_sep>DEFAULT_CONFIG_NAME="auto"<def_stmt>_info self<block_start><if_stmt>self.config.name<eq>"manual"# This is the name of the configuration selected in BUILDER_CONFIGS above
<block_start>features=datasets.Features({"alignment_label":datasets.ClassLabel(names=["notAligned" "aligned" "partialAligned"]) "normal_sentence_id":datasets.Value("string") "simple_sentence_id":datasets.Value("string") "normal_sentence":datasets.Value("string") "simple_sentence":datasets.Value("string") "gleu_score":datasets.Value("float32") })<block_end><elif_stmt>(self.config.name<eq>"auto_acl"<or>self.config.name<eq>"auto_full_no_split"<or>self.config.name<eq>"auto_full_with_split")<block_start>features=datasets.Features({"normal_sentence":datasets.Value("string") "simple_sentence":datasets.Value("string") })<block_end><else_stmt><block_start>features=datasets.Features({"example_id":datasets.Value("string") "normal":{"normal_article_id":datasets.Value("int32") "normal_article_title":datasets.Value("string") "normal_article_url":datasets.Value("string") "normal_article_content":datasets.Sequence({"normal_sentence_id":datasets.Value("string") "normal_sentence":datasets.Value("string") }) } "simple":{"simple_article_id":datasets.Value("int32") "simple_article_title":datasets.Value("string") "simple_article_url":datasets.Value("string") "simple_article_content":datasets.Sequence({"simple_sentence_id":datasets.Value("string") "simple_sentence":datasets.Value("string") }) } "paragraph_alignment":datasets.Sequence({"normal_paragraph_id":datasets.Value("string") "simple_paragraph_id":datasets.Value("string") }) "sentence_alignment":datasets.Sequence({"normal_sentence_id":datasets.Value("string") "simple_sentence_id":datasets.Value("string") }) })<block_end><return>datasets.DatasetInfo(description=_DESCRIPTION features=features supervised_keys=<none> homepage="https://github.com/chaojiang06/wiki-auto" license=_LICENSE citation=_CITATION )<block_end><def_stmt>_split_generators self dl_manager<block_start>my_urls=_URLs[self.config.name]<line_sep>data_dir=dl_manager.download_and_extract(my_urls)<if_stmt>self.config.name<in>["manual" "auto"]<block_start><return>[datasets.SplitGenerator(name=spl gen_kwargs={"filepaths":data_dir "split":spl } )<for>spl data_dir]<block_end><else_stmt><block_start><return>[datasets.SplitGenerator(name="full" gen_kwargs={"filepaths":data_dir "split":"full"} )]<block_end><block_end><def_stmt>_generate_examples self filepaths split<block_start><if_stmt>self.config.name<eq>"manual"<block_start>keys=["alignment_label" "simple_sentence_id" "normal_sentence_id" "simple_sentence" "normal_sentence" "gleu_score" ]<with_stmt>open(filepaths[split] encoding="utf-8")<as>f<block_start><for_stmt>id_,line enumerate(f)<block_start>values=line.strip().split("\t")<assert_stmt>len(values)<eq>6 f"Not enough fields in ---- {line} --- {values}"<line_sep><yield>id_ dict([(k val)<if>k<ne>"gleu_score"<else>(k float(val))<for>k,val zip(keys values)])<block_end><block_end><block_end><elif_stmt>(self.config.name<eq>"auto_acl"<or>self.config.name<eq>"auto_full_no_split"<or>self.config.name<eq>"auto_full_with_split")<block_start><with_stmt>open(filepaths["normal"] encoding="utf-8")<as>fi<block_start><with_stmt>open(filepaths["simple"] encoding="utf-8")<as>fo<block_start><for_stmt>id_,(norm_se simp_se) enumerate(zip(fi fo))<block_start><yield>id_ {"normal_sentence":norm_se "simple_sentence":simp_se }<block_end><block_end><block_end><block_end><else_stmt><block_start>dataset_dict=json.load(open(filepaths[split] encoding="utf-8"))<for_stmt>id_,(eid example_dict) enumerate(dataset_dict.items())<block_start>res={"example_id":eid "normal":{"normal_article_id":example_dict["normal"]["id"] 
"normal_article_title":example_dict["normal"]["title"] "normal_article_url":example_dict["normal"]["url"] "normal_article_content":{"normal_sentence_id":[sen_id<for>sen_id,sen_txt example_dict["normal"]["content"].items()] "normal_sentence":[sen_txt<for>sen_id,sen_txt example_dict["normal"]["content"].items()] } } "simple":{"simple_article_id":example_dict["simple"]["id"] "simple_article_title":example_dict["simple"]["title"] "simple_article_url":example_dict["simple"]["url"] "simple_article_content":{"simple_sentence_id":[sen_id<for>sen_id,sen_txt example_dict["simple"]["content"].items()] "simple_sentence":[sen_txt<for>sen_id,sen_txt example_dict["simple"]["content"].items()] } } "paragraph_alignment":{"normal_paragraph_id":[norm_id<for>simp_id,norm_id example_dict.get("paragraph_alignment" [])] "simple_paragraph_id":[simp_id<for>simp_id,norm_id example_dict.get("paragraph_alignment" [])] } "sentence_alignment":{"normal_sentence_id":[norm_id<for>simp_id,norm_id example_dict.get("sentence_alignment" [])] "simple_sentence_id":[simp_id<for>simp_id,norm_id example_dict.get("sentence_alignment" [])] } }<line_sep><yield>id_ res<block_end><block_end><block_end><block_end> |
<import_from_future_stmt> absolute_import<import_stmt>os<import_stmt>sys<import_stmt>warnings<import_stmt>numpy.distutils.system_info<import_stmt>pkg_resources<import_stmt>chainer<def_stmt>_check_python_350 <block_start><if_stmt>sys.version_info[:3]<eq>(3 5 0)<block_start><if_stmt><not>int(os.getenv('CHAINER_PYTHON_350_FORCE' '0'))<block_start>msg="""
Chainer does not work with Python 3.5.0.
We strongly recommend using another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set the CHAINER_PYTHON_350_FORCE environment variable to 1."""<line_sep><raise>Exception(msg)<block_end><block_end><block_end><def_stmt>_check_osx_numpy_backend <block_start><if_stmt>sys.platform<ne>'darwin'<block_start><return><block_end>blas_opt_info=numpy.distutils.system_info.get_info('blas_opt')<if_stmt>blas_opt_info<block_start>extra_link_args=blas_opt_info.get('extra_link_args')<if_stmt>extra_link_args<and>'-Wl,Accelerate'<in>extra_link_args<block_start>warnings.warn('''\
Accelerate has been detected as a NumPy backend library.
vecLib, which is a part of Accelerate, is known not to work correctly with Chainer.
We recommend using other BLAS libraries such as OpenBLAS.
For details of the issue, please see
https://docs.chainer.org/en/stable/tips.html#mnist-example-does-not-converge-in-cpu-mode-on-mac-os-x.
Please be aware that Mac OS X is not an officially supported OS.
''')<block_end><block_end><block_end># NOQA
<def_stmt>_check_optional_dependencies <block_start><for_stmt>dep chainer._version._optional_dependencies<block_start>name=dep['name']<line_sep>pkgs=dep['packages']<line_sep>spec=dep['specifier']<line_sep>help=dep['help']<line_sep>installed=<false><for_stmt>pkg pkgs<block_start>found=<false><line_sep>requirement=pkg<if_stmt>os.environ.get('CHAINER_WARN_VERSION_MISMATCH' '1')<eq>'1'<block_start>requirement='{}{}'.format(pkg spec)<block_end><try_stmt><block_start>pkg_resources.require(requirement)<line_sep>found=<true><block_end><except_stmt>pkg_resources.DistributionNotFound<block_start><continue><block_end><except_stmt>pkg_resources.VersionConflict<block_start>msg='''
--------------------------------------------------------------------------------
{name} ({pkg}) version {version} may not be compatible with this version of Chainer.
Please consider installing the supported version by running:
$ pip install '{requirement}'
See the following page for more details:
{help}
--------------------------------------------------------------------------------
'''<line_sep># NOQA
warnings.warn(msg.format(name=name pkg=pkg version=pkg_resources.get_distribution(pkg).version requirement=requirement help=help))<line_sep>found=<true><block_end><except_stmt>Exception<block_start>warnings.warn('Failed to check requirement: {}'.format(requirement))<line_sep><break><block_end><if_stmt>found<block_start><if_stmt>installed<block_start>warnings.warn('''
--------------------------------------------------------------------------------
Multiple installations of the {name} package have been detected.
You should select only one package from {pkgs}.
Follow these steps to resolve this issue:
1. `pip list` to list {name} packages installed
2. `pip uninstall <package name>` to uninstall all {name} packages
3. `pip install <package name>` to install the proper one
--------------------------------------------------------------------------------
'''.format(name=name pkgs=pkgs))<block_end>installed=<true><block_end><block_end><block_end><block_end><def_stmt>check <block_start>_check_python_350()<line_sep>_check_osx_numpy_backend()<line_sep>_check_optional_dependencies()<block_end> |
"""
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Oct 4, 2017
@author: jrm
"""<import_stmt>sh<import_stmt>sys<def_stmt>main # Make sure instance is cleared
<block_start><import_from_stmt>enaml.application Application<line_sep>Application._instance=<none><import_from_stmt>enamlnative.android.app AndroidApplication<line_sep>app=AndroidApplication(debug=<true> dev='remote' # "10.0.2.2" # or 'server'
load_view=load_view)<line_sep>app.timed_call(5000 run_gestures app)<line_sep>app.start()<block_end><def_stmt>run_gestures app<block_start><for_stmt>i range(30)#: Swipe to next page
<block_start>t=i<times>2000<line_sep>app.timed_call(t sh.adb *'shell input swipe 250 300 -800 300'.split() _bg=<true>)<line_sep>#: Tap a few places
<for_stmt>j range(4)<block_start>app.timed_call(t+i<times>200 sh.adb *'shell input tap 500 150'.split() _bg=<true>)<block_end><block_end>app.timed_call(120000 app.stop)<block_end><def_stmt>load_view app<block_start><import_stmt>enaml<line_sep>#: For debug purposes only!
app.widget.resetBridgeStats()<line_sep>app.widget.resetBridgeCache()<with_stmt>enaml.imports()<block_start><import_stmt>view<if_stmt>app.view<block_start>reload(view)<block_end>app.view=view.ContentView()<block_end>#: Time how long it takes
app.show_view()<block_end><def_stmt>test_remote_debug #sh.pip('install tornado --user'.split())
<block_start>enaml_native=sh.Command('enaml-native')<line_sep>enaml_native('start' '--remote-debugging' _bg=<true>)<line_sep>#: Add
sys.path.append('src/apps/')<line_sep>sys.path.append('src/')<line_sep>#: Init remote nativehooks implementation
<import_from_stmt>enamlnative.core remotehooks<line_sep>remotehooks.init()<line_sep>main()<block_end> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_stmt>argparse<import_stmt>glob<import_stmt>json<import_stmt>os.path<as>osp<import_stmt>mmcv<try_stmt><block_start><import_stmt>xlrd<block_end><except_stmt>ImportError<block_start>xlrd=<none><block_end><try_stmt><block_start><import_stmt>xlutils<import_from_stmt>xlutils.copy copy<block_end><except_stmt>ImportError<block_start>xlutils=<none><block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Gather benchmarked models metric')<line_sep>parser.add_argument('root' type=str help='root path of benchmarked models to be gathered')<line_sep>parser.add_argument('txt_path' type=str help='txt path output by benchmark_filter')<line_sep>parser.add_argument('--excel' type=str help='input path of excel to be recorded')<line_sep>parser.add_argument('--ncol' type=int help='Number of column to be modified or appended')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><if_stmt>__name__<eq>'__main__'<block_start>args=parse_args()<if_stmt>args.excel<block_start><assert_stmt>args.ncol 'Please specify "--excel" and "--ncol" '<concat>'at the same time'<if_stmt>xlrd<is><none><block_start><raise>RuntimeError('xlrd is not installed,'<concat>'Please use “pip install xlrd==1.2.0” to install')<block_end><if_stmt>xlutils<is><none><block_start><raise>RuntimeError('xlutils is not installed,'<concat>'Please use “pip install xlutils==2.0.0” to install')<block_end>readbook=xlrd.open_workbook(args.excel)<block_end>root_path=args.root<line_sep>all_results_dict={}<with_stmt>open(args.txt_path 'r')<as>f<block_start>model_cfgs=f.readlines()<line_sep>model_cfgs=[_<for>_ model_cfgs<if>'configs'<in>_]<for_stmt>i,config enumerate(model_cfgs)<block_start>config=config.strip()<if_stmt>len(config)<eq>0<block_start><continue><block_end>config_name=osp.split(config)[-1]<line_sep>config_name=osp.splitext(config_name)[0]<line_sep>result_path=osp.join(root_path config_name)<if_stmt>osp.exists(result_path)# 1 read config and excel
<block_start>cfg=mmcv.Config.fromfile(config)<line_sep>total_epochs=cfg.total_epochs<line_sep># the first metric will be used to find the best ckpt
has_final_ckpt=<true><if_stmt>'vid'<in>config<block_start>eval_metrics=['bbox_mAP_50']<block_end><elif_stmt>'mot'<in>config<block_start>eval_metrics=['MOTA' 'IDF1']<line_sep># tracktor and deepsort don't have ckpt.
has_final_ckpt=<false><block_end><elif_stmt>'sot'<in>config<block_start>eval_metrics=['success' 'norm_precision' 'precision']<block_end><else_stmt><block_start><raise>NotImplementedError(f'Not supported config: {config}')<block_end><if_stmt>args.excel<block_start>xlrw=copy(readbook)<if_stmt>'vid'<in>config<block_start>sheet=readbook.sheet_by_name('vid')<line_sep>table=xlrw.get_sheet('vid')<block_end><elif_stmt>'mot'<in>config<block_start>sheet=readbook.sheet_by_name('mot')<line_sep>table=xlrw.get_sheet('mot')<block_end><elif_stmt>'sot'<in>config<block_start>sheet=readbook.sheet_by_name('sot')<line_sep>table=xlrw.get_sheet('sot')<block_end>sheet_info={}<for_stmt>i range(6 sheet.nrows)<block_start>sheet_info[sheet.row_values(i)[0]]=i<block_end><block_end># 2 determine whether total_epochs ckpt exists
ckpt_path=f'epoch_{total_epochs}.pth'<if_stmt>osp.exists(osp.join(result_path ckpt_path))<or><not>has_final_ckpt<block_start>log_json_path=list(sorted(glob.glob(osp.join(result_path '*.log.json'))))[-1]<line_sep># 3 read metric
result_dict=dict()<with_stmt>open(log_json_path 'r')<as>f<block_start><for_stmt>line f.readlines()<block_start>log_line=json.loads(line)<if_stmt>'mode'<not><in>log_line.keys()<block_start><continue><block_end><if_stmt>log_line['mode']<eq>'val'<or>log_line['mode']<eq>'test'<block_start>result_dict[f"epoch_{log_line['epoch']}"]={key:log_line[key]<for>key eval_metrics<if>key<in>log_line}<block_end><block_end><block_end># 4 find the best ckpt
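# keep the epoch whose first evaluation metric is highest; metrics are then rounded, and scaled to percentages unless the results already include 'success' (SOT)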
best_epoch_results=dict()<for_stmt>epoch result_dict<block_start><if_stmt>len(best_epoch_results)<eq>0<block_start>best_epoch_results=result_dict[epoch]<block_end><else_stmt><block_start><if_stmt>best_epoch_results[eval_metrics[0]]<l>result_dict[epoch][eval_metrics[0]]<block_start>best_epoch_results=result_dict[epoch]<block_end><block_end><for_stmt>metric best_epoch_results<block_start><if_stmt>'success'<in>best_epoch_results<block_start>performance=round(best_epoch_results[metric] 1)<block_end><else_stmt><block_start>performance=round(best_epoch_results[metric]<times>100 1)<block_end>best_epoch_results[metric]=performance<block_end><block_end>all_results_dict[config]=best_epoch_results<line_sep># update and append excel content
<if_stmt>args.excel<block_start>performance=''<for_stmt>metric best_epoch_results<block_start>performance<augadd>f'{best_epoch_results[metric]}/'<block_end>row_num=sheet_info.get(config <none>)<if_stmt>row_num<block_start>table.write(row_num args.ncol performance)<block_end><else_stmt><block_start>table.write(sheet.nrows 0 config)<line_sep>table.write(sheet.nrows args.ncol performance)<block_end>filename,sufflx=osp.splitext(args.excel)<line_sep>xlrw.save(f'{filename}_o{sufflx}')<line_sep>readbook=xlrd.open_workbook(f'{filename}_o{sufflx}')<block_end><block_end><else_stmt><block_start>print(f'{config} not exist: {ckpt_path}')<block_end><block_end><else_stmt><block_start>print(f'not exist: {config}')<block_end><block_end># 4 save or print results
print('===================================')<for_stmt>config_name,metrics all_results_dict.items()<block_start>print(config_name metrics)<block_end>print('===================================')<if_stmt>args.excel<block_start>print(f'>>> Output {filename}_o{sufflx}')<block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-03-18 23:46
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<import_stmt>django.core.validators<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('gravity' '0002_tilt_refactor') ]<line_sep>operations=[# Converting from AlterField to RemoveField/AddField because of issues with Django 2.0+ migration:
# https://docs.djangoproject.com/en/3.0/releases/2.0/#foreign-key-constraints-are-now-enabled-on-sqlite
migrations.RemoveField(model_name='tiltbridge' name='api_key' ) migrations.AddField(model_name='tiltbridge' name='mdns_id' field=models.CharField(help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'" max_length=64 primary_key=<true> serialize=<false> validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9]+$')]) ) migrations.AlterField(model_name='tiltbridge' name='mdns_id' field=models.CharField(default='tiltbridge' help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'" max_length=64 primary_key=<true> serialize=<false>) preserve_default=<false> ) ]<block_end> |
# Generated by Django 2.0.8 on 2018-09-13 13:38
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("menu" "0008_menu_json_content_new")]<line_sep>operations=[migrations.RemoveField(model_name="menu" name="json_content")]<block_end> |
# Copyright 2016 TensorLab. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# _predict.py
# Implements PredictCommand.
<import_stmt>json<import_stmt>os<import_stmt>sys<import_stmt>tensorflow<as>tf<import_stmt>tensorfx<as>tfx<class_stmt>PredictCommand(object)<block_start>"""Implements the tfx predict command to use a model to produce predictions.
"""<line_sep>name='predict'<line_sep>help='Produces predictions using a model.'<line_sep>extra=<false><line_sep>@staticmethod<def_stmt>build_parser parser<block_start>parser.add_argument('--model' metavar='path' type=str required=<true> help='The path to a previously trained model.')<line_sep>parser.add_argument('--input' metavar='path' type=str help='The path to a file with input instances. Uses stdin by default.')<line_sep>parser.add_argument('--output' metavar='path' type=str help='The path to a file to write outputs to. Uses stdout by default.')<line_sep>parser.add_argument('--batch-size' metavar='instances' type=int default=10 help='The number of instances to predict per batch.')<block_end>@staticmethod<def_stmt>run args# TODO: Figure out where to do JSON and TF initialization in more common way.
<block_start>json.encoder.FLOAT_REPR=<lambda>f:('%.5f'%f)<line_sep>tf.logging.set_verbosity(tf.logging.ERROR)<line_sep>os.environ['TF_CPP_MIN_LOG_LEVEL']=str(tf.logging.ERROR)<line_sep>model=tfx.prediction.Model.load(args.model)<with_stmt>TextSource(args.input args.batch_size)<as>source TextSink(args.output)<as>sink<block_start><for_stmt>instances source<block_start>predictions=model.predict(instances)<line_sep>lines=map(<lambda>p:json.dumps(p sort_keys=<true>) predictions)<line_sep>sink.write(lines)<block_end><block_end><block_end><block_end><class_stmt>TextSource(object)<block_start><def_stmt>__init__ self file=<none> batch_size=1<block_start>self._file=file<line_sep>self._batch_size=batch_size<block_end><def_stmt>__enter__ self<block_start>self._stream=open(self._file 'r')<if>self._file<else>sys.stdin<line_sep><return>self<block_end><def_stmt>__exit__ self type value traceback<block_start><if_stmt>self._stream<and>self._file<block_start>self._stream.close()<block_end><block_end><def_stmt>__iter__ self<block_start>instances=[]<while_stmt><true><block_start>instance=self._stream.readline().strip()<if_stmt><not>instance# EOF
<block_start><break><block_end>instances.append(instance)<if_stmt>len(instances)<eq>self._batch_size# A desired batch of instances is available
<block_start><yield>instances<line_sep>instances=[]<block_end><block_end><if_stmt>instances<block_start><yield>instances<block_end><block_end><block_end><class_stmt>TextSink(object)<block_start><def_stmt>__init__ self file=<none><block_start>self._file=file<block_end><def_stmt>__enter__ self<block_start>self._stream=open(self._file 'w')<if>self._file<else>sys.stdout<line_sep><return>self<block_end><def_stmt>__exit__ self type value traceback<block_start><if_stmt>self._stream<and>self._file<block_start>self._stream.close()<block_end><block_end><def_stmt>write self lines<block_start><for_stmt>l lines<block_start>self._stream.write(l+'\n')<block_end><block_end><block_end> |
# insert CR, insert line above
keys(':setf vim\<CR>jw')<line_sep>keys('4\<C-Down>')<line_sep>keys('Ea')<line_sep>keys('\<CR>')<line_sep>keys('CARRYING OVER ')<line_sep>keys('\<Esc>A')<line_sep>keys('\<CR>')<line_sep>keys('CR at EOL')<line_sep>keys('\<Esc>k')<line_sep>keys('O')<line_sep>keys('above CR')<line_sep>keys('\<Esc>\<Esc>')<line_sep> |
<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib<as>mpl<import_stmt>sys<import_stmt>os<import_from_stmt>collections defaultdict<line_sep>labelsize=16<line_sep>legendsize=14<line_sep>mpl.rcParams['xtick.labelsize']=labelsize<line_sep>mpl.rcParams['ytick.labelsize']=labelsize<line_sep>mpl.rcParams['axes.labelsize']=labelsize<line_sep>mpl.rcParams['axes.titlesize']=labelsize<line_sep>mpl.rcParams['font.size']=labelsize<line_sep>plt.style.use('seaborn-deep')<line_sep># plt.rcParams.update({
# "text.usetex": True,
# "font.family": "sans-serif",
# "font.sans-serif": ["Helvetica"]})
plt.rcParams['pdf.fonttype']=42<line_sep>plt.rcParams['text.usetex']=<true><line_sep>colormap=plt.cm.gist_ncar<def_stmt>plot_ax ax params ys legends ylabel full title=<none> add_legend=<true><block_start>labelsize=20<line_sep>legendsize=20<line_sep>mpl.rcParams['xtick.labelsize']=labelsize<line_sep>mpl.rcParams['ytick.labelsize']=labelsize<line_sep>mpl.rcParams['axes.labelsize']=labelsize<line_sep>mpl.rcParams['axes.titlesize']=labelsize<line_sep>mpl.rcParams['font.size']=labelsize<line_sep>color_base=["blue" "red" "green" "tab:orange" "purple" "tab:cyan"]<line_sep>markers=["o" "v" "s" "*" "8"]<line_sep>sorted_xs=list(set([x<for>xs params<for>x xs]))<line_sep>sorted_xs=sorted(sorted_xs)<line_sep>xticks=[format(xx)<for>xx sorted_xs]<for_stmt>ii,(x y) enumerate(zip(params[::-1] ys[::-1]))<block_start>ax.plot(x y c=color_base[ii] marker=markers[ii] ms=10 linewidth=3)<block_end>ax.set_xlim(ax.get_xlim()[0] 15)<line_sep>p1=ax.get_xlim()<line_sep>p1=[p1[0]-0.1 p1[1]+1.0]<line_sep>p2=[full full]<line_sep>ax.plot(p1 p2 "--" ms=6 c="black" linewidth=2)<line_sep># ax.set_xscale('log', basex=10)
legends=legends[::-1]+["Full Fine-tuning" "Ours"]<if_stmt>add_legend<block_start>ax.legend(legends loc="best" fontsize=legendsize)<block_end># ax.set_xticks(sorted_xs, xticks)
<if_stmt>title<is><not><none><block_start>ax.set(xlabel=r"Fine-tuned Parameters (\%)" ylabel=ylabel)<block_end><else_stmt><block_start>ax.set(title=title xlabel=r"Fine-tuned Parameters (\%)" ylabel=ylabel)<block_end>ax.grid()<line_sep>ax.set_facecolor("white")<block_end><def_stmt>plot_intro <block_start>color_base=["blue" "purple" "green" "tab:orange" "red" "tab:cyan"]<line_sep># color_base = ["blue", "blue", "blue", "blue", "red", "tab:cyan"]
color_base=["dodgerblue" "mediumvioletred" "olivedrab" "goldenrod" "firebrick" "tab:cyan"]<line_sep>color_base=["dodgerblue" "hotpink" "olivedrab" "goldenrod" "crimson" "tab:cyan"]<line_sep>color_base=["gray" "dodgerblue" "olivedrab" "hotpink" "crimson" "tab:cyan"]<line_sep>markers=["o" "v" "s" "*" "D"]<line_sep>markers=["o" "o" "o" "o" "D"]<line_sep>fig,ax=plt.subplots(1 1)<line_sep>full=21.94<line_sep>legends=["Full Fine-tuning" "BitFit" "PrefixTuning" "Adapter" "LoRA" "Ours"]<line_sep>params=[0.08 3.6 12.3 14.4 6.7]<line_sep>xsum=[17.32 20.46 20.98 20.5 21.9]<for_stmt>ii,(param r2) enumerate(zip(params xsum))<block_start>ax.scatter(param r2 c=color_base[ii] marker=markers[ii] edgecolor='black' linewidth=1 s=300)<block_end>ax.set_xlim(ax.get_xlim()[0] 15)<line_sep>p1=ax.get_xlim()<line_sep>p1=[p1[0]-0.1 p1[1]+1.0]<line_sep>p2=[full full]<line_sep>ax.plot(p1 p2 "--" ms=6 c="black" linewidth=2)<line_sep># ax.legend(legends, loc='best', fontsize=12)
ax.grid()<line_sep>ax.set_facecolor("white")<line_sep>ax.set(xlabel=r"Fine-tuned Parameters (\%)" ylabel="ROUGE-2")<line_sep>fig.set_size_inches(5 5)<line_sep>fig.savefig("intro.pdf" bbox_inches='tight')<block_end><def_stmt>compute_params r<block_start>base=200<times>2<times>3<times>1024<times>12<line_sep>base_params=3.6<line_sep>print(r<times>1.0/base<times>base_params)<line_sep><return>r<times>1.0/base<times>base_params<block_end><def_stmt>format n<block_start><return>r"{:.1f}%".format(n)<block_end><def_stmt>plot_overview <block_start>d,L=1024 12<line_sep># fig, axes = plt.subplots(2, 1)
# percentage of parameters
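# compute_params converts a raw added-parameter count into a percentage of model parameters, using prefix tuning with 200 tokens (3.6%) as the reference point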
params_bitfit=[0.08]<line_sep># params_prompt = [compute_params(d * 1), compute_params(d * 30), compute_params(d * 200), compute_params(d * 300)]
params_prompt=[compute_params(d<times>300)]<line_sep>params_pt=[compute_params(1<times>2<times>3<times>d<times>L) compute_params(30<times>2<times>3<times>d<times>L) compute_params(200<times>2<times>3<times>d<times>L) compute_params(512<times>2<times>3<times>d<times>L)]<line_sep>params_hously_adapter_ffn_ho=[compute_params(30<times>2<times>2<times>d<times>L) compute_params(200<times>2<times>2<times>d<times>L) compute_params(512<times>2<times>2<times>d<times>L) compute_params(1024<times>2<times>2<times>d<times>L)]<line_sep>params_lora_attn=[compute_params(1<times>4<times>3<times>d<times>L) compute_params(30<times>4<times>3<times>d<times>L) compute_params(200<times>4<times>3<times>d<times>L) compute_params(400<times>4<times>3<times>d<times>L)]<line_sep>params_lora_ffn=[compute_params(1<times>10<times>2<times>d<times>L) compute_params(102<times>10<times>2<times>d<times>L) compute_params(120<times>10<times>2<times>d<times>L)]<line_sep>params_hously_adapter_attn_ho=[compute_params(1<times>2<times>3<times>d<times>L) compute_params(30<times>2<times>3<times>d<times>L) compute_params(200<times>2<times>3<times>d<times>L) compute_params(512<times>2<times>3<times>d<times>L) compute_params(1024<times>2<times>3<times>d<times>L)]<line_sep># print("prompt: 300")
# print(params_prompt)
# print("pt: 1, 30, 200, 512")
# print(params_pt)
# print("ho/hi ffn: 1, 30, 200, 512, 1024")
# print(params_hously_adapter_ffn_ho)
# print("ho/hi attn: 1, 30, 200, 512, 1024")
# print(params_hously_adapter_attn_ho)
# print("lora attn: 1, 30, 200, 400")
# print(params_lora_attn)
# print("lora ffn: 1, 102, 120")
# print(params_lora_ffn)
# xsum
xsum_bitfit=[17.32]<line_sep># xsum_prompt = [5.33, 14, 15.49, 15.98] # 1, 30?, 200, 300
# xsum_prompt = [15.98] # 300
xsum_pt=[18.14 20.01 20.46 20.40]# 1, 30, 200, 512
xsum_hously_adapter_ffn_ho=[17 18.81 20.4 20.58 20.98]# 1, 30, 200?, 512?, 1024?
xsum_hously_adapter_ffn_ho=[18.81 20.4 20.58 20.98]# 1, 30, 200?, 512?, 1024?
xsum_lora_attn=[17.4 19.59 20.29 20.5]# 1, 30, 200, 400
# mt
mt_bitfit=[26.4]<line_sep># mt_prompt = [6.0, 16.7, 21] # 1, 30, 200
# mt_prompt = [21] # 200
mt_pt=[30.2 35.2 35.6 35.1]# 1, 30, 200, 512
mt_hously_adapter_ffn_ho=[24.3 33.0 35.6 36.3 36.7]# 1, 30, 200, 512, 1024
mt_hously_adapter_ffn_ho=[33.0 35.6 36.3 36.7]# 1, 30, 200, 512, 1024
mt_lora_attn=[25.5 34.2 36.2 36.6]# 1, 30, 200, 400
# legends = ["BitFit (bias)", "PromptTuning (input)", "PrefixTuning (attn)", "Adapter (ffn)", "LoRA (attn)"]
# plot_ax(axes[0], [params_bitfit, params_prompt, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
# [xsum_bitfit, xsum_prompt, xsum_pt, xsum_hously_adapter_ffn_ho, xsum_lora_attn], legends, "ROUGE-2", full=21.94, ours=21.90,
# title="(a) abstractive text summarization", add_legend=False)
# plot_ax(axes[1], [params_bitfit, params_prompt, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
# [mt_bitfit, mt_prompt, mt_pt, mt_hously_adapter_ffn_ho, mt_lora_attn], legends, "BLEU", full=37.3, ours=37.5,
# title="(b) machine translation")
fig,ax=plt.subplots(1 1)<line_sep>legends=["BitFit" "PrefixTuning" "Adapter" "LoRA"]<line_sep>plot_ax(ax [params_bitfit params_pt params_hously_adapter_ffn_ho params_lora_attn] [xsum_bitfit xsum_pt xsum_hously_adapter_ffn_ho xsum_lora_attn] legends "XSum ROUGE-2" full=21.94 title=<none> add_legend=<false>)<line_sep>fig.set_size_inches(5 5)<line_sep>fig.savefig("xsum_overview.pdf" bbox_inches='tight')<line_sep>fig,ax=plt.subplots(1 1)<line_sep>plot_ax(ax [params_bitfit params_pt params_hously_adapter_ffn_ho params_lora_attn] [mt_bitfit mt_pt mt_hously_adapter_ffn_ho mt_lora_attn] legends "MT BLEU" full=37.3 title=<none>)<line_sep>fig.set_size_inches(5 5)<line_sep>fig.savefig("mt_overview.pdf" bbox_inches='tight')<block_end><def_stmt>plot_table4 <block_start>color_base=["blue" "red" "green" "tab:orange" "tab:cyan" "purple" ]<line_sep>markers=["o" "v" "s" "*" "D"]<line_sep>fig,ax=plt.subplots(1 1)<line_sep>ylabel="XSum ROUGE-2"<line_sep>params_pt=[3.6 9.2]<line_sep>params_lora=[7.2]<line_sep>params_adapter=[3.6 9.2]<line_sep>r2_pt=[20.46 20.40]<line_sep>r2_lora=[20.29]<line_sep>r2_adapter=[20.31 20.83]<line_sep>ffn_params_lora=[6.1]<line_sep>ffn_r2_lora=[21.31]<line_sep>ffn_params_adapter=[2.4 6.1 12.3]<line_sep>ffn_r2_adapter=[20.66 20.98 21.24]<line_sep>ax.plot(params_pt r2_pt c=color_base[0] marker=markers[0] ms=10 linewidth=2)<line_sep>ax.plot(params_adapter r2_adapter c=color_base[0] marker=markers[1] ms=10 linewidth=2)<line_sep>ax.plot(params_lora r2_lora c=color_base[0] marker=markers[2] ms=10 linewidth=2)<line_sep>ax.plot(ffn_params_adapter ffn_r2_adapter "--" c=color_base[1] marker=markers[1] ms=10 linewidth=2)<line_sep>ax.plot(ffn_params_lora ffn_r2_lora "--" c=color_base[1] marker=markers[2] ms=10 linewidth=2)<line_sep># legends = ["attn-PT", "attn-PA", "attn-LoRA", "ffn-PA",
# "ffn-LoRA"]
# ax.legend(legends, loc="lower right", fontsize=12)
ax.set(xlabel=r"Fine-tuned Parameters (\%)" ylabel=ylabel)<line_sep>ax.grid()<line_sep>ax.set_facecolor("white")<line_sep>fig.set_size_inches(5 3)<line_sep>fig.savefig("xsum_modification_position.pdf" bbox_inches='tight')<line_sep>fig,ax=plt.subplots(1 1)<line_sep>ylabel="MT BLEU"<line_sep>params_pt=[3.6 9.2]<line_sep>params_lora=[7.2]<line_sep>params_adapter=[3.6 9.2]<line_sep>bleu_pt=[35.6 35.1]<line_sep>bleu_lora=[36.2]<line_sep>bleu_adapter=[35.6 36.2]<line_sep>ffn_params_lora=[6.1]<line_sep>ffn_params_adapter=[2.4 6.1 12.3]<line_sep>ffn_bleu_lora=[36.5]<line_sep>ffn_bleu_adapter=[36.4 37.1 37.3]<line_sep>ax.plot(params_pt bleu_pt c=color_base[0] marker=markers[0] ms=10 linewidth=2)<line_sep>ax.plot(params_adapter bleu_adapter c=color_base[0] marker=markers[1] ms=10 linewidth=2)<line_sep>ax.plot(params_lora bleu_lora c=color_base[0] marker=markers[2] ms=10 linewidth=2)<line_sep>ax.plot(ffn_params_adapter ffn_bleu_adapter "--" c=color_base[1] marker=markers[1] ms=10 linewidth=2)<line_sep>ax.plot(ffn_params_lora ffn_bleu_lora "--" c=color_base[1] marker=markers[2] ms=10 linewidth=2)<line_sep># legends = ["attn-Prefix Tuning", "attn-Parallel Adapter", "attn-LoRA", "ffn-Parallel Adaptaer", "ffn-LoRA"]
# ax.legend(legends, loc="lower right", fontsize=12, bbox_to_anchor=(1.27, 0.005))
legends=["Prefix (attn)" "PA (attn)" "LoRA (attn)" "PA (ffn)" "LoRA (ffn)"]<line_sep>ax.legend(legends loc="lower right" fontsize=12 bbox_to_anchor=(1.11 0.00))<line_sep>ax.set(xlabel=r"Fine-tuned Parameters (\%)" ylabel=ylabel)<line_sep>ax.grid()<line_sep>ax.set_facecolor("white")<line_sep>fig.set_size_inches(5 3)<line_sep>fig.savefig("mt_modification_position.pdf" bbox_inches='tight')<block_end># plot_overview()
plot_intro()<line_sep># plot_table4()
|
<import_stmt>random<import_from_stmt>functools lru_cache<import_from_stmt>hypothesis core<class_stmt>Settings<block_start><def_stmt>__init__ self<arrow><none><block_start>self.seed=random.getrandbits(128)# type: int
self.unicode_enabled=<true># type: bool
self.enable_color=<true># type: bool
<block_end>@property<def_stmt>seed self<arrow>int<block_start><return>self._seed<block_end>@seed.setter<def_stmt>seed self value:int<arrow><none><block_start>self._seed=value<line_sep>core.global_force_seed=value# type: ignore
random.seed(value)<block_end><block_end>@lru_cache(maxsize=1)<def_stmt>get_settings <arrow>Settings<block_start><return>Settings()<block_end> |
# -*- coding: utf-8 -*-
# @File : model.py
# @Author : AaronJny
# @Time : 2019/12/25
# @Desc :
<import_from_stmt>bert4keras.models build_transformer_model<import_stmt>tensorflow<as>tf<import_from_stmt>dataset keep_words<import_stmt>settings<line_sep>model=build_transformer_model(settings.CONFIG_PATH settings.CHECKPOINT_PATH application='lm' keep_tokens=keep_words)<line_sep>model.summary()<line_sep># loss function: cross-entropy
# The input data, starting from the second character, serves as the correct target (the input is not one-hot encoded)
y_true=model.input[0][: 1:]<line_sep># target mask
y_mask=model.get_layer('Embedding-Token').output_mask[: 1:]<line_sep>y_mask=tf.cast(y_mask tf.float32)<line_sep># predictions, ending at the second-to-last position (inclusive)
y_pred=model.output[: :-1]<line_sep>cross_entropy=tf.keras.losses.sparse_categorical_crossentropy(y_true y_pred)<line_sep>cross_entropy=tf.reduce_sum(cross_entropy<times>y_mask)/tf.reduce_sum(y_mask)<line_sep>model.add_loss(cross_entropy)<line_sep>model.compile(tf.keras.optimizers.Adam(1e-5))<line_sep> |
<import_from_stmt>django.apps AppConfig<class_stmt>CollectorConfig(AppConfig)<block_start>name='collector'<block_end> |
<import_from_stmt>office365.runtime.client_object ClientObject<class_stmt>SocialRestActor(ClientObject)<block_start><pass><block_end> |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>oslo_db.sqlalchemy test_fixtures<import_from_stmt>oslo_db.sqlalchemy utils<as>db_utils<import_from_stmt>glance.tests.functional.db test_migrations<import_stmt>glance.tests.utils<as>test_utils<class_stmt>TestPikeExpand01Mixin(test_migrations.AlembicMigrationsMixin)<block_start>artifacts_table_names=['artifact_blob_locations' 'artifact_properties' 'artifact_blobs' 'artifact_dependencies' 'artifact_tags' 'artifacts']<def_stmt>_get_revisions self config<block_start><return>test_migrations.AlembicMigrationsMixin._get_revisions(self config head='pike_expand01')<block_end><def_stmt>_pre_upgrade_pike_expand01 self engine# verify presence of the artifacts tables
<block_start><for_stmt>table_name self.artifacts_table_names<block_start>table=db_utils.get_table(engine table_name)<line_sep>self.assertIsNotNone(table)<block_end><block_end><def_stmt>_check_pike_expand01 self engine data# should be no changes, so re-run pre-upgrade check
<block_start>self._pre_upgrade_pike_expand01(engine)<block_end><block_end><class_stmt>TestPikeExpand01MySQL(TestPikeExpand01Mixin test_fixtures.OpportunisticDBTestMixin test_utils.BaseTestCase )<block_start>FIXTURE=test_fixtures.MySQLOpportunisticFixture<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>statsmodels.multivariate.factor Factor<import_from_stmt>numpy.testing assert_allclose assert_equal<import_from_stmt>scipy.optimize approx_fprime<import_stmt>warnings<line_sep># A small model for basic testing
<def_stmt>_toy <block_start>uniq=np.r_[4 9 16]<line_sep>load=np.asarray([[3 1 2] [2 5 8]]).T<line_sep>par=np.r_[2 3 4 3 1 2 2 5 8]<line_sep>corr=np.asarray([[1 .5 .25] [.5 1 .5] [.25 .5 1]])<line_sep><return>uniq load corr par<block_end><def_stmt>test_loglike <block_start>uniq,load,corr,par=_toy()<line_sep>fa=Factor(n_factor=2 corr=corr)<line_sep># Two ways of passing the parameters to loglike
ll1=fa.loglike((load uniq))<line_sep>ll2=fa.loglike(par)<line_sep>assert_allclose(ll1 ll2)<block_end><def_stmt>test_score <block_start>uniq,load,corr,par=_toy()<line_sep>fa=Factor(n_factor=2 corr=corr)<def_stmt>f par<block_start><return>fa.loglike(par)<block_end>par2=np.r_[0.1 0.2 0.3 0.4 0.3 0.1 0.2 -0.2 0 0.8 0.5 0]<for_stmt>pt (par par2)<block_start>g1=approx_fprime(pt f 1e-8)<line_sep>g2=fa.score(pt)<line_sep>assert_allclose(g1 g2 atol=1e-3)<block_end><block_end><def_stmt>test_exact # Test if we can recover exact factor-structured matrices with
# default starting values.
<block_start>np.random.seed(23324)<line_sep># Works for larger k_var but slow for routine testing.
<for_stmt>k_var 5 10 25<block_start><for_stmt>n_factor 1 2 3<block_start>load=np.random.normal(size=(k_var n_factor))<line_sep>uniq=np.linspace(1 2 k_var)<line_sep>c=np.dot(load load.T)<line_sep>c.flat[::c.shape[0]+1]<augadd>uniq<line_sep>s=np.sqrt(np.diag(c))<line_sep>c<augdiv>np.outer(s s)<line_sep>fa=Factor(corr=c n_factor=n_factor method='ml')<line_sep>rslt=fa.fit()<line_sep>assert_allclose(rslt.fitted_cov c rtol=1e-4 atol=1e-4)<line_sep>rslt.summary()<block_end><block_end><block_end># smoke test
<def_stmt>test_exact_em # Test if we can recover exact factor-structured matrices with
# default starting values using the EM algorithm.
<block_start>np.random.seed(23324)<line_sep># Works for larger k_var but slow for routine testing.
<for_stmt>k_var 5 10 25<block_start><for_stmt>n_factor 1 2 3<block_start>load=np.random.normal(size=(k_var n_factor))<line_sep>uniq=np.linspace(1 2 k_var)<line_sep>c=np.dot(load load.T)<line_sep>c.flat[::c.shape[0]+1]<augadd>uniq<line_sep>s=np.sqrt(np.diag(c))<line_sep>c<augdiv>np.outer(s s)<line_sep>fa=Factor(corr=c n_factor=n_factor method='ml')<line_sep>load_e,uniq_e=fa._fit_ml_em(2000)<line_sep>c_e=np.dot(load_e load_e.T)<line_sep>c_e.flat[::c_e.shape[0]+1]<augadd>uniq_e<line_sep>assert_allclose(c_e c rtol=1e-4 atol=1e-4)<block_end><block_end><block_end><def_stmt>test_fit_ml_em_random_state # Ensure Factor._fit_ml_em doesn't change numpy's singleton random state
# see #7357
<block_start>T=10<line_sep>epsilon=np.random.multivariate_normal(np.zeros(3) np.eye(3) size=T).T<line_sep>initial=np.random.get_state()<with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings("ignore" message='Fitting did not converge')<line_sep>Factor(endog=epsilon n_factor=2 method='ml').fit()<block_end>final=np.random.get_state()<assert_stmt>(initial[0]<eq>final[0])<line_sep>assert_equal(initial[1] final[1])<assert_stmt>(initial[2:]<eq>final[2:])<block_end><def_stmt>test_em <block_start>n_factor=1<line_sep>cor=np.asarray([[1 0.5 0.3] [0.5 1 0] [0.3 0 1]])<line_sep>fa=Factor(corr=cor n_factor=n_factor method='ml')<line_sep>rslt=fa.fit(opt={'gtol':1e-3})<line_sep>load_opt=rslt.loadings<line_sep>uniq_opt=rslt.uniqueness<line_sep>load_em,uniq_em=fa._fit_ml_em(1000)<line_sep>cc=np.dot(load_em load_em.T)<line_sep>cc.flat[::cc.shape[0]+1]<augadd>uniq_em<line_sep>assert_allclose(cc rslt.fitted_cov rtol=1e-2 atol=1e-2)<block_end><def_stmt>test_1factor <block_start>"""
# R code:
r = 0.4
p = 4
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
fa = factanal(covmat=cm, factors=1)
print(fa, digits=10)
"""<line_sep>r=0.4<line_sep>p=4<line_sep>ii=np.arange(p)<line_sep>cm=r<power>np.abs(np.subtract.outer(ii ii))<line_sep>fa=Factor(corr=cm n_factor=1 method='ml')<line_sep>rslt=fa.fit()<if_stmt>rslt.loadings[0 0]<l>0<block_start>rslt.loadings[: 0]<augmul>-1<block_end># R solution, but our likelihood is higher
# uniq = np.r_[0.8392472054, 0.5820958187, 0.5820958187, 0.8392472054]
# load = np.asarray([[0.4009399224, 0.6464550935, 0.6464550935,
# 0.4009399224]]).T
# l1 = fa.loglike(fa._pack(load, uniq))
# l2 = fa.loglike(fa._pack(rslt.loadings, rslt.uniqueness))
# So use a smoke test
uniq=np.r_[0.85290232 0.60916033 0.55382266 0.82610666]<line_sep>load=np.asarray([[0.38353316] [0.62517171] [0.66796508] [0.4170052]])<line_sep>assert_allclose(load rslt.loadings rtol=1e-3 atol=1e-3)<line_sep>assert_allclose(uniq rslt.uniqueness rtol=1e-3 atol=1e-3)<line_sep>assert_equal(rslt.df 2)<block_end><def_stmt>test_2factor <block_start>"""
# R code:
r = 0.4
p = 6
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
factanal(covmat=cm, factors=2)
"""<line_sep>r=0.4<line_sep>p=6<line_sep>ii=np.arange(p)<line_sep>cm=r<power>np.abs(np.subtract.outer(ii ii))<line_sep>fa=Factor(corr=cm n_factor=2 nobs=100 method='ml')<line_sep>rslt=fa.fit()<for_stmt>j 0 1<block_start><if_stmt>rslt.loadings[0 j]<l>0<block_start>rslt.loadings[: j]<augmul>-1<block_end><block_end>uniq=np.r_[0.782 0.367 0.696 0.696 0.367 0.782]<line_sep>assert_allclose(uniq rslt.uniqueness rtol=1e-3 atol=1e-3)<line_sep>loads=[np.r_[0.323 0.586 0.519 0.519 0.586 0.323] np.r_[0.337 0.538 0.187 -0.187 -0.538 -0.337]]<for_stmt>k 0 1<block_start><if_stmt>np.dot(loads[k] rslt.loadings[: k])<l>0<block_start>loads[k]<augmul>-1<block_end>assert_allclose(loads[k] rslt.loadings[: k] rtol=1e-3 atol=1e-3)<block_end>assert_equal(rslt.df 4)<line_sep># Smoke test for standard errors
e=np.asarray([0.11056836 0.05191071 0.09836349 0.09836349 0.05191071 0.11056836])<line_sep>assert_allclose(rslt.uniq_stderr e atol=1e-4)<line_sep>e=np.asarray([[0.08842151 0.08842151] [0.06058582 0.06058582] [0.08339874 0.08339874] [0.08339874 0.08339874] [0.06058582 0.06058582] [0.08842151 0.08842151]])<line_sep>assert_allclose(rslt.load_stderr e atol=1e-4)<block_end> |
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>logging<import_from_stmt>..base float_ int_<import_from_stmt>.util dataset_home download checksum archive_extract checkpoint<line_sep>log=logging.getLogger(__name__)<line_sep>_URL='http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'<line_sep>_SHA1='b22ebbd7f3c4384ebc9ba3152939186d3750b902'<class_stmt>STL10(object)<block_start>'''
The STL-10 dataset [1]
http://cs.stanford.edu/~acoates/stl10
References:
[1]: An Analysis of Single Layer Networks in Unsupervised Feature Learning,
<NAME>, <NAME>, <NAME>, AISTATS, 2011.
'''<def_stmt>__init__ self<block_start>self.name='stl10'<line_sep>self.n_classes=10<line_sep>self.n_train=5000<line_sep>self.n_test=8000<line_sep>self.n_unlabeled=100000<line_sep>self.img_shape=(3 96 96)<line_sep>self.data_dir=os.path.join(dataset_home self.name)<line_sep>self._npz_path=os.path.join(self.data_dir 'stl10.npz')<line_sep>self._install()<line_sep>self._arrays,self.folds=self._load()<block_end><def_stmt>arrays self dp_dtypes=<false><block_start>x_train,y_train,x_test,y_test,x_unlabeled=self._arrays<if_stmt>dp_dtypes<block_start>x_train=x_train.astype(float_)<line_sep>y_train=y_train.astype(int_)<line_sep>x_test=x_test.astype(float_)<line_sep>y_test=y_test.astype(int_)<line_sep>x_unlabeled=x_unlabeled.astype(float_)<block_end><return>x_train y_train x_test y_test x_unlabeled<block_end><def_stmt>_install self<block_start>checkpoint_file=os.path.join(self.data_dir '__install_check')<with_stmt>checkpoint(checkpoint_file)<as>exists<block_start><if_stmt>exists<block_start><return><block_end>log.info('Downloading %s' _URL)<line_sep>filepath=download(_URL self.data_dir)<if_stmt>_SHA1<ne>checksum(filepath method='sha1')<block_start><raise>RuntimeError('Checksum mismatch for %s.'%_URL)<block_end>log.info('Unpacking %s' filepath)<line_sep>archive_extract(filepath self.data_dir)<line_sep>unpack_dir=os.path.join(self.data_dir 'stl10_binary')<line_sep>log.info('Converting data to Numpy arrays')<line_sep>filenames=['train_X.bin' 'train_y.bin' 'test_X.bin' 'test_y.bin' 'unlabeled_X.bin']<def_stmt>bin2numpy filepath<block_start><with_stmt>open(filepath 'rb')<as>f<block_start>arr=np.fromfile(f dtype=np.uint8)<if_stmt>'_X'<in>filepath<block_start>arr=np.reshape(arr (-1 )+self.img_shape)<block_end><return>arr<block_end><block_end>filepaths=[os.path.join(unpack_dir f)<for>f filenames]<line_sep>x_train,y_train,x_test,y_test,x_unlabeled=map(bin2numpy filepaths)<line_sep>folds=[]<with_stmt>open(os.path.join(unpack_dir 'fold_indices.txt') 'r')<as>f<block_start><for_stmt>line f<block_start>folds.append([int(s)<for>s line.strip().split(' ')])<block_end><block_end>folds=np.array(folds)<with_stmt>open(self._npz_path 'wb')<as>f<block_start>np.savez(f x_train=x_train y_train=y_train x_test=x_test y_test=y_test x_unlabeled=x_unlabeled folds=folds)<block_end><block_end><block_end><def_stmt>_load self<block_start><with_stmt>open(self._npz_path 'rb')<as>f<block_start>dic=np.load(f)<line_sep><return>((dic['x_train'] dic['y_train'] dic['x_test'] dic['y_test'] dic['x_unlabeled']) dic['folds'])<block_end><block_end><block_end> |
<import_stmt>data_utils<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>math random itertools<import_stmt>pickle<import_stmt>time<import_stmt>json<import_stmt>os<import_stmt>math<import_stmt>data_utils<import_stmt>pickle<import_from_stmt>sklearn.ensemble RandomForestClassifier<import_from_stmt>sklearn.metrics accuracy_score precision_score recall_score roc_curve auc precision_recall_curve<import_stmt>copy<import_from_stmt>scipy.stats sem<line_sep>print("Starting TSTR experiment.")<line_sep>print("loading data...")<line_sep>samples,labels=data_utils.eICU_task()<line_sep>train_seqs=samples['train'].reshape(-1 16 4)<line_sep>vali_seqs=samples['vali'].reshape(-1 16 4)<line_sep>test_seqs=samples['test'].reshape(-1 16 4)<line_sep>train_targets=labels['train']<line_sep>vali_targets=labels['vali']<line_sep>test_targets=labels['test']<line_sep>train_seqs,vali_seqs,test_seqs=data_utils.scale_data(train_seqs vali_seqs test_seqs)<line_sep>print("data loaded.")<line_sep># iterate over all dataset versions generated after running the GAN for 5 times
aurocs_all_runs=[]<line_sep>auprcs_all_runs=[]<for_stmt>oo range(5)<block_start>print(oo)<line_sep># find the best "dataset epoch", meaning the GAN epoch that generated the dataset
# validation is only done in some of the tasks, and the others are considered unknown
# (use validation set to pick best GAN epoch, then get result on test set)
vali_seqs_r=vali_seqs.reshape((vali_seqs.shape[0] -1))<line_sep>test_seqs_r=test_seqs.reshape((test_seqs.shape[0] -1))<line_sep>all_aurocs_exp=[]<line_sep>all_auprcs_exp=[]<for_stmt>nn np.arange(50 1050 50)<block_start><with_stmt>open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r'+str(oo)+'_'+str(nn)+'.pk' 'rb')<as>f<block_start>synth_data=pickle.load(file=f)<block_end><with_stmt>open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r'+str(oo)+'_'+str(nn)+'.pk' 'rb')<as>f<block_start>synth_labels=pickle.load(file=f)<block_end>train_seqs=synth_data<line_sep>train_targets=synth_labels<line_sep>train_seqs_r=train_seqs.reshape((train_seqs.shape[0] -1))<line_sep>all_aurocs=[]<line_sep>all_auprcs=[]<line_sep># in case we want to train each random forest multiple times with each dataset
<for_stmt>exp_num range(1)<block_start>accuracies=[]<line_sep>precisions=[]<line_sep>recalls=[]<line_sep>aurocs=[]<line_sep>auprcs=[]<for_stmt>col_num range(train_targets.shape[1])<block_start>estimator=RandomForestClassifier(n_estimators=100)<line_sep>estimator.fit(train_seqs_r train_targets[: col_num])<line_sep>accuracies.append(estimator.score(vali_seqs_r vali_targets[: col_num]))<line_sep>preds=estimator.predict(vali_seqs_r)<line_sep>precisions.append(precision_score(y_pred=preds y_true=vali_targets[: col_num]))<line_sep>recalls.append(recall_score(y_pred=preds y_true=vali_targets[: col_num]))<line_sep>preds=estimator.predict_proba(vali_seqs_r)<line_sep>fpr,tpr,thresholds=roc_curve(vali_targets[: col_num] preds[: 1])<line_sep>aurocs.append(auc(fpr tpr))<line_sep>precision,recall,thresholds=precision_recall_curve(vali_targets[: col_num] preds[: 1])<line_sep>auprcs.append(auc(recall precision))<block_end>all_aurocs.append(aurocs)<line_sep>all_auprcs.append(auprcs)<block_end>all_aurocs_exp.append(all_aurocs)<line_sep>all_auprcs_exp.append(all_auprcs)<block_end>#with open('all_aurocs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_aurocs_exp)
#with open('all_auprcs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_auprcs_exp)
best_idx=np.argmax(np.array(all_aurocs_exp).sum(axis=1)[: [0 2 4]].sum(axis=1)+np.array(all_auprcs_exp).sum(axis=1)[: [0 2 4]].sum(axis=1))<line_sep>best=np.arange(50 1050 50)[best_idx]<with_stmt>open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r'+str(oo)+'_'+str(best)+'.pk' 'rb')<as>f<block_start>synth_data=pickle.load(file=f)<block_end><with_stmt>open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r'+str(oo)+'_'+str(best)+'.pk' 'rb')<as>f<block_start>synth_labels=pickle.load(file=f)<block_end>train_seqs=synth_data<line_sep>train_targets=synth_labels<line_sep>train_seqs_r=train_seqs.reshape((train_seqs.shape[0] -1))<line_sep>accuracies=[]<line_sep>precisions=[]<line_sep>recalls=[]<line_sep>aurocs=[]<line_sep>auprcs=[]<for_stmt>col_num range(train_targets.shape[1])<block_start>estimator=RandomForestClassifier(n_estimators=100)<line_sep>estimator.fit(train_seqs_r train_targets[: col_num])<line_sep>accuracies.append(estimator.score(test_seqs_r test_targets[: col_num]))<line_sep>preds=estimator.predict(test_seqs_r)<line_sep>precisions.append(precision_score(y_pred=preds y_true=test_targets[: col_num]))<line_sep>recalls.append(recall_score(y_pred=preds y_true=test_targets[: col_num]))<line_sep>preds=estimator.predict_proba(test_seqs_r)<line_sep>fpr,tpr,thresholds=roc_curve(test_targets[: col_num] preds[: 1])<line_sep>aurocs.append(auc(fpr tpr))<line_sep>precision,recall,thresholds=precision_recall_curve(test_targets[: col_num] preds[: 1])<line_sep>auprcs.append(auc(recall precision))<block_end>print(accuracies)<line_sep>print(precisions)<line_sep>print(recalls)<line_sep>print(aurocs)<line_sep>print(auprcs)<line_sep>print("----------------------------")<line_sep>aurocs_all_runs.append(aurocs)<line_sep>auprcs_all_runs.append(auprcs)<block_end>allr=np.vstack(aurocs_all_runs)<line_sep>allp=np.vstack(auprcs_all_runs)<line_sep>tstr_aurocs_mean=allr.mean(axis=0)<line_sep>tstr_aurocs_sem=sem(allr axis=0)<line_sep>tstr_auprcs_mean=allp.mean(axis=0)<line_sep>tstr_auprcs_sem=sem(allp axis=0)<line_sep># get AUROC/AUPRC for real, random data
print("Experiment with real data.")<line_sep>print("loading data...")<line_sep>samples,labels=data_utils.eICU_task()<line_sep>train_seqs=samples['train'].reshape(-1 16 4)<line_sep>vali_seqs=samples['vali'].reshape(-1 16 4)<line_sep>test_seqs=samples['test'].reshape(-1 16 4)<line_sep>train_targets=labels['train']<line_sep>vali_targets=labels['vali']<line_sep>test_targets=labels['test']<line_sep>train_seqs,vali_seqs,test_seqs=data_utils.scale_data(train_seqs vali_seqs test_seqs)<line_sep>print("data loaded.")<line_sep>train_seqs_r=train_seqs.reshape((train_seqs.shape[0] -1))<line_sep>vali_seqs_r=vali_seqs.reshape((vali_seqs.shape[0] -1))<line_sep>test_seqs_r=test_seqs.reshape((test_seqs.shape[0] -1))<line_sep>aurocs_all=[]<line_sep>auprcs_all=[]<for_stmt>i range(5)<block_start>accuracies=[]<line_sep>precisions=[]<line_sep>recalls=[]<line_sep>aurocs=[]<line_sep>auprcs=[]<for_stmt>col_num range(train_targets.shape[1])<block_start>estimator=RandomForestClassifier(n_estimators=100)<line_sep>estimator.fit(train_seqs_r train_targets[: col_num])<line_sep>accuracies.append(estimator.score(test_seqs_r test_targets[: col_num]))<line_sep>preds=estimator.predict(test_seqs_r)<line_sep>precisions.append(precision_score(y_pred=preds y_true=test_targets[: col_num]))<line_sep>recalls.append(recall_score(y_pred=preds y_true=test_targets[: col_num]))<line_sep>preds=estimator.predict_proba(test_seqs_r)<line_sep>fpr,tpr,thresholds=roc_curve(test_targets[: col_num] preds[: 1])<line_sep>aurocs.append(auc(fpr tpr))<line_sep>precision,recall,thresholds=precision_recall_curve(test_targets[: col_num] preds[: 1])<line_sep>auprcs.append(auc(recall precision))<block_end>print(accuracies)<line_sep>print(precisions)<line_sep>print(recalls)<line_sep>print(aurocs)<line_sep>print(auprcs)<line_sep>aurocs_all.append(aurocs)<line_sep>auprcs_all.append(auprcs)<block_end>real_aurocs_mean=np.array(aurocs_all).mean(axis=0)<line_sep>real_aurocs_sem=sem(aurocs_all axis=0)<line_sep>real_auprcs_mean=np.array(auprcs_all).mean(axis=0)<line_sep>real_auprcs_sem=sem(auprcs_all axis=0)<line_sep>print("Experiment with random predictions.")<line_sep>#random score
test_targets_random=copy.deepcopy(test_targets)<line_sep>random.shuffle(test_targets_random)<line_sep>accuracies=[]<line_sep>precisions=[]<line_sep>recalls=[]<line_sep>aurocs=[]<line_sep>auprcs=[]<for_stmt>col_num range(train_targets.shape[1])<block_start>accuracies.append(accuracy_score(y_pred=test_targets_random[: col_num] y_true=test_targets[: col_num]))<line_sep>precisions.append(precision_score(y_pred=test_targets_random[: col_num] y_true=test_targets[: col_num]))<line_sep>recalls.append(recall_score(y_pred=test_targets_random[: col_num] y_true=test_targets[: col_num]))<line_sep>preds=np.random.rand(len(test_targets[: col_num]))<line_sep>fpr,tpr,thresholds=roc_curve(test_targets[: col_num] preds)<line_sep>aurocs.append(auc(fpr tpr))<line_sep>precision,recall,thresholds=precision_recall_curve(test_targets[: col_num] preds)<line_sep>auprcs.append(auc(recall precision))<block_end>print(accuracies)<line_sep>print(precisions)<line_sep>print(recalls)<line_sep>print(aurocs)<line_sep>print(auprcs)<line_sep>random_aurocs=aurocs<line_sep>random_auprcs=auprcs<line_sep>print("Results")<line_sep>print("------------")<line_sep>print("------------")<line_sep>print("TSTR")<line_sep>print(tstr_aurocs_mean)<line_sep>print(tstr_aurocs_sem)<line_sep>print(tstr_auprcs_mean)<line_sep>print(tstr_auprcs_sem)<line_sep>print("------------")<line_sep>print("Real")<line_sep>print(real_aurocs_mean)<line_sep>print(real_aurocs_sem)<line_sep>print(real_auprcs_mean)<line_sep>print(real_auprcs_sem)<line_sep>print("------------")<line_sep>print("Random")<line_sep>print(random_aurocs)<line_sep>print(random_auprcs)<line_sep> |
<import_from_stmt>chalice Blueprint<import_from_stmt>chalice Cron<import_from_stmt>chalicelib _overrides<line_sep>app=Blueprint(__name__)<line_sep>_overrides.chalice_app(app)<line_sep> |
<import_stmt>config<import_stmt>constants<as>c<line_sep>WEBSITE_URL=config.KIBANA_URL<line_sep>ADMIN_EMAIL=config.ADMIN_EMAIL<line_sep>USER_EMAIL=config.USER_EMAIL<line_sep>EMAIL_CHARSET='UTF-8'<line_sep>HEADER="<html>"<line_sep>FOOTER="</html>"<line_sep>EXPERIMENT_STATUS_EMAIL_TEMPLATE="""
<p>Hello,</p>
<p>Your experiment has ended.</p>
<p><b>Name:</b> %s</p>
<p><b>Status:</b> %s</p>
<p><b>Status Msg:</b> %s</p>
<p><a href="%s">View Dashboard</a></p>
<p><b>Experiment Results:</b></p>
<p>%s</p>
<p><b>Experiment Config:</b></p>
<p>%s</p>
<p><b>Thanks,<br>
Team</p>
"""<line_sep>EXPERIMENT_STATUS_EMAIL_BODY=(HEADER+EXPERIMENT_STATUS_EMAIL_TEMPLATE+FOOTER)<line_sep>EXPERIMENT_STATUS_EMAIL={'subject':'New Experiment Results' 'body':EXPERIMENT_STATUS_EMAIL_BODY}<line_sep> |
# Author: <NAME>
# Datetime:2021/7/3
# Copyright belongs to the author.
# Please indicate the source for reprinting.
<import_stmt>platform<import_stmt>os<import_from_stmt>distutils.sysconfig get_python_lib<import_from_stmt>qpt.kernel.qlog Logging<def_stmt>init_wrapper var=<true><block_start><def_stmt>i_wrapper func<block_start><if_stmt>var<block_start>@property<def_stmt>render self<block_start><if_stmt>func.__name__<in>self.memory<block_start>out=self.memory[func.__name__]<block_end><else_stmt><block_start>out=func(self)<line_sep>self.memory[func.__name__]=out<block_end><return>out<block_end><block_end><else_stmt><block_start><def_stmt>render self *args **kwargs<block_start><if_stmt>func.__name__<in>self.memory<block_start>out=self.memory[func.__name__]<block_end><else_stmt><block_start>out=func(self *args **kwargs)<line_sep>self.memory[func.__name__]=out<block_end><return>out<block_end><block_end><return>render<block_end><return>i_wrapper<block_end><class_stmt>QPTMemory<block_start><def_stmt>__init__ self<block_start>self.memory=dict()<block_end><def_stmt>set_mem self name variable<block_start>self.memory[name]=variable<line_sep><return>variable<block_end><def_stmt>free_mem self name<block_start>self.memory.pop(name)<block_end>@init_wrapper()<def_stmt>platform_bit self<block_start>arc=platform.machine()<line_sep>Logging.debug(f"操作系统位数:{arc}")<line_sep><return>arc<block_end>@init_wrapper()<def_stmt>platform_os self<block_start>p_os=platform.system()<line_sep>Logging.debug(f"操作系统类型:{p_os}")<line_sep><return>p_os<block_end>@init_wrapper()<def_stmt>site_packages_path self<block_start>site_package_path=os.path.abspath(get_python_lib())<line_sep><return>site_package_path<block_end>@init_wrapper()<def_stmt>pip_tool self<block_start><import_from_stmt>qpt.kernel.qinterpreter PipTools<line_sep>pip_tools=PipTools()<line_sep><return>pip_tools<block_end>@init_wrapper()<def_stmt>get_win32con self<block_start><import_stmt>win32con<line_sep><return>win32con<block_end>@init_wrapper()<def_stmt>get_win32api self<block_start><import_stmt>win32api<line_sep><return>win32api<block_end>@init_wrapper(var=<false>)<def_stmt>get_env_vars self work_dir="."<block_start><return>get_env_vars(work_dir)<block_end><block_end>QPT_MEMORY=QPTMemory()<def_stmt>check_bit <block_start>arc=QPT_MEMORY.platform_bit<assert_stmt>"64"<in>arc "当前QPT不支持32位操作系统"<block_end><def_stmt>check_os <block_start>p_os=QPT_MEMORY.platform_os<assert_stmt>"Windows"<in>p_os "当前QPT只支持Windows系统"<block_end>IGNORE_ENV_FIELD=["conda" "Conda" "Python" "python"]<def_stmt>get_env_vars work_dir="."<block_start>"""
Get the dict of environment variables that should be set for the current run
:param work_dir: working directory; it is added to PYTHONPATH and its opt/CUDA subdirectory is appended to PATH
:return: dict
"""<line_sep>env_vars=dict()<line_sep># Set PATH ENV
path_env=os.environ.get("PATH").split(";")<line_sep>pre_add_env=os.path.abspath("./Python/Lib/site-packages")+";"+os.path.abspath("./Python/Lib")+";"+os.path.abspath("./Python/Lib/ext")+";"+os.path.abspath("./Python")+";"+os.path.abspath("./Python/Scripts")+";"<for_stmt>pe path_env<block_start><if_stmt>pe<block_start>add_flag=<true><for_stmt>ief IGNORE_ENV_FIELD<block_start><if_stmt>ief<in>pe<block_start>add_flag=<false><line_sep><break><block_end><block_end><if_stmt>add_flag<block_start>pre_add_env<augadd>pe+";"<block_end><block_end><block_end>env_vars["PATH"]=pre_add_env+"%SYSTEMROOT%/System32/WindowsPowerShell/v1.0;"+"C:/Windows/System32/WindowsPowerShell/v1.0;"+"%ProgramFiles%/WindowsPowerShell/Modules;"+"%SystemRoot%/system32/WindowsPowerShell/v1.0/Modules;"+f"{os.path.join(os.path.abspath(work_dir) 'opt/CUDA')};"<line_sep># Set PYTHON PATH ENV
env_vars["PYTHONPATH"]=os.path.abspath("./Python/Lib/site-packages")+";"+work_dir+";"+os.path.abspath("./Python")<line_sep>os_env=os.environ.copy()<line_sep>os_env.update(env_vars)<if_stmt>QPT_MODE<and>QPT_MODE.lower()<eq>"debug"<block_start>Logging.debug(msg="Python所识别到的环境变量如下:\n"+"".join([_ek+":"+_e_v+" \n"<for>_ek,_ev env_vars.items()<for>_e_v _ev.split(";")]))<block_end><return>os_env<block_end>PYTHON_IGNORE_DIRS=[".idea" ".git" ".github" "venv"]<line_sep># 被忽略的Python包
IGNORE_PACKAGES=["virtualenv" "pip" "setuptools" "cpython"]<line_sep># QPT运行状态 Run/Debug
QPT_MODE=os.getenv("QPT_MODE")<line_sep># Run state detected by QPT: Run / local Run - meant to warn developers and avoid package bloat
QPT_RUN_MODE=<none><class_stmt>CheckRun<block_start>@staticmethod<def_stmt>make_run_file configs_path<block_start><with_stmt>open(os.path.join(configs_path "run_act.lock") "w")<as>f<block_start>f.write("Run Done")<block_end><block_end>@staticmethod<def_stmt>check_run_file configs_path<block_start><global>QPT_RUN_MODE<if_stmt>QPT_RUN_MODE<is><none><block_start>QPT_RUN_MODE=os.path.exists(os.path.join(configs_path "run_act.lock"))<block_end><return>QPT_RUN_MODE<block_end><block_end><def_stmt>check_all # check the operating system
<block_start>check_os()<line_sep># check the architecture
check_bit()<block_end>check_all()<line_sep> |
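A short usage sketch of the caching above: init_wrapper(var=True) turns a method into a property whose result is memoised in QPTMemory.memory under the function name, while set_mem/free_mem manage ad-hoc entries. The printed values naturally depend on the machine this runs on:

import os

arch = QPT_MEMORY.platform_bit        # first access runs platform.machine() and caches it
arch_again = QPT_MEMORY.platform_bit  # second access is served from QPT_MEMORY.memory
assert arch == arch_again

QPT_MEMORY.set_mem("launch_dir", os.getcwd())  # store an arbitrary value by name
print(QPT_MEMORY.memory["launch_dir"])
QPT_MEMORY.free_mem("launch_dir")              # and drop it again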
<import_stmt>io<import_stmt>json<import_from_stmt>copy deepcopy<import_stmt>GetAwayUsers<import_stmt>demistomock<as>demisto<def_stmt>util_load_json path<block_start><with_stmt>io.open(path mode='r' encoding='utf-8')<as>f<block_start><return>json.loads(f.read())<block_end><block_end>away_user_data=util_load_json('test_data/away_user.json')<def_stmt>test_script_valid mocker<block_start>"""
Given:
When:
- Calling the GetAwayUsers script.
Then:
- Ensure expected outputs are returned.
"""<import_from_stmt>GetAwayUsers main<line_sep>return_results_mock=mocker.patch.object(GetAwayUsers 'return_results')<line_sep>away_user=away_user_data<line_sep>not_away_user=deepcopy(away_user_data)<line_sep>not_away_user['isAway']=<false><line_sep>mocker.patch.object(demisto 'executeCommand' return_value=[{'Type':'1' 'Contents':[away_user not_away_user]}])<line_sep>main()<line_sep>command_results=return_results_mock.call_args[0][0]<assert_stmt>command_results.outputs<eq>[{'email':'' 'id':'admin' 'name':'Admin' 'phone':'+650-123456' 'roles':{'demisto':['Administrator']} 'username':'admin'}]<block_end><def_stmt>test_script_invalid mocker<block_start>"""
Given:
When:
- Calling the GetAwayUsers script, and demisto.executeCommand returns an error for getUsers.
Then:
- Ensure error is returned.
"""<import_from_stmt>GetAwayUsers main<line_sep>error_entry_type:int=4<line_sep>mocker.patch.object(GetAwayUsers 'return_error')<line_sep>mocker.patch.object(demisto 'error')<line_sep>away_user=away_user_data<line_sep>not_away_user=deepcopy(away_user_data)<line_sep>not_away_user['isAway']=<false><line_sep>mocker.patch.object(demisto 'executeCommand' return_value=[{'Type':error_entry_type 'Contents':[away_user not_away_user]}])<line_sep>main()<assert_stmt>GetAwayUsers.return_error.called<block_end> |
<import_stmt>os<import_from_stmt>datasets.types.data_split DataSplit<import_from_stmt>datasets.SOT.constructor.base_interface SingleObjectTrackingDatasetConstructor<import_stmt>numpy<as>np<def_stmt>construct_TrackingNet constructor:SingleObjectTrackingDatasetConstructor seed<block_start>root_path=seed.root_path<line_sep>data_type=seed.data_split<line_sep>enable_set_ids=seed.enable_set_ids<line_sep>sequence_name_class_map_file_path=seed.sequence_name_class_map_file_path<if_stmt>data_type<ne>DataSplit.Training<and>enable_set_ids<is><not><none><block_start><raise>Exception("unsupported configuration")<block_end>sequence_name_class_map={}<if_stmt>sequence_name_class_map_file_path<is><none><block_start>sequence_name_class_map_file_path=os.path.join(os.path.dirname(__file__) 'data_specs' 'trackingnet_sequence_classes_map.txt')<block_end><for_stmt>line open(sequence_name_class_map_file_path 'r' encoding='utf-8')<block_start>line=line.strip()<line_sep>name,category=line.split('\t')<line_sep>sequence_name_class_map[name]=category<block_end>categories=set(sequence_name_class_map.values())<line_sep>category_id_name_map={i:v<for>i,v enumerate(categories)}<line_sep>category_name_id_map={v:i<for>i,v enumerate(categories)}<if_stmt>enable_set_ids<is><not><none><block_start>trackingNetSubsets=['TRAIN_{}'.format(v)<for>v enable_set_ids]<block_end><else_stmt><block_start>trackingNetSubsets=[]<if_stmt>data_type&DataSplit.Training<block_start>trackingNetSubsets=['TRAIN_{}'.format(v)<for>v range(12)]<block_end><if_stmt>data_type&DataSplit.Testing<block_start>trackingNetSubsets.append('TEST')<block_end><block_end>sequence_list=[]<for_stmt>subset trackingNetSubsets<block_start>subset_path=os.path.join(root_path subset)<line_sep>frames_path=os.path.join(subset_path 'frames')<line_sep>anno_path=os.path.join(subset_path 'anno')<line_sep>bounding_box_annotation_files=os.listdir(anno_path)<line_sep>bounding_box_annotation_files=[bounding_box_annotation_file<for>bounding_box_annotation_file bounding_box_annotation_files<if>bounding_box_annotation_file.endswith('.txt')]<line_sep>bounding_box_annotation_files.sort()<line_sep>sequences=[sequence[:-4]<for>sequence bounding_box_annotation_files]<for_stmt>sequence,bounding_box_annotation_file zip(sequences bounding_box_annotation_files)<block_start>sequence_image_path=os.path.join(frames_path sequence)<line_sep>bounding_box_annotation_file_path=os.path.join(anno_path bounding_box_annotation_file)<line_sep>sequence_list.append((sequence sequence_image_path bounding_box_annotation_file_path))<block_end><block_end>constructor.set_category_id_name_map(category_id_name_map)<line_sep>constructor.set_total_number_of_sequences(len(sequence_list))<for_stmt>sequence,sequence_image_path,sequence_bounding_box_annotation_file_path sequence_list<block_start><with_stmt>constructor.new_sequence(category_name_id_map[sequence_name_class_map[sequence]])<as>sequence_constructor<block_start>sequence_constructor.set_name(sequence)<line_sep>bounding_boxes=np.loadtxt(sequence_bounding_box_annotation_file_path dtype=np.float delimiter=',')<line_sep>images=os.listdir(sequence_image_path)<line_sep>images=[image<for>image images<if>image.endswith('.jpg')]<if_stmt>bounding_boxes.ndim<eq>2<block_start>is_testing_sequence=<false><assert_stmt>len(images)<eq>len(bounding_boxes)<block_end><else_stmt><block_start>is_testing_sequence=<true><assert_stmt>bounding_boxes.ndim<eq>1<and>bounding_boxes.shape[0]<eq>4<block_end><for_stmt>i 
range(len(images))<block_start>image_file_name='{}.jpg'.format(i)<line_sep>image_file_path=os.path.join(sequence_image_path image_file_name)<with_stmt>sequence_constructor.new_frame()<as>frame_constructor<block_start>frame_constructor.set_path(image_file_path)<if_stmt>is_testing_sequence<block_start><if_stmt>i<eq>0<block_start>frame_constructor.set_bounding_box(bounding_boxes.tolist())<block_end><block_end><else_stmt><block_start>frame_constructor.set_bounding_box(bounding_boxes[i].tolist())<block_end><block_end><block_end><block_end><block_end><block_end> |
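construct_TrackingNet only reads four attributes from its seed argument (root_path, data_split, enable_set_ids, sequence_name_class_map_file_path) and then drives the constructor callbacks. A minimal, purely illustrative way to invoke it follows; the SimpleNamespace seed and my_constructor are stand-ins, since the real seed and SingleObjectTrackingDatasetConstructor classes live elsewhere in the datasets package:

from types import SimpleNamespace

seed = SimpleNamespace(
    root_path='/data/TrackingNet',           # assumed dataset location
    data_split=DataSplit.Training,
    enable_set_ids=[0, 1],                   # restrict to TRAIN_0 and TRAIN_1
    sequence_name_class_map_file_path=None,  # fall back to the bundled map file
)
construct_TrackingNet(my_constructor, seed)  # my_constructor: hypothetical constructor instance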
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Key Vault component edit."""<import_from_stmt>.._version VERSION<import_from_stmt>.ce_simple_settings CESimpleSettings<line_sep>__version__=VERSION<line_sep>__author__="<NAME>"<class_stmt>CEKeyVault(CESimpleSettings)<block_start>"""Key Vault settings edit component."""<line_sep>_DESCRIPTION="Key Vault Setup"<line_sep>_COMP_PATH="KeyVault"<line_sep>_HELP_TEXT="""
Set the parameters for your Key Vault here to store secret
values such as API Keys.<br>
Check <b>UseKeyring</b> if you have Keyring installed and want to be
able to cache the secrets locally. (Note: keyring is not supported
by default on many Linux distributions)<br>
The first five items are mandatory.<br>
The value for <b>Authority</b> should be set to the Azure Cloud that you use.<br>
Options are:
<ul>
<li>global (Commercial Azure cloud)</li>
<li>usgov (US Government cloud)</li>
<li>cn (China national cloud)</li>
<li>de (German national cloud)</li>
</ul>
The default is "global".<br>
"""<line_sep>_HELP_URI={"Key Vault Settings":("https://msticpy.readthedocs.io/en/latest/getting_started/"+"msticpyconfig.html#specifying-secrets-as-key-vault-secrets")}<block_end> |
"""converted from vga_8x8.bin """<line_sep>WIDTH=8<line_sep>HEIGHT=8<line_sep>FIRST=0x20<line_sep>LAST=0x7f<line_sep>_FONT=b'\x00\x00\x00\x00\x00\x00\x00\x00'<concat>b'\x18\x3c\x3c\x18\x18\x00\x18\x00'<concat>b'\x66\x66\x24\x00\x00\x00\x00\x00'<concat>b'\x6c\x6c\xfe\x6c\xfe\x6c\x6c\x00'<concat>b'\x18\x3e\x60\x3c\x06\x7c\x18\x00'<concat>b'\x00\xc6\xcc\x18\x30\x66\xc6\x00'<concat>b'\x38\x6c\x38\x76\xdc\xcc\x76\x00'<concat>b'\x18\x18\x30\x00\x00\x00\x00\x00'<concat>b'\x0c\x18\x30\x30\x30\x18\x0c\x00'<concat>b'\x30\x18\x0c\x0c\x0c\x18\x30\x00'<concat>b'\x00\x66\x3c\xff\x3c\x66\x00\x00'<concat>b'\x00\x18\x18\x7e\x18\x18\x00\x00'<concat>b'\x00\x00\x00\x00\x00\x18\x18\x30'<concat>b'\x00\x00\x00\x7e\x00\x00\x00\x00'<concat>b'\x00\x00\x00\x00\x00\x18\x18\x00'<concat>b'\x06\x0c\x18\x30\x60\xc0\x80\x00'<concat>b'\x38\x6c\xc6\xd6\xc6\x6c\x38\x00'<concat>b'\x18\x38\x18\x18\x18\x18\x7e\x00'<concat>b'\x7c\xc6\x06\x1c\x30\x66\xfe\x00'<concat>b'\x7c\xc6\x06\x3c\x06\xc6\x7c\x00'<concat>b'\x1c\x3c\x6c\xcc\xfe\x0c\x1e\x00'<concat>b'\xfe\xc0\xc0\xfc\x06\xc6\x7c\x00'<concat>b'\x38\x60\xc0\xfc\xc6\xc6\x7c\x00'<concat>b'\xfe\xc6\x0c\x18\x30\x30\x30\x00'<concat>b'\x7c\xc6\xc6\x7c\xc6\xc6\x7c\x00'<concat>b'\x7c\xc6\xc6\x7e\x06\x0c\x78\x00'<concat>b'\x00\x18\x18\x00\x00\x18\x18\x00'<concat>b'\x00\x18\x18\x00\x00\x18\x18\x30'<concat>b'\x06\x0c\x18\x30\x18\x0c\x06\x00'<concat>b'\x00\x00\x7e\x00\x00\x7e\x00\x00'<concat>b'\x60\x30\x18\x0c\x18\x30\x60\x00'<concat>b'\x7c\xc6\x0c\x18\x18\x00\x18\x00'<concat>b'\x7c\xc6\xde\xde\xde\xc0\x78\x00'<concat>b'\x38\x6c\xc6\xfe\xc6\xc6\xc6\x00'<concat>b'\xfc\x66\x66\x7c\x66\x66\xfc\x00'<concat>b'\x3c\x66\xc0\xc0\xc0\x66\x3c\x00'<concat>b'\xf8\x6c\x66\x66\x66\x6c\xf8\x00'<concat>b'\xfe\x62\x68\x78\x68\x62\xfe\x00'<concat>b'\xfe\x62\x68\x78\x68\x60\xf0\x00'<concat>b'\x3c\x66\xc0\xc0\xce\x66\x3a\x00'<concat>b'\xc6\xc6\xc6\xfe\xc6\xc6\xc6\x00'<concat>b'\x3c\x18\x18\x18\x18\x18\x3c\x00'<concat>b'\x1e\x0c\x0c\x0c\xcc\xcc\x78\x00'<concat>b'\xe6\x66\x6c\x78\x6c\x66\xe6\x00'<concat>b'\xf0\x60\x60\x60\x62\x66\xfe\x00'<concat>b'\xc6\xee\xfe\xfe\xd6\xc6\xc6\x00'<concat>b'\xc6\xe6\xf6\xde\xce\xc6\xc6\x00'<concat>b'\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00'<concat>b'\xfc\x66\x66\x7c\x60\x60\xf0\x00'<concat>b'\x7c\xc6\xc6\xc6\xc6\xce\x7c\x0e'<concat>b'\xfc\x66\x66\x7c\x6c\x66\xe6\x00'<concat>b'\x3c\x66\x30\x18\x0c\x66\x3c\x00'<concat>b'\x7e\x7e\x5a\x18\x18\x18\x3c\x00'<concat>b'\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00'<concat>b'\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00'<concat>b'\xc6\xc6\xc6\xd6\xd6\xfe\x6c\x00'<concat>b'\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00'<concat>b'\x66\x66\x66\x3c\x18\x18\x3c\x00'<concat>b'\xfe\xc6\x8c\x18\x32\x66\xfe\x00'<concat>b'\x3c\x30\x30\x30\x30\x30\x3c\x00'<concat>b'\xc0\x60\x30\x18\x0c\x06\x02\x00'<concat>b'\x3c\x0c\x0c\x0c\x0c\x0c\x3c\x00'<concat>b'\x10\x38\x6c\xc6\x00\x00\x00\x00'<concat>b'\x00\x00\x00\x00\x00\x00\x00\xff'<concat>b'\x30\x18\x0c\x00\x00\x00\x00\x00'<concat>b'\x00\x00\x78\x0c\x7c\xcc\x76\x00'<concat>b'\xe0\x60\x7c\x66\x66\x66\xdc\x00'<concat>b'\x00\x00\x7c\xc6\xc0\xc6\x7c\x00'<concat>b'\x1c\x0c\x7c\xcc\xcc\xcc\x76\x00'<concat>b'\x00\x00\x7c\xc6\xfe\xc0\x7c\x00'<concat>b'\x3c\x66\x60\xf8\x60\x60\xf0\x00'<concat>b'\x00\x00\x76\xcc\xcc\x7c\x0c\xf8'<concat>b'\xe0\x60\x6c\x76\x66\x66\xe6\x00'<concat>b'\x18\x00\x38\x18\x18\x18\x3c\x00'<concat>b'\x06\x00\x06\x06\x06\x66\x66\x3c'<concat>b'\xe0\x60\x66\x6c\x78\x6c\xe6\x00'<concat>b'\x38\x18\x18\x18\x18\x18\x3c\x00'<concat>b'\x00\x00\xec\xfe\xd6\xd6\xd6\x00'<concat>b'\x00\x00\xdc\x66\x66\x66\x66\x00'<concat>b'\x00\x00\x7c\xc6\xc6\xc6\x7c\x00'
<concat>b'\x00\x00\xdc\x66\x66\x7c\x60\xf0'<concat>b'\x00\x00\x76\xcc\xcc\x7c\x0c\x1e'<concat>b'\x00\x00\xdc\x76\x60\x60\xf0\x00'<concat>b'\x00\x00\x7e\xc0\x7c\x06\xfc\x00'<concat>b'\x30\x30\xfc\x30\x30\x36\x1c\x00'<concat>b'\x00\x00\xcc\xcc\xcc\xcc\x76\x00'<concat>b'\x00\x00\xc6\xc6\xc6\x6c\x38\x00'<concat>b'\x00\x00\xc6\xd6\xd6\xfe\x6c\x00'<concat>b'\x00\x00\xc6\x6c\x38\x6c\xc6\x00'<concat>b'\x00\x00\xc6\xc6\xc6\x7e\x06\xfc'<concat>b'\x00\x00\x7e\x4c\x18\x32\x7e\x00'<concat>b'\x0e\x18\x18\x70\x18\x18\x0e\x00'<concat>b'\x18\x18\x18\x18\x18\x18\x18\x00'<concat>b'\x70\x18\x18\x0e\x18\x18\x70\x00'<concat>b'\x76\xdc\x00\x00\x00\x00\x00\x00'<concat>b'\x00\x10\x38\x6c\xc6\xc6\xfe\x00'<line_sep>FONT=memoryview(_FONT)<line_sep> |
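The dump above stores one glyph per HEIGHT bytes (one byte per pixel row) starting at code point FIRST, so a character's rows sit at offset (ord(ch) - FIRST) * HEIGHT. A small rendering sketch follows; it assumes the usual VGA convention that the most significant bit is the leftmost pixel, which is an assumption about this particular dump rather than something stated in it:

def glyph_rows(ch):
    index = ord(ch) - FIRST
    if not 0 <= index <= LAST - FIRST:
        raise ValueError("character outside of font range")
    offset = index * HEIGHT
    return FONT[offset:offset + HEIGHT]   # HEIGHT bytes, one per pixel row

def render(ch):
    for row in glyph_rows(ch):
        # assumed bit order: MSB = leftmost pixel
        print(''.join('#' if row & (0x80 >> bit) else '.' for bit in range(WIDTH)))

render('A')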
<import_stmt>os<import_stmt>joblib<import_stmt>pandas<as>pd<import_from_stmt>sklearn.preprocessing StandardScaler MinMaxScaler<class_stmt>FeatureNorm(object)<block_start><def_stmt>__init__ self type='minmax'<block_start>self.type=type<block_end><def_stmt>__call__ self x mode='train' model_dir='../weights' name='scaler'<block_start><assert_stmt>len(x.shape)<eq>2 "Input rank for FeatureNorm should be 2"<if_stmt>self.type<eq>'standard'<block_start>scaler=StandardScaler()<block_end><elif_stmt>self.type<eq>'minmax'<block_start>scaler=MinMaxScaler()<block_end><else_stmt><block_start><raise>ValueError("Unsupported norm type yet: {}".format(self.type))<block_end><if_stmt>mode<eq>'train'<block_start>scaler.fit(x)<line_sep>joblib.dump(scaler os.path.join(model_dir name+'.pkl'))<block_end><else_stmt><block_start>scaler=joblib.load(os.path.join(model_dir name+'.pkl'))<block_end>output=scaler.transform(x)<try_stmt><block_start><return>pd.DataFrame(output index=x.index columns=x.columns)<block_end><except_stmt><block_start><return>output<block_end><block_end><block_end> |
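Usage sketch for FeatureNorm: fit and persist the scaler on the training frame, then reload the same pickle for inference. The frames, directory, and scaler name below are illustrative; note that model_dir must already exist because joblib.dump does not create it:

import os
import pandas as pd

train_df = pd.DataFrame({'temp': [20.1, 21.5, 19.8], 'load': [0.3, 0.7, 0.5]})
test_df = pd.DataFrame({'temp': [20.9], 'load': [0.6]})

os.makedirs('./weights', exist_ok=True)          # joblib.dump needs the directory to exist
norm = FeatureNorm(type='minmax')
train_scaled = norm(train_df, mode='train', model_dir='./weights', name='sensor_scaler')
test_scaled = norm(test_df, mode='test', model_dir='./weights', name='sensor_scaler')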
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
<import_from_stmt>msrest.serialization Model<class_stmt>ElasticPoolPerformanceLevelCapability(Model)<block_start>"""The Elastic Pool performance level capability.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar performance_level: The performance level for the pool.
:vartype performance_level:
~azure.mgmt.sql.models.PerformanceLevelCapability
:ivar sku: The sku.
:vartype sku: ~azure.mgmt.sql.models.Sku
:ivar supported_license_types: List of supported license types.
:vartype supported_license_types:
list[~azure.mgmt.sql.models.LicenseTypeCapability]
:ivar max_database_count: The maximum number of databases supported.
:vartype max_database_count: int
:ivar included_max_size: The included (free) max size for this performance
level.
:vartype included_max_size: ~azure.mgmt.sql.models.MaxSizeCapability
:ivar supported_max_sizes: The list of supported max sizes.
:vartype supported_max_sizes:
list[~azure.mgmt.sql.models.MaxSizeRangeCapability]
:ivar supported_per_database_max_sizes: The list of supported per database
max sizes.
:vartype supported_per_database_max_sizes:
list[~azure.mgmt.sql.models.MaxSizeRangeCapability]
:ivar supported_per_database_max_performance_levels: The list of supported
per database max performance levels.
:vartype supported_per_database_max_performance_levels:
list[~azure.mgmt.sql.models.ElasticPoolPerDatabaseMaxPerformanceLevelCapability]
:ivar status: The status of the capability. Possible values include:
'Visible', 'Available', 'Default', 'Disabled'
:vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
:param reason: The reason for the capability not being available.
:type reason: str
"""<line_sep>_validation={'performance_level':{'readonly':<true>} 'sku':{'readonly':<true>} 'supported_license_types':{'readonly':<true>} 'max_database_count':{'readonly':<true>} 'included_max_size':{'readonly':<true>} 'supported_max_sizes':{'readonly':<true>} 'supported_per_database_max_sizes':{'readonly':<true>} 'supported_per_database_max_performance_levels':{'readonly':<true>} 'status':{'readonly':<true>} }<line_sep>_attribute_map={'performance_level':{'key':'performanceLevel' 'type':'PerformanceLevelCapability'} 'sku':{'key':'sku' 'type':'Sku'} 'supported_license_types':{'key':'supportedLicenseTypes' 'type':'[LicenseTypeCapability]'} 'max_database_count':{'key':'maxDatabaseCount' 'type':'int'} 'included_max_size':{'key':'includedMaxSize' 'type':'MaxSizeCapability'} 'supported_max_sizes':{'key':'supportedMaxSizes' 'type':'[MaxSizeRangeCapability]'} 'supported_per_database_max_sizes':{'key':'supportedPerDatabaseMaxSizes' 'type':'[MaxSizeRangeCapability]'} 'supported_per_database_max_performance_levels':{'key':'supportedPerDatabaseMaxPerformanceLevels' 'type':'[ElasticPoolPerDatabaseMaxPerformanceLevelCapability]'} 'status':{'key':'status' 'type':'CapabilityStatus'} 'reason':{'key':'reason' 'type':'str'} }<def_stmt>__init__ self * reason:str=<none> **kwargs<arrow><none><block_start>super(ElasticPoolPerformanceLevelCapability self).__init__(**kwargs)<line_sep>self.performance_level=<none><line_sep>self.sku=<none><line_sep>self.supported_license_types=<none><line_sep>self.max_database_count=<none><line_sep>self.included_max_size=<none><line_sep>self.supported_max_sizes=<none><line_sep>self.supported_per_database_max_sizes=<none><line_sep>self.supported_per_database_max_performance_levels=<none><line_sep>self.status=<none><line_sep>self.reason=reason<block_end><block_end> |
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_from_stmt>.spyfile SpyFile<import_from_stmt>..io aviris<import_from_stmt>..io erdas<import_from_stmt>..io envi<line_sep> |
# tests/test_provider_hashicorp_aws.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:25 UTC)
<def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.hashicorp.aws<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.hashicorp.aws aws_accessanalyzer_analyzer<import_from_stmt>terrascript.resource.hashicorp.aws aws_acm_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_acm_certificate_validation<import_from_stmt>terrascript.resource.hashicorp.aws aws_acmpca_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_acmpca_certificate_authority<import_from_stmt>terrascript.resource.hashicorp.aws aws_acmpca_certificate_authority_certificate <import_from_stmt>terrascript.resource.hashicorp.aws aws_alb<import_from_stmt>terrascript.resource.hashicorp.aws aws_alb_listener<import_from_stmt>terrascript.resource.hashicorp.aws aws_alb_listener_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_alb_listener_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_alb_target_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_alb_target_group_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_ami<import_from_stmt>terrascript.resource.hashicorp.aws aws_ami_copy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ami_from_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_ami_launch_permission<import_from_stmt>terrascript.resource.hashicorp.aws aws_amplify_app<import_from_stmt>terrascript.resource.hashicorp.aws aws_amplify_backend_environment<import_from_stmt>terrascript.resource.hashicorp.aws aws_amplify_branch<import_from_stmt>terrascript.resource.hashicorp.aws aws_amplify_domain_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_amplify_webhook<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_account<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_api_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_authorizer<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_base_path_mapping<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_client_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_deployment<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_documentation_part<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_documentation_version<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_domain_name<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_gateway_response<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_integration<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_integration_response<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_method<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_method_response<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_method_settings<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_model<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_request_validator<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_resource<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_rest_api<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_rest_api_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_stage<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_api_gateway_usage_plan<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_usage_plan_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_api_gateway_vpc_link<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_api<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_api_mapping<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_authorizer<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_deployment<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_domain_name<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_integration<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_integration_response<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_model<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_route_response<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_stage<import_from_stmt>terrascript.resource.hashicorp.aws aws_apigatewayv2_vpc_link<import_from_stmt>terrascript.resource.hashicorp.aws aws_app_cookie_stickiness_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_appautoscaling_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_appautoscaling_scheduled_action<import_from_stmt>terrascript.resource.hashicorp.aws aws_appautoscaling_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_appconfig_application<import_from_stmt>terrascript.resource.hashicorp.aws aws_appconfig_configuration_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_appconfig_deployment<import_from_stmt>terrascript.resource.hashicorp.aws aws_appconfig_deployment_strategy<import_from_stmt>terrascript.resource.hashicorp.aws aws_appconfig_environment<import_from_stmt>terrascript.resource.hashicorp.aws aws_appconfig_hosted_configuration_version <import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_gateway_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_mesh<import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_virtual_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_virtual_node<import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_virtual_router<import_from_stmt>terrascript.resource.hashicorp.aws aws_appmesh_virtual_service<import_from_stmt>terrascript.resource.hashicorp.aws aws_apprunner_auto_scaling_configuration_version <import_from_stmt>terrascript.resource.hashicorp.aws aws_apprunner_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_apprunner_custom_domain_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_apprunner_service<import_from_stmt>terrascript.resource.hashicorp.aws aws_appstream_fleet<import_from_stmt>terrascript.resource.hashicorp.aws aws_appstream_stack<import_from_stmt>terrascript.resource.hashicorp.aws aws_appsync_api_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_appsync_datasource<import_from_stmt>terrascript.resource.hashicorp.aws aws_appsync_function<import_from_stmt>terrascript.resource.hashicorp.aws aws_appsync_graphql_api<import_from_stmt>terrascript.resource.hashicorp.aws aws_appsync_resolver<import_from_stmt>terrascript.resource.hashicorp.aws aws_athena_database<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_athena_named_query<import_from_stmt>terrascript.resource.hashicorp.aws aws_athena_workgroup<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_group_tag<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_lifecycle_hook<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_notification<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscaling_schedule<import_from_stmt>terrascript.resource.hashicorp.aws aws_autoscalingplans_scaling_plan<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_global_settings<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_plan<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_region_settings<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_selection<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_vault<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_vault_notifications<import_from_stmt>terrascript.resource.hashicorp.aws aws_backup_vault_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_batch_compute_environment<import_from_stmt>terrascript.resource.hashicorp.aws aws_batch_job_definition<import_from_stmt>terrascript.resource.hashicorp.aws aws_batch_job_queue<import_from_stmt>terrascript.resource.hashicorp.aws aws_budgets_budget<import_from_stmt>terrascript.resource.hashicorp.aws aws_budgets_budget_action<import_from_stmt>terrascript.resource.hashicorp.aws aws_chime_voice_connector<import_from_stmt>terrascript.resource.hashicorp.aws aws_chime_voice_connector_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_chime_voice_connector_logging<import_from_stmt>terrascript.resource.hashicorp.aws aws_chime_voice_connector_origination<import_from_stmt>terrascript.resource.hashicorp.aws aws_chime_voice_connector_streaming<import_from_stmt>terrascript.resource.hashicorp.aws aws_chime_voice_connector_termination<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloud9_environment_ec2<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudformation_stack<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudformation_stack_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudformation_stack_set_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudformation_type<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_cache_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_distribution<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_function<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_key_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_monitoring_subscription <import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_origin_access_identity<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_origin_request_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_public_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudfront_realtime_log_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudhsm_v2_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudhsm_v2_hsm<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_cloudtrail<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_composite_alarm<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_dashboard<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_api_destination<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_archive<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_bus<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_bus_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_permission<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_event_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_destination<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_destination_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_metric_filter<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_resource_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_stream<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_log_subscription_filter <import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_metric_alarm<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_metric_stream<import_from_stmt>terrascript.resource.hashicorp.aws aws_cloudwatch_query_definition<import_from_stmt>terrascript.resource.hashicorp.aws aws_codeartifact_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_codeartifact_domain_permissions_policy <import_from_stmt>terrascript.resource.hashicorp.aws aws_codeartifact_repository<import_from_stmt>terrascript.resource.hashicorp.aws aws_codeartifact_repository_permissions_policy <import_from_stmt>terrascript.resource.hashicorp.aws aws_codebuild_project<import_from_stmt>terrascript.resource.hashicorp.aws aws_codebuild_report_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_codebuild_source_credential<import_from_stmt>terrascript.resource.hashicorp.aws aws_codebuild_webhook<import_from_stmt>terrascript.resource.hashicorp.aws aws_codecommit_repository<import_from_stmt>terrascript.resource.hashicorp.aws aws_codecommit_trigger<import_from_stmt>terrascript.resource.hashicorp.aws aws_codedeploy_app<import_from_stmt>terrascript.resource.hashicorp.aws aws_codedeploy_deployment_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_codedeploy_deployment_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_codepipeline<import_from_stmt>terrascript.resource.hashicorp.aws aws_codepipeline_webhook<import_from_stmt>terrascript.resource.hashicorp.aws aws_codestarconnections_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_codestarconnections_host<import_from_stmt>terrascript.resource.hashicorp.aws aws_codestarnotifications_notification_rule <import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_identity_pool<import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_identity_pool_roles_attachment <import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_identity_provider<import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_resource_server<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_cognito_user_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_user_pool<import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_user_pool_client<import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_user_pool_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_cognito_user_pool_ui_customization <import_from_stmt>terrascript.resource.hashicorp.aws aws_config_aggregate_authorization<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_config_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_configuration_aggregator<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_configuration_recorder<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_configuration_recorder_status <import_from_stmt>terrascript.resource.hashicorp.aws aws_config_conformance_pack<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_delivery_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_organization_conformance_pack <import_from_stmt>terrascript.resource.hashicorp.aws aws_config_organization_custom_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_organization_managed_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_config_remediation_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_connect_contact_flow<import_from_stmt>terrascript.resource.hashicorp.aws aws_connect_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_cur_report_definition<import_from_stmt>terrascript.resource.hashicorp.aws aws_customer_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_datapipeline_pipeline<import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_agent<import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_location_efs<import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_location_fsx_windows_file_system <import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_location_nfs<import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_location_s3<import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_location_smb<import_from_stmt>terrascript.resource.hashicorp.aws aws_datasync_task<import_from_stmt>terrascript.resource.hashicorp.aws aws_dax_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_dax_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_dax_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_cluster_snapshot<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_event_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_instance_role_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_option_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_proxy<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_proxy_default_target_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_proxy_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_proxy_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_security_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_snapshot<import_from_stmt>terrascript.resource.hashicorp.aws aws_db_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_default_network_acl<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_default_route_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_default_security_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_default_subnet<import_from_stmt>terrascript.resource.hashicorp.aws aws_default_vpc<import_from_stmt>terrascript.resource.hashicorp.aws aws_default_vpc_dhcp_options<import_from_stmt>terrascript.resource.hashicorp.aws aws_devicefarm_project<import_from_stmt>terrascript.resource.hashicorp.aws aws_directory_service_conditional_forwarder <import_from_stmt>terrascript.resource.hashicorp.aws aws_directory_service_directory<import_from_stmt>terrascript.resource.hashicorp.aws aws_directory_service_log_subscription <import_from_stmt>terrascript.resource.hashicorp.aws aws_dlm_lifecycle_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_dms_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_dms_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_dms_event_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_dms_replication_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_dms_replication_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_dms_replication_task<import_from_stmt>terrascript.resource.hashicorp.aws aws_docdb_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_docdb_cluster_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_docdb_cluster_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_docdb_cluster_snapshot<import_from_stmt>terrascript.resource.hashicorp.aws aws_docdb_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_bgp_peer<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_connection_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_gateway_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_gateway_association_proposal<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_hosted_private_virtual_interface <import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_hosted_private_virtual_interface_accepter <import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_hosted_public_virtual_interface <import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_hosted_public_virtual_interface_accepter <import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_hosted_transit_virtual_interface <import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_hosted_transit_virtual_interface_accepter <import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_lag<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_private_virtual_interface<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_public_virtual_interface<import_from_stmt>terrascript.resource.hashicorp.aws aws_dx_transit_virtual_interface<import_from_stmt>terrascript.resource.hashicorp.aws aws_dynamodb_global_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_dynamodb_kinesis_streaming_destination <import_from_stmt>terrascript.resource.hashicorp.aws aws_dynamodb_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_dynamodb_table_item<import_from_stmt>terrascript.resource.hashicorp.aws aws_dynamodb_tag<import_from_stmt>terrascript.resource.hashicorp.aws aws_ebs_default_kms_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_ebs_encryption_by_default<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_ebs_snapshot<import_from_stmt>terrascript.resource.hashicorp.aws aws_ebs_snapshot_copy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ebs_snapshot_import<import_from_stmt>terrascript.resource.hashicorp.aws aws_ebs_volume<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_availability_zone_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_capacity_reservation<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_carrier_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_client_vpn_authorization_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_client_vpn_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_client_vpn_network_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_client_vpn_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_fleet<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_local_gateway_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_local_gateway_route_table_vpc_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_managed_prefix_list<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_managed_prefix_list_entry<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_tag<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_traffic_mirror_filter<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_traffic_mirror_filter_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_traffic_mirror_session<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_traffic_mirror_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_peering_attachment <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_peering_attachment_accepter <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_prefix_list_reference <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_route_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_route_table_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_route_table_propagation <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_vpc_attachment <import_from_stmt>terrascript.resource.hashicorp.aws aws_ec2_transit_gateway_vpc_attachment_accepter <import_from_stmt>terrascript.resource.hashicorp.aws aws_ecr_lifecycle_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecr_registry_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecr_replication_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecr_repository<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecr_repository_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecrpublic_repository<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecs_capacity_provider<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecs_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecs_service<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecs_tag<import_from_stmt>terrascript.resource.hashicorp.aws aws_ecs_task_definition<import_from_stmt>terrascript.resource.hashicorp.aws aws_efs_access_point<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_efs_backup_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_efs_file_system<import_from_stmt>terrascript.resource.hashicorp.aws aws_efs_file_system_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_efs_mount_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_egress_only_internet_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_eip<import_from_stmt>terrascript.resource.hashicorp.aws aws_eip_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_eks_addon<import_from_stmt>terrascript.resource.hashicorp.aws aws_eks_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_eks_fargate_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_eks_identity_provider_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_eks_node_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_elastic_beanstalk_application<import_from_stmt>terrascript.resource.hashicorp.aws aws_elastic_beanstalk_application_version <import_from_stmt>terrascript.resource.hashicorp.aws aws_elastic_beanstalk_configuration_template <import_from_stmt>terrascript.resource.hashicorp.aws aws_elastic_beanstalk_environment<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_global_replication_group <import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_replication_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_security_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_user<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticache_user_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticsearch_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticsearch_domain_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_elasticsearch_domain_saml_options<import_from_stmt>terrascript.resource.hashicorp.aws aws_elastictranscoder_pipeline<import_from_stmt>terrascript.resource.hashicorp.aws aws_elastictranscoder_preset<import_from_stmt>terrascript.resource.hashicorp.aws aws_elb<import_from_stmt>terrascript.resource.hashicorp.aws aws_elb_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_emr_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_emr_instance_fleet<import_from_stmt>terrascript.resource.hashicorp.aws aws_emr_instance_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_emr_managed_scaling_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_emr_security_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_flow_log<import_from_stmt>terrascript.resource.hashicorp.aws aws_fms_admin_account<import_from_stmt>terrascript.resource.hashicorp.aws aws_fms_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_fsx_backup<import_from_stmt>terrascript.resource.hashicorp.aws aws_fsx_lustre_file_system<import_from_stmt>terrascript.resource.hashicorp.aws aws_fsx_ontap_file_system<import_from_stmt>terrascript.resource.hashicorp.aws aws_fsx_windows_file_system<import_from_stmt>terrascript.resource.hashicorp.aws aws_gamelift_alias<import_from_stmt>terrascript.resource.hashicorp.aws aws_gamelift_build<import_from_stmt>terrascript.resource.hashicorp.aws aws_gamelift_fleet<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_gamelift_game_session_queue<import_from_stmt>terrascript.resource.hashicorp.aws aws_glacier_vault<import_from_stmt>terrascript.resource.hashicorp.aws aws_glacier_vault_lock<import_from_stmt>terrascript.resource.hashicorp.aws aws_globalaccelerator_accelerator<import_from_stmt>terrascript.resource.hashicorp.aws aws_globalaccelerator_endpoint_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_globalaccelerator_listener<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_catalog_database<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_catalog_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_classifier<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_crawler<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_data_catalog_encryption_settings <import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_dev_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_job<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_ml_transform<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_partition<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_registry<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_resource_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_schema<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_security_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_trigger<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_user_defined_function<import_from_stmt>terrascript.resource.hashicorp.aws aws_glue_workflow<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_detector<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_filter<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_invite_accepter<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_ipset<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_member<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_organization_admin_account <import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_organization_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_publishing_destination<import_from_stmt>terrascript.resource.hashicorp.aws aws_guardduty_threatintelset<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_access_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_account_alias<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_account_password_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_group_membership<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_group_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_group_policy_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_instance_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_openid_connect_provider<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_policy_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_role<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_role_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_role_policy_attachment<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_iam_saml_provider<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_server_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_service_linked_role<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_user<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_user_group_membership<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_user_login_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_user_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_user_policy_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_iam_user_ssh_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_imagebuilder_component<import_from_stmt>terrascript.resource.hashicorp.aws aws_imagebuilder_distribution_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_imagebuilder_image<import_from_stmt>terrascript.resource.hashicorp.aws aws_imagebuilder_image_pipeline<import_from_stmt>terrascript.resource.hashicorp.aws aws_imagebuilder_image_recipe<import_from_stmt>terrascript.resource.hashicorp.aws aws_imagebuilder_infrastructure_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_inspector_assessment_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_inspector_assessment_template<import_from_stmt>terrascript.resource.hashicorp.aws aws_inspector_resource_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_internet_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_policy_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_role_alias<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_thing<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_thing_principal_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_thing_type<import_from_stmt>terrascript.resource.hashicorp.aws aws_iot_topic_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_key_pair<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesis_analytics_application<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesis_firehose_delivery_stream<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesis_stream<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesis_stream_consumer<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesis_video_stream<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesisanalyticsv2_application<import_from_stmt>terrascript.resource.hashicorp.aws aws_kinesisanalyticsv2_application_snapshot <import_from_stmt>terrascript.resource.hashicorp.aws aws_kms_alias<import_from_stmt>terrascript.resource.hashicorp.aws aws_kms_ciphertext<import_from_stmt>terrascript.resource.hashicorp.aws aws_kms_external_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_kms_grant<import_from_stmt>terrascript.resource.hashicorp.aws aws_kms_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_lakeformation_data_lake_settings<import_from_stmt>terrascript.resource.hashicorp.aws aws_lakeformation_permissions<import_from_stmt>terrascript.resource.hashicorp.aws aws_lakeformation_resource<import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_alias<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_lambda_code_signing_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_event_source_mapping<import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_function<import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_function_event_invoke_config <import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_layer_version<import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_permission<import_from_stmt>terrascript.resource.hashicorp.aws aws_lambda_provisioned_concurrency_config <import_from_stmt>terrascript.resource.hashicorp.aws aws_launch_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_launch_template<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_cookie_stickiness_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_listener<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_listener_certificate<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_listener_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_ssl_negotiation_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_target_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_lb_target_group_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_lex_bot<import_from_stmt>terrascript.resource.hashicorp.aws aws_lex_bot_alias<import_from_stmt>terrascript.resource.hashicorp.aws aws_lex_intent<import_from_stmt>terrascript.resource.hashicorp.aws aws_lex_slot_type<import_from_stmt>terrascript.resource.hashicorp.aws aws_licensemanager_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_licensemanager_license_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_lightsail_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_lightsail_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_lightsail_instance_public_ports<import_from_stmt>terrascript.resource.hashicorp.aws aws_lightsail_key_pair<import_from_stmt>terrascript.resource.hashicorp.aws aws_lightsail_static_ip<import_from_stmt>terrascript.resource.hashicorp.aws aws_lightsail_static_ip_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_load_balancer_backend_server_policy <import_from_stmt>terrascript.resource.hashicorp.aws aws_load_balancer_listener_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_load_balancer_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_account<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_classification_job<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_custom_data_identifier<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_findings_filter<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_invitation_accepter<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_member<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie2_organization_admin_account<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie_member_account_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_macie_s3_bucket_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_main_route_table_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_media_convert_queue<import_from_stmt>terrascript.resource.hashicorp.aws aws_media_package_channel<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_media_store_container<import_from_stmt>terrascript.resource.hashicorp.aws aws_media_store_container_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_mq_broker<import_from_stmt>terrascript.resource.hashicorp.aws aws_mq_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_msk_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_msk_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_msk_scram_secret_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_mwaa_environment<import_from_stmt>terrascript.resource.hashicorp.aws aws_nat_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_cluster_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_cluster_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_cluster_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_cluster_snapshot<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_event_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_neptune_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_network_acl<import_from_stmt>terrascript.resource.hashicorp.aws aws_network_acl_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_network_interface<import_from_stmt>terrascript.resource.hashicorp.aws aws_network_interface_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_network_interface_sg_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_networkfirewall_firewall<import_from_stmt>terrascript.resource.hashicorp.aws aws_networkfirewall_firewall_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_networkfirewall_logging_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_networkfirewall_resource_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_networkfirewall_rule_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_application<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_custom_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_ganglia_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_haproxy_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_java_app_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_memcached_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_mysql_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_nodejs_app_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_permission<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_php_app_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_rails_app_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_rds_db_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_stack<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_static_web_layer<import_from_stmt>terrascript.resource.hashicorp.aws aws_opsworks_user_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_organizations_account<import_from_stmt>terrascript.resource.hashicorp.aws aws_organizations_delegated_administrator 
<import_from_stmt>terrascript.resource.hashicorp.aws aws_organizations_organization<import_from_stmt>terrascript.resource.hashicorp.aws aws_organizations_organizational_unit<import_from_stmt>terrascript.resource.hashicorp.aws aws_organizations_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_organizations_policy_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_adm_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_apns_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_apns_sandbox_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_apns_voip_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_apns_voip_sandbox_channel <import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_app<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_baidu_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_email_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_event_stream<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_gcm_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_pinpoint_sms_channel<import_from_stmt>terrascript.resource.hashicorp.aws aws_placement_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_prometheus_workspace<import_from_stmt>terrascript.resource.hashicorp.aws aws_proxy_protocol_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_qldb_ledger<import_from_stmt>terrascript.resource.hashicorp.aws aws_quicksight_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_quicksight_group_membership<import_from_stmt>terrascript.resource.hashicorp.aws aws_quicksight_user<import_from_stmt>terrascript.resource.hashicorp.aws aws_ram_principal_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_ram_resource_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_ram_resource_share<import_from_stmt>terrascript.resource.hashicorp.aws aws_ram_resource_share_accepter<import_from_stmt>terrascript.resource.hashicorp.aws aws_rds_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_rds_cluster_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_rds_cluster_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_rds_cluster_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_rds_cluster_role_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_rds_global_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_cluster<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_event_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_parameter_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_security_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_snapshot_copy_grant<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_snapshot_schedule<import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_snapshot_schedule_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_redshift_subnet_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_resourcegroups_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_delegation_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_health_check<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_route53_hosted_zone_dnssec<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_key_signing_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_query_log<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_record<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_dnssec_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_firewall_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_firewall_domain_list <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_firewall_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_firewall_rule_group <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_firewall_rule_group_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_query_log_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_query_log_config_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_resolver_rule_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_vpc_association_authorization <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_zone<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53_zone_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoverycontrolconfig_cluster <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoverycontrolconfig_control_panel <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoverycontrolconfig_routing_control <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoverycontrolconfig_safety_rule <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoveryreadiness_cell<import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoveryreadiness_readiness_check <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoveryreadiness_recovery_group <import_from_stmt>terrascript.resource.hashicorp.aws aws_route53recoveryreadiness_resource_set <import_from_stmt>terrascript.resource.hashicorp.aws aws_route_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_route_table_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_access_point<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_account_public_access_block<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_analytics_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_inventory<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_metric<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_notification<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_object<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_ownership_controls<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_bucket_public_access_block<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3_object_copy<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3control_bucket<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3control_bucket_lifecycle_configuration 
<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3control_bucket_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_s3outposts_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_app<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_app_image_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_code_repository<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_device_fleet<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_endpoint_configuration<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_feature_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_flow_definition<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_human_task_ui<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_image<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_image_version<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_model<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_model_package_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_notebook_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_notebook_instance_lifecycle_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_user_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_workforce<import_from_stmt>terrascript.resource.hashicorp.aws aws_sagemaker_workteam<import_from_stmt>terrascript.resource.hashicorp.aws aws_schemas_discoverer<import_from_stmt>terrascript.resource.hashicorp.aws aws_schemas_registry<import_from_stmt>terrascript.resource.hashicorp.aws aws_schemas_schema<import_from_stmt>terrascript.resource.hashicorp.aws aws_secretsmanager_secret<import_from_stmt>terrascript.resource.hashicorp.aws aws_secretsmanager_secret_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_secretsmanager_secret_rotation<import_from_stmt>terrascript.resource.hashicorp.aws aws_secretsmanager_secret_version<import_from_stmt>terrascript.resource.hashicorp.aws aws_security_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_security_group_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_account<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_action_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_insight<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_invite_accepter<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_member<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_organization_admin_account <import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_organization_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_product_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_standards_control<import_from_stmt>terrascript.resource.hashicorp.aws aws_securityhub_standards_subscription <import_from_stmt>terrascript.resource.hashicorp.aws aws_serverlessapplicationrepository_cloudformation_stack <import_from_stmt>terrascript.resource.hashicorp.aws aws_service_discovery_http_namespace<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_service_discovery_instance<import_from_stmt>terrascript.resource.hashicorp.aws aws_service_discovery_private_dns_namespace <import_from_stmt>terrascript.resource.hashicorp.aws aws_service_discovery_public_dns_namespace <import_from_stmt>terrascript.resource.hashicorp.aws aws_service_discovery_service<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_budget_resource_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_constraint<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_organizations_access <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_portfolio<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_portfolio_share<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_principal_portfolio_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_product<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_product_portfolio_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_provisioned_product <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_provisioning_artifact <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_service_action<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_tag_option<import_from_stmt>terrascript.resource.hashicorp.aws aws_servicecatalog_tag_option_resource_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_servicequotas_service_quota<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_active_receipt_rule_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_configuration_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_domain_dkim<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_domain_identity<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_domain_identity_verification<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_domain_mail_from<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_email_identity<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_event_destination<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_identity_notification_topic<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_identity_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_receipt_filter<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_receipt_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_receipt_rule_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_ses_template<import_from_stmt>terrascript.resource.hashicorp.aws aws_sfn_activity<import_from_stmt>terrascript.resource.hashicorp.aws aws_sfn_state_machine<import_from_stmt>terrascript.resource.hashicorp.aws aws_shield_protection<import_from_stmt>terrascript.resource.hashicorp.aws aws_shield_protection_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_signer_signing_job<import_from_stmt>terrascript.resource.hashicorp.aws aws_signer_signing_profile<import_from_stmt>terrascript.resource.hashicorp.aws aws_signer_signing_profile_permission<import_from_stmt>terrascript.resource.hashicorp.aws aws_simpledb_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_snapshot_create_volume_permission<import_from_stmt>terrascript.resource.hashicorp.aws aws_sns_platform_application<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_sns_sms_preferences<import_from_stmt>terrascript.resource.hashicorp.aws aws_sns_topic<import_from_stmt>terrascript.resource.hashicorp.aws aws_sns_topic_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_sns_topic_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_spot_datafeed_subscription<import_from_stmt>terrascript.resource.hashicorp.aws aws_spot_fleet_request<import_from_stmt>terrascript.resource.hashicorp.aws aws_spot_instance_request<import_from_stmt>terrascript.resource.hashicorp.aws aws_sqs_queue<import_from_stmt>terrascript.resource.hashicorp.aws aws_sqs_queue_policy<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_activation<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_document<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_maintenance_window<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_maintenance_window_target<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_maintenance_window_task<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_parameter<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_patch_baseline<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_patch_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssm_resource_data_sync<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssoadmin_account_assignment<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssoadmin_managed_policy_attachment <import_from_stmt>terrascript.resource.hashicorp.aws aws_ssoadmin_permission_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_ssoadmin_permission_set_inline_policy <import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_cache<import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_cached_iscsi_volume <import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_file_system_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_nfs_file_share<import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_smb_file_share<import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_stored_iscsi_volume <import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_tape_pool<import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_upload_buffer<import_from_stmt>terrascript.resource.hashicorp.aws aws_storagegateway_working_storage<import_from_stmt>terrascript.resource.hashicorp.aws aws_subnet<import_from_stmt>terrascript.resource.hashicorp.aws aws_swf_domain<import_from_stmt>terrascript.resource.hashicorp.aws aws_synthetics_canary<import_from_stmt>terrascript.resource.hashicorp.aws aws_timestreamwrite_database<import_from_stmt>terrascript.resource.hashicorp.aws aws_timestreamwrite_table<import_from_stmt>terrascript.resource.hashicorp.aws aws_transfer_access<import_from_stmt>terrascript.resource.hashicorp.aws aws_transfer_server<import_from_stmt>terrascript.resource.hashicorp.aws aws_transfer_ssh_key<import_from_stmt>terrascript.resource.hashicorp.aws aws_transfer_user<import_from_stmt>terrascript.resource.hashicorp.aws aws_volume_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_dhcp_options<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_vpc_dhcp_options_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_endpoint<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_endpoint_connection_notification <import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_endpoint_route_table_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_endpoint_service<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_endpoint_service_allowed_principal <import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_endpoint_subnet_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_ipv4_cidr_block_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_peering_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_peering_connection_accepter<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpc_peering_connection_options<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpn_connection<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpn_connection_route<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpn_gateway<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpn_gateway_attachment<import_from_stmt>terrascript.resource.hashicorp.aws aws_vpn_gateway_route_propagation<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_byte_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_geo_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_ipset<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_rate_based_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_regex_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_regex_pattern_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_rule_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_size_constraint_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_sql_injection_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_web_acl<import_from_stmt>terrascript.resource.hashicorp.aws aws_waf_xss_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_byte_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_geo_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_ipset<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_rate_based_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_regex_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_regex_pattern_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_rule<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_rule_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_size_constraint_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_sql_injection_match_set <import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_web_acl<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_web_acl_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafregional_xss_match_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafv2_ip_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafv2_regex_pattern_set<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafv2_rule_group<import_from_stmt>terrascript.resource.hashicorp.aws 
aws_wafv2_web_acl<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafv2_web_acl_association<import_from_stmt>terrascript.resource.hashicorp.aws aws_wafv2_web_acl_logging_configuration <import_from_stmt>terrascript.resource.hashicorp.aws aws_worklink_fleet<import_from_stmt>terrascript.resource.hashicorp.aws aws_worklink_website_certificate_authority_association <import_from_stmt>terrascript.resource.hashicorp.aws aws_workspaces_directory<import_from_stmt>terrascript.resource.hashicorp.aws aws_workspaces_ip_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_workspaces_workspace<import_from_stmt>terrascript.resource.hashicorp.aws aws_xray_encryption_config<import_from_stmt>terrascript.resource.hashicorp.aws aws_xray_group<import_from_stmt>terrascript.resource.hashicorp.aws aws_xray_sampling_rule<block_end><def_stmt>test_datasource_import <block_start><import_from_stmt>terrascript.data.hashicorp.aws aws_acm_certificate<import_from_stmt>terrascript.data.hashicorp.aws aws_acmpca_certificate<import_from_stmt>terrascript.data.hashicorp.aws aws_acmpca_certificate_authority<import_from_stmt>terrascript.data.hashicorp.aws aws_alb<import_from_stmt>terrascript.data.hashicorp.aws aws_alb_listener<import_from_stmt>terrascript.data.hashicorp.aws aws_alb_target_group<import_from_stmt>terrascript.data.hashicorp.aws aws_ami<import_from_stmt>terrascript.data.hashicorp.aws aws_ami_ids<import_from_stmt>terrascript.data.hashicorp.aws aws_api_gateway_api_key<import_from_stmt>terrascript.data.hashicorp.aws aws_api_gateway_domain_name<import_from_stmt>terrascript.data.hashicorp.aws aws_api_gateway_resource<import_from_stmt>terrascript.data.hashicorp.aws aws_api_gateway_rest_api<import_from_stmt>terrascript.data.hashicorp.aws aws_api_gateway_vpc_link<import_from_stmt>terrascript.data.hashicorp.aws aws_apigatewayv2_api<import_from_stmt>terrascript.data.hashicorp.aws aws_apigatewayv2_apis<import_from_stmt>terrascript.data.hashicorp.aws aws_appmesh_mesh<import_from_stmt>terrascript.data.hashicorp.aws aws_appmesh_virtual_service<import_from_stmt>terrascript.data.hashicorp.aws aws_arn<import_from_stmt>terrascript.data.hashicorp.aws aws_autoscaling_group<import_from_stmt>terrascript.data.hashicorp.aws aws_autoscaling_groups<import_from_stmt>terrascript.data.hashicorp.aws aws_availability_zone<import_from_stmt>terrascript.data.hashicorp.aws aws_availability_zones<import_from_stmt>terrascript.data.hashicorp.aws aws_backup_plan<import_from_stmt>terrascript.data.hashicorp.aws aws_backup_selection<import_from_stmt>terrascript.data.hashicorp.aws aws_backup_vault<import_from_stmt>terrascript.data.hashicorp.aws aws_batch_compute_environment<import_from_stmt>terrascript.data.hashicorp.aws aws_batch_job_queue<import_from_stmt>terrascript.data.hashicorp.aws aws_billing_service_account<import_from_stmt>terrascript.data.hashicorp.aws aws_caller_identity<import_from_stmt>terrascript.data.hashicorp.aws aws_canonical_user_id<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudformation_export<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudformation_stack<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudformation_type<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudfront_cache_policy<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudfront_distribution<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudfront_function<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudfront_log_delivery_canonical_user_id <import_from_stmt>terrascript.data.hashicorp.aws 
aws_cloudfront_origin_request_policy<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudhsm_v2_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudtrail_service_account<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudwatch_event_connection<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudwatch_event_source<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudwatch_log_group<import_from_stmt>terrascript.data.hashicorp.aws aws_cloudwatch_log_groups<import_from_stmt>terrascript.data.hashicorp.aws aws_codeartifact_authorization_token<import_from_stmt>terrascript.data.hashicorp.aws aws_codeartifact_repository_endpoint<import_from_stmt>terrascript.data.hashicorp.aws aws_codecommit_repository<import_from_stmt>terrascript.data.hashicorp.aws aws_codestarconnections_connection<import_from_stmt>terrascript.data.hashicorp.aws aws_cognito_user_pools<import_from_stmt>terrascript.data.hashicorp.aws aws_connect_contact_flow<import_from_stmt>terrascript.data.hashicorp.aws aws_connect_instance<import_from_stmt>terrascript.data.hashicorp.aws aws_cur_report_definition<import_from_stmt>terrascript.data.hashicorp.aws aws_customer_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_db_cluster_snapshot<import_from_stmt>terrascript.data.hashicorp.aws aws_db_event_categories<import_from_stmt>terrascript.data.hashicorp.aws aws_db_instance<import_from_stmt>terrascript.data.hashicorp.aws aws_db_snapshot<import_from_stmt>terrascript.data.hashicorp.aws aws_db_subnet_group<import_from_stmt>terrascript.data.hashicorp.aws aws_default_tags<import_from_stmt>terrascript.data.hashicorp.aws aws_directory_service_directory<import_from_stmt>terrascript.data.hashicorp.aws aws_docdb_engine_version<import_from_stmt>terrascript.data.hashicorp.aws aws_docdb_orderable_db_instance<import_from_stmt>terrascript.data.hashicorp.aws aws_dx_connection<import_from_stmt>terrascript.data.hashicorp.aws aws_dx_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_dx_location<import_from_stmt>terrascript.data.hashicorp.aws aws_dx_locations<import_from_stmt>terrascript.data.hashicorp.aws aws_dynamodb_table<import_from_stmt>terrascript.data.hashicorp.aws aws_ebs_default_kms_key<import_from_stmt>terrascript.data.hashicorp.aws aws_ebs_encryption_by_default<import_from_stmt>terrascript.data.hashicorp.aws aws_ebs_snapshot<import_from_stmt>terrascript.data.hashicorp.aws aws_ebs_snapshot_ids<import_from_stmt>terrascript.data.hashicorp.aws aws_ebs_volume<import_from_stmt>terrascript.data.hashicorp.aws aws_ebs_volumes<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_coip_pool<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_coip_pools<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_instance_type<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_instance_type_offering<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_instance_type_offerings<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateway_route_table<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateway_route_tables<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateway_virtual_interface<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateway_virtual_interface_group <import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateway_virtual_interface_groups <import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_local_gateways<import_from_stmt>terrascript.data.hashicorp.aws 
aws_ec2_managed_prefix_list<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_spot_price<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway_dx_gateway_attachment <import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway_peering_attachment <import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway_route_table<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway_route_tables<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway_vpc_attachment<import_from_stmt>terrascript.data.hashicorp.aws aws_ec2_transit_gateway_vpn_attachment<import_from_stmt>terrascript.data.hashicorp.aws aws_ecr_authorization_token<import_from_stmt>terrascript.data.hashicorp.aws aws_ecr_image<import_from_stmt>terrascript.data.hashicorp.aws aws_ecr_repository<import_from_stmt>terrascript.data.hashicorp.aws aws_ecs_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_ecs_container_definition<import_from_stmt>terrascript.data.hashicorp.aws aws_ecs_service<import_from_stmt>terrascript.data.hashicorp.aws aws_ecs_task_definition<import_from_stmt>terrascript.data.hashicorp.aws aws_efs_access_point<import_from_stmt>terrascript.data.hashicorp.aws aws_efs_access_points<import_from_stmt>terrascript.data.hashicorp.aws aws_efs_file_system<import_from_stmt>terrascript.data.hashicorp.aws aws_efs_mount_target<import_from_stmt>terrascript.data.hashicorp.aws aws_eip<import_from_stmt>terrascript.data.hashicorp.aws aws_eks_addon<import_from_stmt>terrascript.data.hashicorp.aws aws_eks_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_eks_cluster_auth<import_from_stmt>terrascript.data.hashicorp.aws aws_eks_clusters<import_from_stmt>terrascript.data.hashicorp.aws aws_eks_node_group<import_from_stmt>terrascript.data.hashicorp.aws aws_eks_node_groups<import_from_stmt>terrascript.data.hashicorp.aws aws_elastic_beanstalk_application<import_from_stmt>terrascript.data.hashicorp.aws aws_elastic_beanstalk_hosted_zone<import_from_stmt>terrascript.data.hashicorp.aws aws_elastic_beanstalk_solution_stack<import_from_stmt>terrascript.data.hashicorp.aws aws_elasticache_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_elasticache_replication_group<import_from_stmt>terrascript.data.hashicorp.aws aws_elasticache_user<import_from_stmt>terrascript.data.hashicorp.aws aws_elasticsearch_domain<import_from_stmt>terrascript.data.hashicorp.aws aws_elb<import_from_stmt>terrascript.data.hashicorp.aws aws_elb_hosted_zone_id<import_from_stmt>terrascript.data.hashicorp.aws aws_elb_service_account<import_from_stmt>terrascript.data.hashicorp.aws aws_globalaccelerator_accelerator<import_from_stmt>terrascript.data.hashicorp.aws aws_glue_connection<import_from_stmt>terrascript.data.hashicorp.aws aws_glue_data_catalog_encryption_settings<import_from_stmt>terrascript.data.hashicorp.aws aws_glue_script<import_from_stmt>terrascript.data.hashicorp.aws aws_guardduty_detector<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_account_alias<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_group<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_instance_profile<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_policy<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_policy_document<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_role<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_roles<import_from_stmt>terrascript.data.hashicorp.aws 
aws_iam_server_certificate<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_session_context<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_user<import_from_stmt>terrascript.data.hashicorp.aws aws_iam_users<import_from_stmt>terrascript.data.hashicorp.aws aws_identitystore_group<import_from_stmt>terrascript.data.hashicorp.aws aws_identitystore_user<import_from_stmt>terrascript.data.hashicorp.aws aws_imagebuilder_component<import_from_stmt>terrascript.data.hashicorp.aws aws_imagebuilder_distribution_configuration <import_from_stmt>terrascript.data.hashicorp.aws aws_imagebuilder_image<import_from_stmt>terrascript.data.hashicorp.aws aws_imagebuilder_image_pipeline<import_from_stmt>terrascript.data.hashicorp.aws aws_imagebuilder_image_recipe<import_from_stmt>terrascript.data.hashicorp.aws aws_imagebuilder_infrastructure_configuration <import_from_stmt>terrascript.data.hashicorp.aws aws_inspector_rules_packages<import_from_stmt>terrascript.data.hashicorp.aws aws_instance<import_from_stmt>terrascript.data.hashicorp.aws aws_instances<import_from_stmt>terrascript.data.hashicorp.aws aws_internet_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_iot_endpoint<import_from_stmt>terrascript.data.hashicorp.aws aws_ip_ranges<import_from_stmt>terrascript.data.hashicorp.aws aws_kinesis_stream<import_from_stmt>terrascript.data.hashicorp.aws aws_kinesis_stream_consumer<import_from_stmt>terrascript.data.hashicorp.aws aws_kms_alias<import_from_stmt>terrascript.data.hashicorp.aws aws_kms_ciphertext<import_from_stmt>terrascript.data.hashicorp.aws aws_kms_key<import_from_stmt>terrascript.data.hashicorp.aws aws_kms_public_key<import_from_stmt>terrascript.data.hashicorp.aws aws_kms_secret<import_from_stmt>terrascript.data.hashicorp.aws aws_kms_secrets<import_from_stmt>terrascript.data.hashicorp.aws aws_lakeformation_data_lake_settings<import_from_stmt>terrascript.data.hashicorp.aws aws_lakeformation_permissions<import_from_stmt>terrascript.data.hashicorp.aws aws_lakeformation_resource<import_from_stmt>terrascript.data.hashicorp.aws aws_lambda_alias<import_from_stmt>terrascript.data.hashicorp.aws aws_lambda_code_signing_config<import_from_stmt>terrascript.data.hashicorp.aws aws_lambda_function<import_from_stmt>terrascript.data.hashicorp.aws aws_lambda_invocation<import_from_stmt>terrascript.data.hashicorp.aws aws_lambda_layer_version<import_from_stmt>terrascript.data.hashicorp.aws aws_launch_configuration<import_from_stmt>terrascript.data.hashicorp.aws aws_launch_template<import_from_stmt>terrascript.data.hashicorp.aws aws_lb<import_from_stmt>terrascript.data.hashicorp.aws aws_lb_listener<import_from_stmt>terrascript.data.hashicorp.aws aws_lb_target_group<import_from_stmt>terrascript.data.hashicorp.aws aws_lex_bot<import_from_stmt>terrascript.data.hashicorp.aws aws_lex_bot_alias<import_from_stmt>terrascript.data.hashicorp.aws aws_lex_intent<import_from_stmt>terrascript.data.hashicorp.aws aws_lex_slot_type<import_from_stmt>terrascript.data.hashicorp.aws aws_mq_broker<import_from_stmt>terrascript.data.hashicorp.aws aws_msk_broker_nodes<import_from_stmt>terrascript.data.hashicorp.aws aws_msk_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_msk_configuration<import_from_stmt>terrascript.data.hashicorp.aws aws_msk_kafka_version<import_from_stmt>terrascript.data.hashicorp.aws aws_nat_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_neptune_engine_version<import_from_stmt>terrascript.data.hashicorp.aws 
aws_neptune_orderable_db_instance<import_from_stmt>terrascript.data.hashicorp.aws aws_network_acls<import_from_stmt>terrascript.data.hashicorp.aws aws_network_interface<import_from_stmt>terrascript.data.hashicorp.aws aws_network_interfaces<import_from_stmt>terrascript.data.hashicorp.aws aws_organizations_delegated_administrators <import_from_stmt>terrascript.data.hashicorp.aws aws_organizations_delegated_services<import_from_stmt>terrascript.data.hashicorp.aws aws_organizations_organization<import_from_stmt>terrascript.data.hashicorp.aws aws_organizations_organizational_units<import_from_stmt>terrascript.data.hashicorp.aws aws_outposts_outpost<import_from_stmt>terrascript.data.hashicorp.aws aws_outposts_outpost_instance_type<import_from_stmt>terrascript.data.hashicorp.aws aws_outposts_outpost_instance_types<import_from_stmt>terrascript.data.hashicorp.aws aws_outposts_outposts<import_from_stmt>terrascript.data.hashicorp.aws aws_outposts_site<import_from_stmt>terrascript.data.hashicorp.aws aws_outposts_sites<import_from_stmt>terrascript.data.hashicorp.aws aws_partition<import_from_stmt>terrascript.data.hashicorp.aws aws_prefix_list<import_from_stmt>terrascript.data.hashicorp.aws aws_pricing_product<import_from_stmt>terrascript.data.hashicorp.aws aws_qldb_ledger<import_from_stmt>terrascript.data.hashicorp.aws aws_ram_resource_share<import_from_stmt>terrascript.data.hashicorp.aws aws_rds_certificate<import_from_stmt>terrascript.data.hashicorp.aws aws_rds_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_rds_engine_version<import_from_stmt>terrascript.data.hashicorp.aws aws_rds_orderable_db_instance<import_from_stmt>terrascript.data.hashicorp.aws aws_redshift_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_redshift_orderable_cluster<import_from_stmt>terrascript.data.hashicorp.aws aws_redshift_service_account<import_from_stmt>terrascript.data.hashicorp.aws aws_region<import_from_stmt>terrascript.data.hashicorp.aws aws_regions<import_from_stmt>terrascript.data.hashicorp.aws aws_resourcegroupstaggingapi_resources<import_from_stmt>terrascript.data.hashicorp.aws aws_route<import_from_stmt>terrascript.data.hashicorp.aws aws_route53_delegation_set<import_from_stmt>terrascript.data.hashicorp.aws aws_route53_resolver_endpoint<import_from_stmt>terrascript.data.hashicorp.aws aws_route53_resolver_rule<import_from_stmt>terrascript.data.hashicorp.aws aws_route53_resolver_rules<import_from_stmt>terrascript.data.hashicorp.aws aws_route53_zone<import_from_stmt>terrascript.data.hashicorp.aws aws_route_table<import_from_stmt>terrascript.data.hashicorp.aws aws_route_tables<import_from_stmt>terrascript.data.hashicorp.aws aws_s3_bucket<import_from_stmt>terrascript.data.hashicorp.aws aws_s3_bucket_object<import_from_stmt>terrascript.data.hashicorp.aws aws_s3_bucket_objects<import_from_stmt>terrascript.data.hashicorp.aws aws_sagemaker_prebuilt_ecr_image<import_from_stmt>terrascript.data.hashicorp.aws aws_secretsmanager_secret<import_from_stmt>terrascript.data.hashicorp.aws aws_secretsmanager_secret_rotation<import_from_stmt>terrascript.data.hashicorp.aws aws_secretsmanager_secret_version<import_from_stmt>terrascript.data.hashicorp.aws aws_security_group<import_from_stmt>terrascript.data.hashicorp.aws aws_security_groups<import_from_stmt>terrascript.data.hashicorp.aws aws_serverlessapplicationrepository_application <import_from_stmt>terrascript.data.hashicorp.aws aws_service_discovery_dns_namespace<import_from_stmt>terrascript.data.hashicorp.aws 
aws_servicecatalog_constraint<import_from_stmt>terrascript.data.hashicorp.aws aws_servicecatalog_launch_paths<import_from_stmt>terrascript.data.hashicorp.aws aws_servicecatalog_portfolio<import_from_stmt>terrascript.data.hashicorp.aws aws_servicecatalog_portfolio_constraints<import_from_stmt>terrascript.data.hashicorp.aws aws_servicecatalog_product<import_from_stmt>terrascript.data.hashicorp.aws aws_servicequotas_service<import_from_stmt>terrascript.data.hashicorp.aws aws_servicequotas_service_quota<import_from_stmt>terrascript.data.hashicorp.aws aws_sfn_activity<import_from_stmt>terrascript.data.hashicorp.aws aws_sfn_state_machine<import_from_stmt>terrascript.data.hashicorp.aws aws_signer_signing_job<import_from_stmt>terrascript.data.hashicorp.aws aws_signer_signing_profile<import_from_stmt>terrascript.data.hashicorp.aws aws_sns_topic<import_from_stmt>terrascript.data.hashicorp.aws aws_sqs_queue<import_from_stmt>terrascript.data.hashicorp.aws aws_ssm_document<import_from_stmt>terrascript.data.hashicorp.aws aws_ssm_parameter<import_from_stmt>terrascript.data.hashicorp.aws aws_ssm_patch_baseline<import_from_stmt>terrascript.data.hashicorp.aws aws_ssoadmin_instances<import_from_stmt>terrascript.data.hashicorp.aws aws_ssoadmin_permission_set<import_from_stmt>terrascript.data.hashicorp.aws aws_storagegateway_local_disk<import_from_stmt>terrascript.data.hashicorp.aws aws_subnet<import_from_stmt>terrascript.data.hashicorp.aws aws_subnet_ids<import_from_stmt>terrascript.data.hashicorp.aws aws_subnets<import_from_stmt>terrascript.data.hashicorp.aws aws_transfer_server<import_from_stmt>terrascript.data.hashicorp.aws aws_vpc<import_from_stmt>terrascript.data.hashicorp.aws aws_vpc_dhcp_options<import_from_stmt>terrascript.data.hashicorp.aws aws_vpc_endpoint<import_from_stmt>terrascript.data.hashicorp.aws aws_vpc_endpoint_service<import_from_stmt>terrascript.data.hashicorp.aws aws_vpc_peering_connection<import_from_stmt>terrascript.data.hashicorp.aws aws_vpc_peering_connections<import_from_stmt>terrascript.data.hashicorp.aws aws_vpcs<import_from_stmt>terrascript.data.hashicorp.aws aws_vpn_gateway<import_from_stmt>terrascript.data.hashicorp.aws aws_waf_ipset<import_from_stmt>terrascript.data.hashicorp.aws aws_waf_rate_based_rule<import_from_stmt>terrascript.data.hashicorp.aws aws_waf_rule<import_from_stmt>terrascript.data.hashicorp.aws aws_waf_web_acl<import_from_stmt>terrascript.data.hashicorp.aws aws_wafregional_ipset<import_from_stmt>terrascript.data.hashicorp.aws aws_wafregional_rate_based_rule<import_from_stmt>terrascript.data.hashicorp.aws aws_wafregional_rule<import_from_stmt>terrascript.data.hashicorp.aws aws_wafregional_web_acl<import_from_stmt>terrascript.data.hashicorp.aws aws_wafv2_ip_set<import_from_stmt>terrascript.data.hashicorp.aws aws_wafv2_regex_pattern_set<import_from_stmt>terrascript.data.hashicorp.aws aws_wafv2_rule_group<import_from_stmt>terrascript.data.hashicorp.aws aws_wafv2_web_acl<import_from_stmt>terrascript.data.hashicorp.aws aws_workspaces_bundle<import_from_stmt>terrascript.data.hashicorp.aws aws_workspaces_directory<import_from_stmt>terrascript.data.hashicorp.aws aws_workspaces_image<import_from_stmt>terrascript.data.hashicorp.aws aws_workspaces_workspace<block_end># TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.hashicorp.aws
#
# t = terrascript.provider.hashicorp.aws.aws()
# s = str(t)
#
# assert 'https://github.com/hashicorp/terraform-provider-aws' in s
# assert '3.60.0' in s
|
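The row above only verifies that every generated AWS resource and data-source class in python-terrascript can be imported. As a rough, hedged sketch of how such generated classes are normally combined (the Terrascript container, the `+=` pattern, the keyword values, and the final serialization call are assumptions based on the library's documented usage, not taken from this test):

import terrascript
from terrascript.provider.hashicorp.aws import aws          # provider class used in the commented test above
from terrascript.resource.hashicorp.aws import aws_instance  # one of the resource classes imported above

# Assumed usage pattern: a Terrascript container collects provider and
# resource blocks and is later rendered as Terraform-compatible JSON.
config = terrascript.Terrascript()
config += aws(region="us-east-1")                 # example region, assumed value
config += aws_instance("example",                 # resource label
                       ami="ami-0123456789abcdef0",   # placeholder AMI, assumed value
                       instance_type="t2.micro")
print(str(config))   # assumed to render JSON that `terraform plan` can consume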
<import_from_stmt>..files ObjectReader<import_from_stmt>..streams EndianBinaryWriter<import_from_stmt>..helpers ImportHelper<import_from_stmt>.. files<import_from_stmt>..enums FileType ClassIDType<import_stmt>os<import_from_stmt>.. environment<def_stmt>save_ptr obj writer:EndianBinaryWriter<block_start><if_stmt>isinstance(obj PPtr)<block_start>writer.write_int(obj.file_id)<block_end><else_stmt><block_start>writer.write_int(0)# it's usually 0......
<block_end><if_stmt>obj._version<l>14<block_start>writer.write_int(obj.path_id)<block_end><else_stmt><block_start>writer.write_long(obj.path_id)<block_end><block_end>cached_managers=dict()<class_stmt>PPtr<block_start><def_stmt>__init__ self reader:ObjectReader<block_start>self._version=reader.version2<line_sep>self.index=-2<line_sep>self.file_id=reader.read_int()<line_sep>self.path_id=reader.read_int()<if>self._version<l>14<else>reader.read_long()<line_sep>self.assets_file=reader.assets_file<line_sep>self._obj=<none><block_end><def_stmt>save self writer:EndianBinaryWriter<block_start>save_ptr(self writer)<block_end><def_stmt>get_obj self<block_start><if_stmt>self._obj<ne><none><block_start><return>self._obj<block_end>manager=<none><if_stmt>self.file_id<eq>0<block_start>manager=self.assets_file<block_end><elif_stmt>self.file_id<g>0<and>self.file_id-1<l>len(self.assets_file.externals)<block_start><if_stmt>self.index<eq>-2<block_start>external_name=self.assets_file.externals[self.file_id-1].name<line_sep>parent=self.assets_file.parent<if_stmt>parent<is><not><none><block_start><if_stmt>external_name<in>parent.files<block_start>manager=parent.files[external_name]<block_end><elif_stmt>external_name.upper()<in>parent.files<block_start>manager=parent.files[external_name.upper()]<block_end><else_stmt><block_start><while_stmt><not>isinstance(parent environment.Environment)<block_start>parent=parent.parent<block_end><if_stmt>parent.path<block_start>path=parent.path<line_sep>files=os.listdir(path)<if_stmt>external_name<in>files<block_start>parent.load_files([os.path.join(path external_name)])<line_sep>manager=parent.files[external_name]<block_end><block_end><block_end><block_end><else_stmt><block_start><if_stmt>external_name<not><in>cached_managers<block_start>typ,reader=ImportHelper.check_file_type(external_name)<if_stmt>typ<eq>FileType.AssetsFile<block_start>cached_managers[external_name]=files.SerializedFile(reader)<block_end><block_end><if_stmt>external_name<in>cached_managers<block_start>manager=cached_managers[external_name]<block_end><block_end><block_end><block_end><if_stmt>manager<and>self.path_id<in>manager.objects<block_start>self._obj=manager.objects[self.path_id]<block_end><else_stmt><block_start>self._obj=<none><block_end><return>self._obj<block_end><def_stmt>__getattr__ self key<block_start>obj=self.get_obj()<if_stmt>obj<is><none><block_start><if_stmt>key<eq>"type"<block_start><return>ClassIDType.UnknownType<block_end><raise>AttributeError(key)<block_end><return>getattr(obj key)<block_end><def_stmt>__repr__ self<block_start><return>"<%s %s>"%(self.__class__.__name__ self._obj.__class__.__repr__(self.get_obj())<if>self.get_obj()<else>"Not Found")<block_end><def_stmt>__bool__ self<block_start><return><true><if>self.get_obj()<else><false><block_end><block_end> |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>StringIO<import_stmt>apsw<import_stmt>pytest<import_stmt>struct<import_stmt>bayeslite<import_stmt>bayeslite.ast<as>ast<import_stmt>bayeslite.compiler<as>compiler<import_stmt>bayeslite.core<as>core<import_stmt>bayeslite.guess<as>guess<import_stmt>bayeslite.backends.troll_rng<as>troll<import_stmt>bayeslite.parse<as>parse<import_from_stmt>bayeslite.exception BQLError<import_from_stmt>bayeslite.math_util relerr<import_from_stmt>bayeslite.backends.cgpm_backend CGPM_Backend<import_from_stmt>bayeslite.util cursor_value<import_stmt>test_core<import_stmt>test_csv<import_from_stmt>stochastic stochastic<def_stmt>bql2sql string setup=<none><block_start><with_stmt>bayeslite.bayesdb_open(':memory:')<as>bdb<block_start>test_core.t1_schema(bdb)<line_sep>test_core.t1_data(bdb)<line_sep>bdb.execute('''
create population p1 for t1 (
id ignore;
label nominal;
age numerical;
weight numerical
)
''')<if_stmt>setup<is><not><none><block_start>setup(bdb)<block_end>phrases=parse.parse_bql_string(string)<line_sep>out=compiler.Output(0 {} ())<for_stmt>phrase phrases<block_start><assert_stmt>ast.is_query(phrase)<line_sep>compiler.compile_query(bdb phrase out)<line_sep>out.write(';')<block_end><return>out.getvalue()<block_end><block_end># XXX Kludgey mess. Please reorganize.
<def_stmt>bql2sqlparam string<block_start><with_stmt>bayeslite.bayesdb_open(':memory:')<as>bdb<block_start>test_core.t1_schema(bdb)<line_sep>test_core.t1_data(bdb)<line_sep>bdb.execute('''
create population p1 for t1 (
id ignore;
label nominal;
age numerical;
weight numerical
)
''')<line_sep>phrases=parse.parse_bql_string(string)<line_sep>out0=StringIO.StringIO()<for_stmt>phrase phrases<block_start>out=<none><if_stmt>isinstance(phrase ast.Parametrized)<block_start>bindings=(<none> )<times>phrase.n_numpar<line_sep>out=compiler.Output(phrase.n_numpar phrase.nampar_map bindings)<line_sep>phrase=phrase.phrase<block_end><else_stmt><block_start>out=StringIO.StringIO()<block_end><assert_stmt>ast.is_query(phrase)<line_sep>compiler.compile_query(bdb phrase out)<line_sep># XXX Do something about the parameters.
out0.write(out.getvalue())<line_sep>out0.write(';')<block_end><return>out0.getvalue()<block_end><block_end><def_stmt>bql_execute bdb string bindings=()<block_start><return>map(tuple bdb.execute(string bindings))<block_end><def_stmt>empty cursor<block_start><assert_stmt>cursor<is><not><none><assert_stmt>cursor.description<is><not><none><assert_stmt>len(cursor.description)<eq>0<with_stmt>pytest.raises(StopIteration)<block_start>cursor.next()<block_end><block_end><def_stmt>test_trivial_population <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end># XXX if (not) exists
bdb.execute('''
create population p for t (
guess stattypes of (*);
age numerical
)
''')<line_sep>bdb.execute('drop population p')<block_end><block_end><def_stmt>test_population_invalid_numerical <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end><with_stmt>pytest.raises(BQLError)<block_start>bdb.execute('''
create population p for t (
guess stattypes of (*);
gender numerical
)
''')<block_end><block_end><block_end><def_stmt>test_population_invalid_numerical_alterpop_addvar <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>bdb.execute('''
create population p for t (
guess stattypes of (*);
ignore gender
)
''')<with_stmt>pytest.raises(BQLError)<block_start>bdb.execute('alter population p add variable gender numerical')<block_end>bdb.execute('drop population p')<block_end><block_end><def_stmt>test_population_invalid_numerical_alterpop_stattype <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>bdb.execute('''
create population p for t (
guess stattypes of (*);
gender nominal
)
''')<with_stmt>pytest.raises(BQLError)<block_start>bdb.execute('''
alter population p set stattype of gender to numerical
''')<block_end>bdb.execute('drop population p')<block_end><block_end><def_stmt>test_similarity_identity <block_start><with_stmt>test_core.t1()<as>(bdb population_id _generator_id)<block_start>bdb.execute('initialize 6 models for p1_cc;')<line_sep>rowids=bdb.sql_execute('select rowid from t1')<for_stmt>rowid rowids<block_start>c=bdb.execute('''
estimate similarity of (rowid=?) to (rowid=?)
in the context of age by p1
''' (rowid[0] rowid[0])).fetchall()<assert_stmt>len(c)<eq>1<assert_stmt>c[0][0]<eq>1<block_end><block_end><block_end><def_stmt>test_predictive_relevance <block_start><assert_stmt>bql2sql('''
estimate predictive relevance
of (label = 'Uganda')
to existing rows (rowid < 4)
and hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'Europe', "weight" = 7)
)
in the context of "weight"
by p1
''')<eq>'SELECT bql_row_predictive_relevance(1, NULL, NULL, '<concat>'(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '<concat>'\'[1, 2, 3]\', 3, '<concat>'2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'<assert_stmt>bql2sql('''
estimate predictive relevance
of (label = 'mumble')
to existing rows (label = 'frotz' or age <= 4)
in the context of "label"
by p1
''')<eq>'SELECT bql_row_predictive_relevance(1, NULL, NULL, '<concat>'(SELECT _rowid_ FROM "t1" WHERE ("label" = \'mumble\')), '<concat>'\'[5, 8]\', 1);'<assert_stmt>bql2sql('''
estimate label,
predictive relevance
to hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'hunf', "weight" = 7)
)
in the context of "age",
_rowid_ + 1
from p1
''')<eq>'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '<concat>'\'[]\', 2, 2, 82, 3, 14, NULL, 2, 74, 1, \'hunf\', 3, 7, NULL), '<concat>'("_rowid_" + 1) FROM "t1";'<line_sep># No matching rows should still compile.
<assert_stmt>bql2sql('''
estimate label,
predictive relevance to existing rows (rowid < 0)
in the context of "age"
from p1
''')<eq>'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '<concat>'\'[]\', 2) FROM "t1";'<line_sep># When using `BY`, require OF to be specified.
<with_stmt>pytest.raises(BQLError)<block_start>bql2sql('''
estimate predictive relevance
to hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'Europe', "weight" = 7)
)
in the context of "age"
by p1
''')<block_end># When using `FROM`, require OF to be unspecified.
<with_stmt>pytest.raises(BQLError)<block_start>bql2sql('''
estimate predictive relevance
of (name = 'mansour')
to hypothetical rows with values (
("age" = 82, "weight" = 14)
)
in the context of "age"
from p1
''')<block_end><assert_stmt>bql2sql('''
estimate label from p1
where
(predictive relevance to existing rows (label = 'quux' and age < 5)
in the context of "weight") > 1
order by
predictive relevance
to hypothetical rows with values ((label='zot'))
in the context of "age"
''')<eq>'SELECT "label" FROM "t1" WHERE '<concat>'(bql_row_predictive_relevance(1, NULL, NULL, '<concat>'_rowid_, \'[5]\', 3) > 1) '<concat>'ORDER BY bql_row_predictive_relevance(1, NULL, NULL, '<concat>'_rowid_, \'[]\', 2, 1, \'zot\', NULL);'<block_end>@stochastic(max_runs=2 min_passes=1)<def_stmt>test_conditional_probability seed<block_start><with_stmt>test_core.t1(seed=seed)<as>(bdb _population_id _generator_id)<block_start>bdb.execute('drop generator p1_cc')<line_sep>bdb.execute('drop population p1')<line_sep>bdb.execute('''
create population p1 for t1 (
ignore id, label;
set stattype of age to numerical;
set stattype of weight to numerical
)
''')<line_sep>bdb.execute('''
create generator p1_cond_prob_cc for p1;
''')<line_sep>bdb.execute('initialize 1 model for p1_cond_prob_cc')<line_sep>bdb.execute('alter generator p1_cond_prob_cc '<concat>'ensure variables * dependent')<line_sep>bdb.execute('analyze p1_cond_prob_cc for 1 iteration')<line_sep>q0='estimate probability density of age = 8 by p1'<line_sep>q1='estimate probability density of age = 8 given () by p1'<line_sep>age_is_8=bdb.execute(q0).fetchvalue()<assert_stmt>age_is_8<eq>bdb.execute(q1).fetchvalue()<line_sep>q2='estimate probability density of age = 8 given (weight = 16)'<concat>' by p1'<line_sep>age_is_8_given_weight_is_16=bdb.execute(q2).fetchvalue()<assert_stmt>age_is_8<l>age_is_8_given_weight_is_16<line_sep>probs=bdb.execute('estimate probability density of value 8 given (weight = 16)'<concat>' from columns of p1 where v.name != \'weight\'').fetchall()<assert_stmt>[(age_is_8_given_weight_is_16 )]<eq>probs<block_end><block_end>@stochastic(max_runs=2 min_passes=1)<def_stmt>test_joint_probability seed<block_start><with_stmt>test_core.t1(seed=seed)<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 10 models for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 10 iterations')<line_sep>q0='estimate probability density of age = 8 by p1'<line_sep>q1='estimate probability density of (age = 8) by p1'<assert_stmt>bdb.execute(q0).fetchvalue()<eq>bdb.execute(q1).fetchvalue()<line_sep>q1='estimate probability density of (age = 8) given () by p1'<assert_stmt>bdb.execute(q0).fetchvalue()<eq>bdb.execute(q1).fetchvalue()<line_sep>q2='estimate probability density of age = 8 given (weight = 16)'<concat>' by p1'<assert_stmt>bdb.execute(q0).fetchvalue()<l>bdb.execute(q2).fetchvalue()<line_sep>q0='estimate probability density of age = 8 by p1'<line_sep>q1='estimate probability density of (age = 8, weight = 16) by p1'<assert_stmt>bdb.execute(q1).fetchvalue()<l>bdb.execute(q0).fetchvalue()<line_sep>q2='estimate probability density of (age = 8, weight = 16)'<concat>" given (label = 'mumble') by p1"<assert_stmt>bdb.execute(q1).fetchvalue()<l>bdb.execute(q2).fetchvalue()<block_end><block_end><def_stmt>test_badbql <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('')<block_end><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute(';')<block_end><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('select 0; select 1')<block_end><block_end><block_end><def_stmt>test_select_trivial <block_start><assert_stmt>bql2sql('select null;')<eq>'SELECT NULL;'<assert_stmt>bql2sql("select 'x';")<eq>"SELECT 'x';"<assert_stmt>bql2sql("select 'x''y';")<eq>"SELECT 'x''y';"<assert_stmt>bql2sql('select "x";')<eq>'SELECT "x";'<assert_stmt>bql2sql('select "x""y";')<eq>'SELECT "x""y";'<assert_stmt>bql2sql('select 0;')<eq>'SELECT 0;'<assert_stmt>bql2sql('select 0.;')<eq>'SELECT 0.0;'<assert_stmt>bql2sql('select .0;')<eq>'SELECT 0.0;'<assert_stmt>bql2sql('select 0.0;')<eq>'SELECT 0.0;'<assert_stmt>bql2sql('select 1e0;')<eq>'SELECT 1.0;'<assert_stmt>bql2sql('select 1e+1;')<eq>'SELECT 10.0;'<assert_stmt>bql2sql('select 1e-1;')<eq>'SELECT 0.1;'<assert_stmt>bql2sql('select -1e+1;')<eq>'SELECT (- 10.0);'<assert_stmt>bql2sql('select +1e-1;')<eq>'SELECT (+ 0.1);'<assert_stmt>bql2sql('select SQRT(1-EXP(-2*value)) FROM bm_mi;')<eq>'SELECT "SQRT"((1 - "EXP"(((- 2) * "value")))) FROM "bm_mi";'<assert_stmt>bql2sql('select .1e0;')<eq>'SELECT 0.1;'<assert_stmt>bql2sql('select 1.e10;')<eq>'SELECT 10000000000.0;'<assert_stmt>bql2sql('select all 0;')<eq>'SELECT 
0;'<assert_stmt>bql2sql('select distinct 0;')<eq>'SELECT DISTINCT 0;'<assert_stmt>bql2sql('select 0 as z;')<eq>'SELECT 0 AS "z";'<assert_stmt>bql2sql('select * from t;')<eq>'SELECT * FROM "t";'<assert_stmt>bql2sql('select t.* from t;')<eq>'SELECT "t".* FROM "t";'<assert_stmt>bql2sql('select c from t;')<eq>'SELECT "c" FROM "t";'<assert_stmt>bql2sql('select c as d from t;')<eq>'SELECT "c" AS "d" FROM "t";'<assert_stmt>bql2sql('select t.c as d from t;')<eq>'SELECT "t"."c" AS "d" FROM "t";'<assert_stmt>bql2sql('select t.c as d, p as q, x from t;')<eq>'SELECT "t"."c" AS "d", "p" AS "q", "x" FROM "t";'<assert_stmt>bql2sql('select * from t, u;')<eq>'SELECT * FROM "t", "u";'<assert_stmt>bql2sql('select * from t as u;')<eq>'SELECT * FROM "t" AS "u";'<assert_stmt>bql2sql('select * from (select 0);')<eq>'SELECT * FROM (SELECT 0);'<assert_stmt>bql2sql('select t.c from (select d as c from u) as t;')<eq>'SELECT "t"."c" FROM (SELECT "d" AS "c" FROM "u") AS "t";'<assert_stmt>bql2sql('select * where x;')<eq>'SELECT * WHERE "x";'<assert_stmt>bql2sql('select * from t where x;')<eq>'SELECT * FROM "t" WHERE "x";'<assert_stmt>bql2sql('select * group by x;')<eq>'SELECT * GROUP BY "x";'<assert_stmt>bql2sql('select * from t where x group by y;')<eq>'SELECT * FROM "t" WHERE "x" GROUP BY "y";'<assert_stmt>bql2sql('select * from t where x group by y, z;')<eq>'SELECT * FROM "t" WHERE "x" GROUP BY "y", "z";'<assert_stmt>bql2sql('select * from t where x group by y having sum(z) < 1')<eq>'SELECT * FROM "t" WHERE "x" GROUP BY "y" HAVING ("sum"("z") < 1);'<assert_stmt>bql2sql('select * order by x;')<eq>'SELECT * ORDER BY "x";'<assert_stmt>bql2sql('select * order by x asc;')<eq>'SELECT * ORDER BY "x";'<assert_stmt>bql2sql('select * order by x desc;')<eq>'SELECT * ORDER BY "x" DESC;'<assert_stmt>bql2sql('select * order by x, y;')<eq>'SELECT * ORDER BY "x", "y";'<assert_stmt>bql2sql('select * order by x desc, y;')<eq>'SELECT * ORDER BY "x" DESC, "y";'<assert_stmt>bql2sql('select * order by x, y asc;')<eq>'SELECT * ORDER BY "x", "y";'<assert_stmt>bql2sql('select * limit 32;')<eq>'SELECT * LIMIT 32;'<assert_stmt>bql2sql('select * limit 32 offset 16;')<eq>'SELECT * LIMIT 32 OFFSET 16;'<assert_stmt>bql2sql('select * limit 16, 32;')<eq>'SELECT * LIMIT 32 OFFSET 16;'<assert_stmt>bql2sql('select (select0);')<eq>'SELECT "select0";'<assert_stmt>bql2sql('select (select 0);')<eq>'SELECT (SELECT 0);'<assert_stmt>bql2sql('select f(f(), f(x), y);')<eq>'SELECT "f"("f"(), "f"("x"), "y");'<assert_stmt>bql2sql('select a and b or c or not d is e is not f like j;')<eq>'SELECT ((("a" AND "b") OR "c") OR'+' (NOT ((("d" IS "e") IS NOT "f") LIKE "j")));'<assert_stmt>bql2sql('select a like b not like c like d escape e;')<eq>'SELECT ((("a" LIKE "b") NOT LIKE "c") LIKE "d" ESCAPE "e");'<assert_stmt>bql2sql('select a like b escape c glob d not glob e;')<eq>'SELECT ((("a" LIKE "b" ESCAPE "c") GLOB "d") NOT GLOB "e");'<assert_stmt>bql2sql('select a not glob b glob c escape d;')<eq>'SELECT (("a" NOT GLOB "b") GLOB "c" ESCAPE "d");'<assert_stmt>bql2sql('select a glob b escape c regexp e not regexp f;')<eq>'SELECT ((("a" GLOB "b" ESCAPE "c") REGEXP "e") NOT REGEXP "f");'<assert_stmt>bql2sql('select a not regexp b regexp c escape d;')<eq>'SELECT (("a" NOT REGEXP "b") REGEXP "c" ESCAPE "d");'<assert_stmt>bql2sql('select a regexp b escape c not regexp d escape e;')<eq>'SELECT (("a" REGEXP "b" ESCAPE "c") NOT REGEXP "d" ESCAPE "e");'<assert_stmt>bql2sql('select a not regexp b escape c match e not match f;')<eq>'SELECT ((("a" NOT REGEXP "b" ESCAPE "c") MATCH "e") 
NOT MATCH "f");'<assert_stmt>bql2sql('select a not match b match c escape d;')<eq>'SELECT (("a" NOT MATCH "b") MATCH "c" ESCAPE "d");'<assert_stmt>bql2sql('select a match b escape c not match d escape e;')<eq>'SELECT (("a" MATCH "b" ESCAPE "c") NOT MATCH "d" ESCAPE "e");'<assert_stmt>bql2sql('select a not match b escape c between d and e;')<eq>'SELECT (("a" NOT MATCH "b" ESCAPE "c") BETWEEN "d" AND "e");'<assert_stmt>bql2sql('select a between b and c and d;')<eq>'SELECT (("a" BETWEEN "b" AND "c") AND "d");'<assert_stmt>bql2sql('select a like b like c escape d between e and f;')<eq>'SELECT ((("a" LIKE "b") LIKE "c" ESCAPE "d") BETWEEN "e" AND "f");'<assert_stmt>bql2sql('select a between b and c not between d and e;')<eq>'SELECT (("a" BETWEEN "b" AND "c") NOT BETWEEN "d" AND "e");'<assert_stmt>bql2sql('select a not between b and c in (select f);')<eq>'SELECT (("a" NOT BETWEEN "b" AND "c") IN (SELECT "f"));'<assert_stmt>bql2sql('select a in (select b) and c not in (select d);')<eq>'SELECT (("a" IN (SELECT "b")) AND ("c" NOT IN (SELECT "d")));'<assert_stmt>bql2sql("select a in (1 + 2, '3') and b not in (select c);")<eq>'SELECT (("a" IN ((1 + 2), \'3\')) AND ("b" NOT IN (SELECT "c")));'<assert_stmt>bql2sql('select a in (select b) isnull notnull!=c<>d<e<=f>g;')<eq>'SELECT ((((("a" IN (SELECT "b")) ISNULL) NOTNULL) != "c") !='+' ((("d" < "e") <= "f") > "g"));'<assert_stmt>bql2sql('select a>b>=c<<d>>e&f|g+h-i*j/k;')<eq>'SELECT (("a" > "b") >= (((("c" << "d") >> "e") & "f") |'+' (("g" + "h") - (("i" * "j") / "k"))));'<assert_stmt>bql2sql('select a/b%c||~~d collate e collate\'f\'||1;')<eq>'SELECT (("a" / "b") % (("c" || (((~ (~ "d")) COLLATE "e")'+' COLLATE "f")) || 1));'<assert_stmt>bql2sql('select cast(f(x) as binary blob);')<eq>'SELECT CAST("f"("x") AS "binary" "blob");'<assert_stmt>bql2sql('select cast(42 as varint(73));')<eq>'SELECT CAST(42 AS "varint"(73));'<assert_stmt>bql2sql('select cast(f(x, y, z) as varchar(12 ,34));')<eq>'SELECT CAST("f"("x", "y", "z") AS "varchar"(12, 34));'<assert_stmt>bql2sql('select exists (select a) and not exists (select b);')<eq>'SELECT (EXISTS (SELECT "a") AND (NOT EXISTS (SELECT "b")));'<assert_stmt>bql2sql('select case when a - b then c else d end from t;')<eq>'SELECT CASE WHEN ("a" - "b") THEN "c" ELSE "d" END FROM "t";'<assert_stmt>bql2sql('select case f(a) when b + c then d else e end from t;')<eq>'SELECT CASE "f"("a") WHEN ("b" + "c") THEN "d" ELSE "e" END FROM "t";'<block_end><def_stmt>test_estimate_bql # PREDICTIVE PROBABILITY
<block_start><assert_stmt>bql2sql('estimate predictive probability of weight from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[3]\', \'[]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of (age, weight) '<concat>'from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[2, 3]\', \'[]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of (age, weight) given '<concat>'(label) from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[2, 3]\', \'[1]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of (*) from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[1, 2, 3]\', \'[]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of (*) given (age, weight) '<concat>'from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[1]\', \'[2, 3]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of age given (*) '<concat>'from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[2]\', \'[1, 3]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate label, predictive probability of weight'<concat>' from p1;')<eq>'SELECT "label", '<concat>'bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[3]\', \'[]\')'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of weight, label'<concat>' from p1;')<eq>'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '<concat>'\'[3]\', \'[]\'),'<concat>' "label"'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of weight + 1'<concat>' from p1;')<eq>'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '<concat>'_rowid_, \'[3]\', \'[]\') + 1)'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate predictive probability of weight given (*) + 1'<concat>' from p1;')<eq>'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '<concat>'_rowid_, \'[3]\', \'[1, 2]\') + 1)'<concat>' FROM "t1";'<line_sep># PREDICTIVE PROBABILITY parse and compilation errors.
<with_stmt>pytest.raises(parse.BQLParseError)# Need a table.
<block_start>bql2sql('estimate predictive probability of weight;')<block_end><with_stmt>pytest.raises(parse.BQLParseError)# Need at most one generator.
<block_start>bql2sql('estimate predictive probability of weight'<concat>' from p1, p1;')<block_end><with_stmt>pytest.raises(parse.BQLParseError)# Need a generator name, not a subquery.
<block_start>bql2sql('estimate predictive probability of weight'<concat>' from (select 0);')<block_end><with_stmt>pytest.raises(parse.BQLParseError)# Need a column.
<block_start>bql2sql('estimate predictive probability from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Using (*) in both targets and constraints.
<block_start>bql2sql('estimate predictive probability of (*) given (*) from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Using (weight, *) in targets.
<block_start>bql2sql('estimate predictive probability of (weight, *) given (age) '<concat>'from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Using (age, *) in constraints.
<block_start>bql2sql('estimate predictive probability of weight given (*, age) '<concat>'from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Using duplicate column age.
<block_start>bql2sql('estimate predictive probability of age given (weight, age) '<concat>'from p1;')<block_end># PROBABILITY DENSITY.
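# As the expected SQL below suggests, targets are encoded as (colno, value)
# pairs in bql_pdf_joint, with a NULL sentinel separating them from the GIVEN
# constraints, e.g. weight = 20 given (age = 8) becomes
# bql_pdf_joint(1, NULL, NULL, 3, 20, NULL, 2, 8).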
<assert_stmt>bql2sql('estimate probability density of weight = 20 from p1;')<eq>'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20) FROM "t1";'<assert_stmt>bql2sql('estimate probability density of weight = 20'<concat>' given (age = 8)'<concat>' from p1;')<eq>'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, NULL, 2, 8) FROM "t1";'<assert_stmt>bql2sql('estimate probability density of (weight = 20, age = 8)'<concat>' from p1;')<eq>'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8) FROM "t1";'<assert_stmt>bql2sql('estimate probability density of (weight = 20, age = 8)'<concat>" given (label = 'mumble') from p1;")<eq>"SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8, NULL, 1, 'mumble')"<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate probability density of weight = (c + 1)'<concat>' from p1;')<eq>'SELECT bql_pdf_joint(1, NULL, NULL, 3, ("c" + 1)) FROM "t1";'<assert_stmt>bql2sql('estimate probability density of weight = f(c)'<concat>' from p1;')<eq>'SELECT bql_pdf_joint(1, NULL, NULL, 3, "f"("c")) FROM "t1";'<assert_stmt>bql2sql('estimate similarity to (rowid = 5) '<concat>'in the context of weight from p1;')<eq>'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'<concat>' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'<assert_stmt>bql2sql('estimate similarity of (rowid = 12) to (rowid = 5) '<concat>'in the context of weight from p1;')<eq>'SELECT bql_row_similarity(1, NULL, NULL,'<concat>' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 12)),'<concat>' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'<assert_stmt>bql2sql('estimate similarity to (rowid = 5) in the context of age'<concat>' from p1')<eq>'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'<concat>' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'<assert_stmt>bql2sql('estimate similarity of (rowid = 5) to (height = 7 and age < 10)'<concat>' in the context of weight from p1;')<eq>'SELECT bql_row_similarity(1, NULL, NULL,'<concat>' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)),'<concat>' (SELECT _rowid_ FROM "t1" WHERE (("height" = 7) AND ("age" < 10))),'<concat>' 3) FROM "t1";'<with_stmt>pytest.raises(bayeslite.BQLError)# Cannot use all variables for similarity.
<block_start>bql2sql('estimate similarity to (rowid = 5) in the context of * from p1;')<block_end><assert_stmt>bql2sql('estimate similarity to (rowid = 5)'<concat>' in the context of age from p1;')<eq>'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'<concat>' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'<assert_stmt>bql2sql('estimate dependence probability of age with weight'<concat>' from p1;')<eq>'SELECT bql_column_dependence_probability(1, NULL, NULL, 2, 3) '<concat>'FROM "t1";'<with_stmt>pytest.raises(bayeslite.BQLError)# Need both rows fixed.
<block_start>bql2sql('estimate similarity to (rowid=2) in the context of r by p1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both rows fixed.
<block_start>bql2sql('estimate similarity in the context of r within p1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate dependence probability with age from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate dependence probability from p1;')<block_end><assert_stmt>bql2sql('estimate mutual information of age with weight'+' from p1;')<eq>'SELECT bql_column_mutual_information('<concat>'1, NULL, NULL, \'[2]\', \'[3]\', NULL)'<concat>' FROM "t1";'<assert_stmt>bql2sql('estimate mutual information of age with weight'+' using 42 samples from p1;')<eq>'SELECT bql_column_mutual_information('<concat>'1, NULL, NULL, \'[2]\', \'[3]\', 42)'<concat>' FROM "t1";'<with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate mutual information with age from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate mutual information from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate mutual information with age using 42 samples'<concat>' from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate mutual information using 42 samples from p1;')<block_end># XXX Should be SELECT, not ESTIMATE, here?
<assert_stmt>bql2sql('estimate correlation of age with weight from p1;')<eq>'SELECT bql_column_correlation(1, NULL, NULL, 2, 3) FROM "t1";'<with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate correlation with age from p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Need both columns fixed.
<block_start>bql2sql('estimate correlation from p1;')<block_end><with_stmt>pytest.raises(BQLError)# Variable must exist.
<block_start>bql2sql('estimate correlation with agee from variables of p1')<block_end><block_end><def_stmt>test_predict_outside_infer <block_start><with_stmt>pytest.raises(bayeslite.BQLError)# No PREDICT outside INFER.
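# PREDICT is only meaningful under INFER; the INFER EXPLICIT cases further below
# appear to compile it to bql_predict(population, generator, modelnos, _rowid_,
# colno, confidence, nsamples).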
<block_start>bql2sql('estimate predict age with confidence 0.9 from p1;')<block_end><block_end><def_stmt>test_infer_explicit_predict_confidence <block_start><assert_stmt>bql2sql('infer explicit predict age with confidence 0.9'<concat>' from p1;')<eq>'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL) FROM "t1";'<block_end><def_stmt>test_infer_explicit_predict_confidence_nsamples <block_start><assert_stmt>bql2sql('infer explicit'<concat>' predict age with confidence 0.9 using 42 samples'<concat>' from p1;')<eq>'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42) FROM "t1";'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_confidence <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age confidence age_conf from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age",'<concat>' bql_json_get(c2, \'confidence\') AS "age_conf"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_noconfidence <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_confidence_nsamples <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age confidence age_conf using 42 samples from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age",'<concat>' bql_json_get(c2, \'confidence\') AS "age_conf"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_noconfidence_nsamples <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age using 42 samples from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_confidence_as <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age as age_inf confidence age_conf from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age_inf",'<concat>' bql_json_get(c2, \'confidence\') AS "age_conf"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_noconfidence_as <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age as age_inf from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age_inf"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_confidence_as_nsamples <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age as age_inf confidence age_conf using 87 samples'<concat>' from p1')<eq>'SELECT c0 AS 
"rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age_inf",'<concat>' bql_json_get(c2, \'confidence\') AS "age_conf"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_explicit_verbatim_and_predict_noconfidence_as_nsamples <block_start><assert_stmt>bql2sql('infer explicit rowid, age,'<concat>' predict age as age_inf using 87 samples'<concat>' from p1')<eq>'SELECT c0 AS "rowid", c1 AS "age",'<concat>' bql_json_get(c2, \'value\') AS "age_inf"'<concat>' FROM (SELECT "rowid" AS c0, "age" AS c1,'<concat>' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)'<concat>' AS c2 FROM "t1");'<block_end><def_stmt>test_infer_auto <block_start><assert_stmt>bql2sql('infer rowid, age, weight from p1')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))'<concat>' AS "weight"'<concat>' FROM "t1";'<block_end><def_stmt>test_infer_auto_nsamples <block_start><assert_stmt>bql2sql('infer rowid, age, weight using (1+2) samples from p1')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, (1 + 2)))'<concat>' AS "age",'<concat>' "IFNULL"("weight",'<concat>' bql_predict(1, NULL, NULL, _rowid_, 3, 0, (1 + 2)))'<concat>' AS "weight"'<concat>' FROM "t1";'<block_end><def_stmt>test_infer_auto_with_confidence <block_start><assert_stmt>bql2sql('infer rowid, age, weight with confidence 0.9 from p1')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))'<concat>' AS "age",'<concat>' "IFNULL"("weight",'<concat>' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, NULL))'<concat>' AS "weight"'<concat>' FROM "t1";'<block_end><def_stmt>test_infer_auto_with_confidence_nsamples <block_start><assert_stmt>bql2sql('infer rowid, age, weight with confidence 0.9'<concat>' using sqrt(2) samples'<concat>' from p1')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9,'<concat>' "sqrt"(2)))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'<concat>' "sqrt"(2)))'<concat>' AS "weight"'<concat>' FROM "t1";'<block_end><def_stmt>test_infer_auto_with_confidence_where <block_start><assert_stmt>bql2sql('infer rowid, age, weight with confidence 0.9 from p1'<concat>' where label = \'foo\'')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'<concat>' NULL))'<concat>' AS "weight"'<concat>' FROM "t1"'<concat>' WHERE ("label" = \'foo\');'<block_end><def_stmt>test_infer_auto_with_confidence_nsamples_where <block_start><assert_stmt>bql2sql('infer rowid, age, weight with confidence 0.9'<concat>' using 42 samples'<concat>' from p1'<concat>' where label = \'foo\'')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))'<concat>' AS "weight"'<concat>' FROM "t1"'<concat>' WHERE ("label" = \'foo\');'<block_end><def_stmt>test_infer_auto_with_confidence_nsamples_where_predict <block_start><assert_stmt>bql2sql('infer rowid, age, weight with confidence 0.9 from p1'<concat>' where ifnull(label, predict label 
with confidence 0.7)'<concat>' = \'foo\'')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'<concat>' NULL))'<concat>' AS "weight"'<concat>' FROM "t1"'<concat>' WHERE ("ifnull"("label",'<concat>' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, NULL))'<concat>' = \'foo\');'<block_end><def_stmt>test_infer_auto_with_confidence_nsamples_where_predict_nsamples <block_start><assert_stmt>bql2sql('infer rowid, age, weight with confidence 0.9'<concat>' using 42 samples'<concat>' from p1'<concat>' where ifnull(label, predict label with confidence 0.7'<concat>' using 73 samples)'<concat>' = \'foo\'')<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))'<concat>' AS "weight"'<concat>' FROM "t1"'<concat>' WHERE ("ifnull"("label",'<concat>' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, 73))'<concat>' = \'foo\');'<block_end><def_stmt>test_infer_auto_star <block_start><assert_stmt>bql2sql('infer rowid, * from p1')<eq>'SELECT "rowid" AS "rowid", "id" AS "id",'<concat>' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, NULL))'<concat>' AS "label",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))'<concat>' AS "weight"'<concat>' FROM "t1";'<block_end><def_stmt>test_infer_auto_star_nsamples <block_start><assert_stmt>bql2sql('infer rowid, * using 1 samples from p1')<eq>'SELECT "rowid" AS "rowid", "id" AS "id",'<concat>' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, 1))'<concat>' AS "label",'<concat>' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, 1))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, 1))'<concat>' AS "weight"'<concat>' FROM "t1";'<block_end><def_stmt>test_estimate_columns_trivial <block_start>prefix0='SELECT v.name AS name'<line_sep>prefix1=' FROM bayesdb_variable AS v'<concat>' WHERE v.population_id = 1'<concat>' AND v.generator_id IS NULL'<line_sep>prefix=prefix0+prefix1<assert_stmt>bql2sql('estimate * from columns of p1;')<eq>prefix+';'<assert_stmt>bql2sql('estimate * from columns of p1 where'+' (probability density of value 42) > 0.5')<eq>prefix+' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 42) > 0.5);'<assert_stmt>bql2sql('estimate * from columns of p1'<concat>' where (probability density of value 8)'<concat>' > (probability density of age = 16)')<eq>prefix+' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 8) >'<concat>' bql_pdf_joint(1, NULL, NULL, 2, 16));'<assert_stmt>bql2sql('estimate *, probability density of value 8 given (age = 8)'<concat>' from columns of p1;')<eq>prefix0+', bql_column_value_probability(1, NULL, NULL, v.colno, 8, 2, 8)'+prefix1+';'<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bql2sql('estimate probability density of value 8 given (agee = 8)'<concat>' from columns of p1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# PREDICTIVE PROBABILITY makes no sense without row.
<block_start>bql2sql('estimate * from columns of p1 where'+' predictive probability of x > 0;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# SIMILARITY makes no sense without row.
<block_start>bql2sql('estimate * from columns of p1 where'+' similarity to (rowid = x) in the context of c > 0;')<block_end><assert_stmt>bql2sql('estimate * from columns of p1 where'+' dependence probability with age > 0.5;')<eq>prefix+' AND (bql_column_dependence_probability(1, NULL, NULL, 2, v.colno)'<concat>' > 0.5);'<with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1 where'+' dependence probability of age with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1'<concat>' where dependence probability > 0.5;')<block_end><assert_stmt>bql2sql('estimate * from columns of p1 order by'+' mutual information with age;')<eq>prefix+' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2]\','<concat>' \'[\' || v.colno || \']\', NULL);'<assert_stmt>bql2sql('estimate * from columns of p1 order by'+' mutual information with (age, label) using 42 samples;')<eq>prefix+' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','<concat>' \'[\' || v.colno || \']\', 42);'<assert_stmt>bql2sql('estimate * from columns of p1 order by'+' mutual information with (age, label)'<concat>' given (weight=12) using 42 samples;')<eq>prefix+' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','<concat>' \'[\' || v.colno || \']\', 42, 3, 12);'<with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1 order by'+' mutual information of age with weight;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1'<concat>' where mutual information > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1 order by'+' mutual information of age with weight using 42 samples;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1 where'+' mutual information using 42 samples > 0.5;')<block_end><assert_stmt>bql2sql('estimate * from columns of p1 order by'+' correlation with age desc;')<eq>prefix+' ORDER BY bql_column_correlation(1, NULL, NULL, 2, v.colno)'<concat>' DESC;'<with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1 order by'+' correlation of age with weight;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit exactly one column.
<block_start>bql2sql('estimate * from columns of p1 where correlation > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Makes no sense.
<block_start>bql2sql('estimate * from columns of p1'<concat>' where predict age with confidence 0.9 > 30;')<block_end><assert_stmt>bql2sql('estimate'<concat>' *, dependence probability with weight as depprob,'<concat>' mutual information with weight as mutinf'<concat>' from columns of p1'<concat>' where depprob > 0.5 order by mutinf desc')<eq>prefix0+', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)'<concat>' AS "depprob"'<concat>', bql_column_mutual_information(1, NULL, NULL, \'[3]\','<concat>' \'[\' || v.colno || \']\', NULL) AS "mutinf"'+prefix1+' AND ("depprob" > 0.5)'<concat>' ORDER BY "mutinf" DESC;'<assert_stmt>bql2sql('estimate'<concat>' *, dependence probability with weight as depprob,'<concat>' mutual information with (age, weight) as mutinf'<concat>' from columns of p1'<concat>' where depprob > 0.5 order by mutinf desc')<eq>prefix0+', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)'<concat>' AS "depprob"'<concat>', bql_column_mutual_information(1, NULL, NULL, \'[2, 3]\','<concat>' \'[\' || v.colno || \']\', NULL) AS "mutinf"'+prefix1+' AND ("depprob" > 0.5)'<concat>' ORDER BY "mutinf" DESC;'<line_sep># XXX This mixes up target and reference variables, which is OK,
# because MI is symmetric, but...oops.
<assert_stmt>bql2sql('estimate * from variables of p1'<concat>' where probability of (mutual information with age < 0.1)'<concat>' > 0.8')<eq>prefix+' AND ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"'<concat>' FROM (SELECT mi AS "v0" FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[' || v.colno || ']'))) > 0.8);"<assert_stmt>bql2sql('estimate * from variables of p1'<concat>' order by probability of (mutual information with age < 0.1)')<eq>prefix+' ORDER BY (SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"'<concat>' FROM (SELECT mi AS "v0" FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[' || v.colno || ']')));"<block_end><def_stmt>test_estimate_pairwise_trivial <block_start>prefix='SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1, '<line_sep>infix=' AS value'<line_sep>infix0=' FROM bayesdb_population AS p,'<line_sep>infix0<augadd>' bayesdb_variable AS v0,'<line_sep>infix0<augadd>' bayesdb_variable AS v1'<line_sep>infix0<augadd>' WHERE p.id = 1'<line_sep>infix0<augadd>' AND v0.population_id = p.id AND v1.population_id = p.id'<line_sep>infix0<augadd>' AND v0.generator_id IS NULL'<line_sep>infix0<augadd>' AND v1.generator_id IS NULL'<line_sep>infix<augadd>infix0<assert_stmt>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1;')<eq>prefix+'bql_column_dependence_probability(1, NULL, NULL, v0.colno,'<concat>' v1.colno)'+infix+';'<assert_stmt>bql2sql('estimate mutual information'<concat>' from pairwise columns of p1 where'<concat>' (probability density of age = 0) > 0.5;')<eq>prefix+'bql_column_mutual_information(1, NULL, NULL, '<concat>'\'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)'+infix+' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'<assert_stmt>bql2sql('estimate mutual information given (label=\'go\', weight)'<concat>' from pairwise columns of p1 where'<concat>' (probability density of age = 0) > 0.5;')<eq>prefix+'bql_column_mutual_information(1, NULL, NULL,'<concat>' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL,'<concat>' 1, \'go\', 3, NULL)'+infix+' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'<with_stmt>pytest.raises(bayeslite.BQLError)# PROBABILITY DENSITY OF VALUE is 1-column.
<block_start>bql2sql('estimate correlation from pairwise columns of p1 where'+' (probability density of value 0) > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# PREDICTIVE PROBABILITY OF is a row function.
<block_start>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'+' where predictive probability of x > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'<concat>' where dependence probability of age with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate mutual information from pairwise columns of p1'<concat>' where dependence probability with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate mutual information using 42 samples'<concat>' from pairwise columns of p1'<concat>' where dependence probability with weight > 0.5;')<block_end><assert_stmt>bql2sql('estimate correlation from pairwise columns of p1'<concat>' where dependence probability > 0.5;')<eq>prefix+'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)'+infix+' AND'<concat>' (bql_column_dependence_probability(1, NULL, NULL, v0.colno,'<concat>' v1.colno)'<concat>' > 0.5);'<with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'<concat>' where mutual information of age with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'<concat>' where mutual information of age with weight using 42 samples'<concat>' > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate mutual information from pairwise columns of p1'<concat>' where mutual information with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate mutual information using 42 samples'<concat>' from pairwise columns of p1'<concat>' where mutual information with weight using 42 samples > 0.5;')<block_end><assert_stmt>bql2sql('estimate correlation from pairwise columns of p1'+' where mutual information > 0.5;')<eq>prefix+'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)'+infix+' AND'+' (bql_column_mutual_information(1, NULL, NULL,'<concat>' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL) > 0.5);'<assert_stmt>bql2sql('estimate correlation from pairwise columns of p1'+' where mutual information using 42 samples > 0.5;')<eq>prefix+'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)'+infix+' AND'+' (bql_column_mutual_information(1, NULL, NULL,'<concat>' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', 42) > 0.5);'<with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'<concat>' where correlation of age with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate mutual information from pairwise columns of p1'<concat>' where correlation with weight > 0.5;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Must omit both columns.
<block_start>bql2sql('estimate mutual information using 42 samples'<concat>' from pairwise columns of p1'<concat>' where correlation with weight > 0.5;')<block_end><assert_stmt>bql2sql('estimate correlation from pairwise columns of p1'<concat>' where correlation > 0.5;')<eq>prefix+'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)'+infix+' AND'+' (bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno) > 0.5);'<with_stmt>pytest.raises(bayeslite.BQLError)# Makes no sense.
<block_start>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'<concat>' where predict age with confidence 0.9 > 30;')<block_end><assert_stmt>bql2sql('estimate dependence probability as depprob,'<concat>' mutual information as mutinf'<concat>' from pairwise columns of p1'<concat>' where depprob > 0.5 order by mutinf desc')<eq>prefix+'bql_column_dependence_probability(1, NULL, NULL, v0.colno, v1.colno)'<concat>' AS "depprob",'<concat>' bql_column_mutual_information(1, NULL, NULL,'<concat>' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)'<concat>' AS "mutinf"'+infix0+' AND ("depprob" > 0.5)'<concat>' ORDER BY "mutinf" DESC;'<block_end><def_stmt>test_estimate_pairwise_row <block_start>prefix='SELECT r0._rowid_ AS rowid0, r1._rowid_ AS rowid1'<line_sep>infix=' AS value FROM "t1" AS r0, "t1" AS r1'<assert_stmt>bql2sql('estimate similarity in the context of age'+' from pairwise p1;')<eq>prefix+', bql_row_similarity(1, NULL, NULL,'<concat>' r0._rowid_, r1._rowid_, 2)'+infix+';'<with_stmt>pytest.raises(bayeslite.BQLError)# PREDICT is a 1-row function.
<block_start>bql2sql('estimate predict age with confidence 0.9 from pairwise t1;')<block_end><block_end><def_stmt>test_estimate_pairwise_selected_columns <block_start><assert_stmt>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1 for label, age')<eq>'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,'<concat>' bql_column_dependence_probability(1, NULL, NULL,'<concat>' v0.colno, v1.colno)'<concat>' AS value'<concat>' FROM bayesdb_population AS p,'<concat>' bayesdb_variable AS v0,'<concat>' bayesdb_variable AS v1'<concat>' WHERE p.id = 1'<concat>' AND v0.population_id = p.id AND v1.population_id = p.id'<concat>' AND v0.generator_id IS NULL AND v1.generator_id IS NULL'<concat>' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'<assert_stmt>bql2sql('estimate dependence probability'<concat>' from pairwise columns of p1'<concat>' for (ESTIMATE * FROM COLUMNS OF p1'<concat>' ORDER BY name DESC LIMIT 2)')<eq>'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,'<concat>' bql_column_dependence_probability(1, NULL, NULL, v0.colno,'<concat>' v1.colno)'<concat>' AS value'<concat>' FROM bayesdb_population AS p,'<concat>' bayesdb_variable AS v0,'<concat>' bayesdb_variable AS v1'<concat>' WHERE p.id = 1'<concat>' AND v0.population_id = p.id AND v1.population_id = p.id'<concat>' AND v0.generator_id IS NULL AND v1.generator_id IS NULL'<concat>' AND v0.colno IN (3, 1) AND v1.colno IN (3, 1);'<block_end><def_stmt>test_select_columns_subquery <block_start><assert_stmt>bql2sql('select id, t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from t1')<eq>'SELECT "id", "t1"."age", "t1"."label" FROM "t1";'<block_end>@pytest.mark.xfail(strict=<true> reason='no simulate vars from models of')<def_stmt>test_simulate_models_columns_subquery <block_start><assert_stmt>bql2sql('simulate weight, t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from models of p1')<eq>'SELECT * FROM "bayesdb_temp_0";'<assert_stmt>bql2sql('simulate 0, weight, t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from models of p1')<eq>'SELECT 0, "v0" AS "weight", "v1" AS "age", "v2" AS "label" FROM'<concat>' (SELECT * FROM "bayesdb_temp_0");'<assert_stmt>bql2sql('simulate weight + 1, t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from models of p1')<eq>'SELECT ("v0" + 1), "v1" AS "age", "v2" AS "label" FROM'<concat>' (SELECT * FROM "bayesdb_temp_0");'<assert_stmt>bql2sql('simulate weight + 1 AS wp1,'<concat>' t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from models of p1')<eq>'SELECT ("v0" + 1) AS "wp1", "v1" AS "age", "v2" AS "label" FROM'<concat>' (SELECT * FROM "bayesdb_temp_0");'<block_end><def_stmt>test_simulate_columns_subquery # XXX This test is a little unsatisfactory -- we do not get to see
# what the variables in the result are named...
<block_start><assert_stmt>bql2sql('simulate weight, t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from p1 limit 10')<eq>'SELECT * FROM "bayesdb_temp_0";'<with_stmt>pytest.raises(parse.BQLParseError)# Compound columns not yet implemented for SIMULATE.
<block_start>bql2sql('simulate weight + 1, t1.(estimate * from columns of p1'<concat>' order by name asc limit 2) from p1 limit 10')<block_end><block_end><def_stmt>test_simulate_models # Base case.
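# Reading of the expected SQL in this function: SIMULATE <mutual information
# expr> FROM MODELS OF p1 appears to compile to a SELECT of mi from the
# bql_mutinf table, filtered on population_id plus JSON-encoded
# target_vars/reference_vars colno lists (label=1, age=2, weight=3), with
# nsamples, conditions, or generator_id added only when USING ... SAMPLES,
# GIVEN, or MODELED BY appear in the query.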
<block_start><assert_stmt>bql2sql('simulate mutual information of age with weight'<concat>' from models of p1')<eq>'SELECT mi FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[3]';"<line_sep># Multiple target variables.
<assert_stmt>bql2sql('simulate mutual information of (label, age) with weight'<concat>' from models of p1')<eq>'SELECT mi FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[1, 2]'"<concat>" AND reference_vars = '[3]';"<line_sep># Multiple reference variables.
<assert_stmt>bql2sql('simulate mutual information of age with (label, weight)'<concat>' from models of p1')<eq>'SELECT mi FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[1, 3]';"<line_sep># Specified number of samples.
<assert_stmt>bql2sql('simulate mutual information of age with weight'<concat>' using 42 samples from models of p1')<eq>'SELECT mi FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[3]'"<concat>' AND nsamples = 42;'<line_sep># Conditional.
<assert_stmt>bql2sql('simulate mutual information of age with weight'<concat>" given (label = 'foo') from models of p1")<eq>'SELECT mi FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[3]'"<concat>" AND conditions = '{\"1\": \"foo\"}';"<line_sep># Modeled by a specific generator.
<assert_stmt>bql2sql('simulate mutual information of age with weight'<concat>' from models of p1 modeled by g1' <lambda>bdb:bdb.execute('create generator g1 for p1'))<eq>'SELECT mi FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>' AND generator_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[3]';"<line_sep># Two mutual informations.
<assert_stmt>bql2sql('simulate mutual information of age with weight AS "mi(aw)",'<concat>' mutual information of label with weight AS "mi(lw)"'<concat>' from models of p1')<eq>'SELECT t0."mi(aw)" AS "mi(aw)", t1."mi(lw)" AS "mi(lw)"'<concat>' FROM (SELECT _rowid_, mi AS "mi(aw)" FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[3]') AS t0,"<concat>' (SELECT _rowid_, mi AS "mi(lw)" FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[1]'"<concat>" AND reference_vars = '[3]') AS t1"<concat>' WHERE t0._rowid_ = t1._rowid_;'<block_end><def_stmt>test_probability_of_mutinf <block_start><assert_stmt>bql2sql('estimate probability of'<concat>' (mutual information of age with weight < 0.1) > 0.5'<concat>' within p1')<eq>'SELECT ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"'<concat>' FROM (SELECT mi AS "v0" FROM bql_mutinf'<concat>' WHERE population_id = 1'<concat>" AND target_vars = '[2]'"<concat>" AND reference_vars = '[3]'))) > 0.5);"<block_end><def_stmt>test_modeledby_usingmodels_trival <block_start><def_stmt>setup bdb<block_start>bdb.execute('create generator m1 for p1 using cgpm;')<block_end><assert_stmt>bql2sql('estimate predictive probability of weight + 1'<concat>' from p1 modeled by m1 using models 1-3, 5;' setup=setup)<eq>'SELECT (bql_row_column_predictive_probability(1, 1, \'[1, 2, 3, 5]\','<concat>' _rowid_, \'[3]\', \'[]\') + 1)'<concat>' FROM "t1";'<assert_stmt>bql2sql('infer rowid, age, weight from p1 modeled by m1 using model 7' setup=setup)<eq>'SELECT "rowid" AS "rowid",'<concat>' "IFNULL"("age", bql_predict(1, 1, \'[7]\', _rowid_, 2, 0, NULL))'<concat>' AS "age",'<concat>' "IFNULL"("weight", bql_predict(1, 1, \'[7]\', _rowid_, 3, 0, NULL))'<concat>' AS "weight"'<concat>' FROM "t1";'<assert_stmt>bql2sql('infer explicit predict age with confidence 0.9'<concat>' from p1 using models 0, 3-5;' setup=setup)<eq>'SELECT bql_predict(1, NULL, \'[0, 3, 4, 5]\', _rowid_, 2, 0.9, NULL)'<concat>' FROM "t1";'<assert_stmt>bql2sql('''
estimate predictive relevance
of (label = 'Uganda')
to existing rows (rowid < 4)
and hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'Europe', "weight" = 7)
)
in the context of "weight"
by p1 modeled by m1 using models 8, 10-12
''' setup=setup)<eq>'SELECT bql_row_predictive_relevance(1, 1, \'[8, 10, 11, 12]\', '<concat>'(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '<concat>'\'[1, 2, 3]\', 3, '<concat>'2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'<assert_stmt>bql2sql('''
estimate dependence probability
from pairwise columns of p1
for label, age
modeled by m1
using models 1, 4, 12
''' setup=setup)<eq>'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,'<concat>' bql_column_dependence_probability(1, 1, \'[1, 4, 12]\','<concat>' v0.colno, v1.colno)'<concat>' AS value'<concat>' FROM bayesdb_population AS p,'<concat>' bayesdb_variable AS v0,'<concat>' bayesdb_variable AS v1'<concat>' WHERE p.id = 1'<concat>' AND v0.population_id = p.id AND v1.population_id = p.id'<concat>' AND (v0.generator_id IS NULL OR v0.generator_id = 1)'<concat>' AND (v1.generator_id IS NULL OR v1.generator_id = 1)'<concat>' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'<assert_stmt>bql2sql('''
estimate mutual information of age with weight
from p1 modeled by m1 using model 1;
''' setup=setup)<eq>'SELECT bql_column_mutual_information('<concat>'1, 1, \'[1]\', \'[2]\', \'[3]\', NULL)'<concat>' FROM "t1";'<block_end><def_stmt>test_simulate_columns_all <block_start><with_stmt>pytest.raises(parse.BQLParseError)<block_start>bql2sql('simulate * from p1 limit 1')<block_end><block_end><def_stmt>test_trivial_commands <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)# XXX Query parameters!
<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end><with_stmt>open(fname 'rU')<as>f<block_start><with_stmt>pytest.raises(ValueError)<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end><block_end><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true> ifnotexists=<true>)<block_end>guess.bayesdb_guess_population(bdb 'p' 't')<with_stmt>pytest.raises(ValueError)<block_start>guess.bayesdb_guess_population(bdb 'p' 't')<block_end>guess.bayesdb_guess_population(bdb 'p' 't' ifnotexists=<true>)<line_sep>bdb.execute('create generator p_cc for p;')<line_sep>bdb.execute('initialize 2 models for p_cc')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('initialize 2 models for p_cc')<block_end>bdb.execute('drop models from p_cc')<line_sep>bdb.execute('drop models from p_cc')<line_sep>bdb.execute('initialize 2 models for p_cc')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('initialize 2 models for p_cc')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop models 0-2 from p_cc')<block_end>bdb.execute('drop models 0-1 from p_cc')<with_stmt>bdb.savepoint()<block_start>bdb.execute('initialize 2 models for p_cc')<line_sep>bdb.execute('drop models 0-1 from p_cc')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop models 0-1 from p_cc')<block_end>bdb.execute('initialize 2 models for p_cc')<line_sep>bdb.execute('initialize 1 model if not exists for p_cc')<line_sep>bdb.execute('initialize 2 models if not exists for p_cc')<line_sep>population_id=core.bayesdb_get_population(bdb 'p')<line_sep>generator_id=core.bayesdb_get_generator(bdb population_id 'p_cc')<assert_stmt>core.bayesdb_generator_table(bdb generator_id)<eq>'t'<line_sep>bdb.execute('alter table t rename to t')<assert_stmt>core.bayesdb_generator_table(bdb generator_id)<eq>'t'<line_sep>bdb.execute('alter table t rename to T')<assert_stmt>core.bayesdb_generator_table(bdb generator_id)<eq>'T'<line_sep>bdb.execute('alter population p rename to p')<assert_stmt>core.bayesdb_population_name(bdb population_id)<eq>'p'<line_sep>bdb.execute('alter population p rename to p2')<assert_stmt>core.bayesdb_population_name(bdb population_id)<eq>'p2'<line_sep>bdb.execute('alter population p2 rename to p')<assert_stmt>core.bayesdb_population_name(bdb population_id)<eq>'p'<line_sep>bdb.execute('estimate count(*) from p').fetchall()<line_sep>bdb.execute('alter table t rename to t')<assert_stmt>core.bayesdb_generator_table(bdb generator_id)<eq>'t'<line_sep>bdb.execute('alter generator p_cc rename to p0_cc')<assert_stmt>core.bayesdb_generator_name(bdb generator_id)<eq>'p0_cc'<line_sep>bdb.execute('alter generator p0_cc rename to zot, rename to P0_CC')<assert_stmt>core.bayesdb_generator_name(bdb generator_id)<eq>'P0_CC'<line_sep>bdb.execute('alter generator P0_cc rename to P0_cc')<assert_stmt>core.bayesdb_generator_name(bdb generator_id)<eq>'P0_cc'<line_sep>bdb.execute('alter generator p0_CC rename to p0_cc')<assert_stmt>core.bayesdb_generator_name(bdb generator_id)<eq>'p0_cc'<line_sep>bdb.execute('estimate count(*) from p').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate count(*) from p_cc')<block_end>bdb.execute('alter generator p0_cc rename to P0_cc')<line_sep>bdb.execute('analyze p0_cc for 1 iteration')<line_sep>colno=core.bayesdb_variable_number(bdb 
population_id generator_id 'gender')<with_stmt>pytest.raises(parse.BQLParseError)# Rename the table's columns, not the generator's columns.
<block_start>bdb.execute('alter generator p0_cc rename gender to sex')<block_end><with_stmt>pytest.raises(NotImplementedError)# XXX
<block_start>bdb.execute('alter table t rename to t0, rename gender to sex')<assert_stmt>core.bayesdb_variable_number(bdb population_id generator_id 'sex')<eq>colno<line_sep>bdb.execute('analyze p0_cc model 0 for 1 iteration')<line_sep>bdb.execute('alter generator p0_cc rename to p_cc')<assert_stmt>core.bayesdb_variable_number(bdb population_id generator_id 'sex')<eq>colno<line_sep>bdb.execute('select sex from t0').fetchall()<with_stmt>pytest.raises(AssertionError)# XXX
<block_start>bdb.execute('select gender from t0')<assert_stmt><false> 'Need to fix quoting of unknown columns!'<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate predict sex with confidence 0.9'<concat>' from p').fetchall()<block_end>bdb.execute('infer explicit predict sex with confidence 0.9'<concat>' from p').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate predict gender with confidence 0.9'<concat>' from p')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('infer explicit predict gender with confidence 0.9'<concat>' from p')<block_end>bdb.execute('alter table t0 rename sex to gender')<assert_stmt>core.bayesdb_variable_number(bdb population_id generator_id 'gender')<eq>colno<block_end>bdb.execute('alter generator p0_cc rename to p_cc')# XXX
bdb.execute('alter table t rename to T0')# XXX
bdb.sql_execute('create table t0_temp(x)')<line_sep>bdb.execute('alter table T0 rename to t0')<assert_stmt>bdb.execute('select count(*) from t0_temp').fetchvalue()<eq>0<assert_stmt>bdb.execute('select count(*) from t0').fetchvalue()<g>0<with_stmt>pytest.raises(bayeslite.BQLError)# Cannot specify models with rename.
<block_start>bdb.execute('alter generator p_cc models (1) rename to p_cc_fail')<block_end>bdb.execute('drop table T0_TEMP')<line_sep>bdb.execute('analyze p_cc model 0 for 1 iteration')<line_sep>bdb.execute('analyze p_cc model 1 for 1 iteration')<line_sep>bdb.execute('analyze p_cc models 0-1 for 1 iteration')<line_sep>bdb.execute('analyze p_cc models 0,1 for 1 iteration')<line_sep>bdb.execute('analyze p_cc for 1 iteration')<line_sep>bdb.execute('select * from t0').fetchall()<line_sep>bdb.execute('select * from T0').fetchall()<line_sep>bdb.execute('estimate * from p').fetchall()<line_sep>bdb.execute('estimate * from P').fetchall()<line_sep># SIMILARITY IN THE CONTEXT OF requires exactly 1 variable.
<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate similarity in the context of * '<concat>'from pairwise p').fetchall()<block_end>bdb.execute('estimate similarity in the context of age '<concat>'from pairwise p').fetchall()<line_sep>bdb.execute('alter population p rename to p2')<assert_stmt>core.bayesdb_population_name(bdb population_id)<eq>'p2'<line_sep>bdb.execute('estimate similarity to (rowid=1) in the context of rank '<concat>'from p2').fetchall()<line_sep>bdb.execute('select value from'<concat>' (estimate correlation from pairwise columns of p2)').fetchall()<line_sep>bdb.execute('infer explicit predict age with confidence 0.9'<concat>' from p2').fetchall()<line_sep>bdb.execute('infer explicit predict AGE with confidence 0.9'<concat>' from P2').fetchall()<line_sep>bdb.execute('infer explicit predict aGe with confidence 0.9'<concat>' from P2').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate predict agee with confidence 0.9 from p2')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('infer explicit predict agee with confidence 0.9'<concat>' from p2')<block_end>guess.bayesdb_guess_population(bdb 'pe' 't0' overrides=[('age' 'numerical') ('rank' 'numerical') ])<line_sep>bdb.execute('create generator pe_cc for pe;')<with_stmt>pytest.raises(bayeslite.BQLError)# No models to analyze.
<block_start>bdb.execute('analyze pe_cc for 1 iteration')<block_end>bdb.execute('initialize 1 model if not exists for pe_cc')<line_sep>bdb.execute('analyze pe_cc for 1 iteration')<line_sep>bdb.execute('estimate correlation'<concat>' from pairwise columns of pe').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('initialize 4 models if not exists for t')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('analyze t0 for 1 iteration')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate * from t')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate * from columns of t')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate correlation from pairwise columns of t')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate similarity in the context of age '<concat>'from pairwise t')<block_end>bdb.execute('initialize 6 models if not exists for p_cc')<line_sep>bdb.execute('analyze p_cc for 1 iteration')<block_end><block_end><def_stmt>test_trivial_deadline <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 second')<block_end><block_end><def_stmt>test_parametrized <block_start><assert_stmt>bql2sqlparam('select * from t where id = ?')<eq>'SELECT * FROM "t" WHERE ("id" = ?1);'<assert_stmt>bql2sqlparam('select * from t where id = :foo')<eq>'SELECT * FROM "t" WHERE ("id" = ?1);'<assert_stmt>bql2sqlparam('select * from t where id = $foo')<eq>'SELECT * FROM "t" WHERE ("id" = ?1);'<assert_stmt>bql2sqlparam('select * from t where id = @foo')<eq>'SELECT * FROM "t" WHERE ("id" = ?1);'<assert_stmt>bql2sqlparam('select * from t where id = ?123')<eq>'SELECT * FROM "t" WHERE ("id" = ?1);'<assert_stmt>bql2sqlparam('select * from t where a = $foo and b = ?1;')<eq>'SELECT * FROM "t" WHERE (("a" = ?1) AND ("b" = ?1));'<assert_stmt>bql2sqlparam('select * from t'+' where a = ?123 and b = :foo and c = ?124')<eq>'SELECT * FROM "t" WHERE'+' ((("a" = ?1) AND ("b" = ?2)) AND ("c" = ?2));'<with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end><assert_stmt>bql_execute(bdb 'select count(*) from t')<eq>[(7 )]<assert_stmt>bql_execute(bdb 'select count(distinct division) from t')<eq>[(6 )]<assert_stmt>bql_execute(bdb 'select * from t where height > ?' (70 ))<eq>[(41 'M' 65600 72 'marketing' 4) (30 'M' 70000 73 'sales' 4) (30 'F' 81000 73 'engineering' 3) ]<assert_stmt>bql_execute(bdb 'select * from t where height > ?123' (0 )<times>122+(70 ))<eq>[(41 'M' 65600 72 'marketing' 4) (30 'M' 70000 73 'sales' 4) (30 'F' 81000 73 'engineering' 3) ]<assert_stmt>bql_execute(bdb 'select age from t where division = :division' {':division':'sales'})<eq>[(34 ) (30 )]<assert_stmt>bql_execute(bdb 'select division from t'+' where age < @age and rank > ?;' (40 4))<eq>[('accounting' )]<assert_stmt>bql_execute(bdb 'select division from t'+' where age < @age and rank > :rank;' {':RANK':4 '@aGe':40})<eq>[('accounting' )]<with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('select * from t where age < ? 
and rank > :r' {':r':4})<block_end><def_stmt>traced_execute query *args<block_start>bql=[]<def_stmt>trace string _bindings<block_start>bql.append(' '.join(string.split()))<block_end>bdb.trace(trace)<with_stmt>bdb.savepoint()<block_start>bdb.execute(query *args)<block_end>bdb.untrace(trace)<line_sep><return>bql<block_end><def_stmt>sqltraced_execute query *args<block_start>sql=[]<def_stmt>trace string _bindings<block_start>sql.append(' '.join(string.split()))<block_end>bdb.sql_trace(trace)<with_stmt>bdb.savepoint()<block_start>bdb.execute(query *args)<block_end>bdb.sql_untrace(trace)<line_sep><return>sql<block_end>guess.bayesdb_guess_population(bdb 'p' 't')<line_sep>bdb.execute('create generator p_cc for p;')<line_sep>bdb.execute('initialize 1 model for p_cc;')<assert_stmt>traced_execute('estimate similarity to (rowid = 1)'<concat>' in the context of (estimate * from columns of p limit 1)'<concat>' from p;')<eq>['estimate similarity to (rowid = 1)'<concat>' in the context of (estimate * from columns of p limit 1)'<concat>' from p;' ]<assert_stmt>sqltraced_execute('estimate similarity to (rowid = 1)'<concat>' in the context of (estimate * from columns of p limit 1)'<concat>' from p;')<eq>['SELECT COUNT(*) FROM bayesdb_population WHERE name = ?' 'SELECT id FROM bayesdb_population WHERE name = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?' 'SELECT id FROM bayesdb_population WHERE name = ?' 'SELECT v.name AS name FROM bayesdb_variable AS v'<concat>' WHERE v.population_id = 1'<concat>' AND v.generator_id IS NULL'<concat>' LIMIT 1' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT tabname FROM bayesdb_population'<concat>' WHERE id = ?' 'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'<concat>' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"' 'SELECT id FROM bayesdb_generator WHERE population_id = ?' 'SELECT backend FROM bayesdb_generator WHERE id = ?' 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ?' 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual '<concat>'WHERE generator_id = ? AND table_rowid = ?' 'SELECT engine_stamp FROM bayesdb_cgpm_generator '<concat>'WHERE generator_id = ?']<assert_stmt>sqltraced_execute('estimate similarity to (rowid = 1)'<concat>' in the context of (estimate * from columns of p limit ?)'<concat>' from p;' (1 ))<eq>['SELECT COUNT(*) FROM bayesdb_population'<concat>' WHERE name = ?' 'SELECT id FROM bayesdb_population'<concat>' WHERE name = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' 'SELECT COUNT(*) FROM bayesdb_population'<concat>' WHERE name = ?' 'SELECT id FROM bayesdb_population'<concat>' WHERE name = ?' # ESTIMATE * FROM COLUMNS OF:
'SELECT v.name AS name'<concat>' FROM bayesdb_variable AS v'<concat>' WHERE v.population_id = 1'<concat>' AND v.generator_id IS NULL'<concat>' LIMIT ?1' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' # ESTIMATE SIMILARITY TO (rowid=1):
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'<concat>' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"' 'SELECT id FROM bayesdb_generator WHERE population_id = ?' 'SELECT backend FROM bayesdb_generator WHERE id = ?' 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ?' 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ?' 'SELECT engine_stamp FROM bayesdb_cgpm_generator'<concat>' WHERE generator_id = ?']<assert_stmt>sqltraced_execute('create temp table if not exists sim as '<concat>'simulate age, RANK, division '<concat>'from p given gender = \'F\' limit 4')<eq>['PRAGMA table_info("sim")' 'PRAGMA table_info("bayesdb_temp_0")' 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?' 'SELECT id FROM bayesdb_population WHERE name = ?' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT CAST(4 AS INTEGER), \'F\'' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' 'SELECT MAX(_rowid_) FROM "t"' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT id FROM bayesdb_generator'<concat>' WHERE population_id = ?' 'SELECT backend FROM bayesdb_generator WHERE id = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' 'SELECT 1 FROM "t" WHERE oid = ?' 'SELECT 1 FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ? LIMIT 1' 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ?' 
'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT code FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND value = ?' 'SELECT engine_stamp FROM bayesdb_cgpm_generator'<concat>' WHERE generator_id = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 
'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'CREATE TEMP TABLE "bayesdb_temp_0"'<concat>' ("age","RANK","division")' 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'<concat>' VALUES (?,?,?)' 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'<concat>' VALUES (?,?,?)' 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'<concat>' VALUES (?,?,?)' 'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'<concat>' VALUES (?,?,?)' 'CREATE TEMP TABLE IF NOT EXISTS "sim" AS'<concat>' SELECT * FROM "bayesdb_temp_0"' 'DROP TABLE "bayesdb_temp_0"']<assert_stmt>sqltraced_execute('select * from (simulate age from p '<concat>'given gender = \'F\' limit 4)')<eq>['PRAGMA table_info("bayesdb_temp_1")' 'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?' 'SELECT id FROM bayesdb_population WHERE name = ?' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT CAST(4 AS INTEGER), \'F\'' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT COUNT(*) FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT colno FROM bayesdb_variable'<concat>' WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?)'<concat>' AND name = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' 'SELECT MAX(_rowid_) FROM "t"' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT token FROM bayesdb_rowid_tokens' 'SELECT id FROM bayesdb_generator WHERE population_id = ?' 'SELECT backend FROM bayesdb_generator WHERE id = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT tabname FROM bayesdb_population WHERE id = ?' 'SELECT 1 FROM "t" WHERE oid = ?' 'SELECT 1 FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ? LIMIT 1' 'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'<concat>' WHERE generator_id = ? AND table_rowid = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT code FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND value = ?' 'SELECT engine_stamp FROM bayesdb_cgpm_generator'<concat>' WHERE generator_id = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 
'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'<concat>' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?' 'SELECT value FROM bayesdb_cgpm_category'<concat>' WHERE generator_id = ? AND colno = ? AND code = ?' 'CREATE TEMP TABLE "bayesdb_temp_1" ("age")' 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)' 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)' 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)' 'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)' 'SELECT * FROM (SELECT * FROM "bayesdb_temp_1")' 'DROP TABLE "bayesdb_temp_1"' ]<line_sep>bdb.execute('''
create population q for t (
age NUMERICAL;
gender NOMINAL; -- Not binary!
salary NUMERICAL;
height NUMERICAL;
division NOMINAL;
rank NOMINAL;
)
''')<line_sep>bdb.execute('create generator q_cc for q;')<line_sep>bdb.execute('initialize 1 model for q_cc;')<assert_stmt>sqltraced_execute('analyze q_cc for 1 iteration;')<eq>['SELECT COUNT(*) FROM bayesdb_generator WHERE name = ?' 'SELECT id FROM bayesdb_generator WHERE name = ?' 'SELECT backend FROM bayesdb_generator WHERE id = ?' 'SELECT engine_json, engine_stamp FROM bayesdb_cgpm_generator'<concat>' WHERE generator_id = ?' 'SELECT population_id FROM bayesdb_generator WHERE id = ?' 'SELECT engine_stamp FROM bayesdb_cgpm_generator'<concat>' WHERE generator_id = ?' 'UPDATE bayesdb_cgpm_generator'<concat>' SET engine_json = :engine_json, engine_stamp = :engine_stamp'<concat>' WHERE generator_id = :generator_id']<block_end><block_end><def_stmt>test_create_table_ifnotexists_as_simulate <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<line_sep># If not exists table tests
guess.bayesdb_guess_population(bdb 'p' 't' overrides=[('age' 'numerical')])<line_sep>bdb.execute('create generator p_cc for p;')<line_sep>bdb.execute('initialize 1 model for p_cc')<line_sep>bdb.execute('analyze p_cc for 1 iteration')<line_sep>bdb.execute('''
create table if not exists u as
simulate age from p limit 10
''')<line_sep>bdb.execute("drop table u")<line_sep>bdb.execute('''
create table if not exists w as simulate age from p
given division='sales' limit 10
''')<line_sep>bdb.execute("drop table w")<line_sep>bdb.execute("create table u as simulate age from p limit 10")<line_sep>x=bdb.execute("select count (*) from u").fetchvalue()<line_sep>bdb.execute('''
create table if not exists u as simulate age from p limit 10
''')<line_sep>bdb.execute('''
create table if not exists u as simulate age from p
given division='sales' limit 10
''')<assert_stmt>x<eq>bdb.execute("select count (*) from u").fetchvalue()<block_end><block_end><block_end><def_stmt>test_createtab <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)<block_start><with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('drop table t')<block_end>bdb.execute('drop table if exists t')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop population p')<block_end>bdb.execute('drop population if exists p')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop generator p_cc')<block_end>bdb.execute('drop generator if exists p_cc')<with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end><with_stmt>bdb.savepoint()# Savepoint because we don't actually want the new data to
# be inserted.
<block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true> ifnotexists=<true>)<block_end><block_end>guess.bayesdb_guess_population(bdb 'p' 't' overrides=[('age' 'numerical')])<line_sep>bdb.execute('create generator p_cc for p;')<with_stmt>pytest.raises(bayeslite.BQLError)# Redefining population.
<block_start>bdb.execute('create population p for t (age numerical)')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# Redefining generator.
<block_start>bdb.execute('create generator p_cc for p;')<block_end># Make sure ignore columns work.
#
# XXX Also check key columns.
guess.bayesdb_guess_population(bdb 'p0' 't' overrides=[('age' 'ignore')])<line_sep>bdb.execute('drop population p0')<line_sep>population_id=core.bayesdb_get_population(bdb 'p')<line_sep>colno=core.bayesdb_variable_number(bdb population_id <none> 'age')<assert_stmt>core.bayesdb_variable_stattype(bdb population_id <none> colno)<eq>'numerical'<line_sep>bdb.execute('initialize 1 model for p_cc')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop table t')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop population p')<block_end>bdb.execute('drop generator p_cc')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop generator p_cc')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('drop table t')<block_end>bdb.execute('drop generator if exists p_cc')<line_sep>bdb.execute('drop population p')<line_sep>bdb.execute('drop population if exists p')<line_sep>bdb.execute('drop table t')<line_sep>bdb.execute('drop table if exists t')<with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>guess.bayesdb_guess_population(bdb 'p' 't')<line_sep>bdb.execute("create table u as select * from t where gender = 'F'")<assert_stmt>bql_execute(bdb 'select * from u')<eq>[(23 'F' 81000 67 'data science' 3) (36 'F' 96000 70 'management' 2) (30 'F' 81000 73 'engineering' 3) ]<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute("create table u as select * from t where gender = 'F'")<block_end>bdb.execute('drop table u')<with_stmt>pytest.raises(apsw.SQLError)<block_start>bql_execute(bdb 'select * from u')<block_end>bdb.execute("create temp table u as"<concat>" select * from t where gender = 'F'")<assert_stmt>bql_execute(bdb 'select * from u')<eq>[(23 'F' 81000 67 'data science' 3) (36 'F' 96000 70 'management' 2) (30 'F' 81000 73 'engineering' 3) ]<line_sep># XXX Test to make sure TEMP is passed through, and the table
# doesn't persist on disk.
<block_end><block_end><def_stmt>test_alterpop_addvar <block_start><with_stmt>bayeslite.bayesdb_open()<as>bdb<block_start>bayeslite.bayesdb_read_csv(bdb 't' StringIO.StringIO(test_csv.csv_data) header=<true> create=<true>)<line_sep>bdb.execute('''
create population p for t with schema(
age numerical;
gender nominal;
salary numerical;
height ignore;
division ignore;
rank ignore;
)
''')<line_sep>population_id=core.bayesdb_get_population(bdb 'p')<line_sep>bdb.execute('create generator m for p;')<line_sep># Fail when variable does not exist in base table.
<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('alter population p add variable quux;')<block_end># Fail when variable already in population.
<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('alter population p add variable age numerical;')<block_end># Fail when given invalid statistical type.
<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('alter population p add variable heigh numr;')<block_end># Alter pop with stattype.
<assert_stmt><not>core.bayesdb_has_variable(bdb population_id <none> 'height')<line_sep>bdb.execute('alter population p add variable height numerical;')<assert_stmt>core.bayesdb_has_variable(bdb population_id <none> 'height')<line_sep># Alter pop multiple without stattype.
<assert_stmt><not>core.bayesdb_has_variable(bdb population_id <none> 'rank')<assert_stmt><not>core.bayesdb_has_variable(bdb population_id <none> 'division')<line_sep>bdb.execute('''
alter population p
add variable rank,
add variable division;
''')<assert_stmt>core.bayesdb_has_variable(bdb population_id <none> 'rank')<assert_stmt>core.bayesdb_has_variable(bdb population_id <none> 'division')<line_sep># Add a new column weight to the base table.
bdb.sql_execute('alter table t add column weight real;')<line_sep># Fail when no values in new column.
<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('alter population p add variable weight numerical;')<block_end><assert_stmt><not>core.bayesdb_has_variable(bdb population_id <none> 'weight')<line_sep># Update a single value and update the population.
bdb.sql_execute('update t set weight = 1 where oid = 1;')<line_sep>bdb.execute('alter population p add variable weight numerical;')<assert_stmt>core.bayesdb_has_variable(bdb population_id <none> 'weight')<block_end><block_end><def_stmt>test_txn <block_start><with_stmt>test_csv.bayesdb_csv_file(test_csv.csv_data)<as>(bdb fname)# Make sure rollback and commit fail outside a transaction.
<block_start><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('ROLLBACK')<block_end><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('COMMIT')<block_end># Open a transaction which we'll roll back.
bdb.execute('BEGIN')<try_stmt># Make sure transactions don't nest. (Use savepoints.)
<block_start><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('BEGIN')<block_end><block_end><finally_stmt><block_start>bdb.execute('ROLLBACK')<block_end># Make sure rollback and commit still fail outside a transaction.
<with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('ROLLBACK')<block_end><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('COMMIT')<block_end># Open a transaction which we'll commit.
bdb.execute('BEGIN')<try_stmt><block_start><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('BEGIN')<block_end><block_end><finally_stmt><block_start>bdb.execute('COMMIT')<block_end><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('ROLLBACK')<block_end><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start>bdb.execute('COMMIT')<block_end># Make sure ROLLBACK undoes the effects of the transaction.
bdb.execute('BEGIN')<try_stmt><block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>bdb.execute('SELECT * FROM t').fetchall()<line_sep>guess.bayesdb_guess_population(bdb 'p' 't')<line_sep>bdb.execute('ESTIMATE * FROM p').fetchall()<block_end><finally_stmt><block_start>bdb.execute('ROLLBACK')<block_end><with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('SELECT * FROM t')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('ESTIMATE * FROM p')<block_end># Make sure CREATE and DROP both work in the transaction.
bdb.execute('BEGIN')<try_stmt><block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>bdb.execute('SELECT * FROM t').fetchall()<line_sep>guess.bayesdb_guess_population(bdb 'p' 't')<line_sep>bdb.execute('ESTIMATE * FROM p').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('DROP TABLE t')<block_end>bdb.execute('DROP POPULATION p')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('ESTIMATE * FROM p')<block_end>bdb.execute('DROP TABLE t')<with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('SELECT * FROM t')<block_end><block_end><finally_stmt><block_start>bdb.execute('ROLLBACK')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('ESTIMATE * FROM p')<block_end><with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('SELECT * FROM t')<block_end># Make sure CREATE and DROP work even if we commit.
bdb.execute('BEGIN')<try_stmt><block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>bdb.execute('SELECT * FROM t').fetchall()<line_sep>guess.bayesdb_guess_population(bdb 'p' 't')<line_sep>bdb.execute('ESTIMATE * FROM p').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('DROP TABLE t')<block_end>bdb.execute('DROP POPULATION p')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('ESTIMATE * FROM p')<block_end>bdb.execute('DROP TABLE t')<with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('SELECT * FROM t')<block_end><block_end><finally_stmt><block_start>bdb.execute('COMMIT')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('ESTIMATE * FROM p')<block_end><with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('SELECT * FROM t')<block_end># Make sure CREATE persists if we commit.
bdb.execute('BEGIN')<try_stmt><block_start><with_stmt>open(fname 'rU')<as>f<block_start>bayeslite.bayesdb_read_csv(bdb 't' f header=<true> create=<true>)<block_end>bdb.execute('SELECT * FROM t').fetchall()<line_sep>guess.bayesdb_guess_population(bdb 'p' 't')<line_sep>bdb.execute('ESTIMATE * FROM p').fetchall()<block_end><finally_stmt><block_start>bdb.execute('COMMIT')<block_end>bdb.execute('SELECT * FROM t').fetchall()<line_sep>bdb.execute('ESTIMATE * FROM p').fetchall()<line_sep># Make sure bdb.transaction works, rolls back on exception,
# and handles nesting correctly in the context of savepoints.
<try_stmt><block_start><with_stmt>bdb.transaction()<block_start>bdb.sql_execute('create table quagga(x)')<line_sep><raise>StopIteration<block_end><block_end><except_stmt>StopIteration<block_start><pass><block_end><with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('select * from quagga')<block_end><with_stmt>bdb.transaction()<block_start><with_stmt>bdb.savepoint()<block_start><with_stmt>bdb.savepoint()<block_start><pass><block_end><block_end><block_end><with_stmt>bdb.savepoint()<block_start><with_stmt>pytest.raises(bayeslite.BayesDBTxnError)<block_start><with_stmt>bdb.transaction()<block_start><pass><block_end><block_end><block_end># XXX To do: Make sure other effects (e.g., analysis) get
# rolled back by ROLLBACK.
<block_end><block_end><def_stmt>test_predprob_null <block_start>backend=CGPM_Backend({} multiprocess=<false>)<with_stmt>test_core.bayesdb(backend=backend)<as>bdb<block_start>bdb.sql_execute('''
create table foo (
id integer primary key not null,
x numeric,
y numeric,
z numeric
)
''')<line_sep>bdb.sql_execute("insert into foo values (1, 1, 'strange', 3)")<line_sep>bdb.sql_execute("insert into foo values (2, 1.2, 'strange', 1)")<line_sep>bdb.sql_execute("insert into foo values (3, 0.8, 'strange', 3)")<line_sep>bdb.sql_execute("insert into foo values (4, NULL, 'strange', 9)")<line_sep>bdb.sql_execute("insert into foo values (5, 73, 'up', 11)")<line_sep>bdb.sql_execute("insert into foo values (6, 80, 'up', -1)")<line_sep>bdb.sql_execute("insert into foo values (7, 60, 'up', NULL)")<line_sep>bdb.sql_execute("insert into foo values (8, 67, NULL, NULL)")<line_sep>bdb.sql_execute("insert into foo values (9, 3.1415926, 'down', 1)")<line_sep>bdb.sql_execute("insert into foo values (10, 1.4142135, 'down', 0)")<line_sep>bdb.sql_execute("insert into foo values (11, 2.7182818, 'down', -1)")<line_sep>bdb.sql_execute("insert into foo values (12, NULL, 'down', 10)")<line_sep>bdb.execute('''
create population pfoo for foo (
id ignore;
x numerical;
y nominal;
z numerical;
)
''')<line_sep>bdb.execute('create generator pfoo_cc for pfoo using cgpm;')<line_sep>bdb.execute('initialize 1 model for pfoo_cc')<line_sep>bdb.execute('analyze pfoo_cc for 1 iteration')<line_sep># Null value => null predictive probability.
<assert_stmt>bdb.execute('estimate predictive probability of x'<concat>' from pfoo where id = 4;').fetchall()<eq>[(<none> )]<line_sep># Nonnull value => nonnull predictive probability.
x=bdb.execute('estimate predictive probability of x'<concat>' from pfoo where id = 5').fetchall()<assert_stmt>len(x)<eq>1<assert_stmt>len(x[0])<eq>1<assert_stmt>isinstance(x[0][0] (int float))<line_sep># All null values => null predictive probability.
<assert_stmt>bdb.execute('estimate predictive probability of (y, z)'<concat>' from pfoo where id = 8;').fetchall()<eq>[(<none> )]<line_sep># Some nonnull values => nonnull predictive probability.
x=bdb.execute('estimate predictive probability of (x, z)'<concat>' from pfoo where id = 8;').fetchall()<assert_stmt>len(x)<eq>1<assert_stmt>len(x[0])<eq>1<assert_stmt>isinstance(x[0][0] (int float))<line_sep># All NULL constraints => same result regardless of given clause.
c0=bdb.execute('estimate predictive probability of x'<concat>' from pfoo where id = 8;')<line_sep>v0=cursor_value(c0)<assert_stmt>v0<is><not><none><line_sep>c1=bdb.execute('estimate predictive probability of x given (y, z)'<concat>' from pfoo where id = 8;')<line_sep>v1=cursor_value(c1)<assert_stmt>relerr(v0 v1)<l>0.0001<block_end><block_end><def_stmt>test_guess_all <block_start><with_stmt>test_core.bayesdb()<as>bdb<block_start>bdb.sql_execute('create table foo (x numeric, y numeric, z numeric)')<line_sep>bdb.sql_execute('insert into foo values (1, 2, 3)')<line_sep>bdb.sql_execute('insert into foo values (4, 5, 6)')<line_sep># XXX GUESS(*)
guess.bayesdb_guess_population(bdb 'pfoo' 'foo')<block_end><block_end><def_stmt>test_misc_errors <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('create table t1 as SELECT 1 FROM t1'# t1 already exists as a table.
<concat>' limit 1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# t1 already exists as a table.
<block_start>bdb.execute('create table t1 as simulate weight from p1'<concat>' limit 1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# t1x does not exist as a population.
<block_start>bdb.execute('create table t1_sim as simulate weight from t1x'<concat>' limit 1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# p1 does not have a variable waught.
<block_start>bdb.execute('create table t1_sim as simulate waught from p1'<concat>' limit 1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# p1 does not have a variable agee.
<block_start>bdb.execute('create table t1_sim as simulate weight from p1'<concat>' given agee = 42 limit 1')<block_end><with_stmt>bdb.savepoint()<block_start>bdb.sql_execute('create table t2(x)')<with_stmt>pytest.raises(bayeslite.BQLError)# t1 already exists as a table.
<block_start>bdb.execute('alter table t2 rename to t1')<block_end><block_end><with_stmt>pytest.raises(NotImplementedError)# Renaming columns is not yet implemented.
<block_start>bdb.execute('alter table t1 rename weight to mass')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# xcat does not exist as a backend.
<block_start>bdb.execute('create generator p1_xc for p1 using xcat()')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# p1 already exists as a population.
<block_start>bdb.execute('create generator p1_cc for p1;')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# multinomial is not a known statistical type.
<block_start>bdb.execute('''
create population q1 for t1(
ignore id, label, weight;
weight multinomial
)
''')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)# p1_xc does not exist as a generator.
<block_start>bdb.execute('alter generator p1_xc rename to p1_xcat')<block_end><with_stmt>bdb.savepoint()<block_start>bdb.execute('create generator p1_xc for p1;')<with_stmt>pytest.raises(bayeslite.BQLError)# p1_xc already exists as a generator.
<block_start>bdb.execute('alter generator p1_cc rename to p1_xc')<block_end><block_end><with_stmt>pytest.raises(bayeslite.BQLParseError)# WAIT is not allowed.
<block_start>bdb.execute('analyze p1_cc for 1 iteration wait')<block_end><with_stmt>bdb.savepoint()<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 iteration')<line_sep>bdb.execute('initialize 1 model for p1_xc')<line_sep>bdb.execute('analyze p1_xc for 1 iteration')<with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute('select'<concat>' nonexistent((simulate age from p1 limit 1));')<block_end><block_end><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('select :x' {'y':42})<block_end><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('select :x' {'x':53 'y':42})<block_end><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('select ?, ?' (1 ))<block_end><with_stmt>pytest.raises(ValueError)<block_start>bdb.execute('select ?' (1 2))<block_end><with_stmt>pytest.raises(TypeError)<block_start>bdb.execute('select ?' 42)<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>bdb.execute('infer explicit predict age confidence ac, *'<concat>' from p1')<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>bdb.execute('infer explicit predict age confidence ac,'<concat>' t1.(select age from t1 limit 1) from p1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start><try_stmt><block_start>bdb.execute('estimate similarity to (rowid=1)'<concat>' in the context of agee from p1')<block_end><except_stmt>bayeslite.BQLError<as>e<block_start><assert_stmt>'No such columns in population:'<in>str(e)<line_sep><raise><block_end><block_end><block_end><block_end><def_stmt>test_nested_simulate <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 iteration')<line_sep>bdb.execute('select (simulate age from p1 limit 1),'<concat>' (simulate weight from p1 limit 1)').fetchall()<assert_stmt>bdb.temp_table_name()<eq>'bayesdb_temp_2'<assert_stmt><not>core.bayesdb_has_table(bdb 'bayesdb_temp_0')<assert_stmt><not>core.bayesdb_has_table(bdb 'bayesdb_temp_1')<line_sep>bdb.execute('simulate weight from p1'<concat>' given age = (simulate age from p1 limit 1)'<concat>' limit 1').fetchall()<line_sep># Make sure unwinding doesn't raise an exception. Calling
# __del__ directly, rather than via del(), has two effects:
#
# (a) It actually raises any exceptions in the method, unlike
# del(), which suppresses them.
#
# (b) It may cause a subsequent __del__ to fail and raise an
# exception, so that a subsequent del(), including an implicit
# one at the end of a scope, may print a message to stderr.
#
# Effect (a) is what we are actually trying to test. Effect
# (b) is a harmless consequence as far as pytest is concerned,
# as long as the test otherwise passes.
bdb.execute('simulate weight from p1'<concat>' given age = (simulate age from p1 limit 1)'<concat>' limit 1').__del__()<block_end><block_end><def_stmt>test_checkpoint__ci_slow <block_start><with_stmt>test_core.t1()<as>(bdb population_id generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 10 iterations checkpoint 1 iteration')<line_sep># No checkpoint by seconds.
<with_stmt>pytest.raises(NotImplementedError)<block_start>bdb.execute('analyze p1_cc for 5 seconds checkpoint 1 second')<block_end>bdb.execute('drop models from p1_cc')<line_sep>bdb.execute('initialize 1 model for p1_cc')<line_sep># No checkpoint by seconds.
<with_stmt>pytest.raises(NotImplementedError)<block_start>bdb.execute('analyze p1_cc for 5 iterations checkpoint 1 second')<block_end>bdb.execute('drop models from p1_cc')<line_sep>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 iteration checkpoint 2 iterations')<block_end><block_end><def_stmt>test_infer_confidence__ci_slow <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 iteration')<line_sep>bdb.execute('infer explicit rowid, rowid as another_rowid, 4,'<concat>' age, predict age as age_inf confidence age_conf'<concat>' from p1').fetchall()<block_end><block_end><def_stmt>test_infer_as_estimate <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 iteration')<line_sep>bdb.execute('infer explicit predictive probability of age'<concat>' from p1').fetchall()<block_end><block_end><def_stmt>test_infer_error <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('infer explicit predict age confidence age_conf'<concat>' from p1').fetchall()<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('infer explicit predict agee confidence age_conf'<concat>' from p1').fetchall()<block_end><block_end><block_end><def_stmt>test_estimate_by <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bdb.execute('initialize 1 model for p1_cc')<line_sep>bdb.execute('analyze p1_cc for 1 iteration')<with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate predictive probability of age'<concat>' by p1')<block_end><with_stmt>pytest.raises(bayeslite.BQLError)<block_start>bdb.execute('estimate similarity to (rowid=1) '<concat>'in the context of age by p1')<block_end><def_stmt>check x bindings=<none><block_start><assert_stmt>len(bdb.execute(x bindings=bindings).fetchall())<eq>1<block_end>check('estimate probability density of age = 42 by p1')<line_sep>check('estimate dependence probability of age with weight by p1')<line_sep>check('estimate mutual information of age with weight by p1')<line_sep>check('estimate correlation of age with weight by p1')<line_sep>check('estimate correlation pvalue of age with weight by p1')<line_sep>rowid=bdb.execute('select min(rowid) from t1').fetchall()[0][0]<line_sep>check('''
estimate similarity of (rowid=?) to (rowid=?)
in the context of weight by p1
''' (rowid rowid ))<block_end><block_end><def_stmt>test_empty_cursor <block_start><with_stmt>bayeslite.bayesdb_open()<as>bdb<block_start><assert_stmt>bdb.execute('SELECT 0').connection<eq>bdb<line_sep>empty(bdb.execute('BEGIN'))<line_sep>empty(bdb.execute('COMMIT'))<line_sep>empty(bdb.sql_execute('CREATE TABLE t(x, y, z)'))<line_sep>empty(bdb.sql_execute('INSERT INTO t VALUES(1,2,3)'))<line_sep>empty(bdb.sql_execute('INSERT INTO t VALUES(4,5,6)'))<line_sep>empty(bdb.sql_execute('INSERT INTO t VALUES(7,8,9)'))<line_sep>empty(bdb.execute('CREATE POPULATION p FOR t '<concat>'(IGNORE z,y; x NOMINAL)'))<line_sep>empty(bdb.execute('CREATE GENERATOR p_cc FOR p;'))<line_sep>empty(bdb.execute('INITIALIZE 1 MODEL FOR p_cc'))<line_sep>empty(bdb.execute('DROP GENERATOR p_cc'))<line_sep>empty(bdb.execute('DROP POPULATION p'))<line_sep>empty(bdb.execute('DROP TABLE t'))<block_end><block_end><def_stmt>test_create_generator_ifnotexists # XXX Test other backends too, because they have a role in ensuring that
# this works. Their create_generator will still be called.
#
# [TRC 20160627: The above comment appears to be no longer true --
# if it was ever true.]
<block_start><for_stmt>using_clause ('cgpm()' )<block_start><with_stmt>bayeslite.bayesdb_open()<as>bdb<block_start>bdb.sql_execute('CREATE TABLE t(x, y, z)')<line_sep>bdb.sql_execute('INSERT INTO t VALUES(1,2,3)')<line_sep>bdb.execute('''
CREATE POPULATION p FOR t (
x NUMERICAL;
y NUMERICAL;
z NOMINAL;
)
''')<for_stmt>_i (0 1)<block_start>bdb.execute('CREATE GENERATOR IF NOT EXISTS p_cc FOR p USING '+using_clause)<block_end><try_stmt><block_start>bdb.execute('CREATE GENERATOR p_cc FOR p USING '+using_clause)<assert_stmt><false># Should have said it exists.
<block_end><except_stmt>bayeslite.BQLError<block_start><pass><block_end><block_end><block_end><block_end><def_stmt>test_bql_rand <block_start><with_stmt>bayeslite.bayesdb_open()<as>bdb<block_start>bdb.sql_execute('CREATE TABLE frobotz(x)')<for_stmt>_ range(10)<block_start>bdb.sql_execute('INSERT INTO frobotz VALUES(2)')<block_end>cursor=bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;')<line_sep>rands=cursor.fetchall()<line_sep># These are "the" random numbers (internal PRNG is seeded to 0)
ans=[(0.28348770982811367 ) (0.4789774612650598 ) (0.07824908989551316 ) (0.6091223239372148 ) (0.03906608409906187 ) (0.3690599096081546 ) (0.8223420512129717 ) (0.7777771914916722 ) (0.061856771629497986 ) (0.6492586781908201 )]<assert_stmt>rands<eq>ans<block_end><block_end><def_stmt>test_bql_rand2 <block_start>seed=struct.pack('<QQQQ' 0 0 0 3)<with_stmt>bayeslite.bayesdb_open(seed=seed)<as>bdb<block_start>bdb.sql_execute('CREATE TABLE frobotz(x)')<for_stmt>_ range(10)<block_start>bdb.sql_execute('INSERT INTO frobotz VALUES(2)')<block_end>cursor=bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;')<line_sep>rands=cursor.fetchall()<line_sep>ans=[(0.8351877951287725 ) (0.9735099617243271 ) (0.026142315910925418 ) (0.09380653289687524 ) (0.1097050387582088 ) (0.33154896906379605 ) (0.4579314980719317 ) (0.09072802203491703 ) (0.5276180968829105 ) (0.9993280772797679 )]<assert_stmt>rands<eq>ans<block_end><block_end><class_stmt>MockTracerOneQuery(bayeslite.IBayesDBTracer)<block_start><def_stmt>__init__ self q qid<block_start>self.q=q<line_sep>self.qid=qid<line_sep>self.start_calls=0<line_sep>self.ready_calls=0<line_sep>self.error_calls=0<line_sep>self.finished_calls=0<line_sep>self.abandoned_calls=0<block_end><def_stmt>start self qid query bindings<block_start><assert_stmt>qid<eq>self.qid<assert_stmt>query<eq>self.q<assert_stmt>bindings<eq>()<line_sep>self.start_calls<augadd>1<block_end><def_stmt>ready self qid _cursor<block_start><assert_stmt>qid<eq>self.qid<line_sep>self.ready_calls<augadd>1<block_end><def_stmt>error self qid _e<block_start><assert_stmt>qid<eq>self.qid<line_sep>self.error_calls<augadd>1<block_end><def_stmt>finished self qid<block_start><assert_stmt>qid<eq>self.qid<line_sep>self.finished_calls<augadd>1<block_end><def_stmt>abandoned self qid<block_start><assert_stmt>qid<eq>self.qid<line_sep>self.abandoned_calls<augadd>1<block_end><block_end><def_stmt>test_tracing_smoke <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>q='SELECT * FROM t1'<line_sep>tracer=MockTracerOneQuery(q 1)<line_sep>bdb.trace(tracer)<line_sep>cursor=bdb.execute(q)<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>0<assert_stmt>tracer.abandoned_calls<eq>0<line_sep>cursor.fetchall()<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>1<assert_stmt>tracer.abandoned_calls<eq>0<del_stmt>cursor<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>1<assert_stmt>tracer.abandoned_calls<eq>1<line_sep>bdb.untrace(tracer)<line_sep># XXX Make sure the whole cursor API works.
q='SELECT 42'<line_sep>tracer=MockTracerOneQuery(q 2)<line_sep>bdb.trace(tracer)<line_sep>cursor=bdb.execute(q)<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>0<assert_stmt>tracer.abandoned_calls<eq>0<assert_stmt>cursor.fetchvalue()<eq>42<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>1<assert_stmt>tracer.abandoned_calls<eq>0<del_stmt>cursor<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>1<assert_stmt>tracer.abandoned_calls<eq>1<block_end><block_end><def_stmt>test_tracing_error_smoke <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>q='SELECT * FROM wrong'<line_sep>tracer=MockTracerOneQuery(q 1)<line_sep>bdb.trace(tracer)<with_stmt>pytest.raises(apsw.SQLError)<block_start>bdb.execute(q)<block_end><assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>0<assert_stmt>tracer.error_calls<eq>1<assert_stmt>tracer.finished_calls<eq>0<assert_stmt>tracer.abandoned_calls<eq>0<block_end><block_end><class_stmt>Boom(Exception)<block_start><pass><block_end><class_stmt>ErroneousBackend(troll.TrollBackend)<block_start><def_stmt>__init__ self<block_start>self.call_ct=0<block_end><def_stmt>name self<block_start><return>'erroneous'<block_end><def_stmt>logpdf_joint self *_args **_kwargs<block_start><if_stmt>self.call_ct<g>10# Wait to avoid raising during sqlite's prefetch
<block_start><raise>Boom()<block_end>self.call_ct<augadd>1<line_sep><return>0<block_end><block_end><def_stmt>test_tracing_execution_error_smoke <block_start><with_stmt>test_core.t1()<as>(bdb _population_id _generator_id)<block_start>bayeslite.bayesdb_register_backend(bdb ErroneousBackend())<line_sep>bdb.execute('DROP GENERATOR p1_cc')<line_sep>bdb.execute('CREATE GENERATOR p1_err FOR p1 USING erroneous()')<line_sep>q='ESTIMATE PREDICTIVE PROBABILITY OF age FROM p1'<line_sep>tracer=MockTracerOneQuery(q 1)<line_sep>bdb.trace(tracer)<line_sep>cursor=bdb.execute(q)<assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>0<assert_stmt>tracer.finished_calls<eq>0<assert_stmt>tracer.abandoned_calls<eq>0<with_stmt>pytest.raises(Boom)<block_start>cursor.fetchall()<block_end><assert_stmt>tracer.start_calls<eq>1<assert_stmt>tracer.ready_calls<eq>1<assert_stmt>tracer.error_calls<eq>1<assert_stmt>tracer.finished_calls<eq>0<assert_stmt>tracer.abandoned_calls<eq>0<block_end><block_end><def_stmt>test_pdf_var <block_start><with_stmt>test_core.t1()<as>(bdb population_id _generator_id)<block_start>bdb.execute('initialize 6 models for p1_cc;')<line_sep>c=bdb.execute('estimate probability density of label = label from p1')<line_sep>c.fetchall()<assert_stmt>bql2sql('estimate probability density of label = label from p1')<eq>'SELECT bql_pdf_joint(1, NULL, NULL, 1, "label") FROM "t1";'<block_end><block_end> |
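# --- Editor's illustrative sketch; not part of the dataset rows above or below. ---
# The tracing tests above (test_tracing_smoke and friends) exercise the
# IBayesDBTracer callback protocol: start() fires when a traced query is
# executed, ready() when its cursor becomes available, finished() once the
# cursor is exhausted, abandoned() when the cursor is discarded, and error()
# if execution fails. A minimal tracer along those lines, assuming only the
# names used in the tests (and `import bayeslite` as in that module), might be:
class PrintTracer(bayeslite.IBayesDBTracer):
    def start(self, qid, query, bindings):
        print('start', qid, query, bindings)
    def ready(self, qid, _cursor):
        print('ready', qid)
    def error(self, qid, e):
        print('error', qid, e)
    def finished(self, qid):
        print('finished', qid)
    def abandoned(self, qid):
        print('abandoned', qid)
# Hypothetical usage, mirroring test_tracing_smoke:
#     tracer = PrintTracer()
#     bdb.trace(tracer)
#     bdb.execute('SELECT 42').fetchall()   # start, ready, then finished
#     bdb.untrace(tracer)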
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>json<import_stmt>logging<import_stmt>random<import_from_stmt>threading Lock<import_from_stmt>typing Any Optional<import_from_stmt>tornado.ioloop PeriodicCallback<import_from_stmt>.constants DEFAULT_THROTTLER_REFRESH_INTERVAL<import_from_stmt>.metrics Metrics MetricsFactory<import_from_stmt>.utils ErrorReporter<line_sep>MINIMUM_CREDITS=1.0<line_sep>default_logger=logging.getLogger('jaeger_tracing')<class_stmt>Throttler(object)<block_start><def_stmt>set_client_id self client_id:int<arrow><none><block_start>"""
Called by tracer to set client ID of throttler.
"""<line_sep><pass><block_end><def_stmt>is_allowed self operation:str<arrow>bool<block_start><raise>NotImplementedError()<block_end><def_stmt>close self<arrow><none><block_start><pass><block_end><block_end><class_stmt>RemoteThrottler(Throttler)<block_start>"""
RemoteThrottler controls the flow of spans emitted from client to prevent
flooding. RemoteThrottler requests credits from the throttling service
periodically. These credits determine the amount of debug spans a client
may emit for a particular operation without receiving more credits.
:param channel: channel for communicating with jaeger-agent
:param service_name: name of this application
:param kwargs: optional parameters
- refresh_interval: interval in seconds for requesting more credits
- logger: Logger instance
- metrics_factory: factory to create throttler-specific metrics
- error_reporter: ErrorReporter instance
"""<def_stmt>__init__ self channel:Any service_name:str **kwargs:Any<arrow><none><block_start>self.channel=channel<line_sep>self.service_name=service_name<line_sep>self.client_id:Optional[int]=<none><line_sep>self.refresh_interval=kwargs.get('refresh_interval' DEFAULT_THROTTLER_REFRESH_INTERVAL)<line_sep>self.logger=kwargs.get('logger' default_logger)<line_sep>metrics_factory=kwargs.get('metrics_factory' MetricsFactory())<line_sep>self.metrics=ThrottlerMetrics(metrics_factory)<line_sep>self.error_reporter=kwargs.get('error_reporter' ErrorReporter(Metrics()))<line_sep>self.credits:dict={}<line_sep>self.lock=Lock()<line_sep>self.running=<true><line_sep>self.periodic=<none><if_stmt><not>self.channel.io_loop<block_start>self.logger.error('Cannot acquire IOLoop, throttler will not be updated')<block_end><else_stmt><block_start>self.channel.io_loop.add_callback(self._init_polling)<block_end><block_end><def_stmt>is_allowed self operation:str<arrow>bool<block_start><with_stmt>self.lock<block_start><if_stmt>operation<not><in>self.credits<block_start>self.credits[operation]=0.0<line_sep>self.metrics.throttled_debug_spans(1)<line_sep><return><false><block_end>value=self.credits[operation]<if_stmt>value<l>MINIMUM_CREDITS<block_start>self.metrics.throttled_debug_spans(1)<line_sep><return><false><block_end>self.credits[operation]=value-MINIMUM_CREDITS<line_sep><return><true><block_end><block_end><def_stmt>set_client_id self client_id:int<arrow><none><block_start><with_stmt>self.lock<block_start><if_stmt>self.client_id<is><none><block_start>self.client_id=client_id<block_end><block_end><block_end><def_stmt>_init_polling self<block_start>"""
Bootstrap polling for throttler.
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll.
"""<with_stmt>self.lock<block_start><if_stmt><not>self.running<block_start><return><block_end>r=random.Random()<line_sep>delay=r.random()<times>self.refresh_interval<line_sep>self.channel.io_loop.call_later(delay=delay callback=self._delayed_polling)<line_sep>self.logger.info('Delaying throttling credit polling by %d sec' delay)<block_end><block_end><def_stmt>_operations self<block_start><with_stmt>self.lock<block_start><return>self.credits.keys()<block_end><block_end><def_stmt>_delayed_polling self<block_start><def_stmt>callback <block_start>self._fetch_credits(self._operations())<block_end>periodic=PeriodicCallback(callback=callback # convert interval to milliseconds
callback_time=self.refresh_interval<times>1000)<line_sep>self._fetch_credits(self._operations())<with_stmt>self.lock<block_start><if_stmt><not>self.running<block_start><return><block_end>self.periodic=periodic<line_sep>self.periodic.start()<line_sep>self.logger.info('Throttling client started with refresh interval %d sec' self.refresh_interval)<block_end><block_end><def_stmt>_fetch_credits self operations<block_start><if_stmt><not>operations<block_start><return><block_end>self.logger.debug('Requesting throttling credits')<line_sep>fut=self.channel.request_throttling_credits(self.service_name self.client_id operations)<line_sep>fut.add_done_callback(self._request_callback)<block_end><def_stmt>_request_callback self future<block_start>exception=future.exception()<if_stmt>exception<block_start>self.metrics.throttler_update_failure(1)<line_sep>self.error_reporter.error('Failed to get throttling credits from jaeger-agent: %s' exception)<line_sep><return><block_end>response=future.result()<line_sep># In Python 3.5 response.body is of type bytes and json.loads() does only support str
# See: https://github.com/jaegertracing/jaeger-client-python/issues/180
<if_stmt>hasattr(response.body 'decode')<and>callable(response.body.decode)<block_start>response_body=response.body.decode('utf-8')<block_end><else_stmt><block_start>response_body=response.body<block_end><try_stmt><block_start>throttling_response=json.loads(response_body)<line_sep>self.logger.debug('Received throttling response: %s' throttling_response)<line_sep>self._update_credits(throttling_response)<line_sep>self.metrics.throttler_update_success(1)<block_end><except_stmt>Exception<as>e<block_start>self.metrics.throttler_update_failure(1)<line_sep>self.error_reporter.error('Failed to parse throttling credits response '<concat>'from jaeger-agent: %s [%s]' e response_body)<line_sep><return><block_end><block_end><def_stmt>_update_credits self response<block_start><with_stmt>self.lock<block_start><for_stmt>op_balance response['balances']<block_start>op=op_balance['operation']<line_sep>balance=op_balance['balance']<if_stmt>op<not><in>self.credits<block_start>self.credits[op]=0<block_end>self.credits[op]<augadd>balance<block_end>self.logger.debug('credits = %s' self.credits)<block_end><block_end><def_stmt>close self<arrow><none><block_start><with_stmt>self.lock<block_start>self.running=<false><if_stmt>self.periodic<block_start>self.periodic.stop()<block_end><block_end><block_end><block_end><class_stmt>ThrottlerMetrics(object)<block_start>"""
Metrics specific to throttler.
"""<def_stmt>__init__ self metrics_factory:MetricsFactory<arrow><none><block_start>self.throttled_debug_spans=metrics_factory.create_counter(name='jaeger:throttled_debug_spans')<line_sep>self.throttler_update_success=metrics_factory.create_counter(name='jaeger:throttler_update' tags={'result':'ok'})<line_sep>self.throttler_update_failure=metrics_factory.create_counter(name='jaeger:throttler_update' tags={'result':'err'})<block_end><block_end> |
"""ESMValTool CMORizer for GHCN-CAMS data.
Tier
Tier 2: other freely-available dataset.
Source
https://www.esrl.noaa.gov/psd/data/gridded/data.ghcncams.html
ftp://ftp.cdc.noaa.gov/Datasets/ghcncams/air.mon.mean.nc
Last access
20200304
"""<import_stmt>logging<import_stmt>os<import_stmt>iris<import_from_stmt>. utilities<as>utils<line_sep>logger=logging.getLogger(__name__)<def_stmt>_extract_variable short_name var cfg filepath out_dir<block_start>"""Extract variable."""<line_sep>raw_var=var.get('raw' short_name)<line_sep>cube=iris.load_cube(filepath utils.var_name_constraint(raw_var))<line_sep># Fix units
<if_stmt>'raw_units'<in>var<block_start>cube.units=var['raw_units']<block_end>cmor_info=cfg['cmor_table'].get_variable(var['mip'] short_name)<line_sep>cube.convert_units(cmor_info.units)<line_sep>utils.convert_timeunits(cube 1950)<line_sep># Fix coordinates
utils.fix_coords(cube)<if_stmt>'height2m'<in>cmor_info.dimensions<block_start>utils.add_height2m(cube)<block_end># Fix metadata
attrs=cfg['attributes']<line_sep>attrs['mip']=var['mip']<line_sep>utils.fix_var_metadata(cube cmor_info)<line_sep>utils.set_global_atts(cube attrs)<line_sep># Save variable
utils.save_variable(cube short_name out_dir attrs unlimited_dimensions=['time'])<block_end><def_stmt>cmorization in_dir out_dir cfg _<block_start>"""Cmorization func call."""<line_sep>filepath=os.path.join(in_dir cfg['filename'])<line_sep># Run the cmorization
<for_stmt>(short_name var) cfg['variables'].items()<block_start>logger.info("CMORizing variable '%s'" short_name)<line_sep>_extract_variable(short_name var cfg filepath out_dir)<block_end><block_end> |
# ServiceSchema.py
<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_stmt>optparse OptionParser OptionValueError<import_stmt>os<import_stmt>platform<as>plat<import_stmt>sys<if_stmt>sys.version_info<ge>(3 8)<and>plat.system().lower()<eq>"windows"# pylint: disable=no-member
<block_start><with_stmt>os.add_dll_directory(os.getenv('BLPAPI_LIBDIR'))<block_start><import_stmt>blpapi<block_end><block_end><else_stmt><block_start><import_stmt>blpapi<block_end>REFERENCE_DATA_RESPONSE=blpapi.Name("ReferenceDataResponse")<line_sep>ELEMENT_DATATYPE_NAMES={blpapi.DataType.BOOL:"BOOL" blpapi.DataType.CHAR:"CHAR" blpapi.DataType.BYTE:"BYTE" blpapi.DataType.INT32:"INT32" blpapi.DataType.INT64:"INT64" blpapi.DataType.FLOAT32:"FLOAT32" blpapi.DataType.FLOAT64:"FLOAT64" blpapi.DataType.STRING:"STRING" blpapi.DataType.BYTEARRAY:"BYTEARRAY" blpapi.DataType.DATE:"DATE" blpapi.DataType.TIME:"TIME" blpapi.DataType.DECIMAL:"DECIMAL" blpapi.DataType.DATETIME:"DATETIME" blpapi.DataType.ENUMERATION:"ENUMERATION" blpapi.DataType.SEQUENCE:"SEQUENCE" blpapi.DataType.CHOICE:"CHOICE" blpapi.DataType.CORRELATION_ID:"CORRELATION_ID"}<line_sep>SCHEMA_STATUS_NAMES={blpapi.SchemaStatus.ACTIVE:"ACTIVE" blpapi.SchemaStatus.DEPRECATED:"DEPRECATED" blpapi.SchemaStatus.INACTIVE:"INACTIVE" blpapi.SchemaStatus.PENDING_DEPRECATION:"PENDING"}<def_stmt>authOptionCallback _option _opt value parser<block_start>"""Parse authorization options from user input"""<line_sep>vals=value.split('=' 1)<if_stmt>value<eq>"user"<block_start>authUser=blpapi.AuthUser.createWithLogonName()<line_sep>authOptions=blpapi.AuthOptions.createWithUser(authUser)<block_end><elif_stmt>value<eq>"none"<block_start>authOptions=<none><block_end><elif_stmt>vals[0]<eq>"app"<and>len(vals)<eq>2<block_start>appName=vals[1]<line_sep>authOptions=blpapi.AuthOptions.createWithApp(appName)<block_end><elif_stmt>vals[0]<eq>"userapp"<and>len(vals)<eq>2<block_start>appName=vals[1]<line_sep>authUser=blpapi.AuthUser.createWithLogonName()<line_sep>authOptions=blpapi.AuthOptions.createWithUserAndApp(authUser appName)<block_end><elif_stmt>vals[0]<eq>"dir"<and>len(vals)<eq>2<block_start>activeDirectoryProperty=vals[1]<line_sep>authUser=blpapi.AuthUser.createWithActiveDirectoryProperty(activeDirectoryProperty)<line_sep>authOptions=blpapi.AuthOptions.createWithUser(authUser)<block_end><elif_stmt>vals[0]<eq>"manual"<block_start>parts=[]<if_stmt>len(vals)<eq>2<block_start>parts=vals[1].split(',')<block_end><if_stmt>len(parts)<ne>3<block_start><raise>OptionValueError("Invalid auth option {}".format(value))<block_end>appName,ip,userId=parts<line_sep>authUser=blpapi.AuthUser.createWithManualOptions(userId ip)<line_sep>authOptions=blpapi.AuthOptions.createWithUserAndApp(authUser appName)<block_end><else_stmt><block_start><raise>OptionValueError("Invalid auth option '{}'".format(value))<block_end>parser.values.auth={'option':authOptions}<block_end><def_stmt>parseCmdLine <block_start>parser=OptionParser()<line_sep>parser.add_option("-a" "--host" dest="host" help="HOST address to connect to" metavar="HOST" default="localhost")<line_sep>parser.add_option("-p" "--port" dest="port" type="int" help="PORT to connect to (%default)" metavar="PORT" default=8194)<line_sep>parser.add_option("-s" "--service" default="//blp/apiflds" help="SERVICE to print the schema of "<concat>"('//blp/apiflds' by default)")<line_sep>parser.add_option("--auth" dest="auth" help="authentication option: "<concat>"user|none|app=<app>|userapp=<app>|dir=<property>"<concat>"|manual=<app,ip,user>"<concat>" (default: user)\n"<concat>"'none' is applicable to Desktop API product "<concat>"that requires Bloomberg Professional service "<concat>"to be installed locally." 
metavar="option" action="callback" callback=authOptionCallback type="string" default={"option":blpapi.AuthOptions.createWithUser(blpapi.AuthUser.createWithLogonName())})<line_sep>(options _)=parser.parse_args()<line_sep><return>options<block_end><def_stmt>printMessage msg<block_start>print("[{0}]: {1}".format(", ".join(map(str msg.correlationIds())) msg))<block_end><def_stmt>getIndent level<block_start><return>""<if>level<eq>0<else>" ".ljust(level<times>2)<block_end># Print enumeration (constant list)
<def_stmt>printEnumeration cl level<block_start>indent=getIndent(level+1)<line_sep>print(indent+" {0} {1} {2} \"{3}\" possible values:".format(cl.name() SCHEMA_STATUS_NAMES[cl.status()] ELEMENT_DATATYPE_NAMES[cl.datatype()] cl.description()))<line_sep># Enumerate and print all constant list's values (constants)
<for_stmt>i cl<block_start>print(indent+" {0} {1} {2} \"{3}\" = {4!s}".format(i.name() SCHEMA_STATUS_NAMES[i.status()] ELEMENT_DATATYPE_NAMES[i.datatype()] i.description() i.getValue()))<block_end><block_end># Recursively print element definition
<def_stmt>printElementDefinition ed level=0<block_start>indent=getIndent(level)<line_sep>maxValues=ed.maxValues()<if_stmt>maxValues<eq>blpapi.SchemaElementDefinition.UNBOUNDED<block_start>valuesRange="[{0}, INF)".format(ed.minValues())<block_end><else_stmt><block_start>valuesRange="[{0}, {1}]".format(ed.minValues() maxValues)<block_end># Get and print alternate element names
alternateNames=ed.alternateNames()<if_stmt>alternateNames<block_start>alternateNames="[{0}]".format(",".join(map(str alternateNames)))<block_end><else_stmt><block_start>alternateNames=""<block_end>print(indent+"* {0} {1} {2} {3} \"{4}\"".format(ed.name() SCHEMA_STATUS_NAMES[ed.status()] valuesRange alternateNames ed.description()))<line_sep># Get and print related type definition
td=ed.typeDefinition()<line_sep>print(indent+" {0} {1} {2} {3}{4}{5}\"{6}\"".format(td.name() SCHEMA_STATUS_NAMES[td.status()] ELEMENT_DATATYPE_NAMES[td.datatype()] "complex "<if>td.isComplexType()<else>"" "simple "<if>td.isSimpleType()<else>"" "enum "<if>td.isEnumerationType()<else>"" td.description()))<line_sep># Get and print all possible values for enumeration type
enumeration=td.enumeration()<if_stmt><not>enumeration<is><none><block_start>printEnumeration(enumeration level)<block_end><if_stmt>td.numElementDefinitions()<block_start>print(indent+" Elements[{0}]:".format(td.numElementDefinitions()))<line_sep># Enumerate and print all sub-element definitions
<for_stmt>i td.elementDefinitions()<block_start>printElementDefinition(i level+1)<block_end><block_end><block_end><def_stmt>printOperation operation _service<block_start>print("{0} \"{1}\" Request:".format(operation.name() operation.description()))<line_sep># Print operation's request definition
printElementDefinition(operation.requestDefinition() 1)<line_sep>print("Responses[{0}]:".format(operation.numResponseDefinitions()))<line_sep># Enumerate and print all operation's response definitions
<for_stmt>r operation.responseDefinitions()<block_start>printElementDefinition(r 1)<block_end>print()<block_end><def_stmt>main <block_start>options=parseCmdLine()<line_sep># Fill SessionOptions
sessionOptions=blpapi.SessionOptions()<line_sep>sessionOptions.setServerHost(options.host)<line_sep>sessionOptions.setServerPort(options.port)<line_sep>sessionOptions.setSessionIdentityOptions(options.auth['option'])<line_sep># Create a Session
session=blpapi.Session(sessionOptions)<line_sep># Start a Session
<if_stmt><not>session.start()<block_start><raise>Exception("Can't start session.")<block_end><try_stmt><block_start>print("Session started.")<line_sep># Open service to get reference data from
<if_stmt><not>session.openService(options.service)<block_start><raise>Exception("Can't open '{0}' service.".format(options.service))<block_end># Obtain previously opened service
service=session.getService(options.service)<line_sep>print("Service {0}:".format(options.service))<line_sep>print("Service event definitions[{0}]:".format(service.numEventDefinitions()))<line_sep># Enumerate and print all service's event definitions
<for_stmt>ed service.eventDefinitions()<block_start>printElementDefinition(ed)<block_end>print()<line_sep>print("Operations[{0}]:".format(service.numOperations()))<line_sep># Enumerate and print all service's operations
<for_stmt>operation service.operations()<block_start>printOperation(operation service)<block_end><block_end><finally_stmt># Stop the session
<block_start>session.stop()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print("ServiceSchema")<try_stmt><block_start>main()<block_end><except_stmt>KeyboardInterrupt<block_start>print("Ctrl+C pressed. Stopping...")<block_end><block_end>__copyright__="""
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""<line_sep> |
##########################################################
# pytorch-kaldi v.0.1
# <NAME>, <NAME>
# Mila, University of Montreal
# October 2018
#
# Description: This script generates kaldi ark files containing raw features.
# The file list must be a file containing "snt_id file.wav".
# Note that only wav files are supported here (sphere or other formats are not supported)
##########################################################
<import_stmt>scipy.io.wavfile<import_stmt>math<import_stmt>numpy<as>np<import_stmt>os<import_from_stmt>data_io read_vec_int_ark write_mat<line_sep># Run it for all the data chunks (e.g., train, dev, test) => uncomment
lab_folder="/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/exp/dnn4_pretrain-dbn_dnn_ali_test"<line_sep>lab_opts="ali-to-pdf"<line_sep>out_folder="/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test"<line_sep>wav_lst="/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/test/wav.lst"<line_sep>scp_file_out="/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test/feats_raw.scp"<line_sep># lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_dev'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/dev'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/dev/wav_lst.scp'
# scp_file_out='quick_test/data/dev/feats_raw.scp'
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_test'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/test'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/test/wav_lst.scp'
# scp_file_out='quick_test/data/test/feats_raw.scp'
sig_fs=16000# Hz
sig_wlen=200# ms
lab_fs=16000# Hz
lab_wlen=25# ms
lab_wshift=10# ms
sig_wlen_samp=int((sig_fs<times>sig_wlen)/1000)<line_sep>lab_wlen_samp=int((lab_fs<times>lab_wlen)/1000)<line_sep>lab_wshift_samp=int((lab_fs<times>lab_wshift)/1000)<line_sep># Create the output folder
<try_stmt><block_start>os.stat(out_folder)<block_end><except_stmt><block_start>os.makedirs(out_folder)<block_end># Create the scp file
scp_file=open(scp_file_out "w")<line_sep># reading the labels
lab={k:v<for>k,v read_vec_int_ark("gunzip -c "+lab_folder+"/ali*.gz | "+lab_opts+" "+lab_folder+"/final.mdl ark:- ark:-|" out_folder)}<line_sep># reading the list file
<with_stmt>open(wav_lst)<as>f<block_start>sig_lst=f.readlines()<block_end>sig_lst=[x.strip()<for>x sig_lst]<for_stmt>sig_file sig_lst<block_start>sig_id=sig_file.split(" ")[0]<line_sep>sig_path=sig_file.split(" ")[1]<line_sep>[fs signal]=scipy.io.wavfile.read(sig_path)<line_sep>signal=signal.astype(float)/32768<line_sep>signal=signal/np.max(np.abs(signal))<line_sep>cnt_fr=0<line_sep>beg_samp=0<line_sep>frame_all=[]<while_stmt>beg_samp+lab_wlen_samp<l>signal.shape[0]<block_start>sample_fr=np.zeros(sig_wlen_samp)<line_sep>central_sample_lab=int(((beg_samp+lab_wlen_samp/2)-1))<line_sep>central_fr_index=int(((sig_wlen_samp/2)-1))<line_sep>beg_signal_fr=int(central_sample_lab-(sig_wlen_samp/2))<line_sep>end_signal_fr=int(central_sample_lab+(sig_wlen_samp/2))<if_stmt>beg_signal_fr<ge>0<and>end_signal_fr<le>signal.shape[0]<block_start>sample_fr=signal[beg_signal_fr:end_signal_fr]<block_end><else_stmt><block_start><if_stmt>beg_signal_fr<l>0<block_start>n_left_samples=central_sample_lab<line_sep>sample_fr[central_fr_index-n_left_samples+1:]=signal[0:end_signal_fr]<block_end><if_stmt>end_signal_fr<g>signal.shape[0]<block_start>n_right_samples=signal.shape[0]-central_sample_lab<line_sep>sample_fr[0:central_fr_index+n_right_samples+1]=signal[beg_signal_fr:]<block_end><block_end>frame_all.append(sample_fr)<line_sep>cnt_fr=cnt_fr+1<line_sep>beg_samp=beg_samp+lab_wshift_samp<block_end>frame_all=np.asarray(frame_all)<line_sep># Save the matrix into a kaldi ark
out_file=out_folder+"/"+sig_id+".ark"<line_sep>write_mat(out_folder out_file frame_all key=sig_id)<line_sep>print(sig_id)<line_sep>scp_file.write(sig_id+" "+out_folder+"/"+sig_id+".ark:"+str(len(sig_id)+1)+"\n")<line_sep>N_fr_comp=1+math.floor((signal.shape[0]-400)/160)<line_sep># print("%s %i %i "%(lab[sig_id].shape[0],N_fr_comp,cnt_fr))
<block_end>scp_file.close()<line_sep> |
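As a quick sanity check on the framing arithmetic above (a sketch, not part of the original script; the 3-second utterance length is made up):

import math

sig_fs, sig_wlen = 16000, 200                   # Hz, ms
lab_fs, lab_wlen, lab_wshift = 16000, 25, 10    # Hz, ms, ms
sig_wlen_samp = int(sig_fs * sig_wlen / 1000)   # 3200 samples of raw signal per frame
lab_wlen_samp = int(lab_fs * lab_wlen / 1000)   # 400-sample label window
lab_wshift_samp = int(lab_fs * lab_wshift / 1000)  # 160-sample hop

n_samples = 3 * sig_fs                          # hypothetical 3 s utterance
n_frames = 1 + math.floor((n_samples - lab_wlen_samp) / lab_wshift_samp)
print(sig_wlen_samp, lab_wlen_samp, lab_wshift_samp, n_frames)  # 3200 400 160 298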
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DP-enabled DNNClassifier."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>functools<import_from_stmt>absl.testing parameterized<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_privacy.privacy.estimators test_utils<import_from_stmt>tensorflow_privacy.privacy.estimators.v1 dnn<import_from_stmt>tensorflow_privacy.privacy.optimizers.dp_optimizer DPGradientDescentGaussianOptimizer<class_stmt>DPDNNClassifierTest(tf.test.TestCase parameterized.TestCase)<block_start>"""Tests for DP-enabled DNNClassifier."""<line_sep>@parameterized.named_parameters(('BinaryClassDNN' 2) ('MultiClassDNN 3' 3) ('MultiClassDNN 4' 4) )<def_stmt>testDNN self n_classes<block_start>train_features,train_labels=test_utils.make_input_data(256 n_classes)<line_sep>feature_columns=[]<for_stmt>key train_features<block_start>feature_columns.append(tf.feature_column.numeric_column(key=key))<block_end>optimizer=functools.partial(DPGradientDescentGaussianOptimizer learning_rate=0.5 l2_norm_clip=1.0 noise_multiplier=0.0 num_microbatches=1)<line_sep>classifier=dnn.DNNClassifier(hidden_units=[10] activation_fn='relu' feature_columns=feature_columns n_classes=n_classes optimizer=optimizer loss_reduction=tf.losses.Reduction.NONE)<line_sep>classifier.train(input_fn=test_utils.make_input_fn(train_features train_labels <true> 16))<line_sep>test_features,test_labels=test_utils.make_input_data(64 n_classes)<line_sep>classifier.evaluate(input_fn=test_utils.make_input_fn(test_features test_labels <false> 16))<line_sep>predict_features,predict_labels=test_utils.make_input_data(64 n_classes)<line_sep>classifier.predict(input_fn=test_utils.make_input_fn(predict_features predict_labels <false>))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end> |
""" Add an owner to a resource or resources
Usage: add_owner {username} {resource list}
"""<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.contrib.auth.models User<import_from_stmt>hs_core.models BaseResource<import_from_stmt>hs_core.hydroshare.utils get_resource_by_shortkey<import_from_stmt>hs_access_control.models.privilege UserResourcePrivilege PrivilegeCodes<import_from_stmt>django_irods.icommands SessionException<import_from_stmt>django.db transaction<def_stmt>set_quota_holder resource user<block_start><try_stmt><block_start>resource.set_quota_holder(user user)<block_end><except_stmt>SessionException<as>ex# some resources copied from www for testing do not exist in the iRODS backend,
# hence we need to skip these test artifacts
<block_start>print(resource.short_id+' raised SessionException when setting quota holder: '+ex.stderr)<block_end><except_stmt>AttributeError<as>ex# when federation is not set up correctly, istorage does not have a session
# attribute, hence raise AttributeError - ignore for testing
<block_start>print((resource.short_id+' raised AttributeError when setting quota holder: '+str(ex)))<block_end><except_stmt>ValueError<as>ex# when federation is not set up correctly, istorage does not have a session
# attribute, hence raise AttributeError - ignore for testing
<block_start>print((resource.short_id+' raised ValueError when setting quota holder: '+str(ex)))<block_end><block_end><class_stmt>Command(BaseCommand)<block_start>help="add owner to resource"<def_stmt>add_arguments self parser<block_start>parser.add_argument('new_owner' type=str)<line_sep>parser.add_argument('--owned_by' dest='owned_by' help='prior owner of the resources')<line_sep>parser.add_argument('--set_quota_holder' action='store_true' # True for presence, False for absence
dest='set_quota_holder' # value is options['set_quota_holder']
help='set quota holder as new owner')<line_sep># a list of resource id's: none does nothing.
parser.add_argument('resource_ids' nargs='*' type=str)<block_end><def_stmt>handle self *args **options<block_start>user=User.objects.get(username=options['new_owner'])<line_sep>admin=User.objects.get(username='admin')<if_stmt>options['owned_by']<is><not><none><block_start>prior=User.objects.get(username=options['owned_by'])<for_stmt>res BaseResource.objects.filter(r2urp__user=prior r2urp__privilege=PrivilegeCodes.OWNER)<block_start><with_stmt>transaction.atomic()<block_start>resource=res.get_content_model()<line_sep>UserResourcePrivilege.share(user=user resource=resource privilege=PrivilegeCodes.OWNER grantor=admin)<line_sep>print("added owner {} to {}".format(options['new_owner'] resource.short_id))<if_stmt>options['set_quota_holder']<block_start>set_quota_holder(resource user)<line_sep>print("set quota holder to {} for {}".format(options['new_owner'] resource.short_id))<block_end><block_end><block_end><block_end><if_stmt>len(options['resource_ids'])<g>0# an array of resource short_id to check.
<block_start><for_stmt>rid options['resource_ids']<block_start>resource=get_resource_by_shortkey(rid or_404=<false>)<with_stmt>transaction.atomic()<block_start>UserResourcePrivilege.share(user=user resource=resource privilege=PrivilegeCodes.OWNER grantor=admin)<line_sep>print("added owner {} to {}".format(options['new_owner'] rid))<if_stmt>options['set_quota_holder']<block_start>set_quota_holder(resource user)<line_sep>print("set quota holder to {} for {}".format(options['new_owner'] resource.short_id))<block_end><block_end><block_end><block_end><block_end><block_end> |
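A hedged usage sketch: assuming this module is installed as a management command named add_owner (the real name depends on the file name under management/commands), it could be driven from a Django shell or test via call_command; the usernames and resource id below are made up.

from django.core.management import call_command

# grant ownership of one resource and of everything owned by 'previous_owner'
call_command('add_owner', 'new_owner_name', 'abc123def456',
             owned_by='previous_owner', set_quota_holder=True)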
"""
NamedConf parser - file ``/etc/named.conf``
===========================================
NamedConf parses the named configuration file.
Named is the name server daemon provided by BIND.
"""<import_from_stmt>insights.specs Specs<import_from_stmt>insights.core.plugins parser<import_from_stmt>insights.parsers SkipException<import_from_stmt>insights.parsers.named_checkconf NamedCheckconf<line_sep>@parser(Specs.named_conf)<class_stmt>NamedConf(NamedCheckconf)<block_start>"""
Class for parsing the file ``/etc/named.conf``. We use class ``NamedCheckConf`` to parse most
of the named.conf configurations and class ``NamedConf`` to parse the `include` directives.
.. note::
Please refer to the super-class :py:class:`insights.parsers.named_checkconf:NamedCheckConf`
for more usage information.
Attributes:
includes (list): List of files in 'include' section.
Raises:
SkipException: When content is empty or cannot be parsed.
Examples:
>>> named_conf.includes
['/etc/crypto-policies/back-ends/bind.config']
"""<def_stmt>parse_content self content<block_start>includes=[]<line_sep>super(NamedConf self).parse_content(content)<try_stmt><block_start><for_stmt>line [l<for>l content<if>l.strip().startswith('include ')<and>';'<in>l]<block_start>includes.append(line.split(';')[0].replace('"' '').split()[1])<block_end><block_end><except_stmt>IndexError<block_start><raise>SkipException("Syntax error of include directive")<block_end>self.includes=includes<block_end><block_end> |
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
<import_stmt>onnx<import_from_stmt>onnx helper TensorProto<line_sep>QUERY_TENSOR=helper.make_tensor_value_info('query_tensor' TensorProto.FLOAT ['batch' 4])<line_sep>ATTRIBUTE_TENSOR=helper.make_tensor_value_info('attribute_tensor' TensorProto.FLOAT [4 1])<line_sep>BIAS_TENSOR=helper.make_tensor_value_info('bias_tensor' TensorProto.FLOAT ['batch' -1])<line_sep>OUTPUT=helper.make_tensor_value_info('output' TensorProto.FLOAT ['batch' 1])<line_sep>nodes=[helper.make_node('MatMul' ['query_tensor' 'attribute_tensor'] ['matmul'] ) helper.make_node('ReduceSum' ['bias_tensor'] ['reduce'] axes=[1]) helper.make_node('Add' ['matmul' 'reduce'] ['output'] ) ]<line_sep>graph_def=helper.make_graph(nodes 'dynamic_scoring' [QUERY_TENSOR ATTRIBUTE_TENSOR BIAS_TENSOR ] [OUTPUT] )<line_sep>model_def=helper.make_model(graph_def producer_name='dynamic.py' opset_imports=[onnx.OperatorSetIdProto(version=12)])<line_sep>onnx.save(model_def 'dynamic.onnx')<line_sep> |
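A minimal sketch of how the exported model could be exercised (not part of the original file; the onnxruntime dependency and the random test shapes are assumptions):

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('dynamic.onnx')
batch = 2
feed = {
    'query_tensor': np.random.rand(batch, 4).astype(np.float32),
    'attribute_tensor': np.random.rand(4, 1).astype(np.float32),
    'bias_tensor': np.random.rand(batch, 3).astype(np.float32),   # second dim is dynamic
}
(output,) = session.run(['output'], feed)
print(output.shape)  # (2, 1): query @ attribute plus the row-sum of bias_tensor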
# Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
<try_stmt><block_start><import_stmt>unittest2<as>unittest<block_end><except_stmt>ImportError<block_start><import_stmt>unittest<block_end><import_from_stmt>functionaltest FunctionalTest<import_stmt>key_codes<import_from_stmt>textwrap dedent<class_stmt>Test_2734_ClearCells(FunctionalTest)<block_start><def_stmt>test_delete_key_clears_selected_cells self<block_start>self.assert_key_deletes_cells(key_codes.DELETE)<block_end><def_stmt>test_backspace_key_clears_selected_cells self<block_start>self.assert_key_deletes_cells(key_codes.BACKSPACE)<block_end><def_stmt>assert_key_deletes_cells self key_code# * Harold logs in and creates a new sheet
<block_start>self.login_and_create_new_sheet()<line_sep># * He enters some data in A1:A3
self.enter_cell_text(1 1 'a1')<line_sep>self.enter_cell_text(1 2 'a2')<line_sep>self.enter_cell_text(1 3 'a3')<line_sep>self.wait_for_cell_value(1 3 'a3')<line_sep># * He clicks on A1 and hits delete
self.click_on_cell(1 1)<line_sep>self.human_key_press(key_code)<line_sep># * He sees the value in A1 disappear while the others remain
self.wait_for_cell_value(1 1 '')<line_sep>self.wait_for_cell_value(1 2 'a2')<line_sep>self.wait_for_cell_value(1 3 'a3')<line_sep># * He selects the range a2:a3
self.select_range_with_shift_click((1 2) (1 3))<line_sep># He hits delete
self.human_key_press(key_code)<line_sep># * He sees that all the cells are now cleared
self.wait_for_cell_value(1 1 '')<line_sep>self.wait_for_cell_value(1 2 '')<line_sep>self.wait_for_cell_value(1 3 '')<block_end><def_stmt>test_delete_key_while_editing_still_does_what_it_should self# * Harold logs in and creates a new sheet
<block_start>self.login_and_create_new_sheet()<line_sep># * He enters three characters in A1
self.open_cell_for_editing(1 1)<line_sep>self.human_key_press(key_codes.NUMBER_1)<line_sep>self.human_key_press(key_codes.NUMBER_2)<line_sep>self.human_key_press(key_codes.NUMBER_3)<line_sep># * He moves left twice
self.human_key_press(key_codes.LEFT)<line_sep>self.human_key_press(key_codes.LEFT)<line_sep># He hits delete
self.human_key_press(key_codes.DELETE)<line_sep># the middle character is now missing
self.wait_for_cell_editor_content('13')<block_end><def_stmt>test_backspace_key_while_editing_still_does_what_it_should self# * Harold logs in and creates a new sheet
<block_start>self.login_and_create_new_sheet()<line_sep># * He enters three characters in A1
self.open_cell_for_editing(1 1)<line_sep>self.human_key_press(key_codes.NUMBER_1)<line_sep>self.human_key_press(key_codes.NUMBER_2)<line_sep>self.human_key_press(key_codes.NUMBER_3)<line_sep># * He moves left once
self.human_key_press(key_codes.LEFT)<line_sep># He hits backspace
self.human_key_press(key_codes.BACKSPACE)<line_sep># the middle character is now missing
self.wait_for_cell_editor_content('13')<block_end><def_stmt>test_can_clear_cell_from_usercode self# * Harold logs in and creates a new sheet
<block_start>self.login_and_create_new_sheet()<line_sep># * He enters some data in A1:A3
self.enter_cell_text(1 1 'a1')<line_sep>self.enter_cell_text(1 2 'a2')<line_sep>self.enter_cell_text(1 3 'a3')<line_sep>self.wait_for_cell_value(1 3 'a3')<line_sep># * He tries to use the clear() function from usercode on a cell
# and then tries to access some of the supposedly cleared attributes of the cell
self.prepend_usercode(dedent('''
worksheet.a1.error = 'harold puts a deliberate pointless error in'
worksheet.a1.clear()
worksheet.b1.formula = str(worksheet.a1.value)
worksheet.b2.formula = str(worksheet.a1.formula)
worksheet.b3.formula = str(worksheet.a1.formatted_value)
worksheet.b4.formula = str(worksheet.a1.error)
'''))<line_sep># * He sees the value in a1 disappear
self.wait_for_cell_value(1 1 '')<line_sep>self.wait_for_cell_value(1 2 'a2')<line_sep>self.wait_for_cell_value(1 3 'a3')<line_sep># * He sees his little investigations also produce the expected results
self.wait_for_cell_value(2 1 '<undefined>')<line_sep>self.wait_for_cell_value(2 2 'None')<line_sep>self.wait_for_cell_value(2 3 '')<line_sep>self.wait_for_cell_value(2 4 'None')<block_end><def_stmt>test_can_clear_cell_range_from_usercode self# * Harold logs in and creates a new sheet
<block_start>self.login_and_create_new_sheet()<line_sep># * He enters some data in A1:A3
self.enter_cell_text(1 1 'a1')<line_sep>self.enter_cell_text(1 2 'a2')<line_sep>self.enter_cell_text(1 3 'a3')<line_sep>self.wait_for_cell_value(1 3 'a3')<line_sep># * He tries to use the clear() function from usercode on a cell range
self.prepend_usercode(dedent('''
worksheet.a1.error = 'harold puts a deliberate pointless error in'
worksheet.a2.error = 'harold puts another deliberate pointless error in'
worksheet.cell_range("a1:a2").clear()
worksheet.b1.formula = str(worksheet.a1.value)
worksheet.b2.formula = str(worksheet.a1.formula)
worksheet.b3.formula = str(worksheet.a1.formatted_value)
worksheet.b4.formula = str(worksheet.a1.error)
worksheet.c1.formula = str(worksheet.a2.value)
worksheet.c2.formula = str(worksheet.a2.formula)
worksheet.c3.formula = str(worksheet.a2.formatted_value)
worksheet.c4.formula = str(worksheet.a2.error)
'''))<line_sep># * He sees the value in a1 and a2 disappear
self.wait_for_cell_value(1 1 '')<line_sep>self.wait_for_cell_value(1 2 '')<line_sep>self.wait_for_cell_value(1 3 'a3')<line_sep># * He sees his little investigations also produce the expected results
self.wait_for_cell_value(2 1 '<undefined>')<line_sep>self.wait_for_cell_value(2 2 'None')<line_sep>self.wait_for_cell_value(2 3 '')<line_sep>self.wait_for_cell_value(2 4 'None')<line_sep>self.wait_for_cell_value(3 1 '<undefined>')<line_sep>self.wait_for_cell_value(3 2 'None')<line_sep>self.wait_for_cell_value(3 3 '')<line_sep>self.wait_for_cell_value(3 4 'None')<block_end><block_end> |
<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>libcity.data.dataset TrafficStateGridOdDataset<import_from_stmt>libcity.data.utils generate_dataloader<import_from_stmt>libcity.utils ensure_dir<class_stmt>CSTNDataset(TrafficStateGridOdDataset)<block_start><def_stmt>__init__ self config<block_start>super().__init__(config)<line_sep>self.feature_name={'X':'float' 'W':'float' 'y':'float'}<block_end><def_stmt>_generate_ext_data self ext_data<block_start>num_samples=ext_data.shape[0]<line_sep>offsets=np.sort(np.concatenate((np.arange(-self.input_window-self.output_window+1 1 1) )))<line_sep>min_t=abs(min(offsets))<line_sep>max_t=abs(num_samples-abs(max(offsets)))<line_sep>W=[]<for_stmt>t range(min_t max_t)<block_start>W_t=ext_data[t+offsets <ellipsis>]<line_sep>W.append(W_t)<block_end>W=np.stack(W axis=0)<line_sep><return>W<block_end><def_stmt>_generate_data self<block_start>"""
Load the data file (.gridod) and the external data (.ext), and return them as X, W, y
Returns:
tuple: tuple contains:
X(np.ndarray): model input data, (num_samples, input_length, ..., feature_dim) \n
W(np.ndarray): model external data, (num_samples, input_length, ext_dim)
y(np.ndarray): model output data, (num_samples, output_length, ..., feature_dim)
"""<line_sep># handle the case of multiple data files
<if_stmt>isinstance(self.data_files list)<block_start>data_files=self.data_files.copy()<block_end><else_stmt><block_start>data_files=[self.data_files].copy()<block_end># load the external data
ext_data=self._load_ext()# (len_time, ext_dim)
W=self._generate_ext_data(ext_data)<line_sep># load the basic feature data
X_list,y_list=[] []<for_stmt>filename data_files<block_start>df=self._load_dyna(filename)# (len_time, ..., feature_dim)
X,y=self._generate_input_data(df)<line_sep># x: (num_samples, input_length, input_dim)
# y: (num_samples, output_length, ..., output_dim)
X_list.append(X)<line_sep>y_list.append(y)<block_end>X=np.concatenate(X_list)<line_sep>y=np.concatenate(y_list)<line_sep>df=self._load_dyna(data_files[0]).squeeze()<line_sep>self._logger.info("Dataset created")<line_sep>self._logger.info("X shape: {}, W shape: {}, y shape: ".format(str(X.shape) str(W.shape) y.shape))<line_sep><return>X W y<block_end><def_stmt>_split_train_val_test self X W y<block_start>test_rate=1-self.train_rate-self.eval_rate<line_sep>num_samples=X.shape[0]<line_sep>num_test=round(num_samples<times>test_rate)<line_sep>num_train=round(num_samples<times>self.train_rate)<line_sep>num_eval=num_samples-num_test-num_train<line_sep># train
x_train,w_train,y_train=X[:num_train] W[:num_train] y[:num_train]<line_sep># eval
x_eval,w_eval,y_eval=X[num_train:num_train+num_eval] W[num_train:num_train+num_eval] y[num_train:num_train+num_eval]<line_sep># test
x_test,w_test,y_test=X[-num_test:] W[-num_test:] y[-num_test:]<line_sep># log
self._logger.info("train\tX: {}, W: {}, y: {}".format(str(x_train.shape) str(w_train.shape) str(y_train.shape)))<line_sep>self._logger.info("eval\tX: {}, W: {}, y: {}".format(str(x_eval.shape) str(w_eval.shape) str(y_eval.shape)))<line_sep>self._logger.info("test\tX: {}, W: {}, y: {}".format(str(x_test.shape) str(w_test.shape) str(y_test.shape)))<line_sep><return>x_train w_train y_train x_eval w_eval y_eval x_test w_test y_test<block_end><def_stmt>_load_cache_train_val_test self<block_start>self._logger.info('Loading '+self.cache_file_name)<line_sep>cat_data=np.load(self.cache_file_name)<line_sep>x_train,w_train,y_train,x_eval,w_eval,y_eval,x_test,w_test,y_test=cat_data['x_train'] cat_data['w_train'] cat_data['y_train'] cat_data['x_eval'] cat_data['w_eval'] cat_data['y_eval'] cat_data['x_test'] cat_data['w_test'] cat_data['y_test']<line_sep>self._logger.info("train\tX: {}, W: {}, y: {}".format(str(x_train.shape) str(w_train.shape) str(y_train.shape)))<line_sep>self._logger.info("eval\tX: {}, W: {}, y: {}".format(str(x_eval.shape) str(w_eval.shape) str(y_eval.shape)))<line_sep>self._logger.info("test\tX: {}, W: {}, y: {}".format(str(x_test.shape) str(w_test.shape) str(y_test.shape)))<line_sep><return>x_train w_train y_train x_eval w_eval y_eval x_test w_test y_test<block_end><def_stmt>_generate_train_val_test self<block_start>X,W,y=self._generate_data()<line_sep>x_train,w_train,y_train,x_eval,w_eval,y_eval,x_test,w_test,y_test=self._split_train_val_test(X W y)<if_stmt>self.cache_dataset<block_start>ensure_dir(self.cache_file_folder)<line_sep>np.savez_compressed(self.cache_file_name x_train=x_train w_train=w_train y_train=y_train x_test=x_test w_test=w_test y_test=y_test x_eval=x_eval w_eval=w_eval y_eval=y_eval )<line_sep>self._logger.info('Saved at '+self.cache_file_name)<block_end><return>x_train w_train y_train x_eval w_eval y_eval x_test w_test y_test<block_end><def_stmt>get_data self# 加载数据集
<block_start>x_train,w_train,y_train,x_eval,w_eval,y_eval,x_test,w_test,y_test=[] [] [] [] [] [] [] [] []<if_stmt>self.data<is><none><block_start><if_stmt>self.cache_dataset<and>os.path.exists(self.cache_file_name)<block_start>x_train,w_train,y_train,x_eval,w_eval,y_eval,x_test,w_test,y_test=self._load_cache_train_val_test()<block_end><else_stmt><block_start>x_train,w_train,y_train,x_eval,w_eval,y_eval,x_test,w_test,y_test=self._generate_train_val_test()<block_end><block_end># data normalization
self.feature_dim=x_train.shape[-1]<line_sep>self.ext_dim=w_train.shape[-1]<line_sep>self.scaler=self._get_scalar(self.scaler_type x_train y_train)<line_sep>x_train[<ellipsis> :self.output_dim]=self.scaler.transform(x_train[<ellipsis> :self.output_dim])<line_sep>w_train[<ellipsis> :self.output_dim]=self.scaler.transform(w_train[<ellipsis> :self.output_dim])<line_sep>y_train[<ellipsis> :self.output_dim]=self.scaler.transform(y_train[<ellipsis> :self.output_dim])<line_sep>x_eval[<ellipsis> :self.output_dim]=self.scaler.transform(x_eval[<ellipsis> :self.output_dim])<line_sep>w_eval[<ellipsis> :self.output_dim]=self.scaler.transform(w_eval[<ellipsis> :self.output_dim])<line_sep>y_eval[<ellipsis> :self.output_dim]=self.scaler.transform(y_eval[<ellipsis> :self.output_dim])<line_sep>x_test[<ellipsis> :self.output_dim]=self.scaler.transform(x_test[<ellipsis> :self.output_dim])<line_sep>w_test[<ellipsis> :self.output_dim]=self.scaler.transform(w_test[<ellipsis> :self.output_dim])<line_sep>y_test[<ellipsis> :self.output_dim]=self.scaler.transform(y_test[<ellipsis> :self.output_dim])<line_sep>train_data=list(zip(x_train w_train y_train))<line_sep>eval_data=list(zip(x_eval w_eval y_eval))<line_sep>test_data=list(zip(x_test w_test y_test))<line_sep># convert the splits into DataLoaders
self.train_dataloader,self.eval_dataloader,self.test_dataloader=generate_dataloader(train_data eval_data test_data self.feature_name self.batch_size self.num_workers pad_with_last_sample=self.pad_with_last_sample)<line_sep>self.num_batches=len(self.train_dataloader)<line_sep><return>self.train_dataloader self.eval_dataloader self.test_dataloader<block_end><def_stmt>get_data_feature self<block_start>"""
Return the dataset features: scaler is the normalization method, adj_mx is the adjacency matrix, num_nodes is the number of grid cells,
len_row is the number of grid rows, len_column is the number of grid columns,
feature_dim is the dimension of the input data, and output_dim is the dimension of the model output
Returns:
dict: a dictionary containing the relevant features of the dataset
"""<line_sep><return>{"scaler":self.scaler "num_nodes":self.num_nodes "feature_dim":self.feature_dim "ext_dim":self.ext_dim "output_dim":self.output_dim "len_row":self.len_row "len_column":self.len_column "num_batches":self.num_batches}<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>cutadapt.__main__ main parse_cutoffs parse_lengths CommandLineError setup_logging<def_stmt>test_help <block_start><with_stmt>pytest.raises(SystemExit)<as>e<block_start>main(["--help"])<block_end><assert_stmt>e.value.args[0]<eq>0<block_end><def_stmt>test_parse_cutoffs <block_start><assert_stmt>parse_cutoffs("5")<eq>(0 5)<assert_stmt>parse_cutoffs("6,7")<eq>(6 7)<with_stmt>pytest.raises(CommandLineError)<block_start>parse_cutoffs("a,7")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_cutoffs("a")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_cutoffs("a,7")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_cutoffs("1,2,3")<block_end><block_end><def_stmt>test_parse_lengths <block_start><assert_stmt>parse_lengths("25")<eq>(25 )<assert_stmt>parse_lengths("17:25")<eq>(17 25)<assert_stmt>parse_lengths("25:")<eq>(25 <none>)<assert_stmt>parse_lengths(":25")<eq>(<none> 25)<with_stmt>pytest.raises(CommandLineError)<block_start>parse_lengths("1:2:3")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_lengths("a:2")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_lengths("a")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_lengths("2:a")<block_end><with_stmt>pytest.raises(CommandLineError)<block_start>parse_lengths(":")<block_end><block_end><def_stmt>test_setup_logging <block_start><import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>setup_logging(logger log_to_stderr=<false> quiet=<false> minimal=<false> debug=<false>)<line_sep>logger.info("Log message")<line_sep>setup_logging(logger log_to_stderr=<false> debug=1)<line_sep>setup_logging(logger log_to_stderr=<false> quiet=<true>)<line_sep>setup_logging(logger log_to_stderr=<false> minimal=<true>)<block_end> |
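For readers unfamiliar with the functions under test, the following stand-in reproduces the behaviour the assertions above require; it is not cutadapt's actual implementation, and the local CommandLineError class is only a placeholder for the one imported from cutadapt.__main__.

class CommandLineError(Exception):
    pass

def parse_cutoffs_sketch(s):
    try:
        values = [int(part) for part in s.split(',')]
    except ValueError:
        raise CommandLineError('cutoffs must be integers: {!r}'.format(s))
    if len(values) == 1:
        return (0, values[0])        # "5"   -> (0, 5)
    if len(values) == 2:
        return tuple(values)         # "6,7" -> (6, 7)
    raise CommandLineError('expected one or two comma-separated cutoffs')

assert parse_cutoffs_sketch('5') == (0, 5)
assert parse_cutoffs_sketch('6,7') == (6, 7)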
# -*- coding: utf-8 -*-
'''
Mrknow TV Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''<import_stmt>urlparse base64 urllib<import_stmt>re time datetime<import_stmt>json<import_from_stmt>resources.lib.lib control<import_from_stmt>resources.lib.lib client<import_from_stmt>resources.lib.lib stale<def_stmt>get url params={}<block_start><try_stmt><block_start>params['api_id']=stale.pierwszatv_apiid<line_sep>params['checksum']=stale.pierwszatv_checksum<line_sep>url=urlparse.urljoin('http://pierwsza.tv' url)<line_sep>url=url+'?'+urllib.urlencode(params)<line_sep>headers={'Content-Type':'application/json'}<line_sep>result=client.request(url headers=headers output='response' error=<true>)<if_stmt><not>(result[0]<eq>'401'<or>result[0]<eq>'405')<block_start><return>result[1]<block_end>result=client.request(url headers=headers)<line_sep>#control.log('ZZZZZZZZ PIerwsza result: %s' % result)
<return>result<block_end><except_stmt><block_start><pass><block_end><block_end><def_stmt>getstream id<block_start><try_stmt><block_start>control.set_setting('pierwszatv.tokenExpireIn' '')<line_sep>control.set_setting('pierwszatv.serverId' '')<line_sep>control.set_setting('pierwszatv.streamId' '')<line_sep>control.set_setting('pierwszatv.token' '')<if_stmt>getPierwszaCredentialsInfo()<eq><false><block_start><if_stmt>control.yesnoDialog(control.lang(40003).encode('utf-8') control.lang(30481).encode('utf-8') '' 'Trakt' control.lang(30483).encode('utf-8') control.lang(30482).encode('utf-8'))<block_start>control.set_setting('pierwszatv.user' '')<line_sep>control.set_setting('pierwszatv.password' '')<line_sep>control.openSettings('1.4')<block_end><raise>Exception()<block_end>url='/api/stream/create'<line_sep>params={}<line_sep>params['id']=id<line_sep>params['user']=control.setting('pierwszatv.user').strip()<line_sep>params['password']=urllib.quote_plus(control.setting('pierwszatv.password'))<line_sep>result=get(url params)<line_sep>control.log('x1x1x1: %s'%result)<line_sep>result=json.loads(result)<if_stmt>result['status']<eq>'ok'#time.sleep(1)
<block_start>expirein=int(int(result['tokenExpireIn'])<times>0.75)<line_sep>expirewhen=datetime.datetime.now()+datetime.timedelta(seconds=expirein)<line_sep>control.set_setting('pierwszatv.tokenExpireIn' str(int(time.mktime(expirewhen.timetuple()))))<line_sep>control.set_setting('pierwszatv.serverId' result['serverId'])<line_sep>control.set_setting('pierwszatv.streamId' result['streamId'])<line_sep>control.set_setting('pierwszatv.token' result['token'])<for_stmt>i range(0 5)<block_start><try_stmt><block_start>r=get('/api/stream/status' {'serverId':result['serverId'] 'streamId':result['streamId'] 'token':result['token']})<line_sep>r=json.loads(r)<if_stmt>r['status']<eq>'ok'#control.infoDialog(control.lang(30489).encode('utf-8'), time=6000)
<block_start><for_stmt>j range(0 20)<block_start>time.sleep(1)<line_sep>control.infoDialog(control.lang(30489).encode('utf-8') time=500)<try_stmt><block_start>result2=client.request(r['source']+'?token='+result['token'] safe=<true> timeout='2')<line_sep>control.log('Pierwsza link check nr: %s: result:%s'%(j result2))<if_stmt>result2<eq><none><block_start><raise>Exception()<block_end><else_stmt><block_start><return>r['source']+'?token='+result['token']<block_end><block_end><except_stmt><block_start><pass><block_end><block_end><return>r['source']+'?token='+result['token']<block_end>time.sleep(3)<block_end><except_stmt><block_start><pass><block_end><block_end><block_end><if_stmt>result['status']<eq>'error'<block_start>control.infoDialog('%s'%result['message'].encode('utf-8'))<line_sep>control.dialog.ok(control.addonInfo('name') result['message'].encode('utf-8') '')<block_end><return><none><block_end><except_stmt>Exception<as>e<block_start>control.log('Error pierwsza.getstream %s'%e)<block_end><block_end><def_stmt>getPierwszaCredentialsInfo <block_start>user=control.setting('pierwszatv.user').strip()<line_sep>password=control.setting('pierwszatv.password')<if_stmt>(user<eq>''<or>password<eq>'')<block_start><return><false><block_end><return><true><block_end><def_stmt>streamrefresh <block_start><try_stmt>#mynow = int(datetime.datetime.now().strftime('%s'))
<block_start>mynow=int(str(int(time.mktime(datetime.datetime.now().timetuple()))))<line_sep>expired=int(control.get_setting('pierwszatv.tokenExpireIn'))<line_sep>#control.log('XXXX Exp:%s Now:%s' % (expired, mynow))
<if_stmt>mynow<g>expired<block_start>control.log('Pierwsza refresh')<line_sep>url='/api/stream/refresh'<line_sep>params={}<line_sep>params['serverId']=control.get_setting('pierwszatv.serverId')<line_sep>params['streamId']=control.get_setting('pierwszatv.streamId')<line_sep>params['token']=control.get_setting('pierwszatv.token')<line_sep>result=get(url params)<line_sep>result=json.loads(result)<line_sep>expirein=int(int(result['tokenExpireIn'])<times>0.75)<line_sep>expirewhen=datetime.datetime.now()+datetime.timedelta(seconds=expirein)<line_sep>control.set_setting('pierwszatv.tokenExpireIn' str(int(time.mktime(expirewhen.timetuple()))))<block_end><block_end><except_stmt>Exception<as>e<block_start>control.log('Error pierwsza.refresh %s'%e)<line_sep><raise>Exception()<block_end><block_end><def_stmt>chanels <block_start>items=[]<try_stmt><block_start>result=get('/api/channels')<line_sep>result=json.loads(result)<for_stmt>i result['channels']<block_start><try_stmt><block_start>items.append(i)<block_end><except_stmt><block_start><pass><block_end><block_end><if_stmt>len(items)<eq>0<block_start>items=result<block_end><block_end><except_stmt><block_start>control.log('Error pierwsza.chanels')<line_sep><pass><block_end><return>items<block_end> |
"""Generate a summary of a previously trained vowel recognition model.
"""<import_stmt>torch<import_stmt>wavetorch<import_stmt>argparse<import_stmt>yaml<import_stmt>os<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib<as>mpl<try_stmt><block_start><import_from_stmt>helpers.plot mpl_set_latex<line_sep>mpl_set_latex()<block_end><except_stmt>ImportError<block_start><import_stmt>warnings<line_sep>warnings.warn('The helpers package is unavailable' ImportWarning)<block_end>COL_TRAIN="#1f77b4"<line_sep>COL_TEST="#2ca02c"<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('filename' type=str)<line_sep>parser.add_argument('--vmin' type=float default=1e-3)<line_sep>parser.add_argument('--vmax' type=float default=1.0)<line_sep>parser.add_argument('--fig' type=str default=<none>)<line_sep>parser.add_argument('--title_off' action='store_true')<line_sep>parser.add_argument('--labels' action='store_true')<line_sep>parser.add_argument('--vowel_samples' nargs='+' type=int default=<none>)<if_stmt>__name__<eq>'__main__'<block_start>args=parser.parse_args()<line_sep>model,history,history_state,cfg=wavetorch.io.load_model(args.filename)<try_stmt><block_start><if_stmt>cfg['seed']<is><not><none><block_start>torch.manual_seed(cfg['seed'])<block_end><block_end><except_stmt><block_start><pass><block_end>print("Configuration for model in %s is:"%args.filename)<line_sep>print(yaml.dump(cfg default_flow_style=<false>))<line_sep>sr=cfg['data']['sr']<line_sep>gender=cfg['data']['gender']<line_sep>vowels=cfg['data']['vowels']<line_sep>N_classes=len(vowels)<line_sep>fig=plt.figure(figsize=(7 4.75) constrained_layout=<true>)<line_sep>gs=fig.add_gridspec(1 2 width_ratios=[1 0.4])<line_sep>gs_left=gs[0].subgridspec(3 2)<line_sep>gs_right=gs[1].subgridspec(N_classes+1 1 height_ratios=[1<for>i range(0 N_classes)]+[0.05])<line_sep>gs_bot=gs_left[2 :].subgridspec(1 2)<line_sep>ax_cm_train0=fig.add_subplot(gs_left[0 0])<line_sep>ax_cm_test0=fig.add_subplot(gs_left[0 1])<line_sep>ax_cm_train1=fig.add_subplot(gs_left[1 0])<line_sep>ax_cm_test1=fig.add_subplot(gs_left[1 1])<line_sep>ax_loss=fig.add_subplot(gs_bot[0])<line_sep>ax_acc=fig.add_subplot(gs_bot[1])<line_sep>ax_fields=[fig.add_subplot(gs_right[i])<for>i range(0 N_classes+1)]<line_sep>history_mean=history.groupby('epoch').mean()<line_sep>history_std=history.groupby('epoch').std()<line_sep>epochs=history_mean.index.values<line_sep>ax_loss.fill_between(epochs history_mean['loss_train'].values-history_std['loss_train'].values history_mean['loss_train'].values+history_std['loss_train'].values color=COL_TRAIN alpha=0.15)<line_sep>ax_loss.plot(epochs history_mean['loss_train'].values "-" label="Training dataset" ms=4 color=COL_TRAIN)<line_sep>ax_loss.fill_between(epochs history_mean['loss_test'].values-history_std['loss_test'].values history_mean['loss_test'].values+history_std['loss_test'].values color=COL_TEST alpha=0.15)<line_sep>ax_loss.plot(epochs history_mean['loss_test'].values "-" label="Testing dataset" ms=4 color=COL_TEST)<line_sep>ax_loss.set_ylabel('Loss')<line_sep>ax_loss.set_xlabel('Training epoch \#')<line_sep>ax_acc.plot(epochs history_mean['acc_train'].values<times>100 "-" label="Training dataset" ms=4 color=COL_TRAIN)<line_sep>ax_acc.fill_between(epochs history_mean['acc_train'].values<times>100-history_std['acc_train'].values<times>100 history_mean['acc_train'].values<times>100+history_std['acc_train'].values<times>100 color=COL_TRAIN alpha=0.15)<line_sep>ax_acc.plot(epochs history_mean['acc_test'].values<times>100 "-" label="Testing dataset" ms=4 
color=COL_TEST)<line_sep>ax_acc.fill_between(epochs history_mean['acc_test'].values<times>100-history_std['acc_test'].values<times>100 history_mean['acc_test'].values<times>100+history_std['acc_test'].values<times>100 color=COL_TEST alpha=0.15)<line_sep>ax_acc.set_xlabel('Training epoch \#')<line_sep>ax_acc.set_ylabel('Accuracy')<line_sep>ax_acc.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=10))<line_sep># ax_acc.set_ylim([20,100])
ax_loss.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=0.1))<line_sep># ax_loss.set_ylim([0.7,1.2])
ax_acc.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f\%%'))<line_sep>ax_loss.legend(fontsize='small')<line_sep># ax_acc.annotate("%.1f%% training set accuracy" % (history_mean['acc_train'].tail(1).iloc[0]*100), xy=(0.1,0.1), xytext=(0,10), textcoords="offset points", xycoords="axes fraction", ha="left", va="bottom", color=COL_TRAIN)
# ax_acc.annotate("%.1f%% testing set accuracy" % (history_mean['acc_test'].tail(1).iloc[0]*100), xy=(0.1,0.1), xycoords="axes fraction", ha="left", va="bottom", color=COL_TEST)
ax_acc.annotate('%.1f\%%'%(history_mean['acc_train'].tail(1).iloc[0]<times>100) xy=(epochs[-1] history_mean['acc_train'].tail(1).iloc[0]<times>100) xycoords='data' xytext=(-1 5) textcoords='offset points' ha='left' va='center' fontsize='small' color=COL_TRAIN bbox=wavetorch.plot.bbox_white)<line_sep>ax_acc.annotate('%.1f\%%'%(history_mean['acc_test'].tail(1).iloc[0]<times>100) xy=(epochs[-1] history_mean['acc_test'].tail(1).iloc[0]<times>100) xycoords='data' xytext=(-1 -5) textcoords='offset points' ha='left' va='center' fontsize='small' color=COL_TEST bbox=wavetorch.plot.bbox_white)<line_sep>print('Accuracy (train): %.1f%% +/- %.1f%%'%(history_mean['acc_train'].tail(1).iloc[0]<times>100 history_std['acc_train'].tail(1).iloc[0]<times>100))<line_sep>print('Accuracy (test): %.1f%% +/- %.1f%%'%(history_mean['acc_test'].tail(1).iloc[0]<times>100 history_std['acc_test'].tail(1).iloc[0]<times>100))<line_sep>cm_train=history.groupby('epoch')['cm_train'].apply(np.mean).head(1).iloc[0]<line_sep>cm_test=history.groupby('epoch')['cm_test'].apply(np.mean).head(1).iloc[0]<line_sep>wavetorch.plot.confusion_matrix(cm_train title="Training dataset" normalize=<true> ax=ax_cm_train0 labels=vowels)<line_sep>wavetorch.plot.confusion_matrix(cm_test title="Testing dataset" normalize=<true> ax=ax_cm_test0 labels=vowels)<line_sep>cm_train=history.groupby('epoch')['cm_train'].apply(np.mean).tail(1).iloc[0]<line_sep>cm_test=history.groupby('epoch')['cm_test'].apply(np.mean).tail(1).iloc[0]<line_sep>wavetorch.plot.confusion_matrix(cm_train title="Training dataset" normalize=<true> ax=ax_cm_train1 labels=vowels)<line_sep>wavetorch.plot.confusion_matrix(cm_test title="Testing dataset" normalize=<true> ax=ax_cm_test1 labels=vowels)<line_sep>X,Y,F=wavetorch.data.load_all_vowels(vowels gender='both' sr=sr random_state=0)<line_sep># model.load_state_dict(history_state[cfg['training']['N_epochs']])
<for_stmt>i range(N_classes)<block_start>xb,yb=wavetorch.data.select_vowel_sample(X Y F i ind=args.vowel_samples[i]<if>args.vowel_samples<is><not><none><else><none>)<with_stmt>torch.no_grad()<block_start>field_dist=model(xb output_fields=<true>)<line_sep>wavetorch.plot.total_field(model field_dist yb ax=ax_fields[yb.argmax().item()] cbar=<true> cax=ax_fields[-1] vmin=args.vmin vmax=args.vmax)<block_end><block_end><if_stmt>args.labels<block_start><try_stmt><block_start><import_from_stmt>helpers.plot apply_panel_labels<line_sep>apply_panel_labels([ax_cm_train0 ax_cm_test0 ax_cm_train1 ax_cm_test1 ax_loss ax_acc]+ax_fields[0:-1] xy=[(-35 0) (-35 0) (-35 0) (-35 0) (-25 0) (-40 0) (8 -6) (8 -6) (8 -6)] color=['k' 'k' 'k' 'k' 'k' 'k' 'w' 'w' 'w'] case='upper')<block_end><except_stmt>ImportError<block_start><import_stmt>warnings<line_sep>warnings.warn('The helpers package is unavailable' ImportWarning)<block_end><block_end>plt.show()<if_stmt>args.fig<is><not><none><block_start>fig.savefig(args.fig dpi=300)<block_end><else_stmt><block_start>fig.savefig(os.path.splitext(args.filename)[0]+"_summary.png" dpi=300)<block_end><block_end> |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
<import_stmt>functools<import_stmt>logging<import_from_stmt>.. trial<line_sep>_logger=logging.getLogger(__name__)<line_sep>_MUTABLE_LAYER_SPACE_PREFIX="_mutable_layer"<line_sep>_namespace={}<line_sep>_tf_variables={}<line_sep>_arch_logits_list=[]<line_sep>_optimizer=<none><line_sep>_train_op=<none><def_stmt>classic_mode mutable_id mutable_layer_id funcs funcs_args fixed_inputs optional_inputs optional_input_size<block_start>'''Execute the chosen function and inputs directly.
In this mode, the trial code is only running the chosen subgraph (i.e., the chosen ops and inputs),
without touching the full model graph.'''<if_stmt>trial.get_current_parameter()<is><none><block_start>trial.get_next_parameter()<block_end>chosen_layer,chosen_inputs=_get_layer_and_inputs_from_tuner(mutable_id mutable_layer_id list(optional_inputs.keys()))<line_sep>real_chosen_inputs=[optional_inputs[input_name]<for>input_name chosen_inputs]<line_sep>layer_out=funcs[chosen_layer]([fixed_inputs real_chosen_inputs] **funcs_args[chosen_layer])<line_sep><return>layer_out<block_end><def_stmt>enas_mode mutable_id mutable_layer_id funcs funcs_args fixed_inputs optional_inputs optional_input_size tf<block_start>'''For enas mode, we build the full model graph in trial but only run a subgraph。
This is implemented by masking inputs and branching ops.
Specifically, based on the received subgraph (through nni.get_next_parameter),
it can be known which inputs should be masked and which op should be executed.'''<line_sep>name_prefix="{}_{}".format(mutable_id mutable_layer_id)<line_sep># store namespace
_namespace[mutable_id]=<true><line_sep>_namespace[name_prefix]=dict()<line_sep>_namespace[name_prefix]['funcs']=list(funcs)<line_sep>_namespace[name_prefix]['optional_inputs']=list(optional_inputs)<line_sep># create tensorflow variables as 1/0 signals used to form subgraph
name_for_optional_inputs=name_prefix+'_optional_inputs'<line_sep>name_for_funcs=name_prefix+'_funcs'<line_sep>_tf_variables[name_prefix]=dict()<line_sep>_tf_variables[name_prefix]['optional_inputs']=tf.get_variable(name_for_optional_inputs [len(optional_inputs)] dtype=tf.bool trainable=<false>)<line_sep>_tf_variables[name_prefix]['funcs']=tf.get_variable(name_for_funcs [] dtype=tf.int64 trainable=<false>)<line_sep># get real values using their variable names
real_optional_inputs_value=[optional_inputs[name]<for>name _namespace[name_prefix]['optional_inputs']]<line_sep>real_func_value=[funcs[name]<for>name _namespace[name_prefix]['funcs']]<line_sep>real_funcs_args=[funcs_args[name]<for>name _namespace[name_prefix]['funcs']]<line_sep># build tensorflow graph of geting chosen inputs by masking
real_chosen_inputs=tf.boolean_mask(real_optional_inputs_value _tf_variables[name_prefix]['optional_inputs'])<line_sep># build tensorflow graph of different branches by using tf.case
branches=dict()<line_sep>func_output=<none><for_stmt>func_id range(len(funcs))<block_start>func_output=real_func_value[func_id]([fixed_inputs real_chosen_inputs] **real_funcs_args[func_id])<line_sep>branches[tf.equal(_tf_variables[name_prefix]['funcs'] func_id)]=<lambda>:func_output<block_end>layer_out=tf.case(branches exclusive=<true> default=<lambda>:func_output)<line_sep><return>layer_out<block_end><def_stmt>oneshot_mode mutable_id mutable_layer_id funcs funcs_args fixed_inputs optional_inputs optional_input_size tf<block_start>'''Similar to enas mode, oneshot mode also builds the full model graph.
The difference is that oneshot mode does not receive a subgraph.
Instead, it uses dropout to randomly drop out inputs and ops.'''<line_sep># NNI requires calling get_next_parameter before reporting a result, but the parameter is not used in this mode
<if_stmt>trial.get_current_parameter()<is><none><block_start>trial.get_next_parameter()<block_end>optional_inputs=list(optional_inputs.values())<line_sep>inputs_num=len(optional_inputs)<line_sep># Calculate dropout rate according to the formula r^(1/k), where r is a hyper-parameter and k is the number of inputs
<if_stmt>inputs_num<g>0<block_start>rate=0.01<power>(1/inputs_num)<line_sep>noise_shape=[inputs_num]+[1]<times>len(optional_inputs[0].get_shape())<line_sep>optional_inputs=tf.nn.dropout(optional_inputs rate=rate noise_shape=noise_shape)<line_sep>optional_inputs=[optional_inputs[idx]<for>idx range(inputs_num)]<block_end>layer_outs=[func([fixed_inputs optional_inputs] **funcs_args[func_name])<for>func_name,func funcs.items()]<line_sep>output_num=len(layer_outs)<line_sep>rate=0.01<power>(1/output_num)<line_sep>noise_shape=[output_num]+[1]<times>len(layer_outs[0].get_shape())<line_sep>layer_outs=tf.nn.dropout(layer_outs rate=rate noise_shape=noise_shape)<line_sep>layer_out=tf.reduce_sum(layer_outs axis=0)<line_sep><return>layer_out<block_end><def_stmt>darts_mode mutable_id mutable_layer_id funcs funcs_args fixed_inputs optional_inputs optional_input_size tf<block_start>optional_inputs=list(optional_inputs.values())<line_sep>layer_outs=[func([fixed_inputs optional_inputs] **funcs_args[func_name])<for>func_name,func funcs.items()]<line_sep># Create architecture weights for every func(op)
var_name="{}_{}_arch_weights".format(mutable_id mutable_layer_id)<line_sep>arch_logits=tf.get_variable(var_name shape=[len(funcs)] trainable=<false>)<line_sep>_arch_logits_list.append(arch_logits)<line_sep>arch_weights=tf.nn.softmax(arch_logits)<line_sep>layer_out=tf.add_n([arch_weights[idx]<times>out<for>idx,out enumerate(layer_outs)])<line_sep><return>layer_out<block_end><def_stmt>reload_tensorflow_variables tf session<block_start>'''In Enas mode, this function reload every signal varaible created in `enas_mode` function so
the whole tensorflow graph will be changed into certain subgraph recerived from Tuner.
---------------
session: the tensorflow session created by users
tf: tensorflow module
'''<line_sep>subgraph_from_tuner=trial.get_next_parameter()<line_sep>mutable_layers=set()<for_stmt>subgraph_key subgraph_from_tuner<block_start><if_stmt>"/"<in>subgraph_key# has to remove the last, could be layer_choice or whatever
<block_start>mutable_id,mutable_layer_id=_decompose_general_key(subgraph_key[:subgraph_key.rfind("/")])<if_stmt>mutable_id<is><not><none><block_start>mutable_layers.add((mutable_id mutable_layer_id))<block_end><block_end><block_end>mutable_layers=sorted(list(mutable_layers))<for_stmt>mutable_id,mutable_layer_id mutable_layers<block_start><if_stmt>mutable_id<not><in>_namespace<block_start>_logger.warning("%s not found in name space" mutable_id)<line_sep><continue><block_end>name_prefix="{}_{}".format(mutable_id mutable_layer_id)<line_sep># get optional inputs names
optional_inputs=_namespace[name_prefix]['optional_inputs']<line_sep># extract layer information from the subgraph sampled by tuner
chosen_layer,chosen_inputs=_get_layer_and_inputs_from_tuner(mutable_id mutable_layer_id optional_inputs)<line_sep>chosen_layer=_namespace[name_prefix]['funcs'].index(chosen_layer)<line_sep>chosen_inputs=[1<if>inp<in>chosen_inputs<else>0<for>inp optional_inputs]<line_sep># load these information into pre-defined tensorflow variables
_tf_variables[name_prefix]['funcs'].load(chosen_layer session)<line_sep>_tf_variables[name_prefix]['optional_inputs'].load(chosen_inputs session)<block_end><block_end><def_stmt>_construct_general_key mutable_id mutable_layer_id# Mutable layer key in a general (search space) format
# that is, prefix/mutable_id/mutable_layer_id
<block_start><return>_MUTABLE_LAYER_SPACE_PREFIX+"/"+mutable_id+"/"+mutable_layer_id<block_end><def_stmt>_decompose_general_key key# inverse operation of above
<block_start><if_stmt><not>key.startswith(_MUTABLE_LAYER_SPACE_PREFIX)<block_start><return><none> <none><block_end><else_stmt><block_start>_,mutable_id,mutable_layer_id=key.split("/" maxsplit=2)<line_sep><return>mutable_id mutable_layer_id<block_end><block_end><def_stmt>darts_training tf session loss feed_dict<block_start><global>_optimizer _train_op<if_stmt>_optimizer<is><none><block_start>_optimizer=tf.MomentumOptimizer(learning_rate=0.025)<line_sep># TODO: Calculate loss
grads_and_vars=_optimizer.compute_gradients(loss _arch_logits_list)<line_sep>_train_op=_optimizer.apply_gradients(grads_and_vars)<block_end>session.run(_train_op)<block_end><def_stmt>training_update nas_mode tf=<none> session=<none> loss=<none> feed_dict=<none><block_start><if_stmt>nas_mode<eq>'darts_mode'<block_start>darts_training(tf session loss feed_dict)<block_end><elif_stmt>nas_mode<eq>'enas_mode'<block_start>reload_tensorflow_variables(tf session)<block_end><block_end><def_stmt>_get_layer_and_inputs_from_tuner mutable_id mutable_layer_id optional_inputs# optional_inputs should be name(key)s of the optional inputs
<block_start><try_stmt><block_start>mutable_block=trial.get_current_parameter(mutable_id)<line_sep># There is a NAS tuner
chosen_layer=mutable_block[mutable_layer_id]["chosen_layer"]<line_sep>chosen_inputs=mutable_block[mutable_layer_id]["chosen_inputs"]<block_end><except_stmt>KeyError# Try to find converted NAS parameters
<block_start>params=trial.get_current_parameter()<line_sep>expected_prefix=_construct_general_key(mutable_id mutable_layer_id)<line_sep>chosen_layer=params[expected_prefix+"/layer_choice"]<line_sep># find how many to choose
optional_input_size=int(params[expected_prefix+"/optional_input_size"])# convert uniform to randint
# find who to choose, can duplicate
optional_input_state=params[expected_prefix+"/optional_input_chosen_state"]<line_sep>chosen_inputs=[]<line_sep># make sure dict -> list produce stable result by sorting
optional_inputs_keys=sorted(optional_inputs)<for_stmt>_ range(optional_input_size)<block_start>chosen_inputs.append(optional_inputs_keys[optional_input_state%len(optional_inputs)])<line_sep>optional_input_state<augfloordiv>len(optional_inputs)<block_end><block_end>_logger.info("%s_%s: layer: %s, optional inputs: %s" mutable_id mutable_layer_id chosen_layer chosen_inputs)<line_sep><return>chosen_layer chosen_inputs<block_end><def_stmt>convert_nas_search_space search_space<block_start>"""
Args:
param search_space: raw search space
return: the new search space, mutable_layers will be converted into choice
"""<if_stmt><not>isinstance(search_space dict)<block_start><return>search_space<block_end>ret=dict()<for_stmt>k,v search_space.items()<block_start><if_stmt>"_type"<not><in>v# this should not happen
<block_start>_logger.warning("There is no _type in one of your search space values with key '%s'"<concat>". Please check your search space" k)<line_sep>ret[k]=v<block_end><elif_stmt>v["_type"]<ne>"mutable_layer"<block_start>ret[k]=v<block_end><else_stmt><block_start>_logger.info("Converting mutable_layer search space with key '%s'" k)<line_sep># v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...}
values=v["_value"]<for_stmt>layer_name,layer_data values.items()# there should be at most layer_choice, optional_inputs, optional_input_size in layer_data
# add "_mutable_layer" as prefix so that they can be recovered later
<block_start>layer_key=_construct_general_key(k layer_name)<if_stmt>layer_data.get("layer_choice")# filter out empty choice and no choice
<block_start>layer_choice=layer_data["layer_choice"]<block_end><else_stmt><block_start><raise>ValueError("No layer choice found in %s"%layer_key)<block_end><if_stmt>layer_data.get("optional_input_size")<block_start>input_size=layer_data["optional_input_size"]<if_stmt>isinstance(input_size int)<block_start>input_size=[input_size input_size]<block_end><if_stmt>input_size[0]<g>input_size[1]<or>input_size[0]<l>0<block_start>_logger.error("Might not be able to handle optional_input_size < 0, please double check")<block_end>input_size[1]<augadd>1<block_end><else_stmt><block_start>_logger.info("Optional input choices are set to empty by default in %s" layer_key)<line_sep>input_size=[0 1]<block_end><if_stmt>layer_data.get("optional_inputs")<block_start>total_state_size=len(layer_data["optional_inputs"])<power>(input_size[1]-1)<block_end><else_stmt><block_start>_logger.info("Optional inputs not found in %s" layer_key)<line_sep>total_state_size=1<block_end>converted={layer_key+"/layer_choice":{"_type":"choice" "_value":layer_choice} layer_key+"/optional_input_size":{"_type":"randint" "_value":input_size} layer_key+"/optional_input_chosen_state":{"_type":"randint" "_value":[0 total_state_size]}}<line_sep>_logger.info(converted)<line_sep>ret.update(converted)<block_end><block_end><block_end><return>ret<block_end><def_stmt>rewrite_nas_space func<block_start>@functools.wraps(func)<def_stmt>wrap self search_space<block_start>search_space=convert_nas_search_space(search_space)<line_sep><return>func(self search_space)<block_end><return>wrap<block_end> |
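A minimal sketch of what convert_nas_search_space above produces for one mutable_layer entry; the search-space names and values here are hypothetical, not taken from the original file:

raw_space = {
    "conv_block": {
        "_type": "mutable_layer",
        "_value": {
            "mutable_layer_1": {
                "layer_choice": ["conv3x3", "conv5x5"],
                "optional_inputs": ["in_a", "in_b"],
                "optional_input_size": 1,
            }
        },
    }
}
# convert_nas_search_space(raw_space) flattens the layer into three plain hyperparameters:
# "_mutable_layer/conv_block/mutable_layer_1/layer_choice"
#     {"_type": "choice", "_value": ["conv3x3", "conv5x5"]}
# "_mutable_layer/conv_block/mutable_layer_1/optional_input_size"
#     {"_type": "randint", "_value": [1, 2]}      # upper bound made exclusive, hence the +1
# "_mutable_layer/conv_block/mutable_layer_1/optional_input_chosen_state"
#     {"_type": "randint", "_value": [0, 2]}      # 2 == len(optional_inputs) ** (2 - 1)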
codigo_set=set()<line_sep>codido_set_saiu=set()<line_sep>s=input()<line_sep>codigos=input().split(' ')<for_stmt>codigo codigos<block_start>codigo_set.add(codigo)<block_end>i=input()<line_sep>saidas=input().split(' ')<line_sep>A=0<line_sep>I=0<line_sep>R=0<for_stmt>saida saidas<block_start><if_stmt>saida<in>codigo_set<block_start><if_stmt>saida<in>codido_set_saiu<block_start>R<augadd>1<block_end><else_stmt><block_start>A<augadd>1<line_sep>codido_set_saiu.add(saida)<block_end><block_end><else_stmt><block_start><if_stmt>saida<in>codido_set_saiu<block_start>R<augadd>1<block_end><else_stmt><block_start>I<augadd>1<line_sep>codido_set_saiu.add(saida)<block_end><block_end><block_end>print('%d A'%A)<line_sep>print('%d I'%I)<line_sep>print('%d R'%R)<line_sep> |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Logging related module."""<import_stmt>os<import_stmt>logging<import_from_stmt>logging _checkLevel<import_from_stmt>fastseq.config FASTSEQ_DEFAULT_LOG_LEVEL FASTSEQ_LOG_LEVEL FASTSEQ_LOG_FORMAT<def_stmt>set_default_log_level <block_start>"""Set the default log level from the environment variable"""<try_stmt><block_start>fastseq_log_level=_checkLevel(FASTSEQ_LOG_LEVEL)<block_end><except_stmt>(ValueError TypeError)<as>e<block_start>logging.error("Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. "<concat>"'DEBUG', 'INFO'): {}".format(e))<line_sep><raise><block_end>logging.basicConfig(level=fastseq_log_level format=FASTSEQ_LOG_FORMAT)<block_end><def_stmt>get_logger name=<none> level=logging.INFO<block_start>"""
Return a logger with the specific name, creating it if necessary.
If no name is specified, return the root logger.
Args:
name (str, optional): logger name. Defaults to None.
Returns:
Logger : the specified logger.
"""<line_sep>level=_checkLevel(level)<if_stmt>FASTSEQ_LOG_LEVEL<ne>FASTSEQ_DEFAULT_LOG_LEVEL<block_start><try_stmt><block_start>level=_checkLevel(FASTSEQ_LOG_LEVEL)<block_end><except_stmt>(ValueError TypeError)<as>e<block_start>logging.error("Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. "<concat>"'DEBUG', 'INFO'): {}".format(e))<line_sep><raise><block_end><block_end>logger=logging.getLogger(name)<line_sep>logger.setLevel(level)<line_sep><return>logger<block_end><def_stmt>update_all_log_level level=logging.INFO<block_start>"""
Update all the loggers to use the specified level.
Args:
level (int/str, optional): the log level. Defaults to logging.INFO.
"""<line_sep>loggers=[logging.getLogger(name)<for>name logging.root.manager.loggerDict]<for_stmt>logger loggers<block_start>logger.setLevel(level)<block_end><block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>xml.etree.ElementTree<as>ET<line_sep>#import xml.dom.minidom as minidom
<import_stmt>cv2<import_from_stmt>bbox_labeling SimpleBBoxLabeling<line_sep>input_dir=sys.argv[1].rstrip(os.sep)<line_sep>bbox_filenames=[x<for>x os.listdir(input_dir)<if>x.endswith('.bbox')]<for_stmt>bbox_filename bbox_filenames<block_start>bbox_filepath=os.sep.join([input_dir bbox_filename])<line_sep>jpg_filepath=bbox_filepath[:-5]<if_stmt><not>os.path.exists(jpg_filepath)<block_start>print('Something is wrong with {}!'.format(bbox_filepath))<line_sep><break><block_end>root=ET.Element('annotation')<line_sep>filename=ET.SubElement(root 'filename')<line_sep>jpg_filename=jpg_filepath.split(os.sep)[-1]<line_sep>filename.text=jpg_filename<line_sep>img=cv2.imread(jpg_filepath)<line_sep>h,w,c=img.shape<line_sep>size=ET.SubElement(root 'size')<line_sep>width=ET.SubElement(size 'width')<line_sep>width.text=str(w)<line_sep>height=ET.SubElement(size 'height')<line_sep>height.text=str(h)<line_sep>depth=ET.SubElement(size 'depth')<line_sep>depth.text=str(c)<line_sep>bboxes=SimpleBBoxLabeling.load_bbox(bbox_filepath)<for_stmt>obj_name,coord bboxes<block_start>obj=ET.SubElement(root 'object')<line_sep>name=ET.SubElement(obj 'name')<line_sep>name.text=obj_name<line_sep>bndbox=ET.SubElement(obj 'bndbox')<line_sep>xmin=ET.SubElement(bndbox 'xmin')<line_sep>xmax=ET.SubElement(bndbox 'xmax')<line_sep>ymin=ET.SubElement(bndbox 'ymin')<line_sep>ymax=ET.SubElement(bndbox 'ymax')<line_sep>(left top),(right bottom)=coord<line_sep>xmin.text=str(left)<line_sep>xmax.text=str(right)<line_sep>ymin.text=str(top)<line_sep>ymax.text=str(bottom)<block_end>xml_filepath=jpg_filepath[:jpg_filepath.rfind('.')]+'.xml'<with_stmt>open(xml_filepath 'w')<as>f<block_start>anno_xmlstr=ET.tostring(root)<line_sep># In case a nicely formatted xml is needed
# uncomment the following 2 lines and minidom import
#anno_xml = minidom.parseString(anno_xmlstr)
#anno_xmlstr = anno_xml.toprettyxml()
f.write(anno_xmlstr)<block_end><block_end> |
"""`Factory` provider - passing injections to the underlying providers example."""<import_from_stmt>dependency_injector containers providers<class_stmt>Regularizer<block_start><def_stmt>__init__ self alpha:float<arrow><none><block_start>self.alpha=alpha<block_end><block_end><class_stmt>Loss<block_start><def_stmt>__init__ self regularizer:Regularizer<arrow><none><block_start>self.regularizer=regularizer<block_end><block_end><class_stmt>ClassificationTask<block_start><def_stmt>__init__ self loss:Loss<arrow><none><block_start>self.loss=loss<block_end><block_end><class_stmt>Algorithm<block_start><def_stmt>__init__ self task:ClassificationTask<arrow><none><block_start>self.task=task<block_end><block_end><class_stmt>Container(containers.DeclarativeContainer)<block_start>algorithm_factory=providers.Factory(Algorithm task=providers.Factory(ClassificationTask loss=providers.Factory(Loss regularizer=providers.Factory(Regularizer ) ) ) )<block_end><if_stmt>__name__<eq>'__main__'<block_start>container=Container()<line_sep>algorithm_1=container.algorithm_factory(task__loss__regularizer__alpha=0.5 )<assert_stmt>algorithm_1.task.loss.regularizer.alpha<eq>0.5<line_sep>algorithm_2=container.algorithm_factory(task__loss__regularizer__alpha=0.7 )<assert_stmt>algorithm_2.task.loss.regularizer.alpha<eq>0.7<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>._common *<class_stmt>Yizhibo(Extractor)<block_start>name='Yizhibo (一直播)'<def_stmt>prepare self<block_start>info=MediaInfo(self.name)<line_sep>info.live=<true><line_sep>self.vid=self.url[self.url.rfind('/')+1:].split('.')[0]<line_sep>data=get_response('http://www.yizhibo.com/live/h5api/get_basic_live_info' params={'scid':self.vid}).json()<assert_stmt>data['result']<eq>1 'Error : '+str(data['result'])<line_sep>data=data['data']<line_sep>info.title=data['live_title']<line_sep>info.artist=data['nickname']<line_sep>info.streams['current']={'container':'m3u8' 'video_profile':'current' 'src':[data['play_url']] 'size':float('inf')}<line_sep><return>info<block_end><block_end>site=Yizhibo()<line_sep>
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>enum<import_stmt>numpy<as>np<import_from_stmt>numba float32<line_sep># Internal globals
_THREADS_PER_BLOCK=32<line_sep>_WARP_SIZE=32<line_sep>_DTYPE=float32<line_sep># Constants
FP32_INF=np.inf<line_sep>FP32_NEG_INF=-np.inf<line_sep>THRESHOLD=1e-1<line_sep>"""
Getters
"""<def_stmt>threads_per_block <block_start><global>_THREADS_PER_BLOCK<line_sep><return>_THREADS_PER_BLOCK<block_end><def_stmt>warp_size <block_start><global>_WARP_SIZE<line_sep><return>_WARP_SIZE<block_end><def_stmt>dtype <block_start><global>_DTYPE<line_sep><return>_DTYPE<block_end># RNNT STATUS
<class_stmt>RNNTStatus(enum.Enum)<block_start>RNNT_STATUS_SUCCESS=0<line_sep>RNNT_STATUS_INVALID_VALUE=1<block_end> |
<import_from_stmt>contextlib contextmanager<import_from_stmt>functools lru_cache<import_from_stmt>typing Generator<line_sep>@lru_cache(maxsize=<none>)<def_stmt>slow_function message timeout<block_start>"""This function is slow."""<line_sep>print(message)<block_end>@contextmanager<def_stmt>feeling_good x:int y:int<arrow>Generator<block_start>"""You'll feel better in this context!"""<line_sep><yield><block_end> |
"""
Query and deal common tables.
"""<import_from_stmt>evennia.utils logger<import_from_stmt>django.apps apps<import_from_stmt>django.conf settings<class_stmt>ImageResourcesMapper(object)<block_start>"""
Object's image.
"""<def_stmt>__init__ self<block_start>self.model_name="image_resources"<line_sep>self.model=apps.get_model(settings.WORLD_DATA_APP self.model_name)<line_sep>self.objects=self.model.objects<block_end><def_stmt>get self resource<block_start>"""
Get object's image.
Args:
resource: (string) resource's path.
"""<line_sep><return>self.objects.get(resource=resource)<block_end><def_stmt>add self path type width height<block_start>"""
Add a new image record.
Args:
path: image's path
type: image's type
width: image's width
height: image's height
Return:
none
"""<line_sep>record={"resource":path "type":type "image_width":width "image_height":height }<line_sep>data=self.model(**record)<line_sep>data.full_clean()<line_sep>data.save()<block_end><block_end>IMAGE_RESOURCES=ImageResourcesMapper()<line_sep> |
"""Library with compatible bulb types.
Bulb Type detection:
ESP01_SHDW1C_31
ESP01 -- defines the module family (WiFi only bulb in this case)
SH -- Single Head light (most bulbs are single heads) / LED Strip
TW -- Tunable White - can only control CCT and dimming; no color
DW -- Dimmable White (most filament bulbs)
RGB -- Fullstack bulb
1C -- Specific to the hardware - defines PWM frequency + way of controlling CCT temperature
31 -- Related to the hardware revision
"""<import_stmt>dataclasses<import_from_stmt>enum Enum<import_from_stmt>typing Optional List<import_from_stmt>pywizlight.exceptions WizLightNotKnownBulb<line_sep>@dataclasses.dataclass(frozen=<true>)<class_stmt>Features<block_start>"""Defines the supported features."""<line_sep>color:bool<line_sep>color_tmp:bool<line_sep>effect:bool<line_sep>brightness:bool<block_end># RGB supports effects and tuneable white
RGB_FEATURES=Features(brightness=<true> color=<true> effect=<true> color_tmp=<true>)<line_sep># TODO: TW supports effects but only "some"; improve the mapping to supported effects
TW_FEATURES=Features(brightness=<true> color=<false> effect=<true> color_tmp=<true>)<line_sep># Dimmable white only supports brightness
DW_FEATURES=Features(brightness=<true> color=<false> effect=<false> color_tmp=<false>)<line_sep>@dataclasses.dataclass(frozen=<true>)<class_stmt>KelvinRange<block_start>"""Defines the kelvin range."""<line_sep>max:int<line_sep>min:int<block_end><class_stmt>BulbClass(Enum)<block_start>"""Bulb Types."""<line_sep>"""Have Cool White and Warm White LEDs."""<line_sep>TW="Tunable White"<line_sep>"""Have only Dimmable white LEDs."""<line_sep>DW="Dimmable White"<line_sep>"""Have RGB LEDs."""<line_sep>RGB="RGB Bulb"<block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>BulbType<block_start>"""BulbType object to define functions and features of the bulb."""<line_sep>features:Features<line_sep>name:str<line_sep>kelvin_range:Optional[KelvinRange]<line_sep>bulb_type:BulbClass<line_sep>@staticmethod<def_stmt>from_data module_name:str kelvin_list:Optional[List[float]]<arrow>"BulbType"<block_start><if_stmt>kelvin_list<block_start>kelvin_range:Optional[KelvinRange]=KelvinRange(min=int(min(kelvin_list)) max=int(max(kelvin_list)))<block_end><else_stmt><block_start>kelvin_range=<none><block_end><try_stmt># parse the features from name
<block_start>_identifier=module_name.split("_")[1]<block_end># Throw exception if index can not be found
<except_stmt>IndexError<block_start><raise>WizLightNotKnownBulb("The bulb type can not be determined!")<block_end><if_stmt>"RGB"<in>_identifier# full RGB bulb
<block_start>features=RGB_FEATURES<line_sep>bulb_type=BulbClass.RGB<block_end><elif_stmt>"TW"<in>_identifier# Non RGB but tunable white bulb
<block_start>features=TW_FEATURES<line_sep>bulb_type=BulbClass.TW<block_end><else_stmt># Plain brightness-only bulb
<block_start>features=DW_FEATURES<line_sep>bulb_type=BulbClass.DW<block_end><return>BulbType(bulb_type=bulb_type name=module_name features=features kelvin_range=kelvin_range )<block_end><block_end> |
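A small sketch of how BulbType.from_data reads the module name and kelvin list described in the module docstring; the values are illustrative:

bulb = BulbType.from_data("ESP01_SHRGB1C_31", [2700, 6500])
assert bulb.bulb_type is BulbClass.RGB                      # "RGB" appears in the second name segment
assert bulb.features.color and bulb.features.effect
assert bulb.kelvin_range == KelvinRange(min=2700, max=6500)

dimmable = BulbType.from_data("ESP01_SHDW1C_31", None)      # no kelvin list -> kelvin_range is None
assert dimmable.bulb_type is BulbClass.DW and dimmable.features.brightness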
# -*- coding: utf-8 -*-
<import_stmt>imp<import_stmt>ipdb<import_stmt>logging<import_from_stmt>sys modules meta_path<import_from_stmt>os mkdir<import_from_stmt>os.path isdir abspath dirname exists join <import_stmt>encodings.idna<import_stmt>requests<import_from_stmt>git Repo<import_from_stmt>packyou find_module_path_in_cloned_repos<import_from_stmt>packyou.utils walklevel memoize<line_sep>MODULES_PATH=dirname(abspath(__file__))<line_sep>LOGGER=logging.getLogger(__name__)<class_stmt>GithubLoader(object)<block_start>"""
Import hook that will allow to import from a github repo.
"""<def_stmt>__init__ self repo_url=<none> path=<none> username=<none> repository_name=<none><block_start>self.path=path<line_sep>self.repo_url=repo_url<line_sep>self.username=username<line_sep>self.repository_name=repository_name<block_end><def_stmt>check_root self fullname<block_start>"""
#Sometimes the code is a python package or similar and there is a directory
#which contains all the code.
This method is used to first search the root of the cloned repository for the
imported module.
"""<line_sep>parent,_,module_name=fullname.rpartition('.')<if_stmt>self.username<and>self.repository_name# REVISAR QUE PASE TODOS LOS PATHS
<block_start>cloned_root=join(self.path[0] 'github' self.username self.repository_name)<line_sep>candidate_path=join(cloned_root module_name)<if_stmt>exists(candidate_path)<block_start><return>candidate_path<block_end><for_stmt>root,dirs,files walklevel(cloned_root level=1)<block_start><pass><block_end><block_end><block_end><def_stmt>get_source self fullname<block_start>filename=self.get_filename(fullname)<with_stmt>open(filename 'r')<as>source_file<block_start><return>source_file.read()<block_end><block_end><def_stmt>get_code self fullname<block_start>source=self.get_source(fullname)<line_sep><return>compile(source self.get_filename(fullname) 'exec' dont_inherit=<true>)<block_end><def_stmt>get_filename self fullname<block_start>parent,_,current_module=fullname.rpartition('.')<line_sep>filename=<none><line_sep>LOGGER.debug('Fullname {0} self.path {1}'.format(fullname self.path))<for_stmt>path self.path<block_start>package_path=join(path '__init__.py')<if_stmt>exists(package_path)<block_start>filename=package_path<block_end>module_path='{0}.py'.format(join(path current_module))<if_stmt>exists(module_path)<block_start>filename=module_path<block_end><block_end>LOGGER.debug('get_filename({0}) is {1}'.format(fullname filename))<line_sep><return>filename<block_end><def_stmt>is_package self fullname<block_start>filename=self.get_filename(fullname)<line_sep><return><not>exists(filename)<or>isdir(filename)<block_end><def_stmt>get_or_create_module self fullname<block_start>"""
Given a name and a path it will return a module instance
if found.
When the module could not be found it will raise ImportError
"""<line_sep>LOGGER.info('Loading module {0}'.format(fullname))<line_sep>parent,_,module_name=fullname.rpartition('.')<if_stmt>fullname<in>modules<block_start>LOGGER.info('Found cache entry for {0}'.format(fullname))<line_sep><return>modules[fullname]<block_end>module=modules.setdefault(fullname imp.new_module(fullname))<if_stmt>len(fullname.strip('.'))<g>3<block_start>absolute_from_root=fullname.split('.' 3)[-1]<line_sep>modules.setdefault(absolute_from_root module)<block_end><if_stmt>len(fullname.split('.'))<eq>4# add the root of the project
<block_start>modules[fullname.split('.')[-1]]=module<block_end># required by PEP 302
module.__file__=self.get_filename(fullname)<line_sep>LOGGER.info('Created module {0} with fullname {1}'.format(self.get_filename(fullname) fullname))<line_sep>module.__name__=fullname<line_sep>module.__loader__=self<line_sep>module.__path__=self.path<if_stmt>self.is_package(fullname)<block_start>module.__path__=self.path<line_sep>module.__package__=fullname<block_end><else_stmt><block_start>module.__package__=fullname.rpartition('.')[0]<block_end>LOGGER.debug('loading file {0}'.format(self.get_filename(fullname)))<line_sep>source=self.get_source(fullname)<try_stmt><block_start>exec(source module.__dict__)<block_end><except_stmt>Exception<as>ex<block_start>ipdb.set_trace()<block_end><return>module<block_end><def_stmt>clone_github_repo self<block_start>"""
Clones a github repo with a username and repository_name
"""<if_stmt><not>(self.username<and>self.repository_name)<block_start><return><block_end>repository_local_destination=join(MODULES_PATH 'github' self.username self.repository_name)<if_stmt><not>exists(repository_local_destination)<block_start>Repo.clone_from(self.repo_url repository_local_destination branch='master')<line_sep>init_filename=join(repository_local_destination '__init__.py')<line_sep>open(init_filename 'a').close()<block_end><block_end>@property<def_stmt>project_fullname self<block_start><return>'packyou.github.{0}.{1}'.format(self.username self.repository_name)<block_end><def_stmt>load_module self fullname<block_start>"""
Given a name it will load the module from github.
When the project is not locally stored it will clone the
repo from github.
"""<line_sep>module=<none><line_sep>splitted_names=fullname.split('.')<line_sep>_,_,module_name=fullname.rpartition('.')<line_sep>_,remaining=find_module_path_in_cloned_repos(fullname)<if_stmt>'github'<in>splitted_names<and><not>remaining<block_start>self.clone_github_repo()<if_stmt>len(splitted_names)<eq>2<block_start>module=self.get_or_create_module(fullname)<block_end><if_stmt>len(splitted_names)<eq>3<block_start>username_directory=join(MODULES_PATH 'github' self.username)<if_stmt><not>exists(username_directory)<block_start>mkdir(username_directory)<block_end>username_init_filename=join(MODULES_PATH 'github' self.username '__init__.py')<line_sep>open(username_init_filename 'a').close()<line_sep>module=self.get_or_create_module(fullname)<block_end><if_stmt>len(splitted_names)<ge>4<block_start>module=self.get_or_create_module(fullname)<block_end><block_end><elif_stmt>self.username<and>self.repository_name# relative import from project root.
<block_start>fullname='packyou.github.{0}.{1}.{2}'.format(self.username self.repository_name remaining)<line_sep>module=self.get_or_create_module(fullname)<block_end><if_stmt>module<block_start>modules[fullname]=module<if_stmt>remaining<is><not><none><block_start>modules[remaining]=module<block_end><block_end><return>module<block_end><block_end><class_stmt>GithubFinder(object)<block_start><def_stmt>__init__ self<block_start>self.username=<none><line_sep>self.repository_name=<none><block_end>@memoize<def_stmt>check_repository_available self username repository_name<block_start>"""
Sometimes github has a - in the username or repository name.
The - can't be used in the import statement.
"""<line_sep>repo_url='https://github.com/{0}/{1}.git'.format(username repository_name)<line_sep>response=requests.get(repo_url)<if_stmt>response.status_code<eq>404<block_start><if_stmt>'_'<in>username<block_start>repo_url='https://github.com/{0}/{1}.git'.format(username.replace('_' '-') repository_name)<line_sep>response=requests.get(repo_url)<if_stmt>response.status_code<eq>200<block_start><return>repo_url<block_end><block_end><if_stmt>'_'<in>repository_name<block_start>repo_url='https://github.com/{0}/{1}.git'.format(username repository_name.replace('_' '-'))<line_sep>response=requests.get(repo_url)<if_stmt>response.status_code<eq>200<block_start><return>repo_url<block_end><block_end>repo_url='https://github.com/{0}/{1}.git'.format(username.replace('_' '-') repository_name.replace('_' '-'))<line_sep>response=requests.get(repo_url)<if_stmt>response.status_code<eq>200<block_start><return>repo_url<block_end><raise>ImportError('Github repository not found.')<block_end><return>repo_url<block_end><def_stmt>find_module_in_cloned_repos self fullname<block_start><return>find_module_in_cloned_repos(fullname GithubLoader)<block_end><def_stmt>find_module self fullname path=<none><block_start>"""
Finds a module and returns a module loader when
the import uses packyou
"""<line_sep>LOGGER.info('Finding {0}'.format(fullname))<line_sep>partent,_,module_name=fullname.rpartition('.')<line_sep>path,_=find_module_path_in_cloned_repos(fullname)<line_sep>LOGGER.debug('FOUND PATH {0}'.format(path))<try_stmt># sometimes the project imported from github does an
# "import x" (absolute import), this translates to import github...x
# we try first to do an import x and cache the module in the sys.path.
# and return None if the imp.find_module was successful.
# This will allow python finders in the meta_path to do the import, and not packyou
# loaders.
<block_start><if_stmt><not>path<block_start>imp.find_module(module_name)<line_sep>LOGGER.info('Absolute import: {0}. Original fullname {1}'.format(module_name fullname))<line_sep><return><none><block_end><block_end><except_stmt>ImportError<block_start>LOGGER.debug('imp.find_module could not find {0}. this is ussually fine.'.format(module_name))<block_end><if_stmt>'packyou.github'<in>fullname<block_start>fullname_parts=fullname.split('.')<line_sep>repo_url=<none><if_stmt>len(fullname_parts)<ge>3<block_start>self.username=fullname.split('.')[2]<block_end><if_stmt>len(fullname_parts)<ge>4<block_start><if_stmt><not>self.repository_name<block_start>LOGGER.debug('FULLNAME -> {0} '.format(fullname))<line_sep>self.repository_name=fullname.split('.')[3]<block_end>repo_url=self.check_repository_available(self.username self.repository_name)<line_sep>current_path=dirname(abspath(__file__))<line_sep>repo_path=join(current_path 'github' self.username self.repository_name)<if_stmt>repo_path<not><in>path<block_start>path.insert(0 repo_path)<block_end><block_end>LOGGER.info('Found {0} with path {1}'.format(fullname path))<line_sep><return>GithubLoader(repo_url path self.username self.repository_name)<block_end><elif_stmt>self.username<and>self.repository_name<and>path<block_start>LOGGER.info('Fullname {0} does not start with packyou, searching in cloned repos. Result was {1}'.format(fullname path))<line_sep>repo_url=self.check_repository_available(self.username self.repository_name)<line_sep><return>GithubLoader(repo_url path self.username self.repository_name)<block_end>LOGGER.info('Not found -> {0}'.format(fullname))<block_end><block_end>meta_path.append(GithubFinder())<line_sep> |
<import_stmt>pandas<as>pd<import_stmt>warnings<import_from_stmt>...pysd read_vensim<import_from_stmt>io open<def_stmt>read_tabular table_file sheetname='Sheet1'<block_start>"""
Reads a vensim syntax model which has been formatted as a table.
This is useful in contexts where model building is performed
without the aid of Vensim.
Parameters
----------
table_file: .csv, .tab or .xls(x) file
Table should have columns titled as in the table below
| Variable | Equation | Units | Min | Max | Comment |
| :------- | :------- | :---- | :-- | :-- | :--------------- |
| Age | 5 | Yrs | 0 | inf | How old are you? |
| ... | ... | ... | ... | ... | ... |
sheetname: basestring
if the model is specified in an excel file, what sheet?
Returns
-------
PySD Model Object
Notes
-----
Creates an intermediate file in vensim `.mdl` syntax, just so that
the existing vensim parsing machinery can be used.
"""<if_stmt>isinstance(table_file str)<block_start>extension=table_file.split('.')[-1]<if_stmt>extension<in>['xls' 'xlsx']<block_start>table=pd.read_excel(table_file sheetname=sheetname)<block_end><elif_stmt>extension<eq>'csv'<block_start>table=pd.read_csv(table_file encoding='UTF-8')<block_end><elif_stmt>extension<eq>'tab'<block_start>table=pd.read_csv(table_file sep='\t' encoding='UTF-8')<block_end><else_stmt><block_start><raise>ValueError('Unknown file or table type')<block_end><block_end><else_stmt><block_start><raise>ValueError('Unknown file or table type')<block_end><if_stmt><not>set(table.columns).issuperset({'Variable' 'Equation'})<block_start><raise>ValueError('Table must contain at least columns "Variable" and "Equation"')<block_end><if_stmt>"Units"<not><in>set(table.columns)<block_start>warnings.warn('Column for "Units" not found' RuntimeWarning stacklevel=2)<line_sep>table['Units']=''<block_end><if_stmt>"Min"<not><in>set(table.columns)<block_start>warnings.warn('Column for "Min" not found' RuntimeWarning stacklevel=2)<line_sep>table['Min']=''<block_end><if_stmt>"Max"<not><in>set(table.columns)<block_start>warnings.warn('Column for "Max" not found' RuntimeWarning stacklevel=2)<line_sep>table['Max']=''<block_end>mdl_file=table_file.replace(extension 'mdl')<with_stmt>open(mdl_file 'w' encoding='UTF-8')<as>outfile<block_start><for_stmt>element table.to_dict(orient='records')<block_start>outfile.write("%(Variable)s = \n"<concat>"\t %(Equation)s \n"<concat>"\t~\t %(Units)s [%(Min)s, %(Max)s] \n"<concat>"\t~\t %(Comment)s \n\t|\n\n"%element)<block_end>outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.')<block_end><return>read_vensim(mdl_file)<block_end> |
#
# Copyright EAVISE
#
<import_from_stmt>.annotations annotation_formats<import_from_stmt>.detections detection_formats<line_sep>__all__=['formats' 'annotation_formats' 'detection_formats']<line_sep>formats={}<for_stmt>key annotation_formats<block_start>formats['anno_'+key]=annotation_formats[key]<block_end><for_stmt>key detection_formats<block_start>formats['det_'+key]=detection_formats[key]<block_end> |
f=open('sample-input.txt')<line_sep>o=open('sample-output.txt' 'w')<line_sep>t=int(f.readline().strip())<for_stmt>i xrange(1 t+1)<block_start>o.write("Case #{}: ".format(i))<line_sep>n=int(f.readline().strip())<line_sep>x=[int(j)<for>j f.readline().strip().split()]<line_sep>y=[int(j)<for>j f.readline().strip().split()]<line_sep>o.write("\n")<block_end> |
#Copyright (C) 2013 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
<import_stmt>os<import_from_stmt>ctypes *<line_sep># Taken from c/cukf.h
UKF_PRECISION_FLOAT=0<line_sep>UKF_PRECISION_DOUBLE=1<line_sep>state=<none><line_sep>state_error=<none><line_sep>innovation=<none><line_sep>covariance=<none><line_sep>parameters=<none><line_sep>parameters_error=<none><line_sep># Internal globals, set during init
_cukf=<none><line_sep>_REAL_T=<none><line_sep># Internal classes, wrapping cukf structs directly
<class_stmt>_SensorParams(Structure)<block_start><pass><block_end><class_stmt>_State(Structure)<block_start><def_stmt>__repr__ self<block_start>fields={"attitude":tuple(self.attitude) "angular_velocity":tuple(self.angular_velocity) "acceleration":tuple(self.acceleration)}<line_sep><return>str(fields)<block_end><block_end><class_stmt>_StateError(Structure)<block_start><def_stmt>__repr__ self<block_start>fields={"attitude":tuple(self.attitude) "angular_velocity":tuple(self.angular_velocity)}<line_sep><return>str(fields)<block_end><block_end><class_stmt>_Innovation(Structure)<block_start><def_stmt>__repr__ self<block_start>fields={"accel":tuple(self.accel) "gyro":tuple(self.gyro) "mag":tuple(self.mag)}<line_sep><return>str(fields)<block_end><block_end><class_stmt>_Parameters(Structure)<block_start><def_stmt>__repr__ self<block_start>fields={"accel_bias":tuple(self.accel_bias) "gyro_bias":tuple(self.gyro_bias) "mag_bias":tuple(self.mag_bias) "mag_scale":tuple(self.mag_scale) "mag_field_norm":self.mag_field_norm "mag_field_inclination":self.mag_field_inclination}<line_sep><return>str(fields)<block_end><block_end># Public interface
<def_stmt>iterate dt<block_start><global>_cukf state state_error innovation parameters parameters_error<if_stmt><not>_cukf<block_start><raise>RuntimeError("Please call ukf.init()")<block_end>_cukf.ukf_set_state(state)<line_sep>_cukf.ukf_iterate(dt)<line_sep>_cukf.ukf_sensor_clear()<line_sep>_cukf.ukf_get_state(state)<line_sep>_cukf.ukf_get_state_error(state_error)<line_sep>_cukf.ukf_get_innovation(innovation)<line_sep>_cukf.ukf_get_parameters(parameters)<line_sep>_cukf.ukf_get_parameters_error(parameters_error)<block_end><def_stmt>set_sensors accelerometer=<none> gyroscope=<none> magnetometer=<none><block_start><if_stmt>accelerometer<is><not><none><block_start>_cukf.ukf_sensor_set_accelerometer(*accelerometer)<block_end><if_stmt>gyroscope<is><not><none><block_start>_cukf.ukf_sensor_set_gyroscope(*gyroscope)<block_end><if_stmt>magnetometer<is><not><none><block_start>_cukf.ukf_sensor_set_magnetometer(*magnetometer)<block_end><block_end><def_stmt>configure_sensors accelerometer_covariance=<none> gyroscope_covariance=<none> magnetometer_covariance=<none><block_start>params=_SensorParams()<if_stmt>getattr(accelerometer_covariance '__iter__' <false>)<block_start>params.accel_covariance=accelerometer_covariance<block_end><elif_stmt>accelerometer_covariance<is><not><none><block_start>params.accel_covariance=(accelerometer_covariance )<times>3<block_end><else_stmt><block_start>params.accel_covariance=(1.0 1.0 1.0)<block_end><if_stmt>getattr(gyroscope_covariance '__iter__' <false>)<block_start>params.gyro_covariance=gyroscope_covariance<block_end><elif_stmt>gyroscope_covariance<is><not><none><block_start>params.gyro_covariance=(gyroscope_covariance )<times>3<block_end><else_stmt><block_start>params.gyro_covariance=(1.0 1.0 1.0)<block_end><if_stmt>getattr(magnetometer_covariance '__iter__' <false>)<block_start>params.mag_covariance=magnetometer_covariance<block_end><elif_stmt>magnetometer_covariance<is><not><none><block_start>params.mag_covariance=(magnetometer_covariance )<times>3<block_end><else_stmt><block_start>params.mag_covariance=(1.0 1.0 1.0)<block_end>_cukf.ukf_set_params(params)<block_end><def_stmt>configure_process_noise process_noise_covariance<block_start>_cukf.ukf_set_process_noise((_REAL_T<times>6)(*process_noise_covariance))<block_end><def_stmt>init <block_start><global>_cukf _REAL_T state state_error innovation parameters parameters_error<line_sep>lib=os.path.join(os.path.dirname(__file__) "libahrs.dylib")<line_sep>_cukf=cdll.LoadLibrary(lib)<line_sep>_cukf.ukf_init.argtypes=[]<line_sep>_cukf.ukf_init.restype=<none><line_sep>_cukf.ukf_config_get_precision.argtypes=[]<line_sep>_cukf.ukf_config_get_precision.restype=c_long<line_sep>_cukf.ukf_config_get_state_dim.argtypes=[]<line_sep>_cukf.ukf_config_get_state_dim.restype=c_long<line_sep>_cukf.ukf_config_get_measurement_dim.argtypes=[]<line_sep>_cukf.ukf_config_get_measurement_dim.restype=c_long<line_sep>_PRECISION=_cukf.ukf_config_get_precision()<line_sep>_REAL_T=c_double<if>_PRECISION<eq>UKF_PRECISION_DOUBLE<else>c_float<line_sep>_STATE_DIM=_cukf.ukf_config_get_state_dim()<line_sep>_MEASUREMENT_DIM=_cukf.ukf_config_get_measurement_dim()<line_sep>_SensorParams._fields_=[("accel_covariance" _REAL_T<times>3) ("gyro_covariance" _REAL_T<times>3) ("mag_covariance" _REAL_T<times>3)]<line_sep>_State._fields_=[("attitude" _REAL_T<times>4) ("angular_velocity" _REAL_T<times>3) ("acceleration" _REAL_T<times>3)]<line_sep>_StateError._fields_=[("attitude" _REAL_T<times>3) ("angular_velocity" _REAL_T<times>3)]<line_sep>_Innovation._fields_=[("accel" 
_REAL_T<times>3) ("gyro" _REAL_T<times>3) ("mag" _REAL_T<times>3)]<line_sep>_Parameters._fields_=[("accel_bias" _REAL_T<times>3) ("gyro_bias" _REAL_T<times>3) ("mag_bias" _REAL_T<times>3) ("mag_scale" _REAL_T<times>3) ("mag_field_norm" _REAL_T) ("mag_field_inclination" _REAL_T) ]<line_sep># Set up the function prototypes
_cukf.ukf_set_attitude.argtypes=[_REAL_T _REAL_T _REAL_T _REAL_T]<line_sep>_cukf.ukf_set_attitude.restype=<none><line_sep>_cukf.ukf_set_angular_velocity.argtypes=[_REAL_T _REAL_T _REAL_T]<line_sep>_cukf.ukf_set_angular_velocity.restype=<none><line_sep>_cukf.ukf_get_state.argtypes=[POINTER(_State)]<line_sep>_cukf.ukf_get_state.restype=<none><line_sep>_cukf.ukf_set_state.argtypes=[POINTER(_State)]<line_sep>_cukf.ukf_set_state.restype=<none><line_sep>_cukf.ukf_get_state_error.argtypes=[POINTER(_StateError)]<line_sep>_cukf.ukf_get_state_error.restype=<none><line_sep>_cukf.ukf_get_innovation.argtypes=[POINTER(_Innovation)]<line_sep>_cukf.ukf_get_innovation.restype=<none><line_sep>_cukf.ukf_get_state_covariance.argtypes=[POINTER(_REAL_T<times>(_STATE_DIM<power>2))]<line_sep>_cukf.ukf_get_state_covariance.restype=<none><line_sep>_cukf.ukf_sensor_clear.argtypes=[]<line_sep>_cukf.ukf_sensor_clear.restype=<none><line_sep>_cukf.ukf_sensor_set_accelerometer.argtypes=[_REAL_T _REAL_T _REAL_T]<line_sep>_cukf.ukf_sensor_set_accelerometer.restype=<none><line_sep>_cukf.ukf_sensor_set_gyroscope.argtypes=[_REAL_T _REAL_T _REAL_T]<line_sep>_cukf.ukf_sensor_set_gyroscope.restype=<none><line_sep>_cukf.ukf_sensor_set_magnetometer.argtypes=[_REAL_T _REAL_T _REAL_T]<line_sep>_cukf.ukf_sensor_set_magnetometer.restype=<none><line_sep>_cukf.ukf_set_params.argtypes=[POINTER(_SensorParams)]<line_sep>_cukf.ukf_set_params.restype=<none><line_sep>_cukf.ukf_iterate.argtypes=[c_float]<line_sep>_cukf.ukf_iterate.restype=<none><line_sep>_cukf.ukf_set_process_noise.argtypes=[POINTER(_REAL_T<times>_STATE_DIM)]<line_sep>_cukf.ukf_set_process_noise.restype=<none><line_sep>_cukf.ukf_get_parameters.argtypes=[POINTER(_Parameters)]<line_sep>_cukf.ukf_get_parameters.restype=<none><line_sep>_cukf.ukf_get_parameters_error.argtypes=[POINTER(_Parameters)]<line_sep>_cukf.ukf_get_parameters_error.restype=<none><line_sep># Initialize the library
_cukf.ukf_init()<line_sep># Set up the state
state=_State()<line_sep>_cukf.ukf_get_state(state)<line_sep># Set up the state errors
state_error=_StateError()<line_sep>_cukf.ukf_get_state_error(state_error)<line_sep># Set up the innovation
innovation=_Innovation()<line_sep># Set up the parameters
parameters=_Parameters()<line_sep>_cukf.ukf_get_parameters(parameters)<line_sep># Set up the parameter errors
parameters_error=_Parameters()<line_sep>_cukf.ukf_get_parameters_error(parameters_error)<block_end> |
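A minimal driving loop for the wrapper above, assuming libahrs.dylib sits next to the module; the covariances and sensor readings are made up:

init()                                              # load the shared library and allocate the state structs
configure_sensors(accelerometer_covariance=1e-2,
                  gyroscope_covariance=1e-3,
                  magnetometer_covariance=1e-1)
set_sensors(accelerometer=(0.0, 0.0, 9.81),
            gyroscope=(0.0, 0.0, 0.0),
            magnetometer=(0.2, 0.0, 0.4))
iterate(0.01)                                       # advance the filter by one 10 ms step
print(state, state_error, innovation)               # reprs are defined on the ctypes structs above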
<import_stmt>arrow<import_stmt>re<import_from_stmt>..slack.resource MsgResource<import_from_stmt>..utils.data_handler DataHandler<import_from_stmt>..utils.member Member<class_stmt>BusinessCard(object)<block_start><def_stmt>__init__ self slackbot=<none><block_start>self.fname="card.json"<line_sep>self.data_handler=DataHandler()<if_stmt>slackbot<is><none><block_start>self.slackbot=SlackerAdapter()<block_end><else_stmt><block_start>self.slackbot=slackbot<block_end><block_end><def_stmt>read_holder self<block_start>card_data=self.data_handler.read_file(self.fname)<line_sep>holder_names=", ".join(card_data.get("holder" []))<line_sep>holder_names=re.sub("([A-Z])+" r"\1-" holder_names)<line_sep>self.slackbot.send_message(text=MsgResource.CARD_HOLDER(names=holder_names))<block_end><def_stmt>read_history self<block_start>card_data=self.data_handler.read_file(self.fname)<line_sep>historys="\n - ".join(card_data.get("history" [])[-5:])<line_sep>self.slackbot.send_message(text=MsgResource.CARD_HISTORY(historys=historys))<block_end><def_stmt>forward self member<block_start><if_stmt>member<is><none><block_start>self.slackbot.send_message(text=MsgResource.CARD_FORWARD_NONE)<line_sep><return><block_end><elif_stmt>len(member)<g>2<block_start>self.slackbot.send_message(text=MsgResource.CARD_FORWARD_NONE)<line_sep><return><block_end><if_stmt>len(member)<eq>2<block_start>from_name=member[0]<line_sep>to_name=member[1]<block_end><else_stmt># len(member) == 1
<block_start>member_util=Member()<line_sep>from_name=member_util.get_name(self.slackbot.user)<line_sep>to_name=member[0]<block_end><if_stmt>from_name<ne>to_name<block_start>card_data=self.data_handler.read_file(self.fname)<line_sep>holder_data=card_data.get("holder" [])<if_stmt>from_name<not><in>holder_data<block_start>self.slackbot.send_message(text=MsgResource.NOT_CARD_HOLDER(from_name=from_name))<line_sep><return><block_end>holder_data.remove(from_name)<line_sep>holder_data.append(to_name)<line_sep>history_data=card_data.get("history" [])<line_sep>history_data.append(arrow.now().format("YYYY-MM-DD HH:mm")+f": {from_name} -> {to_name}")<line_sep>card_data["holder"]=holder_data<line_sep>card_data["history"]=history_data<line_sep>self.data_handler.write_file(self.fname card_data)<block_end>self.slackbot.send_message(text=MsgResource.CARD_FORWARD(from_name=from_name to_name=to_name))<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_stmt>responses<import_from_stmt>testing.util read_json_file<line_sep>YELP_SAN_FRANCISCO=responses.Response(method="GET" url="https://api.yelp.com/v3/businesses/yelp-san-francisco" json=read_json_file("business_lookup_yelp_san_francisco.json") status=200 )<line_sep>SACRE_COEUR_PARIS=responses.Response(method="GET" url="https://api.yelp.com/v3/businesses/basilique-du-sacré-cœur-de-montmartre-paris-3" # noqa: E501
json=read_json_file("business_lookup_sacre_coeur_paris.json") status=200 )<line_sep> |
'''
Created on Jan, 2017
@author: hugo
'''<import_from_future_stmt> absolute_import<import_stmt>multiprocessing<import_from_stmt>gensim.models Doc2Vec<class_stmt>MyDoc2Vec(object)<block_start><def_stmt>__init__ self dim hs=0 window=5 negative=5 epoches=5 dm=1 dm_concat=1<block_start>super(MyDoc2Vec self).__init__()<line_sep>self.dim=dim<line_sep>self.hs=hs<line_sep>self.window=window<line_sep>self.negative=negative<line_sep>self.epoches=epoches<line_sep>self.dm=dm<line_sep>self.dm_concat=dm_concat<block_end><def_stmt>train self corpus<block_start>self.model=Doc2Vec(min_count=1 window=self.window size=self.dim workers=multiprocessing.cpu_count() hs=self.hs negative=self.negative iter=1 dm=self.dm dm_concat=self.dm_concat)<line_sep>self.model.build_vocab(corpus())<for_stmt>each range(self.epoches)<block_start>self.model.train(corpus())<block_end><return>self<block_end><block_end><def_stmt>predict model corpus<block_start>doc_codes={}<for_stmt>doc_words,doc_name corpus()<block_start>doc_codes[doc_name[0]]=model.infer_vector(doc_words).tolist()<block_end><return>doc_codes<block_end><def_stmt>save_doc2vec model outfile<block_start>model.save(outfile)<block_end><def_stmt>load_doc2vec mod_file<block_start><return>Doc2Vec.load(mod_file)<block_end> |
<import_stmt>warnings<line_sep>warnings.filterwarnings('ignore')<import_from_stmt>autox.autox_server.model model_util<def_stmt>lgb_with_fe G_df_dict G_data_info G_hist is_train remain_time params lgb_para_dict data_name exp_name<block_start>remain_time=model_util.lgb_model(G_df_dict['BIG_FE'] G_data_info G_hist is_train remain_time exp_name params lgb_para_dict data_name)<line_sep><return>remain_time<block_end> |
"""
Forum member model admin definitions
====================================
This module defines admin classes used to populate the Django administration dashboard.
"""<import_from_stmt>django.contrib admin<import_from_stmt>machina.core.db.models get_model<import_from_stmt>machina.models.fields MarkupTextField MarkupTextFieldWidget<line_sep>ForumProfile=get_model('forum_member' 'ForumProfile')<class_stmt>ForumProfileAdmin(admin.ModelAdmin)<block_start>""" The Forum Profile model admin. """<line_sep>list_display=('id' 'user' 'posts_count' )<line_sep>list_filter=('posts_count' )<line_sep>list_display_links=('id' 'user' )<line_sep>raw_id_fields=('user' )<line_sep>search_fields=('user__username' )<line_sep>formfield_overrides={MarkupTextField:{'widget':MarkupTextFieldWidget} }<block_end>admin.site.register(ForumProfile ForumProfileAdmin)<line_sep> |
# Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch Tensor<import_from_stmt>compressai.ops.parametrizers NonNegativeParametrizer<line_sep>__all__=["GDN" "GDN1"]<class_stmt>GDN(nn.Module)<block_start>r"""Generalized Divisive Normalization layer.
Introduced in `"Density Modeling of Images Using a Generalized Normalization
Transformation" <https://arxiv.org/abs/1511.06281>`_,
by <NAME>, <NAME>, and <NAME>, (2016).
.. math::
y[i] = \frac{x[i]}{\sqrt{\beta[i] + \sum_j(\gamma[j, i] * x[j]^2)}}
"""<def_stmt>__init__ self in_channels:int inverse:bool=<false> beta_min:float=1e-6 gamma_init:float=0.1 <block_start>super().__init__()<line_sep>beta_min=float(beta_min)<line_sep>gamma_init=float(gamma_init)<line_sep>self.inverse=bool(inverse)<line_sep>self.beta_reparam=NonNegativeParametrizer(minimum=beta_min)<line_sep>beta=torch.ones(in_channels)<line_sep>beta=self.beta_reparam.init(beta)<line_sep>self.beta=nn.Parameter(beta)<line_sep>self.gamma_reparam=NonNegativeParametrizer()<line_sep>gamma=gamma_init<times>torch.eye(in_channels)<line_sep>gamma=self.gamma_reparam.init(gamma)<line_sep>self.gamma=nn.Parameter(gamma)<block_end><def_stmt>forward self x:Tensor<arrow>Tensor<block_start>_,C,_,_=x.size()<line_sep>beta=self.beta_reparam(self.beta)<line_sep>gamma=self.gamma_reparam(self.gamma)<line_sep>gamma=gamma.reshape(C C 1 1)<line_sep>norm=F.conv2d(x<power>2 gamma beta)<if_stmt>self.inverse<block_start>norm=torch.sqrt(norm)<block_end><else_stmt><block_start>norm=torch.rsqrt(norm)<block_end>out=x<times>norm<line_sep><return>out<block_end><block_end><class_stmt>GDN1(GDN)<block_start>r"""Simplified GDN layer.
Introduced in `"Computationally Efficient Neural Image Compression"
<http://arxiv.org/abs/1912.08771>`_, by <NAME>, <NAME>, <NAME>, and <NAME>, (2019).
.. math::
y[i] = \frac{x[i]}{\beta[i] + \sum_j(\gamma[j, i] * |x[j]|}
"""<def_stmt>forward self x:Tensor<arrow>Tensor<block_start>_,C,_,_=x.size()<line_sep>beta=self.beta_reparam(self.beta)<line_sep>gamma=self.gamma_reparam(self.gamma)<line_sep>gamma=gamma.reshape(C C 1 1)<line_sep>norm=F.conv2d(torch.abs(x) gamma beta)<if_stmt><not>self.inverse<block_start>norm=1.0/norm<block_end>out=x<times>norm<line_sep><return>out<block_end><block_end> |
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>numpy<as>np<import_from_stmt>tests.common.tensorio compare_tensor<import_from_stmt>akg.utils kernel_exec<as>utils<import_from_stmt>akg.ops.array.ascend Five2Four<import_from_stmt>akg tvm<import_from_stmt>tests.common.base get_rtol_atol<import_from_stmt>tests.common.gen_random random_gaussian<import_stmt>math<def_stmt>compute_blockdim shape<block_start>size=1<if_stmt>isinstance(shape (list tuple))<block_start><for_stmt>i shape<block_start>size=size<times>i<block_end><block_end><elif_stmt>isinstance(shape int)<block_start>size=shape<block_end><else_stmt><block_start>size=2<block_end><return>min(32 math.ceil(size/16384))<block_end><def_stmt>five2four_execute shape4d out_dtype format dtype attrs# Generate data
<block_start>op_attrs=[shape4d out_dtype format]<if_stmt>attrs<is><none><block_start>attrs={}<block_end><if_stmt>'tuning'<in>attrs.keys()<block_start>t=attrs.get("tuning" <false>)<line_sep>kernel_name=attrs.get("kernel_name" <false>)<line_sep>input,bench_mark=gen_data(shape4d dtype out_dtype format)<line_sep>shape_5d=input.shape<line_sep>mod=five2four_compile(shape_5d dtype op_attrs attrs kernel_name=kernel_name tuning=t)<if_stmt>t<block_start>output=np.full(shape4d np.nan out_dtype)<line_sep><return>mod bench_mark (input output)<block_end><else_stmt><block_start><return>mod<block_end><block_end><else_stmt><block_start>input,bench_mark=gen_data(shape4d dtype out_dtype format)<line_sep># mod launch
shape_5d=input.shape<line_sep>mod=five2four_compile(shape_5d dtype op_attrs attrs)<line_sep>output=np.full(shape4d np.nan out_dtype)<line_sep>args=[input output]<line_sep># if attrs.get("dynamic"):
# for i in range(len(shape4d) - 1, -1, -1):
# args.append(shape4d[i])
<if_stmt>attrs.get("dynamic")<block_start>args.append(shape_5d[0])<line_sep>args.append(shape_5d[1])<line_sep>args.append(shape_5d[4])<line_sep>block_dim=compute_blockdim(shape4d)<line_sep>args.append(block_dim)<block_end>output=utils.mod_launch(mod args outputs=(1 ) expect=bench_mark)<line_sep># compare result
rtol,atol=get_rtol_atol("five2four" dtype)<line_sep>compare_result=compare_tensor(output bench_mark rtol=rtol atol=atol equal_nan=<true>)<line_sep><return>input output bench_mark compare_result<block_end><block_end><def_stmt>five2four_compile shape_5d dtype op_attrs attrs kernel_name='five2four' tuning=<false><block_start><if_stmt>attrs.get("dynamic")<block_start>var_shape=[]<line_sep>shape4d,dst_type,_=op_attrs<line_sep>channel_idx=1<for_stmt>i range(len(shape_5d))<block_start><if_stmt>shape_5d[i]<eq>1<block_start>var_shape.append(shape_5d[i])<block_end><else_stmt><block_start>var_shape.append(tvm.var("I"+str(i)))<block_end><block_end>build_shape=var_shape<block_end><else_stmt><block_start>build_shape=shape_5d<block_end><return>utils.op_build_test(Five2Four [build_shape] [dtype] op_attrs kernel_name=kernel_name attrs=attrs tuning=tuning)<block_end><def_stmt>gen_data shape dtype out_dtype format<block_start>bench_mark=random_gaussian(shape miu=1 sigma=0.3).astype(dtype)<if_stmt>format<eq>'NCHW'<block_start>n,c,h,w=shape<if_stmt>c%16<ne>0<block_start>pad_input_shape=[n c h w]<line_sep>pad_c=(c+15)<floordiv>16<times>16<line_sep>pad_input_shape[1]=pad_c<line_sep>pad_input=np.zeros(pad_input_shape).astype(dtype)<line_sep>pad_input[: :c : :]=bench_mark<line_sep>new_shape=[n pad_c<floordiv>16 16 h w]<line_sep>input=pad_input.reshape(new_shape).transpose(0 1 3 4 2)<block_end><else_stmt><block_start>new_shape=[n c<floordiv>16 16 h w]<line_sep>input=bench_mark.reshape(new_shape).transpose(0 1 3 4 2)<block_end><block_end><elif_stmt>format<eq>'NHWC'<block_start>n,h,w,c=shape<if_stmt>c%16<ne>0<block_start>pad_input_shape=[n h w c]<line_sep>pad_c=(c+15)<floordiv>16<times>16<line_sep>pad_input_shape[3]=pad_c<line_sep>pad_input=np.zeros(pad_input_shape).astype(dtype)<line_sep>pad_input[: : : :c]=bench_mark<line_sep>new_shape=[n h w pad_c<floordiv>16 16]<line_sep>input=pad_input.reshape(new_shape).transpose(0 3 1 2 4)<block_end><else_stmt><block_start>new_shape=[n h w c<floordiv>16 16]<line_sep>input=bench_mark.reshape(new_shape).transpose(0 3 1 2 4)<block_end><block_end>bench_mark=bench_mark.astype(out_dtype)<line_sep><return>input bench_mark<block_end> |
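For context, `gen_data` above packs a 4D NCHW (or NHWC) array into the 5D NC1HWC0 layout consumed by the Five2Four kernel, padding the channel axis up to a multiple of 16 when needed. The sketch below walks the NCHW branch for a channel count that is already a multiple of 16, so the padding path is skipped; the concrete shapes are made up for the example.

```python
# NCHW -> NC1HWC0 packing as in gen_data above (no-padding case, toy shapes).
import numpy as np

n, c, h, w = 1, 32, 4, 4
x4d = np.random.randn(n, c, h, w).astype(np.float16)

c1, c0 = c // 16, 16
x5d = x4d.reshape(n, c1, c0, h, w).transpose(0, 1, 3, 4, 2)  # -> (n, c1, h, w, c0)
print(x5d.shape)  # (1, 2, 4, 4, 16)

# Undoing the packing recovers the original 4D array, which is the bench_mark
# that the kernel output is compared against.
x4d_back = x5d.transpose(0, 1, 4, 2, 3).reshape(n, c, h, w)
assert np.array_equal(x4d, x4d_back)
```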
<import_stmt>numpy<as>np<import_from_stmt>pandas DataFrame IndexSlice <class_stmt>Render<block_start>params=[[12 24 36] [12 120]]<line_sep>param_names=["cols" "rows"]<def_stmt>setup self cols rows<block_start>self.df=DataFrame(np.random.randn(rows cols) columns=[f"float_{i+1}"<for>i range(cols)] index=[f"row_{i+1}"<for>i range(rows)] )<block_end><def_stmt>time_apply_render self cols rows<block_start>self._style_apply()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>peakmem_apply_render self cols rows<block_start>self._style_apply()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>time_classes_render self cols rows<block_start>self._style_classes()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>peakmem_classes_render self cols rows<block_start>self._style_classes()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>time_tooltips_render self cols rows<block_start>self._style_tooltips()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>peakmem_tooltips_render self cols rows<block_start>self._style_tooltips()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>time_format_render self cols rows<block_start>self._style_format()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>peakmem_format_render self cols rows<block_start>self._style_format()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>time_apply_format_hide_render self cols rows<block_start>self._style_apply_format_hide()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>peakmem_apply_format_hide_render self cols rows<block_start>self._style_apply_format_hide()<line_sep>self.st._render_html(<true> <true>)<block_end><def_stmt>_style_apply self<block_start><def_stmt>_apply_func s<block_start><return>["background-color: lightcyan"<if>s.name<eq>"row_1"<else>""<for>v s]<block_end>self.st=self.df.style.apply(_apply_func axis=1)<block_end><def_stmt>_style_classes self<block_start>classes=self.df.applymap(<lambda>v:("cls-1"<if>v<g>0<else>""))<line_sep>classes.index,classes.columns=self.df.index self.df.columns<line_sep>self.st=self.df.style.set_td_classes(classes)<block_end><def_stmt>_style_format self<block_start>ic=int(len(self.df.columns)/4<times>3)<line_sep>ir=int(len(self.df.index)/4<times>3)<line_sep># apply a formatting function
# subset is flexible but hinders vectorised solutions
self.st=self.df.style.format("{:,.3f}" subset=IndexSlice["row_1":f"row_{ir}" "float_1":f"float_{ic}"])<block_end><def_stmt>_style_apply_format_hide self<block_start>self.st=self.df.style.applymap(<lambda>v:"color: red;")<line_sep>self.st.format("{:.3f}")<line_sep>self.st.hide_index(self.st.index[1:])<line_sep>self.st.hide_columns(self.st.columns[1:])<block_end><def_stmt>_style_tooltips self<block_start>ttips=DataFrame("abc" index=self.df.index[::2] columns=self.df.columns[::2])<line_sep>self.st=self.df.style.set_tooltips(ttips)<line_sep>self.st.hide_index(self.st.index[12:])<line_sep>self.st.hide_columns(self.st.columns[12:])<block_end><block_end> |
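The Render benchmarks above call the private pandas hook `Styler._render_html(True, True)` so asv can time rendering directly. To reproduce one case by hand, the public `Styler.to_html()` (available in pandas 1.3+) renders comparable output; the snippet below is a sketch of the `apply` case with one parameter combination chosen arbitrarily.

```python
# Stand-alone version of the "apply" benchmark case via the public Styler API.
import numpy as np
from pandas import DataFrame

cols, rows = 12, 120
df = DataFrame(
    np.random.randn(rows, cols),
    columns=[f"float_{i + 1}" for i in range(cols)],
    index=[f"row_{i + 1}" for i in range(rows)],
)

styler = df.style.apply(
    lambda s: ["background-color: lightcyan" if s.name == "row_1" else "" for _ in s],
    axis=1,
)
html = styler.to_html()  # render the styled table to an HTML string
print(len(html))
```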
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines create_rule and create_dep macros"""<def_stmt>create_rule impl attrs={} deps=[] fragments=[] remove_attrs=[] **kwargs<block_start>"""Creates a rule composed from dependencies.
Args:
impl: The implementation function of the rule, taking as parameters the
rule ctx followed by the executable function of each dependency
attrs: Dict of attributes required by the rule. These will override any
conflicting attributes specified by dependencies
deps: Dict of name->dependency mappings, with each dependency struct
created using 'create_dep'. The keys of this dict are the parameter
names received by 'impl'
fragments: List of configuration fragments required by the rule
remove_attrs: List of attributes to remove from the implementation.
**kwargs: extra args to be passed for rule creation
Returns:
The composed rule
"""<line_sep>merged_attrs=dict()<line_sep>fragments=list(fragments)<line_sep>merged_mandatory_attrs=[]<for_stmt>dep deps<block_start>merged_attrs.update(dep.attrs)<line_sep>fragments.extend(dep.fragments)<line_sep>merged_mandatory_attrs.extend(dep.mandatory_attrs)<block_end>merged_attrs.update(attrs)<for_stmt>attr remove_attrs<block_start><if_stmt>attr<in>merged_mandatory_attrs<block_start>fail("Cannot remove mandatory attribute %s"%attr)<block_end>merged_attrs.pop(attr)<block_end><return>rule(implementation=impl attrs=merged_attrs fragments=fragments **kwargs)<block_end><def_stmt>create_dep call attrs={} fragments=[] mandatory_attrs=<none><block_start>"""Combines a dependency's executable function, attributes, and fragments.
Args:
call: the executable function
attrs: dict of required rule attrs
fragments: list of required configuration fragments
mandatory_attrs: list of attributes that can't be removed later
(when not set, all attributes are mandatory)
Returns:
The struct
"""<line_sep><return>_create_dep(call attrs fragments mandatory_attrs<if>mandatory_attrs<else>attrs.keys())<block_end><def_stmt>_create_dep call attrs={} fragments=[] mandatory_attrs=[]<block_start><return>struct(call=call attrs=attrs fragments=fragments mandatory_attrs=mandatory_attrs )<block_end><def_stmt>create_composite_dep merge_func *deps<block_start>"""Creates a dependency struct from multiple dependencies
Args:
merge_func: The executable function to evaluate the dependencies.
*deps: The dependencies to compose provided as keyword args
Returns:
A dependency struct
"""<line_sep>merged_attrs=dict()<line_sep>merged_frags=[]<line_sep>merged_mandatory_attrs=[]<for_stmt>dep deps<block_start>merged_attrs.update(dep.attrs)<line_sep>merged_frags.extend(dep.fragments)<line_sep>merged_mandatory_attrs.extend(dep.mandatory_attrs)<block_end><return>_create_dep(call=merge_func attrs=merged_attrs fragments=merged_frags mandatory_attrs=merged_mandatory_attrs )<block_end> |
# TODO: type solution here
|