import sys, os
import torch
import visdom
import argparse
import numpy as np
import logging
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models

from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm

import collections

from ptsemseg.models import get_model
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import runningScore
from ptsemseg.loss import *
from ptsemseg.augmentations import *


def adjust_learning_rate(optimizer, epoch, lr, decay, step):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def train(args, logger):
    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    t_loader = data_loader(data_path, is_transform=True, img_size=(args.img_cols, args.img_rows))
    n_classes = t_loader.n_classes
    nw = args.batch_size if args.batch_size > 1 else 0
    trainloader = data.DataLoader(t_loader, batch_size=args.batch_size, num_workers=nw, shuffle=True)

    # Setup Model
    model = get_model(args.arch, n_classes)
    if args.pretrained is not None:
        checkpoint = torch.load(args.pretrained)
        model.load_state_dict_without_classification(checkpoint['model_state'])
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    model.cuda()

    mom = 0.99
    wd = 5e-4
    # Check if model has custom optimizer / loss
    if hasattr(model.module, 'optimizer'):
        optimizer = model.module.optimizer
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.l_rate, momentum=mom, weight_decay=wd)  # 0.99 5e-4
    print('Params: l_rate %f, l_rate_decay: %.2f, l_rate_step: %d, batch_size: %d, mom: %.2f, wd: %f' %
          (args.l_rate, args.l_rate_decay, args.l_rate_step, args.batch_size, mom, wd))

    if hasattr(model.module, 'loss'):
        print('Using custom loss')
        logger.info('Using custom loss')
        loss_fn = model.module.loss
    else:
        loss_fn = cross_entropy2d

    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("Loading model and optimizer from checkpoint '{}'".format(args.resume))
            logger.info("Loading model and optimizer from checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['model_state'])
            optimizer.load_state_dict(checkpoint['optimizer_state'])
            print("Loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            logger.info("Loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("No checkpoint found at '{}'".format(args.resume))
            logger.info("No checkpoint found at '{}'".format(args.resume))

    best_iou = -100.0
    for epoch in range(args.n_epoch):
        adjust_learning_rate(optimizer, epoch, args.l_rate, args.l_rate_decay, args.l_rate_step)
        model.train()
        # if args.pretrained is not None: model.module.freeze_bn()
        avg_loss = 0.
        for i, (images, lidars, labels) in enumerate(trainloader):
            images = Variable(images.cuda())
            if type(labels) == list:
                var_labels = []
                for ii in range(len(labels)):
                    var_labels.append(Variable(labels[ii].cuda()))
            else:
                var_labels = Variable(labels.cuda())
            lidars = Variable(lidars.cuda())

            optimizer.zero_grad()
            loss = model([images, lidars, labels])
            loss.backward()  # backpropagate before the optimizer step
            optimizer.step()

            if args.visdom:
                vis.line(X=torch.ones((1, 1)).cpu() * i,
                         Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
                         win=loss_window,
                         update='append')

            avg_loss += loss.detach().cpu().numpy().mean()  # .data.item()
            # avg_loss += loss.data.item()
            if (i + 1) % 10 == 0:
                avg_loss = avg_loss / 10.
                print("Epoch [%d/%d] [%d/%d] Loss: %.4f" % (epoch + 1, args.n_epoch, i + 1, len(trainloader), avg_loss))
                logger.info("Epoch [%d/%d] [%d/%d] Loss: %.4f" % (epoch + 1, args.n_epoch, i + 1, len(trainloader), avg_loss))
                avg_loss = 0.

        if epoch > 0:
            if (args.n_epoch <= 10 and epoch % 2 == 1) or epoch % 20 == 0:
                logger.info('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch))
                print('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch))
                state = {'epoch': epoch + 1,
                         'model_state': model.module.state_dict(),
                         'optimizer_state': optimizer.state_dict(), }
                torch.save(state, "./output-model/{}_{}_{}.pkl".format(args.arch, args.dataset, epoch))

    logger.info('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset, args.n_epoch))
    print('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch))
    state = {'epoch': epoch + 1,
             'model_state': model.module.state_dict(),
             'optimizer_state': optimizer.state_dict(), }
    torch.save(state, "./output-model/{}_{}_{}.pkl".format(args.arch, args.dataset, args.n_epoch))


def setup_logging(name, filename=None):
    FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
    # Manually clear root loggers to prevent any module that may have called
    # logging.basicConfig() from blocking our logging setup
    logging.root.handlers = []
    if filename is None:
        logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
    else:
        logging.basicConfig(level=logging.INFO, format=FORMAT, filename=filename)
    logger = logging.getLogger(name)
    return logger


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--arch', nargs='?', type=str, default='pspnet',
                        help='Architecture to use [\'plard, fcn8s, unet, segnet etc\']')
    parser.add_argument('--dataset', nargs='?', type=str, default='mapillary',
                        help='Dataset to use [\'kitti_road, pascal, camvid, ade20k etc\']')
    parser.add_argument('--img_rows', nargs='?', type=int, default=384,
                        help='Height of the input image')
    parser.add_argument('--img_cols', nargs='?', type=int, default=1280,
                        help='Width of the input image')
    parser.add_argument('--n_epoch', nargs='?', type=int, default=5,
                        help='# of the epochs')
    parser.add_argument('--batch_size', nargs='?', type=int, default=4,
                        help='Batch Size')
    parser.add_argument('--l_rate', nargs='?', type=float, default=5e-5,
                        help='Learning Rate')
    parser.add_argument('--l_rate_decay', nargs='?', type=float, default=0.1,
                        help='Learning Rate Decay')
    parser.add_argument('--l_rate_step', nargs='?', type=int, default=1,
                        help='Learning Rate Step')
    parser.add_argument('--feature_scale', nargs='?', type=int, default=1,
                        help='Divider for # of features to use')
    parser.add_argument('--resume', nargs='?', type=str, default=None,
                        help='Path to previous saved model to restart from')
    parser.add_argument('--pretrained', nargs='?', type=str, default=None,
                        help='pretrain')
    parser.add_argument('--visdom', dest='visdom', action='store_true',
                        help='Enable visualization(s) on visdom | False by default')
    parser.add_argument('--no-visdom', dest='visdom', action='store_false',
                        help='Disable visualization(s) on visdom | False by default')
    parser.set_defaults(visdom=False)

    args = parser.parse_args()
    logger = setup_logging(__name__, filename='./' + args.arch + '.out')
    train(args, logger)
import math
from functools import partial

import torch
from torch import nn
from torch.nn import functional as F


class SwishImplementation(torch.autograd.Function):
    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        i = ctx.saved_variables[0]
        sigmoid_i = torch.sigmoid(i)
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))


class MemoryEfficientSwish(nn.Module):
    def forward(self, x):
        return SwishImplementation.apply(x)


def drop_connect(inputs, p, training):
    """ Drop connect. """
    if not training:
        return inputs
    batch_size = inputs.shape[0]
    keep_prob = 1 - p
    random_tensor = keep_prob
    random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_tensor = torch.floor(random_tensor)
    output = inputs / keep_prob * binary_tensor
    return output


def get_same_padding_conv2d(image_size=None):
    return partial(Conv2dStaticSamePadding, image_size=image_size)


def get_width_and_height_from_size(x):
    """ Obtains width and height from an int or tuple """
    if isinstance(x, int):
        return x, x
    if isinstance(x, list) or isinstance(x, tuple):
        return x
    else:
        raise TypeError()


def calculate_output_image_size(input_image_size, stride):
    """ Calculates the output image size of a Conv2dSamePadding with a given stride. """
    if input_image_size is None:
        return None
    image_height, image_width = get_width_and_height_from_size(input_image_size)
    stride = stride if isinstance(stride, int) else stride[0]
    image_height = int(math.ceil(image_height / stride))
    image_width = int(math.ceil(image_width / stride))
    return [image_height, image_width]


class Conv2dStaticSamePadding(nn.Conv2d):
    """ 2D Convolutions like TensorFlow, for a fixed image size """

    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2

        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2,
                                                pad_h // 2, pad_h - pad_h // 2))
        else:
            self.static_padding = Identity()

    def forward(self, x):
        x = self.static_padding(x)
        x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
        return x


class Identity(nn.Module):
    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, input):
        return input


# MBConvBlock
class MBConvBlock(nn.Module):
    '''
    MBConv block. Example: ksize 3*3, 32 input channels, 16 output channels, conv1 with stride 1.
    '''

    def __init__(self, ksize, input_filters, output_filters, expand_ratio=1, stride=1, image_size=224):
        super().__init__()
        self._bn_mom = 0.1
        self._bn_eps = 0.01
        self._se_ratio = 0.25
        self._input_filters = input_filters
        self._output_filters = output_filters
        self._expand_ratio = expand_ratio
        self._kernel_size = ksize
        self._stride = stride

        inp = self._input_filters
        oup = self._input_filters * self._expand_ratio
        if self._expand_ratio != 1:
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Depthwise convolution
        k = self._kernel_size
        s = self._stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(in_channels=oup, out_channels=oup, groups=oup,
                                      kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        image_size = calculate_output_image_size(image_size, s)

        # Squeeze and Excitation layer, if desired
        Conv2d = get_same_padding_conv2d(image_size=(1, 1))
        num_squeezed_channels = max(1, int(self._input_filters * self._se_ratio))
        self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
        self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Output phase
        final_oup = self._output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._expand_ratio != 1:
            expand = self._expand_conv(inputs)
            bn0 = self._bn0(expand)
            x = self._swish(bn0)
        depthwise = self._depthwise_conv(x)
        bn1 = self._bn1(depthwise)
        x = self._swish(bn1)

        # Squeeze and Excitation
        x_squeezed = F.adaptive_avg_pool2d(x, 1)
        x_squeezed = self._se_reduce(x_squeezed)
        x_squeezed = self._swish(x_squeezed)
        x_squeezed = self._se_expand(x_squeezed)
        x = torch.sigmoid(x_squeezed) * x

        x = self._bn2(self._project_conv(x))

        # Skip connection and drop connect
        input_filters, output_filters = self._input_filters, self._output_filters
        if self._stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x


if __name__ == '__main__':
    input = torch.randn(1, 3, 112, 112)
    mbconv = MBConvBlock(ksize=3, input_filters=3, output_filters=3, image_size=112)
    out = mbconv(input)
    print(out.shape)
# Code adapted from https://github.com/liucong3/camelyon17
# and https://github.com/cv-lee/Camelyon17

import openslide
import cv2
import numpy as np
import pandas as pd
import os
import csv
import argparse
from tqdm import tqdm
from xml.etree.ElementTree import parse
from PIL import Image

PATCH_LEVEL = 2
MASK_LEVEL = 4
CENTER_SIZE = 32


def _read_xml(xml_path, mask_level):
    """
    Read an XML file with annotations and return coordinates of tumor and normal areas
    """
    xml = parse(xml_path).getroot()
    tumor_coord_list = []
    normal_coord_list = []
    for annotation in xml.iter('Annotation'):
        annotation_type = annotation.get('PartOfGroup')
        assert annotation_type in ['metastases', 'normal', 'None']
        if annotation_type == 'metastases':
            coord_list = tumor_coord_list
        elif annotation_type == 'normal':
            coord_list = normal_coord_list
        elif annotation_type == 'None':
            continue

        for region_idx, region in enumerate(annotation.iter('Coordinates')):
            assert region_idx == 0
            coords = []
            for coord in region:
                coords.append([round(float(coord.get('X')) / (2 ** mask_level)),
                               round(float(coord.get('Y')) / (2 ** mask_level))])
            coord_list.append(coords)

    return tumor_coord_list, normal_coord_list


def _make_masks(slide_path, xml_path, mask_level, make_map, **args):
    '''
    Return a slide with annotated tumor, normal, and tissue masks using an Otsu threshold
    '''
    print('_make_masks(%s)' % slide_path)

    # slide loading
    slide = openslide.OpenSlide(slide_path)

    # xml loading
    tumor_coord_list, normal_coord_list = _read_xml(xml_path, mask_level)

    if make_map:
        slide_map = np.array(slide.get_thumbnail(slide.level_dimensions[mask_level]))
        # draw boundary of tumor in map
        for coords in tumor_coord_list:
            cv2.drawContours(slide_map, np.array([coords]), -1, 255, 1)
        for coords in normal_coord_list:
            cv2.drawContours(slide_map, np.array([coords]), -1, 127, 1)
    else:
        slide_map = None

    # draw tumor mask
    # first fill up tumors, then draw normal boundaries and fill those up with 0
    tumor_mask = np.zeros(slide.level_dimensions[mask_level][::-1])
    for coords in tumor_coord_list:
        cv2.drawContours(tumor_mask, np.array([coords]), -1, 255, -1)
    for coords in normal_coord_list:
        cv2.drawContours(tumor_mask, np.array([coords]), -1, 0, -1)

    # draw tissue mask
    slide_lv = slide.read_region((0, 0), mask_level, slide.level_dimensions[mask_level])
    slide_lv = cv2.cvtColor(np.array(slide_lv), cv2.COLOR_RGBA2RGB)
    slide_lv = cv2.cvtColor(slide_lv, cv2.COLOR_BGR2HSV)
    slide_lv = slide_lv[:, :, 1]
    _, tissue_mask = cv2.threshold(slide_lv, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # check normal mask / draw normal mask
    normal_mask = np.array(tissue_mask).copy()
    normal_mask[tumor_mask > 127] = 0

    return slide, slide_map, tumor_mask, tissue_mask, normal_mask


def _write_masks(mask_folder_path, slide_map, tumor_mask, tissue_mask, normal_mask, **args):
    """
    Write masks out to disk; used for sanity checking and visualization.
    """
    print('_write_masks')
    os.makedirs(mask_folder_path, exist_ok=True)
    map_path = os.path.join(mask_folder_path, 'map.png')
    cv2.imwrite(map_path, slide_map)
    tumor_mask_path = os.path.join(mask_folder_path, 'tumor_mask.png')
    cv2.imwrite(tumor_mask_path, tumor_mask)  # CHANGED
    tissue_mask_path = os.path.join(mask_folder_path, 'tissue_mask.png')
    cv2.imwrite(tissue_mask_path, np.array(tissue_mask))
    normal_mask_path = os.path.join(mask_folder_path, 'normal_mask.png')
    cv2.imwrite(normal_mask_path, normal_mask)


def _record_patches(center_size, slide, slide_map, patch_level, mask_level,
                    tumor_mask, tissue_mask, normal_mask,
                    tumor_threshold, normal_threshold, **args):
    """
    Extract all tumor and non-tumor patches from a slide, using the given masks.
    """
    # Patch size is 3*center_size by 3*center_size
    # It is in terms of pixels of the final output
    # So it's measured with respect to patch_level
    patch_size = center_size * 3

    # Extract normal, tumor patches using normal, tumor mask
    width, height = np.array(slide.level_dimensions[patch_level]) // center_size
    total = width * height
    all_cnt = 0
    t_cnt = 0
    n_cnt = 0
    print('_record_patches(w=%d,h=%d)' % (width, height))

    margin = 5  # 3
    mask_max = 255
    assert mask_level >= patch_level
    width_mask_step = center_size * slide.level_dimensions[mask_level][0] / slide.level_dimensions[patch_level][0]
    height_mask_step = center_size * slide.level_dimensions[mask_level][1] / slide.level_dimensions[patch_level][1]

    patch_list = []

    # These mark the coordinates of the central region of the patch
    for i in range(margin, width - margin):
        for j in range(margin, height - margin):
            mask_i_start = round(width_mask_step * i)
            mask_i_end = round(width_mask_step * (i + 1))
            mask_j_start = round(height_mask_step * j)
            mask_j_end = round(height_mask_step * (j + 1))

            # Compute masks only over central region
            tumor_mask_avg = tumor_mask[mask_j_start:mask_j_end, mask_i_start:mask_i_end].mean()
            normal_mask_avg = normal_mask[mask_j_start:mask_j_end, mask_i_start:mask_i_end].mean()
            tumor_area_ratio = tumor_mask_avg / mask_max
            normal_area_ratio = normal_mask_avg / mask_max

            # Extract patch coordinates
            # Coords correspond just to the center, not the entire patch
            if (tumor_area_ratio > tumor_threshold):
                patch_list.append((center_size * i, center_size * j, 1))
                cv2.rectangle(slide_map, (mask_i_start, mask_j_start), (mask_i_end, mask_j_end), (0, 0, 255), 1)
            elif (normal_area_ratio > normal_threshold):
                patch_list.append((center_size * i, center_size * j, 0))
                cv2.rectangle(slide_map, (mask_i_start, mask_j_start), (mask_i_end, mask_j_end), (255, 255, 0), 1)

    df = pd.DataFrame(patch_list, columns=['x_coord', 'y_coord', 'tumor'])
    return df


def generate_file(patient, node, xml_path, slide_path, folder_path):
    args = {
        'slide_path': slide_path,
        'xml_path': xml_path,
        'patch_level': PATCH_LEVEL,
        'mask_level': MASK_LEVEL,
        'center_size': CENTER_SIZE,
        'tumor_threshold': 0,
        'normal_threshold': 0.2,
        'mask_folder_path': folder_path,
        'make_map': True
    }
    args['slide'], args['slide_map'], args['tumor_mask'], args['tissue_mask'], args['normal_mask'] = _make_masks(**args)
    df = _record_patches(**args)
    df['patient'] = patient
    df['node'] = node
    _write_masks(**args)
    return df


def generate_files(slide_root, output_root):
    aggregate_df = pd.DataFrame(columns=['patient', 'node', 'x_coord', 'y_coord', 'tumor'])
    for root, dirs, files in os.walk(os.path.join(slide_root, 'lesion_annotations')):
        for file in files:
            if file.endswith('.xml') and not file.startswith('._'):
                prefix = file.split('.xml')[0]
                try:
                    assert len(prefix.split('_')) == 4
                    df = generate_file(
                        patient=prefix.split('_')[1],
                        node=prefix.split('_')[3],
                        xml_path=os.path.join(root, file),
                        slide_path=os.path.join(slide_root, 'tif', f'{prefix}.tif'),
                        folder_path=os.path.join(output_root, 'masks', prefix))
                    aggregate_df = pd.concat([aggregate_df, df])
                except openslide.OpenSlideError as err:
                    print(err)
                    continue

    aggregate_df = aggregate_df.reset_index(drop=True)
    aggregate_df.to_csv(os.path.join(output_root, 'all_patch_coords.csv'))
    return aggregate_df


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--slide_root', required=True)
    parser.add_argument('--output_root', required=True)
    args = parser.parse_args()
    generate_files(slide_root=args.slide_root, output_root=args.output_root)
# Copyright 2021 Rikai Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Iterator

import numpy as np
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType


def generate_udf(spec: "rikai.spark.sql.codegen.base.ModelSpec"):
    """Construct a UDF to run sklearn model.

    Parameters
    ----------
    spec : ModelSpec
        the model specifications object

    Returns
    -------
    A Spark Pandas UDF.
    """

    def sklearn_inference_udf(
        iter: Iterator[pd.Series],
    ) -> Iterator[pd.Series]:
        model = spec.load_model()
        for series in list(iter):
            X = np.vstack(series.to_numpy())
            y = model.predict(X)
            yield pd.Series(y)

    return pandas_udf(sklearn_inference_udf, returnType=spec.schema)
# flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION

from okta.okta_object import OktaObject


class VerifyFactorRequest(OktaObject):
    """
    A class for VerifyFactorRequest objects.
    """

    def __init__(self, config=None):
        super().__init__(config)
        if config:
            self.activation_token = config["activationToken"] if "activationToken" in config else None
            self.answer = config["answer"] if "answer" in config else None
            self.attestation = config["attestation"] if "attestation" in config else None
            self.client_data = config["clientData"] if "clientData" in config else None
            self.next_pass_code = config["nextPassCode"] if "nextPassCode" in config else None
            self.pass_code = config["passCode"] if "passCode" in config else None
            self.registration_data = config["registrationData"] if "registrationData" in config else None
            self.state_token = config["stateToken"] if "stateToken" in config else None
        else:
            self.activation_token = None
            self.answer = None
            self.attestation = None
            self.client_data = None
            self.next_pass_code = None
            self.pass_code = None
            self.registration_data = None
            self.state_token = None

    def request_format(self):
        parent_req_format = super().request_format()
        current_obj_format = {
            "activationToken": self.activation_token,
            "answer": self.answer,
            "attestation": self.attestation,
            "clientData": self.client_data,
            "nextPassCode": self.next_pass_code,
            "passCode": self.pass_code,
            "registrationData": self.registration_data,
            "stateToken": self.state_token
        }
        parent_req_format.update(current_obj_format)
        return parent_req_format
# -*- coding: utf-8 -*- """ PyLayers GUI .. autommodule:: :members: To run this code. type python PylayersGui.py """<import_from_stmt>pylayers.simul.link *<import_stmt>pylayers.util.pyutil<as>pyu<import_stmt>pylayers.signal.standard<as>std<import_from_stmt>pylayers.util.project *<import_stmt>json<line_sep># TEST <import_stmt>matplotlib<line_sep>matplotlib.use('Qt4Agg')<import_from_stmt>matplotlib.backends.backend_qt4agg FigureCanvasQTAgg<as>FigureCanvas<import_from_stmt>matplotlib.backends.backend_qt4agg NavigationToolbar2QT<import_from_stmt>matplotlib.figure Figure<import_from_stmt>pyface.qt QtGui QtCore<import_from_stmt>traitsui.qt4.editor Editor<import_from_stmt>traitsui.qt4.basic_editor_factory BasicEditorFactory<line_sep># console ipython <import_from_stmt>IPython embed_kernel<import_from_stmt>traits.api HasTraits Button Range Enum Instance on_trait_change property_depends_on Float Str Int Bool List<import_from_stmt>traitsui.api View Item HSplit VSplit RangeEditor EnumEditor Group spring HGroup VGroup Handler InstanceEditor<import_from_stmt>traitsui.menu Action ActionGroup Menu MenuBar ToolBar<import_from_stmt>mayavi.core.api PipelineBase<import_from_stmt>mayavi.core.ui.api MayaviScene SceneEditor MlabSceneModel<import_from_stmt>tvtk.pyface.api Scene<try_stmt><block_start>get_ipython<block_end><except_stmt>NameError<block_start>banner=exit_msg=''<block_end><else_stmt><block_start>banner='*** Nested interpreter ***'<line_sep>exit_msg='*** Back in main IPython ***'<block_end># First import the embed function <import_from_stmt>IPython.frontend.terminal.embed InteractiveShellEmbed<line_sep>## INIT DLink object DL=DLink()<line_sep>filename=pyu.getlong('wstd.json' pstruc['DIRSIMUL'])<line_sep>fp=open(filename)<line_sep>stds=json.load(fp)<line_sep>av_wstds=['None']+list(stds.keys())<line_sep>dchann={w:[str(i)<for>i std.Wstandard(w).chan.keys()]<for>w av_wstds<if>w<ne>'None'}<line_sep>dchann.update({'None':['None']})<import_from_stmt>qtconsole.rich_ipython_widget RichJupyterWidget<import_from_stmt>qtconsole.inprocess QtInProcessKernelManager<import_from_stmt>IPython.lib guisupport<class_stmt>QIPythonWidget(RichJupyterWidget)<block_start>""" Convenience class for a live IPython console widget. 
We can replace the standard banner using the customBanner argument"""<def_stmt>__init__ self customBanner=<none> *args **kwargs<block_start><if_stmt><not>customBanner<is><none><block_start>self.banner=customBanner<block_end>super(QIPythonWidget self).__init__(*args **kwargs)<line_sep>self.kernel_manager=kernel_manager=QtInProcessKernelManager()<line_sep>kernel_manager.start_kernel()<line_sep>kernel_manager.kernel.gui='qt4'<line_sep>self.kernel_client=kernel_client=self._kernel_manager.client()<line_sep>kernel_client.start_channels()<def_stmt>stop <block_start>kernel_client.stop_channels()<line_sep>kernel_manager.shutdown_kernel()<line_sep>guisupport.get_app_qt4().exit()<block_end>self.exit_requested.connect(stop)<block_end><def_stmt>pushVariables self variableDict<block_start>""" Given a dictionary containing name / value pairs, push those variables to the IPython console widget """<line_sep>self.kernel_manager.kernel.shell.push(variableDict)<block_end><def_stmt>clearTerminal self<block_start>""" Clears the terminal """<line_sep>self._control.clear()<block_end><def_stmt>printText self text<block_start>""" Prints some plain text to the console """<line_sep>self._append_plain_text(text)<block_end><def_stmt>executeCommand self command<block_start>""" Execute a command in the frame of the console widget """<line_sep>self._execute(command <false>)<block_end><block_end><class_stmt>JupyterWidget(QtGui.QWidget)<block_start>""" Main GUI Widget including a button and IPython Console widget inside vertical layout """<def_stmt>__init__ self parent=<none><block_start>super(JupyterWidget self).__init__(parent)<line_sep>layout=QtGui.QVBoxLayout(self)<line_sep>ipyConsole=QIPythonWidget()<line_sep>layout.addWidget(ipyConsole)<line_sep># ipyConsole.pushVariables({'DL':DL}) allvar=globals()<line_sep>allvar.update(locals())<line_sep>ipyConsole.pushVariables(allvar)<block_end><block_end><class_stmt>_MPLFigureEditor(Editor)<block_start>scrollable=<true><def_stmt>init self parent<block_start>self.control=self._create_canvas(parent)<line_sep>self.set_tooltip()<block_end><def_stmt>update_editor self<block_start><pass><block_end><def_stmt>_create_canvas self parent<block_start>""" Create the MPL canvas. """<line_sep># matplotlib commands to create a canvas frame=QtGui.QWidget()<line_sep>mpl_canvas=FigureCanvas(self.value)<line_sep>mpl_toolbar=NavigationToolbar2QT(parent=frame canvas=mpl_canvas)<line_sep>vbox=QtGui.QVBoxLayout()<line_sep>vbox.addWidget(mpl_canvas)<line_sep>vbox.addWidget(mpl_toolbar)<line_sep>frame.setLayout(vbox)<line_sep>mpl_canvas.setFocusPolicy(QtCore.Qt.ClickFocus)<line_sep>mpl_canvas.setFocus()<line_sep><return>frame<block_end><block_end>#mpl_canvas <class_stmt>MPLFigureEditor(BasicEditorFactory)<block_start>klass=_MPLFigureEditor<block_end><class_stmt>WstdHandler(Handler)<block_start>channels=List(Str)<def_stmt>object_Wstd_Enum_changed self info<block_start>""" This method listens for a change in the *state* attribute of the object (Address) being viewed. When this listener method is called, *info.object* is a reference to the viewed object (Address). 
"""<line_sep># Change the list of available cities self.channels=dchann[info.object.Wstd_Enum]<line_sep># As default value, use the first city in the list: info.object.chann=self.channels[0]<line_sep># info.object.DL.fGHz = <block_end><block_end><class_stmt>PylayersGUI(HasTraits)# slider/dropdown widgets etc # Layout <block_start>laynames=['']+np.sort(os.listdir(basename+'/struc/lay/')).tolist()#['','DLR.lay','defstr.lay','TC2_METIS.lay']#, Lay_Enum=Enum(laynames)<line_sep>## Antenna file : av_ant=['Omni' 'Gauss' 'aperture']<line_sep>antext=['vsh3' 'sh3']<for_stmt>fname os.listdir(basename+'/ant')<block_start><if_stmt>fname.split('.')[-1]<in>antext<block_start>av_ant.append(fname)<block_end><block_end># Init Positions xmin=DL.L.ax[0]<line_sep>xmax=DL.L.ax[1]<line_sep>ymin=DL.L.ax[2]<line_sep>ymax=DL.L.ax[3]<line_sep>zmin=0.<line_sep>zmax=DL.L.maxheight-0.1<line_sep># Antenna ## position a aX=Range(low='xmin' high='xmax' value=float(xmin+xmax/2.))<line_sep>aY=Range(low='ymin' high='ymax' value=float(ymin+ymax/2.))<line_sep>aZ=Range(low='zmin' high='zmax' value=float(zmin+zmax/2.))<line_sep>## rotation a agamma=Range(float(-3.14) float(3.14) 0. )#mode='spinner') abeta=Range(float(-3.14) float(3.14) 0. )#mode='spinner') aalpha=Range(float(-3.14) float(3.14) 0. )#mode='spinner') ## file a: a_ant=Enum(av_ant)<line_sep># Antenna B ## position b bX=Range(low='xmin' high='xmax' value=float(xmin+xmax/2.))<line_sep>bY=Range(low='ymin' high='ymax' value=float(ymin+ymax/2.))<line_sep>bZ=Range(low='zmin' high='zmax' value=float(zmin+zmax/2.))<line_sep>## rotation b bgamma=Range(float(-3.14) float(3.14) 0. )#mode='spinner') bbeta=Range(float(-3.14) float(3.14) 0. )#mode='spinner') balpha=Range(float(-3.14) float(3.14) 0. )#mode='spinner') ## file b: b_ant=Enum(av_ant)<line_sep># frequency fmmin=0.<line_sep>fmmax=300.<line_sep>fmin=Range(low='fmmin' high='fmmax' value=float(DL.Aa.fGHz[0]))<line_sep>fmax=Range(low='fmmin' high='fmmax' value=float(DL.Aa.fGHz[-1]))<line_sep>fstep=Range(low=0 high=10 value=0)<line_sep># advanced # init interface scene=Instance(MlabSceneModel ())<line_sep>plot=Instance(PipelineBase)<line_sep># @on_trait_change('scene.activated') # def init_plot(self): # DL._show3() # When the scene is activated, or when the parameters are changed, we # update the plot. # def _open_changed(self): # """ Handles the user clicking the 'Open...' button. 
# """ # path = pyu.getlong('',pstruc['DIRSTR']) # file_name = open_file(file_name= path ,extensions = FileInfo()) # if file_name != '': # self.file_name = file_name @on_trait_change('Lay_Enum')<def_stmt>update_L self<block_start><if_stmt>self.Lay_Enum<ne>' '<block_start>mlab.clf()<line_sep>DL.L=Layout(self.Lay_Enum bgraphs=<true>)<line_sep>self.xmin=DL.L.ax[0]<line_sep>self.xmax=DL.L.ax[1]<line_sep>self.ymin=DL.L.ax[2]<line_sep>self.ymax=DL.L.ax[3]<line_sep>self.zmin=0.<line_sep>self.zmax=DL.L.maxheight-0.1<line_sep>self.aX,self.aY,self.aZ=DL.a<line_sep>self.bX,self.bY,self.bZ=DL.b<line_sep>DL.a=np.array([self.aX self.aY self.aZ])<line_sep>DL.b=np.array([self.bX self.bY self.bZ])<line_sep>self.cutoff=DL.cutoff<if_stmt><not>hasattr(DL '_maya_fig')<block_start>DL._show3()<block_end><block_end><block_end>@on_trait_change('cutoff,threshold')<def_stmt>update_cutoff_threshold self<block_start>""" update position ant a """<line_sep>DL.cutoff=self.cutoff<line_sep>DL.threshold=self.threshold/100.<block_end>@on_trait_change('aX,aY,aZ')<def_stmt>update_a self<block_start>""" update position ant a """<line_sep>self.clear_fig()<line_sep>DL.a=np.array([self.aX self.aY self.aZ])<line_sep>self.cutoff=DL.cutoff<block_end>@on_trait_change('bX,bY,bZ')<def_stmt>update_b self<block_start>""" update position ant b """<line_sep>self.clear_fig()<line_sep>DL.b=np.array([self.bX self.bY self.bZ])<line_sep>self.cutoff=DL.cutoff<block_end>@on_trait_change('aalpha,abeta,agamma')<def_stmt>update_Ta self<block_start>""" update rot ant a """<line_sep>T=geu.MEulerAngle(self.aalpha beta=self.abeta gamma=self.agamma)<line_sep>DL.Ta=T<line_sep>self.clear_fig()<line_sep># if DL.dexist['Ct']['exist']: # DL.C.locbas(Tt=DL.Ta, Tr=DL.Tb) # #T channel # DL.H = DL.C.prop2tran(a=DL.Aa,b=DL.Ab,Friis=True) # self.plt_all() <block_end>@on_trait_change('balpha,bbeta,bgamma')<def_stmt>update_Tb self<block_start>""" update rot ant b """<line_sep>T=geu.MEulerAngle(self.balpha beta=self.bbeta gamma=self.bgamma)<line_sep>DL.Tb=T<line_sep>self.clear_fig()<block_end>@on_trait_change('a_ant,fmin,fmax,fstep')<def_stmt>update_Aa self<block_start>DL.Aa=Antenna(self.a_ant)<line_sep>self.clear_fig()<line_sep># if DL.Aa.fromfile: # self.fmin=DL.Aa.fGHz[0] # self.fmax=DL.Aa.fGHz[-1] # self.fstep=min(1,DL.Aa.fGHz[1]-DL.Aa.fGHz[0]) <block_end>@on_trait_change('b_ant,fmin,fmax,fstep')<def_stmt>update_Ab self<block_start>DL.Ab=Antenna(self.b_ant)<line_sep>self.clear_fig()<line_sep># if DL.Ab.fromfile: # self.fmin=DL.Ab.fGHz[0] # self.fmax=DL.Ab.fGHz[-1] # self.fstep=min(1,DL.Ab.fGHz[1]-DL.Ab.fGHz[0]) <block_end>@on_trait_change('fmin,fmax,fstep,chann')<def_stmt>update_fGHz self<block_start><if_stmt>self.Wstd_Enum<ne>'None'<block_start>W=std.Wstandard(self.Wstd_Enum)<line_sep># DL.fGHz = W.chan[eval(self.chann)].fghz Wchan=W.chan[eval(self.chann)]<line_sep>fcGHz=Wchan['fcGHz']<line_sep>BWGHz=Wchan['BMHz']<line_sep>GMHz=Wchan['GMHz']<line_sep>fGHz=Wchan.fghz<line_sep>DL.fGHz=np.array([fcGHz])<line_sep>self.BWGHz=BWGHz<line_sep>self.fmin=float(fGHz[0])<line_sep>self.fmax=float(fGHz[-1])<line_sep>self.fstep=float(fGHz[1]-fGHz[0])<block_end><else_stmt><block_start><if_stmt>self.fmin<l>self.fmax<block_start>DL.fGHz=np.arange(self.fmin self.fmax self.fstep)<block_end><elif_stmt>self.fmin<eq>self.fmax<block_start>DL.fGHz=np.array([self.fmin])<block_end>self.BWGHz=5<block_end><block_end>@on_trait_change('Beval')<def_stmt>DLeval self<block_start>DL.eval(verbose=<false> force=self.force cutoff=self.cutoff threshold=self.threshold/100. 
diffraction=self.diffraction nD=self.nD nT=self.nT nR=self.nR applywav=self.applywav)<line_sep>DL._update_show3(delrays=<true>)<line_sep>ER=np.squeeze(DL.H.energy())<line_sep>DL.R._show3(ER=ER)<line_sep>self.plt_all()<block_end><def_stmt>plt_all self<block_start>self.plt_cir()<line_sep>self.plt_doa()<line_sep>self.plt_dod()<line_sep>self.plt_dspread()<line_sep>self.plt_aspread()<block_end><def_stmt>plt_cir self<block_start>self.figcir.clf()<line_sep>ax=self.figcir.add_subplot(111)<line_sep>DL.plt_cir(fig=self.figcir ax=ax BWGHz=self.BWGHz Nf=5000)<line_sep># ir = DL.H.getcir(BWGHz=5,Nf=1000) # ir.plot(fig=self.figcir,ax=ax) # ax.plot(DL.H.taud,20*np.log10(DL.H.y[:,0,0,0]),'or') self.figcir.canvas.draw()<line_sep># DL.plt_doadod(d='doa') # DL.H.plot(fig=self.figcir,ax=ax) # self.figcir.canvas.draw() <block_end><def_stmt>plt_doa self<block_start>self.figdoa.clf()<line_sep>ax=self.figdoa.add_subplot(111 polar=<true>)<line_sep># DL.L.showG('s',ax=ax,fig=self.figure) # DL.H.plotd(d='doa',polar=True,fig=self.figdoa,ax=ax) DL.plt_doa(polar=<true> fig=self.figdoa ax=ax)<line_sep>self.figdoa.canvas.draw()<block_end><def_stmt>plt_dod self<block_start>self.figdod.clf()<line_sep>ax=self.figdod.add_subplot(111 polar=<true>)<line_sep>DL.plt_dod(polar=<true> fig=self.figdod ax=ax)<line_sep># DL.L.showG('s',ax=ax,fig=self.figure) # DL.H.plotd(d='dod',polar=True,fig=self.figdod,ax=ax) self.figdod.canvas.draw()<block_end><def_stmt>plt_dspread self<block_start>self.figds.clf()<line_sep>ax=self.figds.add_subplot(111)<line_sep>DL.plt_dspread(fig=self.figds ax=ax)<line_sep>self.figds.canvas.draw()<block_end><def_stmt>plt_aspread self<block_start>self.figas.clf()<line_sep>ax=self.figas.add_subplot(111)<line_sep>DL.plt_aspread(fig=self.figas ax=ax)<line_sep>self.figas.canvas.draw()<block_end><def_stmt>clear_fig self lf=['cir' 'doa' 'dod' 'as' 'ds']<block_start><for_stmt>f lf<block_start>eval('self.fig'+f+'.clf()')<line_sep>eval('self.fig'+f+'.canvas.draw()')<block_end><block_end>##### ##### RENDERING 3D MAYAVI ##### render3d=Item('scene' editor=SceneEditor(scene_class=Scene) height=500 width=1500 show_label=<false>)<line_sep># ### # ### Matplotlib figure # ### # figure = Instance(Figure(figsize=(8,20)), ()) ##### ##### Layout SELECTION ##### # Layout GLay=Group(Item('Lay_Enum' style='simple' label='file') show_labels=<false> label='Layout')<line_sep>##### ##### WIRELESS STANDARD ##### # wireless standard Wstd_Enum=Enum('None' av_wstds)<line_sep>chann=Str<line_sep># chann = Enum(av_chann) GWstd_None=Group(Item('fmin' label='fGHz min' style='text') Item('fmax' label='fGHz max' style='text') Item('fstep' label='fGHz step' style='text') label='Frequency' show_border=<true> enabled_when='Wstd_Enum == \'None\'')<line_sep>GWstd_std=Group(Item(name='chann' editor=EnumEditor(name='handler.channels')) label='channel' show_border=<true> enabled_when='Wstd_Enum != \'None\'')<line_sep>GWstd=Group(Group(Item(name='Wstd_Enum' label='Wireless Standard')) GWstd_None GWstd_std label='Wireless Standard' show_labels=<true> show_border=<false>)<line_sep>##### ##### ANTENNA ##### xmin=Float<line_sep>xmax=Float<line_sep>ymin=Float<line_sep>ymax=Float<line_sep>zmin=Float<line_sep>zmax=Float<line_sep># Ant A file Iax=Item('aX' editor=RangeEditor(low_name='xmin' high_name='xmax' format='%.1f' label_width=28 mode='auto') label='x')<line_sep>Iay=Item('aY' editor=RangeEditor(low_name='ymin' high_name='ymax' format='%.1f' label_width=28 mode='auto') label='y')<line_sep>Iaz=Item('aZ' editor=RangeEditor(low_name='zmin' high_name='zmax' 
format='%.1f' label_width=28 mode='auto') label='z')<line_sep>GPos_a=VGroup(Iax Iay Iaz id='a' label='Position' show_border=<true> show_labels=<true> layout='split')<line_sep>Ifile_a=Item('a_ant' label='file')<line_sep>GRot_a=VGroup(Item('agamma' label='x-roll') Item('abeta' label='y-roll') Item('aalpha' label='z-roll') id='Ta' label='Rotation' show_border=<true> layout='split')<line_sep>G_a=Group(Ifile_a GPos_a GRot_a label='Antenna a' show_border=<false>)<line_sep>#### ANtenna B # Ant B positions Ibx=Item('bX' editor=RangeEditor(low_name='xmin' high_name='xmax' format='%.1f' label_width=28 mode='auto') label='x')<line_sep>Iby=Item('bY' editor=RangeEditor(low_name='ymin' high_name='ymax' format='%.1f' label_width=28 mode='auto') label='y')<line_sep>Ibz=Item('bZ' editor=RangeEditor(low_name='zmin' high_name='zmax' format='%.1f' label_width=28 mode='auto') label='z')<line_sep>GPos_b=Group(Ibx Iby Ibz id='b' label='Position' show_border=<true> layout='split')<line_sep># Ant B file Ifile_b=Item('b_ant' label='file')<line_sep>GRot_b=Group(Item('bgamma' label='x-roll') Item('bbeta' label='y-roll') Item('balpha' label='z-roll') id='Tb' label='Rotation' show_border=<true> layout='split')<line_sep>G_b=Group(Ifile_b GPos_b GRot_b label='Antenna b' show_border=<false> )<line_sep>#### #### advanced CONFIRGURATION #### force=Bool<line_sep>diffraction=Bool<line_sep>applywav=Bool<line_sep>applywav=Bool<line_sep>low_cutoff=1<line_sep>high_cutoff=30<line_sep>cutoff=Range(low='low_cutoff' high='high_cutoff' value=DL.cutoff)<line_sep>threshold=Range(0 100 80)<line_sep>nD=2<line_sep>nR=10<line_sep>nT=10<line_sep>G_advanced=Group(VGroup(Item('force' label='force' resizable=<false> style='simple') Item('cutoff' label='cutoff' editor=RangeEditor(low_name='low_cutoff' high_name='high_cutoff' label_width=28 mode='auto') width=0.2 style='simple') Item('threshold' label='threshold' width=0.2 style='simple') Item('diffraction' label='diffractions' style='simple') Item('nD' label='max nb Diffractions' enabled_when='diffraction' style='simple') Item('nR' label='max nb Reflections' style='simple') Item('nT' label='max nb Transmissions' style='simple') Item('applywav' label='applywav' style='simple') label='Ray Tracing Configuration' show_labels=<true> show_border=<false>))<line_sep>#### ### MANAGING GROUPS ### # LEFT GROUP WINDOW Beval=Button('Launch Ray-Tracing')<line_sep>GLeft=Group(GLay GWstd G_advanced)<line_sep># <NAME> GAnt_ab=HGroup(spring G_a spring G_b spring)<line_sep>GAnt_Eval=Group(GAnt_ab HGroup(spring Item('Beval' enabled_when='Lay_Enum != \'\'') show_labels=<false>))<line_sep>#### TOP GROUP GR_0=HSplit(GLeft render3d layout='split')<line_sep># BOTTOM GROUP figcir=Instance(Figure(figsize=(8 20)) ())<line_sep>figdoa=Instance(Figure(figsize=(8 20)) ())<line_sep>figdod=Instance(Figure(figsize=(8 20)) ())<line_sep>figas=Instance(Figure(figsize=(8 20)) ())<line_sep>figds=Instance(Figure(figsize=(8 20)) ())<line_sep>GExploit=Group(Group(Item('figcir' editor=MPLFigureEditor() ) label='CIR') Group(Item('figdoa' editor=MPLFigureEditor()) label='DOA') Group(Item('figdod' editor=MPLFigureEditor()) label='DOD') Group(Item('figas' editor=MPLFigureEditor()) label='Ang. Spread') Group(Item('figds' editor=MPLFigureEditor()) label='Delay Spread') layout='tabbed' )<line_sep>GR_1=HGroup(spring GAnt_Eval spring GExploit)<line_sep>JWidget=JupyterWidget()<line_sep>JWidget.show()<line_sep>view=View(VGroup(GR_0 GR_1) # menubar=MenuBar(Menu_file), buttons=['Quit'] title="Pylayers GUI - beta" resizable=<true> width=1. height=1. 
handler=WstdHandler)<block_end><if_stmt>__name__<eq>'__main__'<block_start>gui=PylayersGUI()<line_sep>gui.configure_traits()<block_end>
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Fréchet Audio Distance util functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from scipy import linalg
import tensorflow.compat.v1 as tf


def read_mean_and_covariances(filename):
    """Helper function that reads tf_record containing dataset stats.

    Args:
      filename: Path of the tf_record.

    Returns:
      The values of mu and sigma.
    """
    tf_record = tf.python_io.tf_record_iterator(filename).next()
    example = tf.train.Example().FromString(tf_record)
    mu = np.array(example.features.feature['mu'].float_list.value)
    emb_len = np.array(example.features.feature['embedding_length'].int64_list.value)[0]
    sigma = (np.array(example.features.feature['sigma'].float_list.value)).reshape((emb_len, emb_len))
    return mu, sigma


def normalize_loudness(np_samples, max_db_increase=20):
    """Normalizes the loudness to be between -1.0 and 1.0.

    Args:
      np_samples: 1d numpy array of audio samples with shape (num_samples).
      max_db_increase: Maximum loudness increase. This stops very quiet audio
        from being distorted and avoids problems on silence where
        np.amax(np_samples) == 0.

    Returns:
      1d numpy array of audio samples with shape (num_samples) where each sample
      is between -1.0 and 1.0.
    """
    min_amplitude_ratio = 10 ** (max_db_increase / -20)
    return np_samples / np.maximum(min_amplitude_ratio, np.amax(np_samples))


def _stable_trace_sqrt_product(sigma_test, sigma_train, eps=1e-7):
    """Avoids some problems when computing the sqrt of the product of sigmas.

    Based on <NAME>'s contribution here:
    https://github.com/bioinf-jku/TTUR/blob/master/fid.py

    Args:
      sigma_test: Test covariance matrix.
      sigma_train: Train covariance matrix.
      eps: Small number; used to avoid singular product.

    Returns:
      The trace of the square root of the product of the passed covariance
      matrices.

    Raises:
      ValueError: If the sqrt of the product of the sigmas contains complex
        numbers with large imaginary parts.
    """
    # product might be almost singular
    sqrt_product, _ = linalg.sqrtm(sigma_test.dot(sigma_train), disp=False)
    if not np.isfinite(sqrt_product).all():
        # add eps to the diagonal to avoid a singular product.
        offset = np.eye(sigma_test.shape[0]) * eps
        sqrt_product = linalg.sqrtm((sigma_test + offset).dot(sigma_train + offset))

    # Might have a slight imaginary component.
    if not np.allclose(np.diagonal(sqrt_product).imag, 0, atol=1e-3):
        raise ValueError('sqrt_product contains large complex numbers.')
    sqrt_product = sqrt_product.real

    return np.trace(sqrt_product)


def frechet_distance(mu_test, sigma_test, mu_train, sigma_train):
    """Fréchet distance calculation.

    From: <NAME> & <NAME>
    The Fréchet distance between multivariate normal distributions
    https://doi.org/10.1016/0047-259X(82)90077-X

    The Fréchet distance between two multivariate gaussians,
    `X ~ N(mu_x, sigma_x)` and `Y ~ N(mu_y, sigma_y)`, is `d^2`.

    d^2 = (mu_x - mu_y)^2 + Tr(sigma_x + sigma_y - 2 * sqrt(sigma_x*sigma_y))
        = (mu_x - mu_y)^2 + Tr(sigma_x) + Tr(sigma_y)
                          - 2 * Tr(sqrt(sigma_x*sigma_y))

    Args:
      mu_test: Mean of the test multivariate gaussian.
      sigma_test: Covariance matrix of the test multivariate gaussian.
      mu_train: Mean of the train multivariate gaussian.
      sigma_train: Covariance matrix of the train multivariate gaussian.

    Returns:
      The Fréchet distance.

    Raises:
      ValueError: If the input arrays do not have the expected shapes.
    """
    if len(mu_train.shape) != 1:
        raise ValueError('mu_train must be 1 dimensional.')
    if len(sigma_train.shape) != 2:
        raise ValueError('sigma_train must be 2 dimensional.')

    if mu_test.shape != mu_train.shape:
        raise ValueError('mu_test should have the same shape as mu_train')
    if sigma_test.shape != sigma_train.shape:
        raise ValueError('sigma_test should have the same shape as sigma_train')

    mu_diff = mu_test - mu_train
    trace_sqrt_product = _stable_trace_sqrt_product(sigma_test, sigma_train)

    return mu_diff.dot(mu_diff) + np.trace(sigma_test) + np.trace(
        sigma_train) - 2 * trace_sqrt_product
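
# Illustrative usage sketch (not part of the original module): computes the Fréchet
# distance between two synthetic embedding distributions. The statistics are estimated
# here from random data instead of being read from a tf_record via
# read_mean_and_covariances(); names and sizes below are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    test_embeddings = rng.normal(size=(1000, 8))
    train_embeddings = rng.normal(loc=0.1, size=(1000, 8))
    mu_test = test_embeddings.mean(axis=0)
    sigma_test = np.cov(test_embeddings, rowvar=False)
    mu_train = train_embeddings.mean(axis=0)
    sigma_train = np.cov(train_embeddings, rowvar=False)
    print('Frechet distance: %.4f' % frechet_distance(mu_test, sigma_test, mu_train, sigma_train))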
"""01. Predict with pre-trained SSD models ========================================== This article shows how to play with pre-trained SSD models with only a few lines of code. First let's import some necessary libraries: """<import_from_stmt>gluoncv model_zoo data utils<import_from_stmt>matplotlib pyplot<as>plt<line_sep>###################################################################### # Load a pretrained model # ------------------------- # # Let's get an SSD model trained with 512x512 images on Pascal VOC # dataset with ResNet-50 V1 as the base model. By specifying # ``pretrained=True``, it will automatically download the model from the model # zoo if necessary. For more pretrained models, please refer to # :doc:`../../model_zoo/index`. net=model_zoo.get_model('ssd_512_resnet50_v1_voc' pretrained=<true>)<line_sep>###################################################################### # Pre-process an image # -------------------- # # Next we download an image, and pre-process with preset data transforms. Here we # specify that we resize the short edge of the image to 512 px. But you can # feed an arbitrarily sized image. # # You can provide a list of image file names, such as ``[im_fname1, im_fname2, # ...]`` to :py:func:`gluoncv.data.transforms.presets.ssd.load_test` if you # want to load multiple image together. # # This function returns two results. The first is a NDArray with shape # `(batch_size, RGB_channels, height, width)`. It can be fed into the # model directly. The second one contains the images in numpy format to # easy to be plotted. Since we only loaded a single image, the first dimension # of `x` is 1. im_fname=utils.download('https://github.com/dmlc/web-data/blob/master/'+'gluoncv/detection/street_small.jpg?raw=true' path='street_small.jpg')<line_sep>x,img=data.transforms.presets.ssd.load_test(im_fname short=512)<line_sep>print('Shape of pre-processed image:' x.shape)<line_sep>###################################################################### # Inference and display # --------------------- # # The forward function will return all detected bounding boxes, and the # corresponding predicted class IDs and confidence scores. Their shapes are # `(batch_size, num_bboxes, 1)`, `(batch_size, num_bboxes, 1)`, and # `(batch_size, num_bboxes, 4)`, respectively. # # We can use :py:func:`gluoncv.utils.viz.plot_bbox` to visualize the # results. We slice the results for the first image and feed them into `plot_bbox`: class_IDs,scores,bounding_boxes=net(x)<line_sep>ax=utils.viz.plot_bbox(img bounding_boxes[0] scores[0] class_IDs[0] class_names=net.classes)<line_sep>plt.show()<line_sep>
import pytest

import cocos.numerics as cn
from cocos.tests.test_numerics.test_statistics.utilities import perform_ks_test

n_kolmogorov_smirnov = 1500000

test_data = [(1, 2, n_kolmogorov_smirnov),
             (2, 2, n_kolmogorov_smirnov),
             (3, 2, n_kolmogorov_smirnov),
             (5, 1, n_kolmogorov_smirnov),
             (9, 0.5, n_kolmogorov_smirnov),
             (7.5, 1, n_kolmogorov_smirnov),
             (0.5, 1, n_kolmogorov_smirnov)]


@pytest.mark.parametrize("a, b, n_kolmogorov_smirnov", test_data)
def test_gamma_distribution(a, b, n_kolmogorov_smirnov):
    u = cn.random.gamma(a, b, n_kolmogorov_smirnov)

    reject = perform_ks_test(u,
                             alpha=0.01,
                             distribution='gamma',
                             args=(a, 0.0, b),
                             verbose=True)

    assert not reject
def shallow_copy(x):
    return type(x)(x)


class ToggleFilter(object):
    """
    This class provides a "sticky" filter that works by "toggling" items of the original database on and off.
    """

    def __init__(self, db_ref, show_by_default=True):
        """
        Instantiate a ToggleFilter object

        :parameters:
             db_ref : iterable
                an iterable object (i.e. list, set etc) that would serve as the reference db of the instance.
                Changes in that object will affect the output of the ToggleFilter instance.

             show_by_default: bool
                decide if by default all the items are "on", i.e. these items will be presented if no other
                toggling occurred.

                default value : **True**
        """
        self._data = db_ref
        self._toggle_db = set()
        self._filter_method = filter
        self.__set_initial_state(show_by_default)

    def reset(self):
        """
        Toggles off all the items
        """
        self._toggle_db = set()

    def toggle_item(self, item_key):
        """
        Toggle a single item in/out.

        :parameters:
             item_key :
                an item key by whose value the filter can decide to toggle or not.
                Example: int, str and so on.

        :return:
            + **True** if item toggled **into** the filtered items
            + **False** if item toggled **out from** the filtered items

        :raises:
            + KeyError, in case the item key is not part of the toggled list and not part of the referenced db.
        """
        if item_key in self._toggle_db:
            self._toggle_db.remove(item_key)
            return False
        elif item_key in self._data:
            self._toggle_db.add(item_key)
            return True
        else:
            raise KeyError("Provided item key isn't a key of the referenced data structure.")

    def toggle_items(self, *args):
        """
        Toggle multiple items in/out with a single call. Each item will be handled by the `toggle_item` method.

        :parameters:
             args : iterable
                an iterable object containing all item keys to be toggled in/out

        :return:
            + **True** if all toggled items were toggled **into** the filtered items
            + **False** if at least one of the items was toggled **out from** the filtered items

        :raises:
            + KeyError, in case one of the item keys was not part of the toggled list and not part of the referenced db.
        """
        # in python 3, 'map' returns an iterator, so wrapping with 'list' call creates same effect for both python 2 and 3
        return all(list(map(self.toggle_item, args)))

    def filter_items(self):
        """
        Filters the pointed database by showing only the items mapped in the toggle_db set.

        :returns:
            Filtered data of the original object.
        """
        return self._filter_method(self.__toggle_filter, self._data)

    # private methods

    def __set_initial_state(self, show_by_default):
        try:
            _ = (x for x in self._data)
            if isinstance(self._data, dict):
                self._filter_method = ToggleFilter.dict_filter
                if show_by_default:
                    self._toggle_db = set(self._data.keys())
                return
            elif isinstance(self._data, list):
                self._filter_method = ToggleFilter.list_filter
            elif isinstance(self._data, set):
                self._filter_method = ToggleFilter.set_filter
            elif isinstance(self._data, tuple):
                self._filter_method = ToggleFilter.tuple_filter
            if show_by_default:
                self._toggle_db = set(shallow_copy(self._data))  # assuming all relevant data with unique identifier
            return
        except TypeError:
            raise TypeError("provided data object is not iterable")

    def __toggle_filter(self, x):
        return (x in self._toggle_db)

    # static utility methods

    @staticmethod
    def dict_filter(function, iterable):
        assert isinstance(iterable, dict)
        return {k: v for k, v in iterable.items() if function(k)}

    @staticmethod
    def list_filter(function, iterable):
        # in python 3, filter returns an iterator, so wrapping with list creates same effect for both python 2 and 3
        return list(filter(function, iterable))

    @staticmethod
    def set_filter(function, iterable):
        return {x for x in iterable if function(x)}

    @staticmethod
    def tuple_filter(function, iterable):
        return tuple(filter(function, iterable))


if __name__ == "__main__":
    pass
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6370, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>enum Enum EnumMeta<import_from_stmt>six with_metaclass<class_stmt>_CaseInsensitiveEnumMeta(EnumMeta)<block_start><def_stmt>__getitem__ self name<block_start><return>super().__getitem__(name.upper())<block_end><def_stmt>__getattr__ cls name<block_start>"""Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """<try_stmt><block_start><return>cls._member_map_[name.upper()]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(name)<block_end><block_end><block_end><class_stmt>AbsoluteMarker(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>ALL_BACKUP="AllBackup"<line_sep>FIRST_OF_DAY="FirstOfDay"<line_sep>FIRST_OF_MONTH="FirstOfMonth"<line_sep>FIRST_OF_WEEK="FirstOfWeek"<line_sep>FIRST_OF_YEAR="FirstOfYear"<block_end><class_stmt>CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""The type of identity that created the resource. """<line_sep>USER="User"<line_sep>APPLICATION="Application"<line_sep>MANAGED_IDENTITY="ManagedIdentity"<line_sep>KEY="Key"<block_end><class_stmt>CurrentProtectionState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Specifies the current protection state of the resource """<line_sep>INVALID="Invalid"<line_sep>NOT_PROTECTED="NotProtected"<line_sep>CONFIGURING_PROTECTION="ConfiguringProtection"<line_sep>PROTECTION_CONFIGURED="ProtectionConfigured"<line_sep>BACKUP_SCHEDULES_SUSPENDED="BackupSchedulesSuspended"<line_sep>RETENTION_SCHEDULES_SUSPENDED="RetentionSchedulesSuspended"<line_sep>PROTECTION_STOPPED="ProtectionStopped"<line_sep>PROTECTION_ERROR="ProtectionError"<line_sep>CONFIGURING_PROTECTION_FAILED="ConfiguringProtectionFailed"<line_sep>SOFT_DELETING="SoftDeleting"<line_sep>SOFT_DELETED="SoftDeleted"<line_sep>UPDATING_PROTECTION="UpdatingProtection"<block_end><class_stmt>DataStoreTypes(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""type of datastore; Operational/Vault/Archive """<line_sep>OPERATIONAL_STORE="OperationalStore"<line_sep>VAULT_STORE="VaultStore"<line_sep>ARCHIVE_STORE="ArchiveStore"<block_end><class_stmt>DayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>FRIDAY="Friday"<line_sep>MONDAY="Monday"<line_sep>SATURDAY="Saturday"<line_sep>SUNDAY="Sunday"<line_sep>THURSDAY="Thursday"<line_sep>TUESDAY="Tuesday"<line_sep>WEDNESDAY="Wednesday"<block_end><class_stmt>FeatureSupportStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""feature support status """<line_sep>INVALID="Invalid"<line_sep>NOT_SUPPORTED="NotSupported"<line_sep>ALPHA_PREVIEW="AlphaPreview"<line_sep>PRIVATE_PREVIEW="PrivatePreview"<line_sep>PUBLIC_PREVIEW="PublicPreview"<line_sep>GENERALLY_AVAILABLE="GenerallyAvailable"<block_end><class_stmt>FeatureType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""backup support 
feature type. """<line_sep>INVALID="Invalid"<line_sep>DATA_SOURCE_TYPE="DataSourceType"<block_end><class_stmt>Month(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>APRIL="April"<line_sep>AUGUST="August"<line_sep>DECEMBER="December"<line_sep>FEBRUARY="February"<line_sep>JANUARY="January"<line_sep>JULY="July"<line_sep>JUNE="June"<line_sep>MARCH="March"<line_sep>MAY="May"<line_sep>NOVEMBER="November"<line_sep>OCTOBER="October"<line_sep>SEPTEMBER="September"<block_end><class_stmt>ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Provisioning state of the BackupVault resource """<line_sep>FAILED="Failed"<line_sep>PROVISIONING="Provisioning"<line_sep>SUCCEEDED="Succeeded"<line_sep>UNKNOWN="Unknown"<line_sep>UPDATING="Updating"<block_end><class_stmt>RecoveryOption(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Recovery Option """<line_sep>FAIL_IF_EXISTS="FailIfExists"<block_end><class_stmt>RehydrationPriority(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Priority to be used for rehydration. Values High or Standard """<line_sep>INVALID="Invalid"<line_sep>HIGH="High"<line_sep>STANDARD="Standard"<block_end><class_stmt>RehydrationStatus(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>CREATE_IN_PROGRESS="CREATE_IN_PROGRESS"<line_sep>COMPLETED="COMPLETED"<line_sep>DELETE_IN_PROGRESS="DELETE_IN_PROGRESS"<line_sep>DELETED="DELETED"<line_sep>FAILED="FAILED"<block_end><class_stmt>ResourceMoveState(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Resource move state for backup vault """<line_sep>UNKNOWN="Unknown"<line_sep>IN_PROGRESS="InProgress"<line_sep>PREPARE_FAILED="PrepareFailed"<line_sep>COMMIT_FAILED="CommitFailed"<line_sep>FAILED="Failed"<line_sep>PREPARE_TIMEDOUT="PrepareTimedout"<line_sep>COMMIT_TIMEDOUT="CommitTimedout"<line_sep>CRITICAL_FAILURE="CriticalFailure"<line_sep>PARTIAL_SUCCESS="PartialSuccess"<line_sep>MOVE_SUCCEEDED="MoveSucceeded"<block_end><class_stmt>RestoreSourceDataStoreType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Gets or sets the type of the source data store. """<line_sep>OPERATIONAL_STORE="OperationalStore"<line_sep>VAULT_STORE="VaultStore"<line_sep>ARCHIVE_STORE="ArchiveStore"<block_end><class_stmt>RestoreTargetLocationType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Denotes the target location where the data will be restored, string value for the enum {Microsoft.Internal.AzureBackup.DataProtection.Common.Interface.RestoreTargetLocationType} """<line_sep>INVALID="Invalid"<line_sep>AZURE_BLOBS="AzureBlobs"<line_sep>AZURE_FILES="AzureFiles"<block_end><class_stmt>SecretStoreType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Gets or sets the type of secret store """<line_sep>INVALID="Invalid"<line_sep>AZURE_KEY_VAULT="AzureKeyVault"<block_end><class_stmt>SourceDataStoreType(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Gets or sets the type of the source data store. 
"""<line_sep>ARCHIVE_STORE="ArchiveStore"<line_sep>SNAPSHOT_STORE="SnapshotStore"<line_sep>VAULT_STORE="VaultStore"<block_end><class_stmt>Status(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Specifies the protection status of the resource """<line_sep>CONFIGURING_PROTECTION="ConfiguringProtection"<line_sep>CONFIGURING_PROTECTION_FAILED="ConfiguringProtectionFailed"<line_sep>PROTECTION_CONFIGURED="ProtectionConfigured"<line_sep>PROTECTION_STOPPED="ProtectionStopped"<line_sep>SOFT_DELETED="SoftDeleted"<line_sep>SOFT_DELETING="SoftDeleting"<block_end><class_stmt>StorageSettingStoreTypes(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Gets or sets the type of the datastore. """<line_sep>ARCHIVE_STORE="ArchiveStore"<line_sep>SNAPSHOT_STORE="SnapshotStore"<line_sep>VAULT_STORE="VaultStore"<block_end><class_stmt>StorageSettingTypes(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>"""Gets or sets the type. """<line_sep>GEO_REDUNDANT="GeoRedundant"<line_sep>LOCALLY_REDUNDANT="LocallyRedundant"<block_end><class_stmt>WeekNumber(with_metaclass(_CaseInsensitiveEnumMeta str Enum))<block_start>FIRST="First"<line_sep>FOURTH="Fourth"<line_sep>LAST="Last"<line_sep>SECOND="Second"<line_sep>THIRD="Third"<block_end>
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ <import_stmt>numpy<as>np<import_stmt>mindspore<import_stmt>mindspore.nn<as>nn<import_stmt>mindspore.context<as>context<import_from_stmt>mindspore Tensor<import_from_stmt>mindspore.ops operations<as>P<import_from_stmt>mindspore.ops.operations _grad_ops<as>G<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="Ascend")<class_stmt>Net(nn.Cell)<block_start><def_stmt>__init__ self dim=0<block_start>super(Net self).__init__()<line_sep>self.op=P.GatherD()<line_sep>self.dim=dim<block_end><def_stmt>construct self x index<block_start><return>self.op(x self.dim index)<block_end><block_end><class_stmt>NetGrad(nn.Cell)<block_start><def_stmt>__init__ self dim=0 shape=<none><block_start>super(NetGrad self).__init__()<line_sep>self.op=G.GatherDGrad(dim shape)<block_end><def_stmt>construct self index x<block_start><return>self.op(index x)<block_end><block_end><def_stmt>test_net <block_start>x=Tensor(np.array([[772 231 508 545 615 249] [923 210 480 696 482 761] [465 904 521 824 607 669] [156 539 56 159 916 566] [122 676 714 261 19 936]]) mindspore.int32)<line_sep>index=Tensor(np.array([[0 0 0 1 1] [0 0 0 1 4] [0 0 0 1 -1] [1 1 1 0 0]]) mindspore.int32)<line_sep>dim=0<line_sep>net=Net(dim)<line_sep>out=net(x index)<line_sep>print(out.asnumpy())<line_sep>expect_out=np.array([[772 231 508 696 482] [772 231 508 696 19] [772 231 508 696 19] [923 210 480 545 615]])<assert_stmt>np.array_equal(out.asnumpy() expect_out)<block_end><def_stmt>test_net_bool <block_start>x=Tensor(np.array([[0 1 0 0 1 0] [0 1 0 0 1 0] [0 0 1 1 0 1] [1 0 1 1 0 0] [1 1 1 1 0 0]]) mindspore.bool_)<line_sep>index=Tensor(np.array([[0 0 0 1 1] [0 0 0 1 4] [0 0 0 1 -1] [1 1 1 0 0]]) mindspore.int32)<line_sep>dim=0<line_sep>net=Net(dim)<line_sep>out=net(x index)<line_sep>print(out.asnumpy())<line_sep>expect_out=np.array([[0 1 0 0 1] [0 1 0 0 0] [0 1 0 0 0] [0 1 0 0 1]]).astype(np.bool)<assert_stmt>np.array_equal(out.asnumpy() expect_out)<block_end><def_stmt>test_net_grad <block_start>index=Tensor(np.array([[0 1 2 0 0] [2 0 0 1 -1]]) mindspore.int32)<line_sep>x=Tensor(np.array([[772 231 508 615 249] [122 676 714 261 936]]) mindspore.int32)<line_sep>net=NetGrad(dim=0 shape=(3 5))<line_sep>out=net(index x)<line_sep>print(out.asnumpy())<line_sep>expect_out=np.array([[772 676 714 615 249] [0 231 0 261 0] [122 0 508 0 936]])<assert_stmt>np.array_equal(out.asnumpy() expect_out)<block_end>
<import_from_stmt>ascii_art.ascii_art ASCIIArt ASCIIPicture<line_sep># ASCII drawing picture=ASCIIArt('cat' 2).draw_ascii(curve=1)<line_sep>ASCIIPicture(picture).save('cat_scale2_draw_ascii.png')<with_stmt>open('cat_scale2_draw.txt' 'w')<as>f<block_start>f.write(''.join(picture))<block_end>picture=ASCIIArt('cat' 5).draw_ascii(curve=1)<line_sep>ASCIIPicture(picture).save('cat_scale5_draw_ascii.png')<with_stmt>open('cat_scale5_draw.txt' 'w')<as>f<block_start>f.write(''.join(picture))<block_end># Colored ASCII drawing using sorted custom character sets on a black background colored_picture=ASCIIArt('cat' 2).draw_color_ascii(ASCIIArt.sort('09215'))<line_sep>ASCIIPicture(colored_picture 'black').save('cat_scale2_color_numbers')<line_sep>colored_picture=ASCIIArt('cat' 5).draw_color_ascii(ASCIIArt.sort('09215'))<line_sep>ASCIIPicture(colored_picture 'black').save('cat_scale5_color_numbers')<line_sep>colored_picture=ASCIIArt('cat' 2).draw_color_ascii(ASCIIArt.sort('jontonsoup4'))<line_sep>ASCIIPicture(colored_picture 'black').save('cat_scale2_color_name')<line_sep>colored_picture=ASCIIArt('cat' 5).draw_color_ascii(ASCIIArt.sort('jontonsoup4'))<line_sep>ASCIIPicture(colored_picture 'black').save('cat_scale5_color_name')<line_sep># ASCII to HTML using 'kitten' as a character set on a black background html=ASCIIArt('cat' 1).draw_html(ASCIIArt.sort('kitten') background_color='black')<with_stmt>open('cat_scale1_html_kitten.html' 'w')<as>f<block_start>f.write(''.join(html))<block_end>html=ASCIIArt('cat' 2).draw_html(ASCIIArt.sort('kitten') background_color='black')<with_stmt>open('cat_scale2_html_kitten.html' 'w')<as>f<block_start>f.write(''.join(html))<block_end># ASCII to HTML using only '#' on a black background html=ASCIIArt('cat' 1).draw_html(ASCIIArt.BLOCK background_color='black')<with_stmt>open('cat_scale1_html_block.html' 'w')<as>f<block_start>f.write(''.join(html))<block_end>html=ASCIIArt('cat' 2).draw_html(ASCIIArt.BLOCK background_color='black')<with_stmt>open('cat_scale2_html_block.html' 'w')<as>f<block_start>f.write(''.join(html))<block_end># Colored ASCII with only '#' on a black background colored_picture=ASCIIArt('cat' 2).draw_color_ascii(ASCIIArt.BLOCK curve=1.5)<line_sep>ASCIIPicture(colored_picture 'black').save('cat_scale2_block_color.png')<line_sep>colored_picture=ASCIIArt('cat' 5).draw_color_ascii(ASCIIArt.BLOCK curve=1.5)<line_sep>ASCIIPicture(colored_picture 'black').save('cat_scale5_block_color.png')<line_sep># Colored ASCII with full grayscale colored_picture=ASCIIArt('cat' 2).draw_color_ascii(ASCIIArt.FULL_RANGE curve=1.5)<line_sep>ASCIIPicture(colored_picture).save('cat_scale2_full_range_color.png')<line_sep>colored_picture=ASCIIArt('cat' 5).draw_color_ascii(ASCIIArt.FULL_RANGE curve=1.5)<line_sep>ASCIIPicture(colored_picture).save('cat_scale5_full_range_color.png')<line_sep>
<class_stmt>MyDictSubclass(dict)<block_start><def_stmt>__init__ self<block_start>dict.__init__(self)<line_sep>self.var1=10<line_sep>self['in_dct']=20<block_end><def_stmt>__str__ self<block_start>ret=[]<for_stmt>key,val sorted(self.items())<block_start>ret.append('%s: %s'%(key val))<block_end>ret.append('self.var1: %s'%(self.var1 ))<line_sep><return>'{'+'; '.join(ret)+'}'<block_end>__repr__=__str__<block_end><class_stmt>MyListSubclass(list)<block_start><def_stmt>__init__ self<block_start>list.__init__(self)<line_sep>self.var1=11<line_sep>self.append('a')<line_sep>self.append('b')<block_end><def_stmt>__str__ self<block_start>ret=[]<for_stmt>obj self<block_start>ret.append(repr(obj))<block_end>ret.append('self.var1: %s'%(self.var1 ))<line_sep><return>'['+', '.join(ret)+']'<block_end>__repr__=__str__<block_end><class_stmt>MySetSubclass(set)<block_start><def_stmt>__init__ self<block_start>set.__init__(self)<line_sep>self.var1=12<line_sep>self.add('a')<block_end><def_stmt>__str__ self<block_start>ret=[]<for_stmt>obj sorted(self)<block_start>ret.append(repr(obj))<block_end>ret.append('self.var1: %s'%(self.var1 ))<line_sep><return>'set(['+', '.join(ret)+'])'<block_end>__repr__=__str__<block_end><class_stmt>MyTupleSubclass(tuple)<block_start><def_stmt>__new__ cls<block_start><return>super(MyTupleSubclass cls).__new__(cls tuple(['a' 1]))<block_end><def_stmt>__init__ self<block_start>self.var1=13<block_end><def_stmt>__str__ self<block_start>ret=[]<for_stmt>obj self<block_start>ret.append(repr(obj))<block_end>ret.append('self.var1: %s'%(self.var1 ))<line_sep><return>'tuple('+', '.join(ret)+')'<block_end>__repr__=__str__<block_end><def_stmt>Call <block_start>variable_for_test_1=MyListSubclass()<line_sep>variable_for_test_2=MySetSubclass()<line_sep>variable_for_test_3=MyDictSubclass()<line_sep>variable_for_test_4=MyTupleSubclass()<line_sep>all_vars_set=<true><block_end># Break here <if_stmt>__name__<eq>'__main__'<block_start>Call()<line_sep>print('TEST SUCEEDED!')<block_end>
""" Layout dimensions are used to give the minimum, maximum and preferred dimensions for containers and controls. """<import_from_future_stmt> unicode_literals<line_sep>__all__=('LayoutDimension' 'sum_layout_dimensions' 'max_layout_dimensions' )<class_stmt>LayoutDimension(object)<block_start>""" Specified dimension (width/height) of a user control or window. The layout engine tries to honor the preferred size. If that is not possible, because the terminal is larger or smaller, it tries to keep in between min and max. :param min: Minimum size. :param max: Maximum size. :param weight: For a VSplit/HSplit, the actual size will be determined by taking the proportion of weights from all the children. E.g. When there are two children, one width a weight of 1, and the other with a weight of 2. The second will always be twice as big as the first, if the min/max values allow it. :param preferred: Preferred size. """<def_stmt>__init__ self min=<none> max=<none> weight=1 preferred=<none><block_start><assert_stmt>isinstance(weight int)<and>weight<g>0# Cannot be a float. self.min_specified=min<is><not><none><line_sep>self.max_specified=max<is><not><none><line_sep>self.preferred_specified=preferred<is><not><none><if_stmt>min<is><none><block_start>min=0# Smallest possible value. <block_end><if_stmt>max<is><none># 0-values are allowed, so use "is None" <block_start>max=1000<power>10# Something huge. <block_end><if_stmt>preferred<is><none><block_start>preferred=min<block_end>self.min=min<line_sep>self.max=max<line_sep>self.preferred=preferred<line_sep>self.weight=weight<line_sep># Make sure that the 'preferred' size is always in the min..max range. <if_stmt>self.preferred<l>self.min<block_start>self.preferred=self.min<block_end><if_stmt>self.preferred<g>self.max<block_start>self.preferred=self.max<block_end><block_end>@classmethod<def_stmt>exact cls amount<block_start>""" Return a :class:`.LayoutDimension` with an exact size. (min, max and preferred set to ``amount``). """<line_sep><return>cls(min=amount max=amount preferred=amount)<block_end><def_stmt>__repr__ self<block_start><return>'LayoutDimension(min=%r, max=%r, preferred=%r, weight=%r)'%(self.min self.max self.preferred self.weight)<block_end><def_stmt>__add__ self other<block_start><return>sum_layout_dimensions([self other])<block_end><block_end><def_stmt>sum_layout_dimensions dimensions<block_start>""" Sum a list of :class:`.LayoutDimension` instances. """<line_sep>min=sum([d.min<for>d dimensions<if>d.min<is><not><none>])<line_sep>max=sum([d.max<for>d dimensions<if>d.max<is><not><none>])<line_sep>preferred=sum([d.preferred<for>d dimensions])<line_sep><return>LayoutDimension(min=min max=max preferred=preferred)<block_end><def_stmt>max_layout_dimensions dimensions<block_start>""" Take the maximum of a list of :class:`.LayoutDimension` instances. """<line_sep>min_=max([d.min<for>d dimensions<if>d.min<is><not><none>])<line_sep>max_=max([d.max<for>d dimensions<if>d.max<is><not><none>])<line_sep>preferred=max([d.preferred<for>d dimensions])<line_sep><return>LayoutDimension(min=min_ max=max_ preferred=preferred)<block_end>
<import_from_future_stmt> annotations# postpone evaluation of annotations <import_stmt>logging<import_from_stmt>typing Any<import_from_stmt>sqlalchemy Column inspect<import_from_stmt>sqlalchemy.orm relationship<import_from_stmt>sqlalchemy.schema ForeignKey<import_from_stmt>sqlalchemy.types Text<import_from_stmt>nuplan.database.common sql_types<import_from_stmt>nuplan.database.common.utils simple_repr<import_from_stmt>nuplan.database.nuplan_db.lidar_pc LidarPc<import_from_stmt>nuplan.database.nuplan_db.models Base<line_sep>logger=logging.getLogger()<class_stmt>ScenarioTag(Base)<block_start>""" Scenarios Tags for a scene. """<line_sep>__tablename__='scenario_tag'<line_sep>token:str=Column(sql_types.HexLen8 primary_key=<true>)<line_sep>lidar_pc_token:str=Column(sql_types.HexLen8 ForeignKey("lidar_pc.token") nullable=<false>)<line_sep>type:str=Column(Text)<line_sep>agent_track_token:str=Column(sql_types.HexLen8 ForeignKey("track.token") nullable=<false>)<line_sep>lidar_pc:LidarPc=relationship("LidarPc" foreign_keys=[lidar_pc_token] back_populates="scenario_tags")<line_sep>@property<def_stmt>_session self<arrow>Any<block_start>""" Get the underlying session. :return: The underlying session. """<line_sep><return>inspect(self).session<block_end><def_stmt>__repr__ self<arrow>str<block_start>""" Get the string representation. :return: The string representation. """<line_sep>desc:str=simple_repr(self)<line_sep><return>desc<block_end><block_end>LidarPc.scenario_tags=relationship("ScenarioTag" foreign_keys="ScenarioTag.lidar_pc_token" back_populates="lidar_pc")<line_sep>
""" @author: <NAME>, <NAME> @date: 20201015 @contact: <EMAIL> """<import_stmt>sys<line_sep>sys.path.append('.')<import_stmt>logging<line_sep>mpl_logger=logging.getLogger('matplotlib')<line_sep>mpl_logger.setLevel(logging.WARNING)<import_stmt>logging.config<line_sep>logging.config.fileConfig("config/logging.conf")<line_sep>logger=logging.getLogger('api')<import_stmt>cv2<import_from_stmt>core.image_cropper.arcface_cropper.FaceRecImageCropper FaceRecImageCropper<if_stmt>__name__<eq>'__main__'<block_start>image_path='api_usage/test_images/test1.jpg'<line_sep>image_info_file='api_usage/test_images/test1_landmark_res0.txt'<line_sep>line=open(image_info_file).readline().strip()<line_sep>landmarks_str=line.split(' ')<line_sep>landmarks=[float(num)<for>num landmarks_str]<line_sep>face_cropper=FaceRecImageCropper()<line_sep>image=cv2.imread(image_path)<line_sep>cropped_image=face_cropper.crop_image_by_mat(image landmarks)<line_sep>cv2.imwrite('api_usage/temp/test1_cropped.jpg' cropped_image)<line_sep>logger.info('Crop image successful!')<block_end>
# -*- coding: utf-8 -*- <import_stmt>pendulum<import_from_stmt>flexmock flexmock flexmock_teardown<import_from_stmt>... OratorTestCase<import_from_stmt>orator.query.builder QueryBuilder<import_from_stmt>orator.orm.builder Builder<import_from_stmt>orator.orm.model Model<import_from_stmt>orator.orm.relations HasOne<class_stmt>OrmRelationTestCase(OratorTestCase)<block_start><def_stmt>tearDown self<block_start>flexmock_teardown()<block_end><def_stmt>test_set_relation_fail self<block_start>parent=OrmRelationResetModelStub()<line_sep>relation=OrmRelationResetModelStub()<line_sep>parent.set_relation("test" relation)<line_sep>parent.set_relation("foo" "bar")<line_sep>self.assertFalse("foo"<in>parent.to_dict())<block_end><def_stmt>test_touch_method_updates_related_timestamps self<block_start>builder=flexmock(Builder get_model=<none> where=<none>)<line_sep>parent=Model()<line_sep>parent=flexmock(parent)<line_sep>parent.should_receive("get_attribute").with_args("id").and_return(1)<line_sep>related=Model()<line_sep>related=flexmock(related)<line_sep>builder.should_receive("get_model").and_return(related)<line_sep>builder.should_receive("where")<line_sep>relation=HasOne(Builder(QueryBuilder(<none> <none> <none>)) parent "foreign_key" "id")<line_sep>related.should_receive("get_table").and_return("table")<line_sep>related.should_receive("get_updated_at_column").and_return("updated_at")<line_sep>now=pendulum.now()<line_sep>related.should_receive("fresh_timestamp").and_return(now)<line_sep>builder.should_receive("update").once().with_args({"updated_at":now})<line_sep>relation.touch()<block_end><block_end><class_stmt>OrmRelationResetModelStub(Model)<block_start><def_stmt>get_query self<block_start><return>self.new_query().get_query()<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>torch.nn<as>nn<import_from_stmt>.layers SSIM Backproject Project<import_from_stmt>.depth_encoder DepthEncoder<import_from_stmt>.depth_decoder DepthDecoder<import_from_stmt>.pose_encoder PoseEncoder<import_from_stmt>.pose_decoder PoseDecoder<import_from_stmt>..registry MONO<line_sep>@MONO.register_module<class_stmt>Baseline(nn.Module)<block_start><def_stmt>__init__ self options<block_start>super(Baseline self).__init__()<line_sep>self.opt=options<line_sep>self.num_input_frames=len(self.opt.frame_ids)<line_sep>self.DepthEncoder=DepthEncoder(self.opt.depth_num_layers self.opt.depth_pretrained_path)<line_sep>self.DepthDecoder=DepthDecoder(self.DepthEncoder.num_ch_enc)<line_sep>self.PoseEncoder=PoseEncoder(self.opt.pose_num_layers self.opt.pose_pretrained_path num_input_images=2)<line_sep>self.PoseDecoder=PoseDecoder(self.PoseEncoder.num_ch_enc)<line_sep>self.ssim=SSIM()<line_sep>self.backproject=Backproject(self.opt.imgs_per_gpu self.opt.height self.opt.width)<line_sep>self.project_3d=Project(self.opt.imgs_per_gpu self.opt.height self.opt.width)<block_end><def_stmt>forward self inputs<block_start>outputs=self.DepthDecoder(self.DepthEncoder(inputs["color_aug" 0 0]))<if_stmt>self.training<block_start>outputs.update(self.predict_poses(inputs))<line_sep>loss_dict=self.compute_losses(inputs outputs)<line_sep><return>outputs loss_dict<block_end><return>outputs<block_end><def_stmt>robust_l1 self pred target<block_start>eps=1e-3<line_sep><return>torch.sqrt(torch.pow(target-pred 2)+eps<power>2)<block_end><def_stmt>compute_reprojection_loss self pred target<block_start>photometric_loss=self.robust_l1(pred target).mean(1 <true>)<line_sep>ssim_loss=self.ssim(pred target).mean(1 <true>)<line_sep>reprojection_loss=(0.85<times>ssim_loss+0.15<times>photometric_loss)<line_sep><return>reprojection_loss<block_end><def_stmt>compute_losses self inputs outputs<block_start>loss_dict={}<for_stmt>scale self.opt.scales<block_start>""" initialization """<line_sep>disp=outputs[("disp" 0 scale)]<line_sep>target=inputs[("color" 0 0)]<line_sep>reprojection_losses=[]<line_sep>""" reconstruction """<line_sep>outputs=self.generate_images_pred(inputs outputs scale)<line_sep>""" automask """<if_stmt>self.opt.automask<block_start><for_stmt>frame_id self.opt.frame_ids[1:]<block_start>pred=inputs[("color" frame_id 0)]<line_sep>identity_reprojection_loss=self.compute_reprojection_loss(pred target)<line_sep>identity_reprojection_loss<augadd>torch.randn(identity_reprojection_loss.shape).cuda()<times>1e-5<line_sep>reprojection_losses.append(identity_reprojection_loss)<block_end><block_end>""" minimum reconstruction loss """<for_stmt>frame_id self.opt.frame_ids[1:]<block_start>pred=outputs[("color" frame_id scale)]<line_sep>reprojection_losses.append(self.compute_reprojection_loss(pred target))<block_end>reprojection_loss=torch.cat(reprojection_losses 1)<line_sep>min_reconstruct_loss,outputs[("min_index" scale)]=torch.min(reprojection_loss dim=1)<line_sep>loss_dict[('min_reconstruct_loss' scale)]=min_reconstruct_loss.mean()/len(self.opt.scales)<line_sep>""" disp mean normalization """<if_stmt>self.opt.disp_norm<block_start>mean_disp=disp.mean(2 <true>).mean(3 <true>)<line_sep>disp=disp/(mean_disp+1e-7)<block_end>""" smooth loss """<line_sep>smooth_loss=self.get_smooth_loss(disp target)<line_sep>loss_dict[('smooth_loss' 
scale)]=self.opt.disparity_smoothness<times>smooth_loss/(2<power>scale)/len(self.opt.scales)<block_end><return>loss_dict<block_end><def_stmt>disp_to_depth self disp min_depth max_depth<block_start>min_disp=1/max_depth# 0.01 max_disp=1/min_depth# 10 scaled_disp=min_disp+(max_disp-min_disp)<times>disp# (10-0.01)*disp+0.01 depth=1/scaled_disp<line_sep><return>scaled_disp depth<block_end><def_stmt>predict_poses self inputs<block_start>outputs={}<line_sep>pose_feats={f_i:F.interpolate(inputs["color_aug" f_i 0] [192 640] mode="bilinear" align_corners=<false>)<for>f_i self.opt.frame_ids}<for_stmt>f_i self.opt.frame_ids[1:]<block_start><if_stmt><not>f_i<eq>"s"<block_start><if_stmt>f_i<l>0<block_start>pose_inputs=[pose_feats[f_i] pose_feats[0]]<block_end><else_stmt><block_start>pose_inputs=[pose_feats[0] pose_feats[f_i]]<block_end>pose_inputs=self.PoseEncoder(torch.cat(pose_inputs 1))<line_sep>axisangle,translation=self.PoseDecoder(pose_inputs)<line_sep>outputs[("cam_T_cam" 0 f_i)]=self.transformation_from_parameters(axisangle[: 0] translation[: 0] invert=(f_i<l>0))<block_end><block_end><return>outputs<block_end><def_stmt>generate_images_pred self inputs outputs scale<block_start>disp=outputs[("disp" 0 scale)]<line_sep>disp=F.interpolate(disp [self.opt.height self.opt.width] mode="bilinear" align_corners=<false>)<line_sep>_,depth=self.disp_to_depth(disp self.opt.min_depth self.opt.max_depth)<for_stmt>i,frame_id enumerate(self.opt.frame_ids[1:])<block_start><if_stmt>frame_id<eq>"s"<block_start>T=inputs["stereo_T"]<block_end><else_stmt><block_start>T=outputs[("cam_T_cam" 0 frame_id)]<block_end>cam_points=self.backproject(depth inputs[("inv_K")])<line_sep>pix_coords=self.project_3d(cam_points inputs[("K")] T)#[b,h,w,2] outputs[("color" frame_id scale)]=F.grid_sample(inputs[("color" frame_id 0)] pix_coords padding_mode="border")<block_end><return>outputs<block_end><def_stmt>transformation_from_parameters self axisangle translation invert=<false><block_start>R=self.rot_from_axisangle(axisangle)<line_sep>t=translation.clone()<if_stmt>invert<block_start>R=R.transpose(1 2)<line_sep>t<augmul>-1<block_end>T=self.get_translation_matrix(t)<if_stmt>invert<block_start>M=torch.matmul(R T)<block_end><else_stmt><block_start>M=torch.matmul(T R)<block_end><return>M<block_end><def_stmt>get_translation_matrix self translation_vector<block_start>T=torch.zeros(translation_vector.shape[0] 4 4).cuda()<line_sep>t=translation_vector.contiguous().view(-1 3 1)<line_sep>T[: 0 0]=1<line_sep>T[: 1 1]=1<line_sep>T[: 2 2]=1<line_sep>T[: 3 3]=1<line_sep>T[: :3 3 <none>]=t<line_sep><return>T<block_end><def_stmt>rot_from_axisangle self vec<block_start>angle=torch.norm(vec 2 2 <true>)<line_sep>axis=vec/(angle+1e-7)<line_sep>ca=torch.cos(angle)<line_sep>sa=torch.sin(angle)<line_sep>C=1-ca<line_sep>x=axis[<ellipsis> 0].unsqueeze(1)<line_sep>y=axis[<ellipsis> 1].unsqueeze(1)<line_sep>z=axis[<ellipsis> 2].unsqueeze(1)<line_sep>xs=x<times>sa<line_sep>ys=y<times>sa<line_sep>zs=z<times>sa<line_sep>xC=x<times>C<line_sep>yC=y<times>C<line_sep>zC=z<times>C<line_sep>xyC=x<times>yC<line_sep>yzC=y<times>zC<line_sep>zxC=z<times>xC<line_sep>rot=torch.zeros((vec.shape[0] 4 4)).cuda()<line_sep>rot[: 0 0]=torch.squeeze(x<times>xC+ca)<line_sep>rot[: 0 1]=torch.squeeze(xyC-zs)<line_sep>rot[: 0 2]=torch.squeeze(zxC+ys)<line_sep>rot[: 1 0]=torch.squeeze(xyC+zs)<line_sep>rot[: 1 1]=torch.squeeze(y<times>yC+ca)<line_sep>rot[: 1 2]=torch.squeeze(yzC-xs)<line_sep>rot[: 2 0]=torch.squeeze(zxC-ys)<line_sep>rot[: 2 1]=torch.squeeze(yzC+xs)<line_sep>rot[: 2 
2]=torch.squeeze(z<times>zC+ca)<line_sep>rot[: 3 3]=1<line_sep><return>rot<block_end><def_stmt>get_smooth_loss self disp img<block_start>b,_,h,w=disp.size()<line_sep>a1=0.5<line_sep>a2=0.5<line_sep>img=F.interpolate(img (h w) mode='area')<line_sep>disp_dx,disp_dy=self.gradient(disp)<line_sep>img_dx,img_dy=self.gradient(img)<line_sep>disp_dxx,disp_dxy=self.gradient(disp_dx)<line_sep>disp_dyx,disp_dyy=self.gradient(disp_dy)<line_sep>img_dxx,img_dxy=self.gradient(img_dx)<line_sep>img_dyx,img_dyy=self.gradient(img_dy)<line_sep>smooth1=torch.mean(disp_dx.abs()<times>torch.exp(-a1<times>img_dx.abs().mean(1 <true>)))+torch.mean(disp_dy.abs()<times>torch.exp(-a1<times>img_dy.abs().mean(1 <true>)))<line_sep>smooth2=torch.mean(disp_dxx.abs()<times>torch.exp(-a2<times>img_dxx.abs().mean(1 <true>)))+torch.mean(disp_dxy.abs()<times>torch.exp(-a2<times>img_dxy.abs().mean(1 <true>)))+torch.mean(disp_dyx.abs()<times>torch.exp(-a2<times>img_dyx.abs().mean(1 <true>)))+torch.mean(disp_dyy.abs()<times>torch.exp(-a2<times>img_dyy.abs().mean(1 <true>)))<line_sep><return>smooth1+smooth2<block_end><def_stmt>gradient self D<block_start>D_dy=D[: : 1:]-D[: : :-1]<line_sep>D_dx=D[: : : 1:]-D[: : : :-1]<line_sep><return>D_dx D_dy<block_end><block_end>
<import_stmt>unittest<import_from_stmt>unittest.mock Mock<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>running_modes.reinforcement_learning.margin_guard MarginGuard<class_stmt>MarginGuardStoreTest(unittest.TestCase)<block_start><def_stmt>setUp self<arrow><none><block_start>self.runner=Mock()<line_sep>self.mg=MarginGuard(self.runner)<line_sep>self.agent_likelihood=torch.tensor([[1. -1.] [1. -1.]])<line_sep>self.prior_likelihood=torch.tensor([[1. -1.] [1. -1.]])<line_sep>self.augmented_likelihood=torch.tensor([[1. -1.] [1. -1.]])<line_sep>self.score=np.array([1. 2. 3])<block_end><def_stmt>_store_run self<arrow><none><block_start>self.mg.store_run_stats(self.agent_likelihood self.prior_likelihood self.augmented_likelihood self.score)<block_end><def_stmt>test_empty self<block_start>self.assertEqual(len(self.mg._run_stats) 0)<block_end><def_stmt>test_store_one self<block_start>self._store_run()<line_sep>self.assertEqual(len(self.mg._run_stats) 1)<block_end><def_stmt>test_store_two self<block_start>self._store_run()<line_sep>self._store_run()<line_sep>self.assertEqual(len(self.mg._run_stats) 2)<block_end><def_stmt>test_stats_have_all_fields self<block_start>self._store_run()<line_sep>fields={"agent_likelihood" "prior_likelihood" "augmented_likelihood" "score"}<line_sep>self.assertTrue(all(f<in>line<for>line self.mg._run_stats<for>f fields))<block_end><block_end>
# -*- coding: utf-8 -*- # # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # """ Configuration file for Pytest NOTE: DO NOT add fixtures here. It could generate problems with QtAwesome being called before a QApplication is created. """<import_stmt>os<line_sep>os.environ['SPYDER_DEBUG']='3'<line_sep>
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the AppEngine client."""<import_stmt>unittest<import_from_stmt>googleapiclient errors<import_stmt>unittest.mock<as>mock<import_stmt>httplib2<import_stmt>google.auth<import_from_stmt>google.oauth2 credentials<import_from_stmt>tests unittest_utils<import_from_stmt>tests.common.gcp_api.test_data fake_appengine_responses<as>fae<import_from_stmt>tests.common.gcp_api.test_data http_mocks<import_from_stmt>google.cloud.forseti.common.gcp_api appengine<as>ae<import_from_stmt>google.cloud.forseti.common.gcp_api errors<as>api_errors<class_stmt>AppEngineTest(unittest_utils.ForsetiTestCase)<block_start>"""Test the AppEngine client."""<line_sep>@[email protected](google.auth 'default' return_value=(mock.Mock(spec_set=credentials.Credentials) 'test-project'))<def_stmt>setUpClass cls mock_google_credential<block_start>"""Set up."""<line_sep>fake_global_configs={'appengine':{'max_calls':18 'period':1}}<line_sep>cls.ae_api_client=ae.AppEngineClient(fake_global_configs use_rate_limiter=<false>)<block_end>@mock.patch.object(google.auth 'default' return_value=(mock.Mock(spec_set=credentials.Credentials) 'test-project'))<def_stmt>test_no_quota self mock_google_credential<block_start>"""Verify no rate limiter is used if the configuration is missing."""<line_sep>ae_api_client=ae.AppEngineClient(global_configs={})<line_sep>self.assertEqual(<none> ae_api_client.repository._rate_limiter)<block_end><def_stmt>test_is_status_not_found_404 self<block_start>response=httplib2.Response({'status':'404' 'content-type':'application/json'})<line_sep>response.reason='Not Found'<line_sep>error=errors.HttpError(response fae.APP_NOT_FOUND.encode() uri='')<line_sep>self.assertTrue(ae._is_status_not_found(error))<block_end><def_stmt>test_is_status_not_found_403 self<block_start>response=httplib2.Response({'status':'403' 'content-type':'application/json'})<line_sep>response.reason='Permission Denied'<line_sep>error=errors.HttpError(response fae.PERMISSION_DENIED.encode() uri='')<line_sep>self.assertFalse(ae._is_status_not_found(error))<block_end><def_stmt>test_get_app self<block_start>http_mocks.mock_http_response(fae.FAKE_APP_GET_RESPONSE)<line_sep>response=self.ae_api_client.get_app(fae.FAKE_PROJECT_ID)<line_sep>self.assertEqual(fae.FAKE_APP_NAME response.get('name'))<block_end><def_stmt>test_get_app_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.get_app(fae.FAKE_PROJECT_ID)<line_sep>self.assertEqual({} response)<block_end><def_stmt>test_get_app_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.get_app(fae.FAKE_PROJECT_ID)<block_end><block_end><def_stmt>test_get_service self<block_start>http_mocks.mock_http_response(fae.GET_SERVICE_RESPONSE)<line_sep>response=self.ae_api_client.get_service(fae.FAKE_PROJECT_ID 
fae.FAKE_SERVICE_ID)<line_sep>self.assertEqual(fae.EXPECTED_SERVICE_NAMES[0] response.get('name'))<block_end><def_stmt>test_get_service_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.get_service(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID)<line_sep>self.assertEqual({} response)<block_end><def_stmt>test_get_service_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.get_service(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID)<block_end><block_end><def_stmt>test_list_services self<block_start>http_mocks.mock_http_response(fae.LIST_SERVICES_RESPONSE)<line_sep>response=self.ae_api_client.list_services(fae.FAKE_PROJECT_ID)<line_sep>self.assertEqual(fae.EXPECTED_SERVICE_NAMES [r.get('name')<for>r response])<block_end><def_stmt>test_list_services_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.list_services(fae.FAKE_PROJECT_ID)<line_sep>self.assertEqual([] response)<block_end><def_stmt>test_list_services_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.list_services(fae.FAKE_PROJECT_ID)<block_end><block_end><def_stmt>test_get_version self<block_start>http_mocks.mock_http_response(fae.GET_VERSION_RESPONSE)<line_sep>response=self.ae_api_client.get_version(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID)<line_sep>self.assertEqual(fae.EXPECTED_VERSION_NAMES[0] response.get('name'))<block_end><def_stmt>test_get_version_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.get_version(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID)<line_sep>self.assertEqual({} response)<block_end><def_stmt>test_get_version_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.get_version(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID)<block_end><block_end><def_stmt>test_list_versions self<block_start>mock_responses=[]<for_stmt>page fae.LIST_VERSIONS_RESPONSES<block_start>mock_responses.append(({'status':'200'} page))<block_end>http_mocks.mock_http_response_sequence(mock_responses)<line_sep>response=self.ae_api_client.list_versions(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID)<line_sep>self.assertEqual(fae.EXPECTED_VERSION_NAMES [r.get('name')<for>r response])<block_end><def_stmt>test_list_versions_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.list_versions(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID)<line_sep>self.assertEqual([] response)<block_end><def_stmt>test_list_versions_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.list_versions(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID)<block_end><block_end><def_stmt>test_get_instance self<block_start>http_mocks.mock_http_response(fae.GET_INSTANCE_RESPONSE)<line_sep>response=self.ae_api_client.get_instance(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID fae.FAKE_INSTANCE_ID)<line_sep>self.assertEqual(fae.EXPECTED_INSTANCE_NAMES[0] 
response.get('name'))<block_end><def_stmt>test_get_instance_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.get_instance(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID fae.FAKE_INSTANCE_ID)<line_sep>self.assertEqual({} response)<block_end><def_stmt>test_get_instance_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.get_instance(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID fae.FAKE_INSTANCE_ID)<block_end><block_end><def_stmt>test_list_instances self<block_start>http_mocks.mock_http_response(fae.LIST_INSTANCES_RESPONSE)<line_sep>response=self.ae_api_client.list_instances(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID)<line_sep>self.assertEqual(fae.EXPECTED_INSTANCE_NAMES [r.get('name')<for>r response])<block_end><def_stmt>test_list_instances_not_found self<block_start>http_mocks.mock_http_response(fae.APP_NOT_FOUND '404')<line_sep>response=self.ae_api_client.list_instances(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID)<line_sep>self.assertEqual([] response)<block_end><def_stmt>test_list_instances_raises self<block_start>http_mocks.mock_http_response(fae.PERMISSION_DENIED '403')<with_stmt>self.assertRaises(api_errors.ApiExecutionError)<block_start>self.ae_api_client.list_instances(fae.FAKE_PROJECT_ID fae.FAKE_SERVICE_ID fae.FAKE_VERSION_ID)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Collaborative Filtering meetup dataset pre-processing."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_from_stmt>absl app<import_from_stmt>absl flags<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v2<as>tf<import_from_stmt>hyperbolic.utils.preprocess process_dataset<import_from_stmt>hyperbolic.utils.preprocess save_as_pickle<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_string('dataset_path' default='data/meetup/' help='Path to raw dataset dir')<line_sep>flags.DEFINE_string('save_dir_path' default='data/meetup20_nrand/' help='Path to saving directory')<def_stmt>read_event_times dataset_path<block_start>"""Maps events times to a dictonary."""<line_sep>event_times={}<for_stmt>split ['train' 'test']<block_start>path=os.path.join(dataset_path 'NYC' split 'events.txt')<with_stmt>tf.gfile.Open(path 'r')<as>lines<block_start><for_stmt>line lines<block_start>line=line.strip('\n').split(' ')<line_sep>event=line[0]<line_sep>timestamp=int(line[2])<line_sep>event_times[event]=timestamp<block_end><block_end><block_end><return>event_times<block_end><def_stmt>to_np_new_ids examples<block_start>"""Creates new ids to a user-events dict. Casts new values as Numpy arrays."""<line_sep>user_id={user:i<for>i,user enumerate(examples.keys())}<line_sep>all_events=set().union(*examples.values())<line_sep>event_id={event:i<for>i,event enumerate(all_events)}<line_sep>examples_new_ids={}<for_stmt>user examples<block_start>events=[event_id[event]<for>event examples[user]]<line_sep>examples_new_ids[user_id[user]]=np.array(events)<block_end><return>examples_new_ids<block_end><def_stmt>meetup_to_dict dataset_path min_interaction=20<block_start>"""Maps raw dataset file to a Dictonary. Args: dataset_path: Path to directory so that: dataset_file/NYC/train/event_users.txt and dataset_file/NYC/test/event_users.txt both have format of event_id user_id user_id ... user_id dataset_file/NYC/train/events.txt and dataset_file/NYC/test/events.txt both have format of Event_id Venue_id Time Group_id where the format of Time is YYYYMMDDhhmmss. min_interaction: number of minimal interactions per user to filter on. Returns: Dictionary containing users as keys, and a numpy array of events the user interacted with, sorted by the time of interaction. 
"""<line_sep># create user to event dict all_examples={}<for_stmt>split ['train' 'test']<block_start>path=os.path.join(dataset_path 'NYC' split 'event_users.txt')<with_stmt>tf.gfile.Open(path 'r')<as>lines<block_start><for_stmt>line lines<block_start>line=line.strip('\n').split(' ')<line_sep>event=line[0]<for_stmt>user line[1:]<block_start><if_stmt>user<in>all_examples<block_start>all_examples[user].append(event)<block_end><else_stmt><block_start>all_examples[user]=[event]<block_end><block_end><block_end><block_end><block_end># filter on users with enough events and sort events by time event_times=read_event_times(dataset_path)<for_stmt>user list(all_examples)<block_start><if_stmt>len(all_examples[user])<ge>min_interaction<block_start>all_examples[user]=sorted(all_examples[user] key=<lambda>event:event_times[event]<if>event<in>event_times<else>0)<block_end><else_stmt><block_start><del_stmt>all_examples[user]<block_end><block_end><return>to_np_new_ids(all_examples)<block_end><def_stmt>main _<block_start>dataset_path=FLAGS.dataset_path<line_sep>save_path=FLAGS.save_dir_path<line_sep>sorted_dict=meetup_to_dict(dataset_path)<line_sep>dataset_examples=process_dataset(sorted_dict random=<false>)<line_sep>save_as_pickle(save_path dataset_examples)<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
<import_stmt>_sk_fail<line_sep>_sk_fail._("py_compile")<line_sep>
<import_stmt>codecs<def_stmt>search_function encoding<block_start><if_stmt>encoding<ne>'pyxl'<block_start><return><none><block_end><import_from_stmt>pyxl.codec.transform pyxl_encode pyxl_decode PyxlIncrementalDecoderInvertible PyxlIncrementalEncoder PyxlStreamReaderInvertible PyxlStreamWriter <line_sep><return>codecs.CodecInfo(name='pyxl' encode=pyxl_encode decode=<lambda>b:pyxl_decode(b invertible=<true>) incrementalencoder=PyxlIncrementalEncoder incrementaldecoder=PyxlIncrementalDecoderInvertible streamreader=PyxlStreamReaderInvertible streamwriter=PyxlStreamWriter )<block_end>codecs.register(search_function)<line_sep>
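# Added usage sketch (not part of the original module): importing this module registers
# the search function, after which the "pyxl" codec can be resolved by name through the
# standard codec machinery. The module path used in the import below is an assumption.

import codecs
import pyxl.codec.register  # noqa: F401  -- imported only for the codecs.register() side effect
assert codecs.lookup('pyxl').name == 'pyxl'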
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge log_softmax operation test """<import_stmt>tensorflow<as>tf<line_sep>tf.compat.v1.disable_eager_execution()<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>common NgraphTest<class_stmt>TestLogSoftmaxOperations(NgraphTest)<block_start><def_stmt>test_logsoftmax self<block_start>type=np.float32<line_sep>max=np.finfo(type).max<line_sep>features=np.array([[1. 1. 1. 1.] [max 1. 2. 3.]]).astype(type)<line_sep>logsoftmax=tf.nn.log_softmax(features)<line_sep>sess_fn=<lambda>sess:sess.run([logsoftmax])<line_sep>out=self.with_ngraph(sess_fn)<assert_stmt>np.allclose(np.array([[-1.386294 -1.386294 -1.386294 -1.386294] [0 -max -max -max]]) out rtol=1.e-5 atol=1.e-5)<block_end><block_end>
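# Added note (not part of the original test) on why the expected values hold:
# log_softmax(x)_i = x_i - logsumexp(x). For the row [1, 1, 1, 1], logsumexp = 1 + log(4),
# so every entry equals -log(4) ~= -1.386294. For the row [max, 1, 2, 3] in float32 the
# first element (~3.4e38) dominates the sum, so logsumexp ~= max and the result rounds to
# [0, -max, -max, -max].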
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. <import_stmt>os<import_stmt>unittest<import_from_stmt>typing cast<import_from_stmt>cdm.enums CdmStatusLevel CdmObjectType<import_from_stmt>cdm.objectmodel CdmCorpusDefinition CdmEntityDefinition CdmLocalEntityDeclarationDefinition CdmManifestDefinition<import_from_stmt>cdm.storage LocalAdapter<import_from_stmt>tests.common async_test TestHelper<def_stmt>IfRunTestsFlagNotSet <block_start><return>os.environ.get('SAMPLE_RUNTESTS')<is><not>'1'<block_end><class_stmt>CreateManifestTest(unittest.TestCase)<block_start>tests_subpath='Samples'<line_sep>test_name='test_create_manifest'<line_sep>@[email protected](IfRunTestsFlagNotSet() "SAMPLE_RUNTESTS environment variable not set.")<async_keyword><def_stmt>test_create_manifest self<block_start>TestHelper.delete_files_from_actual_output(TestHelper.get_actual_output_folder_path(self.tests_subpath self.test_name))<line_sep><await>self.create_manifest(self.setup_cdm_corpus())<line_sep>error_log=TestHelper.compare_folder_files_equality(TestHelper.get_expected_output_folder_path(self.tests_subpath self.test_name) TestHelper.get_actual_output_folder_path(self.tests_subpath self.test_name) <true>)<line_sep>self.assertEqual('' error_log)<block_end><def_stmt>setup_cdm_corpus self# Make a corpus, the corpus is the collection of all documents and folders created or discovered while navigating # objects and paths. <block_start>cdm_corpus=CdmCorpusDefinition()<line_sep>cdm_corpus.ctx.report_at_level=CdmStatusLevel.ERROR<line_sep>print('Configure storage adapters')<line_sep>cdm_corpus.storage.mount('local' LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath self.test_name)))<line_sep># Local is our default. So any paths that start out navigating without a device tag will assume local. cdm_corpus.storage.default_namespace='local'<line_sep># Fake cdm, normally use the CDM Standards adapter. cdm_corpus.storage.mount('cdm' LocalAdapter(TestHelper.sample_schema_folder_path))<line_sep><return>cdm_corpus<block_end><async_keyword><def_stmt>create_manifest self cdm_corpus:CdmCorpusDefinition<block_start>print('Make placeholder manifest')<line_sep># Make the temp manifest and add it to the root of the local documents in the corpus. 
manifest_abstract=cdm_corpus.make_object(CdmObjectType.MANIFEST_DEF 'temp_abstract')<line_sep># type: CdmManifestDefinition # Add each declaration, this example is about medical appointments and care plans manifest_abstract.entities.append('Account' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Account.cdm.json/Account')<line_sep>manifest_abstract.entities.append('Address' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Address.cdm.json/Address')<line_sep>manifest_abstract.entities.append('CarePlan' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/CarePlan.cdm.json/CarePlan')<line_sep>manifest_abstract.entities.append('CodeableConcept' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/CodeableConcept.cdm.json/CodeableConcept')<line_sep>manifest_abstract.entities.append('Contact' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Contact.cdm.json/Contact')<line_sep>manifest_abstract.entities.append('Device' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Device.cdm.json/Device')<line_sep>manifest_abstract.entities.append('EmrAppointment' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/EmrAppointment.cdm.json/EmrAppointment')<line_sep>manifest_abstract.entities.append('Encounter' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Encounter.cdm.json/Encounter')<line_sep>manifest_abstract.entities.append('EpisodeOfCare' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/EpisodeOfCare.cdm.json/EpisodeOfCare')<line_sep>manifest_abstract.entities.append('Location' 'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Location.cdm.json/Location')<line_sep># Add the temp manifest to the root of the local documents in the corpus. local_root=cdm_corpus.storage.fetch_root_folder('local')<line_sep>local_root.documents.append(manifest_abstract)<line_sep># Create the resolved version of everything in the root folder too. print('Resolve the placeholder')<line_sep>manifest_resolved=<await>manifest_abstract.create_resolved_manifest_async('default' '')<line_sep># Add an import to the foundations doc so the traits about partitons will resolve nicely. manifest_resolved.imports.append('cdm:/foundations.cdm.json' '')<line_sep>print('Save the documents')<for_stmt>e_def manifest_resolved.entities# Get the entity being pointed at. <block_start>local_e_def=cast(CdmLocalEntityDeclarationDefinition e_def)<line_sep># Turns a relative path from manifest_resolved into an absolute path. ent_def=cast(CdmEntityDefinition <await>cdm_corpus.fetch_object_async(local_e_def.entity_path manifest_resolved))<line_sep># Make a fake partition, just to demo that. 
part=cdm_corpus.make_object(CdmObjectType.DATA_PARTITION_DEF '{}-data-description'.format(ent_def.entity_name))<line_sep># type: CdmDataPartitionDefinition local_e_def.data_partitions.append(part)<line_sep>part.explanation='not real data, just for demo'<line_sep># Define the location of the partition, relative to the manifest local_location='local:/{}/partition-data.csv'.format(ent_def.entity_name)<line_sep>part.location=cdm_corpus.storage.create_relative_corpus_path(local_location manifest_resolved)<line_sep># Add trait to partition for csv params. csv_trait=part.exhibits_traits.append('is.partition.format.CSV' <false>)<line_sep>csv_trait.arguments.append('columnHeaders' 'true')<line_sep>csv_trait.arguments.append('delimiter' ',')<line_sep># Get the actual location of the partition file from the corpus. part_path=cdm_corpus.storage.corpus_path_to_adapter_path(local_location)<line_sep># Make a fake file with nothing but header for columns. header=','.join([att.name<for>att ent_def.attributes])<line_sep>os.makedirs(cdm_corpus.storage.corpus_path_to_adapter_path('local:/{}'.format(ent_def.entity_name)) exist_ok=<true>)<with_stmt>open(part_path 'w')<as>file<block_start>file.write(header)<block_end><block_end><await>manifest_resolved.save_as_async('{}.manifest.cdm.json'.format(manifest_resolved.manifest_name) <true>)<block_end><block_end>
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" BGP KeepAlive message"""<import_stmt>struct<import_from_stmt>yabgp.common.exception MessageHeaderError<import_from_stmt>yabgp.common.constants ERR_MSG_HDR_BAD_MSG_LEN<class_stmt>KeepAlive(object)<block_start>""" KEEPALIVE messages are exchanged between peers often enough not to cause the Hold Timer to expire """<line_sep>MSG_KEEPALIVE=4<line_sep>@staticmethod<def_stmt>parse msg<block_start>""" Parse keepalive message :param msg: input raw binary message data """<if_stmt>len(msg)<ne>0<block_start><raise>MessageHeaderError(sub_error=ERR_MSG_HDR_BAD_MSG_LEN data='')<block_end><block_end>@staticmethod<def_stmt>construct_header <block_start>"""Construct the mandatory 19-byte BGP message header """<line_sep># 16-octet 2-octet 1-octet # ---------------+--------+---------+------+ # Marker | Length | Type | msg | # ---------------+--------+---------+------+ <return>b'\xff'<times>16+struct.pack('!HB' 19 4)<block_end><def_stmt>construct self<block_start>""" Construct a keepalive message """<line_sep><return>self.construct_header()<block_end><block_end>
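# Added usage sketch (not part of the original module): a KEEPALIVE is just the 19-byte
# BGP header (16-byte all-ones marker, 2-byte length, 1-byte type 4) with no body,
# assuming the KeepAlive class above is in scope.

ka = KeepAlive()
assert ka.construct() == b'\xff' * 16 + b'\x00\x13\x04'  # length 19, message type 4
KeepAlive.parse(b'')       # an empty body is the only valid KEEPALIVE payload
# KeepAlive.parse(b'\x00') would raise MessageHeaderError(ERR_MSG_HDR_BAD_MSG_LEN)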
<import_stmt>pytest<import_stmt>scanpy<as>sc<import_stmt>scanpy.external<as>sce<import_from_stmt>anndata.tests.helpers assert_equal<def_stmt>test_scrublet <block_start>""" Test that Scrublet run works. Check that scrublet runs and detects some doublets. """<line_sep>pytest.importorskip("scrublet")<line_sep>adata=sc.datasets.pbmc3k()<line_sep>sce.pp.scrublet(adata use_approx_neighbors=<false>)<line_sep># replace assertions by conditions <assert_stmt>"predicted_doublet"<in>adata.obs.columns<assert_stmt>"doublet_score"<in>adata.obs.columns<assert_stmt>adata.obs["predicted_doublet"].any() "Expect some doublets to be identified"<block_end><def_stmt>test_scrublet_dense <block_start>""" Test that Scrublet works for dense matrices. Check that scrublet runs and detects some doublets when a dense matrix is supplied. """<line_sep>pytest.importorskip("scrublet")<line_sep>adata=sc.datasets.paul15()[:500].copy()<line_sep>sce.pp.scrublet(adata use_approx_neighbors=<false>)<line_sep># replace assertions by conditions <assert_stmt>"predicted_doublet"<in>adata.obs.columns<assert_stmt>"doublet_score"<in>adata.obs.columns<assert_stmt>adata.obs["predicted_doublet"].any() "Expect some doublets to be identified"<block_end><def_stmt>test_scrublet_params <block_start>""" Test that Scrublet args are passed. Check that changes to parameters change scrublet results. """<line_sep>pytest.importorskip("scrublet")<line_sep># Reduce size of input for faster test adata=sc.datasets.pbmc3k()[:500].copy()<line_sep>sc.pp.filter_genes(adata min_counts=100)<line_sep># Get the default output default=sce.pp.scrublet(adata use_approx_neighbors=<false> copy=<true>)<line_sep>test_params={'expected_doublet_rate':0.1 'synthetic_doublet_umi_subsampling':0.8 'knn_dist_metric':'manhattan' 'normalize_variance':<false> 'log_transform':<true> 'mean_center':<false> 'n_prin_comps':10 'n_neighbors':2 'threshold':0.1 }<line_sep># Test each parameter and make sure something changes <for_stmt>param test_params.keys()<block_start>test_args={'adata':adata 'use_approx_neighbors':<false> 'copy':<true> param:test_params[param] }<line_sep>curr=sc.external.pp.scrublet(**test_args)<with_stmt>pytest.raises(AssertionError)<block_start>assert_equal(default curr)<block_end><block_end><block_end><def_stmt>test_scrublet_simulate_doublets <block_start>""" Test that standalone Scrublet doublet simulation works. Check that doublet simulation runs and simulates some doublets.. """<line_sep>pytest.importorskip("scrublet")<line_sep>adata_obs=sc.datasets.pbmc3k()<line_sep>sc.pp.filter_genes(adata_obs min_cells=3)<line_sep>sc.pp.filter_cells(adata_obs min_genes=3)<line_sep>adata_obs.layers['raw']=adata_obs.X<line_sep>sc.pp.normalize_total(adata_obs)<line_sep>logged=sc.pp.log1p(adata_obs copy=<true>)<line_sep>_=sc.pp.highly_variable_genes(logged)<line_sep>adata_obs=adata_obs[: logged.var['highly_variable']]<line_sep>adata_sim=sce.pp.scrublet_simulate_doublets(adata_obs layer='raw')<assert_stmt>'doublet_parents'<in>adata_sim.obsm.keys()<block_end>
# Copyright 2009-2017 <NAME>. # This program is distributed under the MIT license. <class_stmt>ReasonedBool<block_start>''' A variation on `bool` that also gives a `.reason`. This is useful when you want to say "This is False because... (reason.)" Unfortunately this class is not a subclass of `bool`, since Python doesn't allow subclassing `bool`. '''<def_stmt>__init__ self value reason=<none><block_start>''' Construct the `ReasonedBool`. `reason` is the reason *why* it has a value of `True` or `False`. It is usually a string, but is allowed to be of any type. '''<line_sep>self.value=bool(value)<line_sep>self.reason=reason<block_end><def_stmt>__repr__ self<block_start><if_stmt>self.reason<is><not><none><block_start><return>f'<{self.value} because {repr(self.reason)}>'<block_end><else_stmt># self.reason is None <block_start><return>f'<{self.value} with no reason>'<block_end><block_end><def_stmt>__eq__ self other<block_start><return>bool(self)<eq>other<block_end><def_stmt>__hash__ self<block_start><return>hash(bool(self))<block_end><def_stmt>__ne__ self other<block_start><return><not>self.__eq__(other)<block_end><def_stmt>__bool__ self<block_start><return>self.value<block_end><block_end>
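A short usage sketch of the class above; the import path is an assumption, adjust it to wherever ReasonedBool lives in your project.

# Hypothetical import path; ReasonedBool is the class defined above.
from reasoned_bool import ReasonedBool

ok = ReasonedBool(False, reason='the config file is missing')
if not ok:                        # __bool__ makes it behave like a plain bool
    print(repr(ok))               # <False because 'the config file is missing'>
assert ok == False                # __eq__ compares against ordinary booleans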
<class_stmt>PytraitError(RuntimeError)<block_start><pass><block_end><class_stmt>DisallowedInitError(PytraitError)<block_start><pass><block_end><class_stmt>NonMethodAttrError(PytraitError)<block_start><pass><block_end><class_stmt>MultipleImplementationError(PytraitError)<block_start><pass><block_end><class_stmt>InheritanceError(PytraitError)<block_start><pass><block_end><class_stmt>NamingConventionError(PytraitError)<block_start><pass><block_end>
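A small sketch of why the flat exception hierarchy above is useful: callers can catch the common PytraitError base instead of enumerating every specific failure. The import path and the register_trait helper are assumptions for illustration only.

# Hypothetical import path and helper; only the exception classes come from above.
from pytrait.errors import PytraitError, NamingConventionError

def register_trait(name: str) -> None:
    # Illustrative rule: trait classes are expected to end in "Trait".
    if not name.endswith("Trait"):
        raise NamingConventionError(f"{name!r} does not follow the naming convention")

try:
    register_trait("Walks")
except PytraitError as exc:       # also catches NamingConventionError
    print(f"trait rejected: {exc}")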
<import_stmt>kfp.dsl<as>dsl<import_stmt>kfp.gcp<as>gcp<import_stmt>kfp.onprem<as>onprem<import_from_stmt>string Template<import_stmt>json<line_sep>@dsl.pipeline(name='Simple sci-kit KF Pipeline' description='A simple end to end sci-kit seldon kf pipeline')<def_stmt>mnist_train_pipeline docker_org="index.docker.io/seldonio" train_container_version="0.2" serve_container_version="0.1"<block_start>vop=dsl.VolumeOp(name="create_pvc" resource_name="nfs-1" modes=dsl.VOLUME_MODE_RWO size="10G")<line_sep>volume=vop.volume<line_sep>train=dsl.ContainerOp(name='sk-train' image=f"{docker_org}/skmnistclassifier_trainer:{train_container_version}" pvolumes={"/data":volume})<line_sep>seldon_serving_json_template=Template(""" { "apiVersion": "machinelearning.seldon.io/v1alpha2", "kind": "SeldonDeployment", "metadata": { "labels": { "app": "seldon" }, "name": "mnist-classifier" }, "spec": { "annotations": { "deployment_version": "v1", "project_name": "MNIST Example" }, "name": "mnist-classifier", "predictors": [ { "annotations": { "predictor_version": "v1" }, "componentSpecs": [ { "spec": { "containers": [ { "image": "$dockerreposerving:$dockertagserving", "imagePullPolicy": "Always", "name": "mnist-classifier", "volumeMounts": [ { "mountPath": "/data", "name": "persistent-storage" } ] } ], "terminationGracePeriodSeconds": 1, "volumes": [ { "name": "persistent-storage", "persistentVolumeClaim": { "claimName": "$modelpvc" } } ] } } ], "graph": { "children": [], "endpoint": { "type": "REST" }, "name": "mnist-classifier", "type": "MODEL" }, "name": "mnist-classifier", "replicas": 1 } ] } } """)<line_sep>seldon_serving_json=seldon_serving_json_template.substitute({'dockerreposerving':f"{docker_org}/skmnistclassifier_runtime" 'dockertagserving':str(serve_container_version) 'modelpvc':vop.outputs["name"]})<line_sep>seldon_deployment=json.loads(seldon_serving_json)<line_sep>serve=dsl.ResourceOp(name='serve' k8s_resource=seldon_deployment success_condition='status.state == Available').after(train)<block_end># If we're called directly create an expirement and run <if_stmt>__name__<eq>'__main__'<block_start>pipeline_func=mnist_train_pipeline<line_sep>pipeline_filename=pipeline_func.__name__+'.pipeline.zip'<import_stmt>kfp.compiler<as>compiler<line_sep>compiler.Compiler().compile(pipeline_func pipeline_filename)<line_sep>expirement_name="cheese"<line_sep>experiment=client.create_experiment(expirement_name)<line_sep>run_name=pipeline_func.__name__+' run'<line_sep>run_result=client.run_pipeline(experiment.id run_name pipeline_filename arguments)<line_sep>print(run_result)<block_end>
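The Seldon deployment above is produced by filling a string.Template, whose $placeholders are replaced by substitute(). A self-contained illustration of that mechanism, with made-up values:

# Minimal illustration of the string.Template mechanism used above to render
# the Seldon deployment JSON; the values here are only examples.
import json
from string import Template

template = Template('{"image": "$dockerreposerving:$dockertagserving", "claimName": "$modelpvc"}')
rendered = template.substitute({
    'dockerreposerving': 'index.docker.io/seldonio/skmnistclassifier_runtime',
    'dockertagserving': '0.1',
    'modelpvc': 'nfs-1',
})
print(json.loads(rendered))       # parsed dict with the placeholders filled in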
# Copyright 2020 Magic Leap, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Originating Author: <NAME> (<EMAIL>) <import_stmt>argparse<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>atlas.data SceneDataset parse_splits_list<import_from_stmt>atlas.model VoxelNet<import_stmt>atlas.transforms<as>transforms<def_stmt>process info_file model num_frames save_path total_scenes_index total_scenes_count<block_start>""" Run the netork on a scene and save output Args: info_file: path to info_json file for the scene model: pytorch model that implemets Atlas frames: number of frames to use in reconstruction (-1 for all) save_path: where to save outputs total_scenes_index: used to print which scene we are on total_scenes_count: used to print the total number of scenes to process """<line_sep>voxel_scale=model.voxel_sizes[0]<line_sep>dataset=SceneDataset(info_file voxel_sizes=[voxel_scale] voxel_types=model.voxel_types num_frames=num_frames)<line_sep># compute voxel origin <if_stmt>'file_name_vol_%02d'%voxel_scale<in>dataset.info# compute voxel origin from ground truth <block_start>tsdf_trgt=dataset.get_tsdf()['vol_%02d'%voxel_scale]<line_sep>voxel_size=float(voxel_scale)/100<line_sep># shift by integer number of voxels for padding shift=torch.tensor([.5 .5 .5])<floordiv>voxel_size<line_sep>offset=tsdf_trgt.origin-shift<times>voxel_size<block_end><else_stmt># use default origin # assume floor is a z=0 so pad bottom a bit <block_start>offset=torch.tensor([0 0 -.5])<block_end>T=torch.eye(4)<line_sep>T[:3 3]=offset<line_sep>transform=transforms.Compose([transforms.ResizeImage((640 480)) transforms.ToTensor() transforms.TransformSpace(T model.voxel_dim_val [0 0 0]) transforms.IntrinsicsPoseToProjection() ])<line_sep>dataset.transform=transform<line_sep>dataloader=torch.utils.data.DataLoader(dataset batch_size=<none> batch_sampler=<none> num_workers=2)<line_sep>scene=dataset.info['scene']<line_sep>model.initialize_volume()<line_sep>torch.cuda.empty_cache()<for_stmt>j,d enumerate(dataloader)# logging progress <block_start><if_stmt>j%25<eq>0<block_start>print(total_scenes_index total_scenes_count dataset.info['dataset'] scene j len(dataloader))<block_end>model.inference1(d['projection'].unsqueeze(0).cuda() image=d['image'].unsqueeze(0).cuda())<block_end>outputs,losses=model.inference2()<line_sep>tsdf_pred=model.postprocess(outputs)[0]<line_sep># TODO: set origin in model... make consistent with offset above? 
tsdf_pred.origin=offset.view(1 3).cuda()<if_stmt>'semseg'<in>tsdf_pred.attribute_vols<block_start>mesh_pred=tsdf_pred.get_mesh('semseg')<line_sep># save vertex attributes seperately since trimesh doesn't np.savez(os.path.join(save_path '%s_attributes.npz'%scene) **mesh_pred.vertex_attributes)<block_end><else_stmt><block_start>mesh_pred=tsdf_pred.get_mesh()<block_end>tsdf_pred.save(os.path.join(save_path '%s.npz'%scene))<line_sep>mesh_pred.export(os.path.join(save_path '%s.ply'%scene))<block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser(description="Atlas Testing")<line_sep>parser.add_argument("--model" required=<true> metavar="FILE" help="path to checkpoint")<line_sep>parser.add_argument("--scenes" default="data/scannet_test.txt" help="which scene(s) to run on")<line_sep>parser.add_argument("--num_frames" default=-1 type=int help="number of frames to use (-1 for all)")<line_sep>parser.add_argument("--voxel_dim" nargs=3 default=[-1 -1 -1] type=int help="override voxel dim")<line_sep>args=parser.parse_args()<line_sep># get all the info_file.json's from the command line # .txt files contain a list of info_file.json's info_files=parse_splits_list(args.scenes)<line_sep>model=VoxelNet.load_from_checkpoint(args.model)<line_sep>model=model.cuda().eval()<line_sep>torch.set_grad_enabled(<false>)<line_sep># overwrite default values of voxel_dim_test <if_stmt>args.voxel_dim[0]<ne>-1<block_start>model.voxel_dim_test=args.voxel_dim<block_end># TODO: implement voxel_dim_test model.voxel_dim_val=model.voxel_dim_test<line_sep>model_name=os.path.splitext(os.path.split(args.model)[1])[0]<line_sep>save_path=os.path.join(model.cfg.LOG_DIR model.cfg.TRAINER.NAME model.cfg.TRAINER.VERSION 'test_'+model_name)<if_stmt>args.num_frames<g>-1<block_start>save_path='%s_%d'%(save_path args.num_frames)<block_end>os.makedirs(save_path exist_ok=<true>)<for_stmt>i,info_file enumerate(info_files)# run model on each scene <block_start>process(info_file model args.num_frames save_path i len(info_files))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_from_future_stmt> print_function<import_from_stmt>.base TestBase<import_from_stmt>..core OpCode<import_from_stmt>..core MemoryBuffer<import_from_stmt>..core PassRegistry<import_from_stmt>..core Context<import_from_stmt>..core Module<import_from_stmt>..bit_reader parse_bitcode<class_stmt>TestBitReader(TestBase)<block_start><def_stmt>test_parse_bitcode self<block_start>source=self.get_test_bc()<line_sep>m=parse_bitcode(MemoryBuffer(filename=source))<line_sep>print(m.target)<line_sep>print(m.datalayout)<block_end><block_end>
<import_stmt>argparse os<import_stmt>pdb<import_stmt>torch<import_stmt>math random<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_from_stmt>torch.autograd Variable<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>lapsrn_wgan _netG _netD L1_Charbonnier_loss<import_from_stmt>dataset DatasetFromHdf5<import_from_stmt>torchvision models transforms<import_stmt>torch.utils.model_zoo<as>model_zoo<line_sep># Training settings parser=argparse.ArgumentParser(description="PyTorch LapSRN WGAN")<line_sep>parser.add_argument("--batchSize" type=int default=32 help="training batch size")<line_sep>parser.add_argument("--nEpochs" type=int default=400 help="number of epochs to train for")<line_sep>parser.add_argument('--lrG' type=float default=1e-4 help='Learning Rate. Default=1e-4')<line_sep>parser.add_argument('--lrD' type=float default=1e-4 help='Learning Rate. Default=1e-4')<line_sep>parser.add_argument("--step" type=int default=50 help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10")<line_sep>parser.add_argument("--cuda" action="store_true" help="Use cuda?")<line_sep>parser.add_argument("--resume" default="" type=str help="Path to checkpoint (default: none)")<line_sep>parser.add_argument("--start-epoch" default=1 type=int help="Manual epoch number (useful on restarts)")<line_sep>parser.add_argument("--threads" type=int default=1 help="Number of threads for data loader to use, Default: 1")<line_sep>parser.add_argument("--momentum" default=0.9 type=float help="Momentum, Default: 0.9")<line_sep>parser.add_argument("--weight-decay" "--wd" default=1e-4 type=float help="weight decay, Default: 1e-4")<line_sep>parser.add_argument("--pretrained" default="" type=str help="path to pretrained model (default: none)")<line_sep>parser.add_argument('--clamp_lower' type=float default=-0.01)<line_sep>parser.add_argument('--clamp_upper' type=float default=0.01)<def_stmt>main <block_start><global>opt model<line_sep>opt=parser.parse_args()<line_sep>print(opt)<line_sep>cuda=opt.cuda<if_stmt>cuda<and><not>torch.cuda.is_available()<block_start><raise>Exception("No GPU found, please run without --cuda")<block_end>opt.seed=random.randint(1 10000)<line_sep>print("Random Seed: " opt.seed)<line_sep>torch.manual_seed(opt.seed)<if_stmt>cuda<block_start>torch.cuda.manual_seed(opt.seed)<block_end>cudnn.benchmark=<true><line_sep>print("===> Loading datasets")<line_sep>train_set=DatasetFromHdf5("data/lap_pry_x4_small.h5")<line_sep>training_data_loader=DataLoader(dataset=train_set num_workers=opt.threads batch_size=opt.batchSize shuffle=<true>)<line_sep>print('===> Building generator model')<line_sep>netG=_netG()<line_sep>print('===> Building discriminator model')<line_sep>netD=_netD()<line_sep>print('===> Loading VGG model')<line_sep>model_urls={"vgg19":"https://download.pytorch.org/models/vgg19-dcbb9e9d.pth"}<line_sep>netVGG=models.vgg19()<line_sep>netVGG.load_state_dict(model_zoo.load_url(model_urls['vgg19']))<line_sep>weight=torch.FloatTensor(64 1 3 3)<line_sep>parameters=list(netVGG.parameters())<for_stmt>i range(64)<block_start>weight[i : : :]=parameters[0].data[i].mean(0)<block_end>bias=parameters[1].data<class_stmt>_content_model(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(_content_model self).__init__()<line_sep>self.conv=conv2d=nn.Conv2d(1 64 kernel_size=3 
padding=1)<line_sep>self.feature=nn.Sequential(*list(netVGG.features.children())[1:-1])<line_sep>self._initialize_weights()<block_end><def_stmt>forward self x<block_start>out=self.conv(x)<line_sep>out=self.feature(out)<line_sep><return>out<block_end><def_stmt>_initialize_weights self<block_start>self.conv.weight.data.copy_(weight)<line_sep>self.conv.bias.data.copy_(bias)<block_end><block_end>netContent=_content_model()<line_sep>print('===> Building Loss')<line_sep>criterion=L1_Charbonnier_loss()<line_sep>print("===> Setting GPU")<if_stmt>cuda<block_start>netG=netG.cuda()<line_sep>netD=netD.cuda()<line_sep>netContent=netContent.cuda()<line_sep>criterion=criterion.cuda()<block_end># optionally resume from a checkpoint <if_stmt>opt.resume<block_start><if_stmt>os.path.isfile(opt.resume)<block_start>print("=> loading checkpoint '{}'".format(opt.resume))<line_sep>checkpoint=torch.load(opt.resume)<line_sep>opt.start_epoch=checkpoint["epoch"]+1<line_sep>netG.load_state_dict(checkpoint["model"].state_dict())<block_end><else_stmt><block_start>print("=> no checkpoint found at '{}'".format(opt.resume))<block_end><block_end># optionally copy weights from a checkpoint <if_stmt>opt.pretrained<block_start><if_stmt>os.path.isfile(opt.pretrained)<block_start>print("=> loading model '{}'".format(opt.pretrained))<line_sep>weights=torch.load(opt.pretrained)<line_sep>netG.load_state_dict(weights['model'].state_dict())<block_end><else_stmt><block_start>print("=> no model found at '{}'".format(opt.pretrained))<block_end><block_end>print("===> Setting Optimizer")<line_sep>optimizerD=optim.RMSprop(netD.parameters() lr=opt.lrD)<line_sep>optimizerG=optim.RMSprop(netG.parameters() lr=opt.lrG)<line_sep>print("===> Training")<for_stmt>epoch range(opt.start_epoch opt.nEpochs+1)<block_start>train(training_data_loader optimizerG optimizerD netG netD netContent criterion epoch)<line_sep>save_checkpoint(netG epoch)<block_end><block_end><def_stmt>adjust_learning_rate optimizer epoch<block_start>"""Sets the learning rate to the initial LR decayed by 10 every 10 epochs"""<line_sep>lr=opt.lr<times>(0.1<power>(epoch<floordiv>opt.step))<line_sep><return>lr<block_end><def_stmt>train training_data_loader optimizerG optimizerD netG netD netContent criterion epoch<block_start>netG.train()<line_sep>netD.train()<line_sep>one=torch.FloatTensor([1.])<line_sep>mone=one<times>-1<line_sep>content_weight=torch.FloatTensor([1.])<line_sep>adversarial_weight=torch.FloatTensor([1.])<for_stmt>iteration,batch enumerate(training_data_loader 1)<block_start>input,label_x2,label_x4=Variable(batch[0]) Variable(batch[1] requires_grad=<false>) Variable(batch[2] requires_grad=<false>)<if_stmt>opt.cuda<block_start>input=input.cuda()<line_sep>label_x2=label_x2.cuda()<line_sep>label_x4=label_x4.cuda()<line_sep>one,mone,content_weight,adversarial_weight=one.cuda() mone.cuda() content_weight.cuda() adversarial_weight.cuda()<block_end>############################ # (1) Update D network: loss = D(x)) - D(G(z)) ########################### # train with real errD_real=netD(label_x4)<line_sep>errD_real.backward(one retain_graph=<true>)<line_sep># train with fake input_G=Variable(input.data volatile=<true>)<line_sep>fake_x4=Variable(netG(input_G)[1].data)<line_sep>fake_D=fake_x4<line_sep>errD_fake=netD(fake_D)<line_sep>errD_fake.backward(mone)<line_sep>errD=errD_real-errD_fake<line_sep>optimizerD.step()<for_stmt>p netD.parameters()# reset requires_grad <block_start>p.data.clamp_(opt.clamp_lower 
opt.clamp_upper)<block_end>netD.zero_grad()<line_sep>netG.zero_grad()<line_sep>netContent.zero_grad()<line_sep>############################ # (2) Update G network: loss = D(G(z)) ########################### fake_D_x2,fake_D_x4=netG(input)<line_sep>content_fake_x2=netContent(fake_D_x2)<line_sep>content_real_x2=netContent(label_x2)<line_sep>content_real_x2=Variable(content_real_x2.data)<line_sep>content_loss_x2=criterion(content_fake_x2 content_real_x2)<line_sep>content_loss_x2.backward(content_weight retain_graph=<true>)<line_sep>content_fake_x4=netContent(fake_D_x4)<line_sep>content_real_x4=netContent(label_x4)<line_sep>content_real_x4=Variable(content_real_x4.data)<line_sep>content_loss_x4=criterion(content_fake_x4 content_real_x4)<line_sep>content_loss_x4.backward(content_weight retain_graph=<true>)<line_sep>content_loss=content_loss_x2+content_loss_x4<line_sep>adversarial_loss=netD(fake_D_x4)<line_sep>adversarial_loss.backward(adversarial_weight)<line_sep>optimizerG.step()<line_sep>netD.zero_grad()<line_sep>netG.zero_grad()<line_sep>netContent.zero_grad()<if_stmt>iteration%10<eq>0<block_start>print("===> Epoch[{}]({}/{}): LossD: {:.10f} [{:.10f} - {:.10f}] LossG: [{:.10f} + {:.10f}]".format(epoch iteration len(training_data_loader) errD.data[0] errD_real.data[0] errD_fake.data[0] adversarial_loss.data[0] content_loss.data[0]))<block_end><block_end><block_end><def_stmt>save_checkpoint model epoch<block_start>model_folder="checkpoint/"<line_sep>model_out_path=model_folder+"lapwgan_model_epoch_{}.pth".format(epoch)<line_sep>state={"epoch":epoch "model":model}<if_stmt><not>os.path.exists(model_folder)<block_start>os.makedirs(model_folder)<block_end>torch.save(state model_out_path)<line_sep>print("Checkpoint saved to {}".format(model_out_path))<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# nasty hack to deal with issue #46 <import_stmt>os<import_stmt>sys<line_sep>sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>time<import_from_stmt>rl_coach.memories.non_episodic.differentiable_neural_dictionary QDND<import_stmt>tensorflow<as>tf<line_sep>NUM_ACTIONS=3<line_sep>NUM_DND_ENTRIES_TO_ADD=10000<line_sep>EMBEDDING_SIZE=512<line_sep>NUM_SAMPLED_EMBEDDINGS=500<line_sep>NUM_NEIGHBORS=10<line_sep>DND_SIZE=500000<line_sep>@pytest.fixture()<def_stmt>dnd <block_start><return>QDND(DND_SIZE EMBEDDING_SIZE NUM_ACTIONS 0.1 key_error_threshold=0 learning_rate=0.0001 num_neighbors=NUM_NEIGHBORS)<block_end>@pytest.mark.unit_test<def_stmt>test_random_sample_from_dnd dnd:QDND# store single non terminal transition <block_start>embeddings=[np.random.rand(EMBEDDING_SIZE)<for>j range(NUM_DND_ENTRIES_TO_ADD)]<line_sep>actions=[np.random.randint(NUM_ACTIONS)<for>j range(NUM_DND_ENTRIES_TO_ADD)]<line_sep>values=[np.random.rand()<for>j range(NUM_DND_ENTRIES_TO_ADD)]<line_sep>dnd.add(embeddings actions values)<line_sep>dnd_embeddings,dnd_values,dnd_indices=dnd.query(embeddings[0:10] 0 NUM_NEIGHBORS)<line_sep># calculate_normalization_factor sampled_embeddings=dnd.sample_embeddings(NUM_SAMPLED_EMBEDDINGS)<line_sep>coefficient=1/(NUM_SAMPLED_EMBEDDINGS<times>(NUM_SAMPLED_EMBEDDINGS-1.0))<line_sep>tf_current_embedding=tf.placeholder(tf.float32 shape=(EMBEDDING_SIZE) name='current_embedding')<line_sep>tf_other_embeddings=tf.placeholder(tf.float32 shape=(NUM_SAMPLED_EMBEDDINGS-1 EMBEDDING_SIZE) name='other_embeddings')<line_sep>sub=tf_current_embedding-tf_other_embeddings<line_sep>square=tf.square(sub)<line_sep>result=tf.reduce_sum(square)<line_sep>########################### # more efficient method ########################### sampled_embeddings_expanded=tf.placeholder(tf.float32 shape=(1 NUM_SAMPLED_EMBEDDINGS EMBEDDING_SIZE) name='sampled_embeddings_expanded')<line_sep>sampled_embeddings_tiled=tf.tile(sampled_embeddings_expanded (sampled_embeddings_expanded.shape[1] 1 1))<line_sep>sampled_embeddings_transposed=tf.transpose(sampled_embeddings_tiled (1 0 2))<line_sep>sub2=sampled_embeddings_tiled-sampled_embeddings_transposed<line_sep>square2=tf.square(sub2)<line_sep>result2=tf.reduce_sum(square2)<line_sep>config=tf.ConfigProto()<line_sep>config.allow_soft_placement=<true># allow placing ops on cpu if they are not fit for gpu config.gpu_options.allow_growth=<true># allow the gpu memory allocated for the worker to grow if needed sess=tf.Session(config=config)<line_sep>sum1=0<line_sep>start=time.time()<for_stmt>i range(NUM_SAMPLED_EMBEDDINGS)<block_start>curr_sampled_embedding=sampled_embeddings[i]<line_sep>other_embeddings=np.delete(sampled_embeddings i 0)<line_sep>sum1<augadd>sess.run(result feed_dict={tf_current_embedding:curr_sampled_embedding tf_other_embeddings:other_embeddings})<block_end>print("1st method: {} sec".format(time.time()-start))<line_sep>start=time.time()<line_sep>sum2=sess.run(result2 feed_dict={sampled_embeddings_expanded:np.expand_dims(sampled_embeddings 0)})<line_sep>print("2nd method: {} sec".format(time.time()-start))<line_sep># validate that results are equal print("sum1 = {}, sum2 = {}".format(sum1 sum2))<line_sep>norm_factor=-0.5/(coefficient<times>sum2)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_random_sample_from_dnd(dnd())<block_end>
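The test above compares a per-embedding loop against a tiled/broadcast formulation of the same pairwise squared-distance sum. The equivalence is easy to check with NumPy alone; a sketch (not part of rl_coach):

# NumPy-only sketch of the equivalence the DND test checks: summing squared
# differences of every embedding against all the others, looped vs. vectorised.
import numpy as np

rng = np.random.default_rng(0)
emb = rng.random((50, 8))                     # 50 embeddings of size 8

loop_sum = sum(
    np.sum((emb[i] - np.delete(emb, i, axis=0)) ** 2) for i in range(len(emb))
)
vec_sum = np.sum((emb[:, None, :] - emb[None, :, :]) ** 2)  # i == j terms are zero

assert np.isclose(loop_sum, vec_sum)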
# Copyright (c) Facebook, Inc. and its affiliates. <import_stmt>os<import_from_stmt>typing Optional<import_stmt>pkg_resources<import_stmt>torch<import_from_stmt>detectron2.checkpoint DetectionCheckpointer<import_from_stmt>detectron2.config CfgNode LazyConfig get_cfg instantiate<import_from_stmt>detectron2.modeling build_model<class_stmt>_ModelZooUrls(object)<block_start>""" Mapping from names to officially released Detectron2 pre-trained models. """<line_sep>S3_PREFIX="https://dl.fbaipublicfiles.com/detectron2/"<line_sep># format: {config_path.yaml} -> model_id/model_final_{commit}.pkl CONFIG_PATH_TO_URL_SUFFIX={# COCO Detection with Faster R-CNN "COCO-Detection/faster_rcnn_R_50_C4_1x":"137257644/model_final_721ade.pkl" "COCO-Detection/faster_rcnn_R_50_DC5_1x":"137847829/model_final_51d356.pkl" "COCO-Detection/faster_rcnn_R_50_FPN_1x":"137257794/model_final_b275ba.pkl" "COCO-Detection/faster_rcnn_R_50_C4_3x":"137849393/model_final_f97cb7.pkl" "COCO-Detection/faster_rcnn_R_50_DC5_3x":"137849425/model_final_68d202.pkl" "COCO-Detection/faster_rcnn_R_50_FPN_3x":"137849458/model_final_280758.pkl" "COCO-Detection/faster_rcnn_R_101_C4_3x":"138204752/model_final_298dad.pkl" "COCO-Detection/faster_rcnn_R_101_DC5_3x":"138204841/model_final_3e0943.pkl" "COCO-Detection/faster_rcnn_R_101_FPN_3x":"137851257/model_final_f6e8b1.pkl" "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x":"139173657/model_final_68b088.pkl" # COCO Detection with RetinaNet "COCO-Detection/retinanet_R_50_FPN_1x":"190397773/model_final_bfca0b.pkl" "COCO-Detection/retinanet_R_50_FPN_3x":"190397829/model_final_5bd44e.pkl" "COCO-Detection/retinanet_R_101_FPN_3x":"190397697/model_final_971ab9.pkl" # COCO Detection with RPN and Fast R-CNN "COCO-Detection/rpn_R_50_C4_1x":"137258005/model_final_450694.pkl" "COCO-Detection/rpn_R_50_FPN_1x":"137258492/model_final_02ce48.pkl" "COCO-Detection/fast_rcnn_R_50_FPN_1x":"137635226/model_final_e5f7ce.pkl" # COCO Instance Segmentation Baselines with Mask R-CNN "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x":"137259246/model_final_9243eb.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x":"137260150/model_final_4f86c3.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x":"137260431/model_final_a54504.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x":"137849525/model_final_4ce675.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x":"137849551/model_final_84107b.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x":"137849600/model_final_f10217.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x":"138363239/model_final_a2914c.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x":"138363294/model_final_0464b7.pkl" "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x":"138205316/model_final_a3ec72.pkl" "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x":"139653917/model_final_2d9806.pkl" # noqa # COCO Person Keypoint Detection Baselines with Keypoint R-CNN "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x":"137261548/model_final_04e291.pkl" "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x":"137849621/model_final_a6e10b.pkl" "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x":"138363331/model_final_997cc7.pkl" "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x":"139686956/model_final_5ad38f.pkl" # COCO Panoptic Segmentation Baselines with Panoptic FPN "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x":"139514544/model_final_dbfeb4.pkl" "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x":"139514569/model_final_c10459.pkl" "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x":"139514519/model_final_cafdb1.pkl" # LVIS Instance 
Segmentation Baselines with Mask R-CNN "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x":"144219072/model_final_571f7c.pkl" # noqa "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x":"144219035/model_final_824ab5.pkl" # noqa "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x":"144219108/model_final_5e3439.pkl" # noqa # Cityscapes & Pascal VOC Baselines "Cityscapes/mask_rcnn_R_50_FPN":"142423278/model_final_af9cf5.pkl" "PascalVOC-Detection/faster_rcnn_R_50_C4":"142202221/model_final_b1acc2.pkl" # Other Settings "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5":"138602867/model_final_65c703.pkl" "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5":"144998336/model_final_821d0b.pkl" "Misc/cascade_mask_rcnn_R_50_FPN_1x":"138602847/model_final_e9d89b.pkl" "Misc/cascade_mask_rcnn_R_50_FPN_3x":"144998488/model_final_480dd8.pkl" "Misc/mask_rcnn_R_50_FPN_3x_syncbn":"169527823/model_final_3b3c51.pkl" "Misc/mask_rcnn_R_50_FPN_3x_gn":"138602888/model_final_dc5d9e.pkl" "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn":"138602908/model_final_01ca85.pkl" "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn":"183808979/model_final_da7b4c.pkl" "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn":"184226666/model_final_5ce33e.pkl" "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x":"139797668/model_final_be35db.pkl" "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv":"18131413/model_0039999_e76410.pkl" # noqa # D1 Comparisons "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x":"137781054/model_final_7ab50c.pkl" # noqa "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x":"137781281/model_final_62ca52.pkl" # noqa "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x":"137781195/model_final_cce136.pkl" }<line_sep>@staticmethod<def_stmt>query config_path:str<arrow>Optional[str]<block_start>""" Args: config_path: relative config filename """<line_sep>name=config_path.replace(".yaml" "").replace(".py" "")<if_stmt>name<in>_ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX<block_start>suffix=_ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name]<line_sep><return>_ModelZooUrls.S3_PREFIX+name+"/"+suffix<block_end><return><none><block_end><block_end><def_stmt>get_checkpoint_url config_path<block_start>""" Returns the URL to the model trained using the given config Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" Returns: str: a URL to the model """<line_sep>url=_ModelZooUrls.query(config_path)<if_stmt>url<is><none><block_start><raise>RuntimeError("Pretrained model for {} is not available!".format(config_path))<block_end><return>url<block_end><def_stmt>get_config_file config_path<block_start>""" Returns path to a builtin config file. Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" Returns: str: the real path to the config file. """<line_sep>cfg_file=pkg_resources.resource_filename("detectron2.model_zoo" os.path.join("configs" config_path))<if_stmt><not>os.path.exists(cfg_file)<block_start><raise>RuntimeError("{} not available in Model Zoo!".format(config_path))<block_end><return>cfg_file<block_end><def_stmt>get_config config_path trained:bool=<false><block_start>""" Returns a config object for a model in model zoo. Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights. 
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used instead; this will typically (though not always) initialize a subset of weights using an ImageNet pre-trained model, while randomly initializing the other weights. Returns: CfgNode or omegaconf.DictConfig: a config object """<line_sep>cfg_file=get_config_file(config_path)<if_stmt>cfg_file.endswith(".yaml")<block_start>cfg=get_cfg()<line_sep>cfg.merge_from_file(cfg_file)<if_stmt>trained<block_start>cfg.MODEL.WEIGHTS=get_checkpoint_url(config_path)<block_end><return>cfg<block_end><elif_stmt>cfg_file.endswith(".py")<block_start>cfg=LazyConfig.load(cfg_file)<if_stmt>trained<block_start>url=get_checkpoint_url(config_path)<if_stmt>"train"<in>cfg<and>"init_checkpoint"<in>cfg.train<block_start>cfg.train.init_checkpoint=url<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><return>cfg<block_end><block_end><def_stmt>get config_path trained:bool=<false> device:Optional[str]=<none><block_start>""" Get a model specified by relative path under Detectron2's official ``configs/`` directory. Args: config_path (str): config file name relative to detectron2's "configs/" directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" trained (bool): see :func:`get_config`. device (str or None): overwrite the device in config, if given. Returns: nn.Module: a detectron2 model. Will be in training mode. Example: :: from detectron2 import model_zoo model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) """<line_sep>cfg=get_config(config_path trained)<if_stmt>device<is><none><and><not>torch.cuda.is_available()<block_start>device="cpu"<block_end><if_stmt>device<is><not><none><and>isinstance(cfg CfgNode)<block_start>cfg.MODEL.DEVICE=device<block_end><if_stmt>isinstance(cfg CfgNode)<block_start>model=build_model(cfg)<line_sep>DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)<block_end><else_stmt><block_start>model=instantiate(cfg.model)<if_stmt>device<is><not><none><block_start>model=model.to(device)<block_end><if_stmt>"train"<in>cfg<and>"init_checkpoint"<in>cfg.train<block_start>DetectionCheckpointer(model).load(cfg.train.init_checkpoint)<block_end><block_end><return>model<block_end>
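Typical use of the helpers above, following the example in the get() docstring; this requires detectron2 to be installed and, with trained=True, network access to download the released weights.

# Usage sketch for the model zoo helpers defined above.
from detectron2 import model_zoo

cfg_path = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
print(model_zoo.get_config_file(cfg_path))      # local path to the builtin config
print(model_zoo.get_checkpoint_url(cfg_path))   # URL of the released checkpoint
model = model_zoo.get(cfg_path, trained=True)   # detectron2 model, in training mode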
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Ensemble Detector. """<import_stmt>numpy<as>np<import_from_stmt>mindarmour.utils.logger LogUtil<import_from_stmt>mindarmour.utils._check_param check_numpy_param check_param_multi_types<import_from_stmt>.detector Detector<line_sep>LOGGER=LogUtil.get_instance()<line_sep>TAG='EnsembleDetector'<class_stmt>EnsembleDetector(Detector)<block_start>""" Ensemble detector. Args: detectors (Union[tuple, list]): List of detector methods. policy (str): Decision policy, could be 'vote', 'all' or 'any'. Default: 'vote' """<def_stmt>__init__ self detectors policy="vote"<block_start>super(EnsembleDetector self).__init__()<line_sep>self._detectors=check_param_multi_types('detectors' detectors [list tuple])<line_sep>self._num_detectors=len(detectors)<line_sep>self._policy=policy<block_end><def_stmt>fit self inputs labels=<none><block_start>""" Fit detector like a machine learning model. This method is not available in this class. Args: inputs (numpy.ndarray): Data to calculate the threshold. labels (numpy.ndarray): Labels of data. Default: None. Raises: NotImplementedError: This function is not available in ensemble. """<line_sep>msg='The function fit() is not available in the class '<concat>'`EnsembleDetector`.'<line_sep>LOGGER.error(TAG msg)<line_sep><raise>NotImplementedError(msg)<block_end><def_stmt>detect self inputs<block_start>""" Detect adversarial examples from input samples. Args: inputs (numpy.ndarray): Input samples. Returns: list[int], whether a sample is adversarial. if res[i]=1, then the input sample with index i is adversarial. Raises: ValueError: If policy is not supported. """<line_sep>inputs=check_numpy_param('inputs' inputs)<line_sep>x_len=inputs.shape[0]<line_sep>counts=np.zeros(x_len)<line_sep>res=np.zeros(x_len dtype=np.int)<for_stmt>detector list(self._detectors)<block_start>idx=detector.detect(inputs)<line_sep>counts[idx]<augadd>1<block_end><if_stmt>self._policy<eq>"vote"<block_start>idx_adv=np.argwhere(counts<g>self._num_detectors/2)<block_end><elif_stmt>self._policy<eq>"all"<block_start>idx_adv=np.argwhere(counts<eq>self._num_detectors)<block_end><elif_stmt>self._policy<eq>"any"<block_start>idx_adv=np.argwhere(counts<g>0)<block_end><else_stmt><block_start>msg='Policy {} is not supported.'.format(self._policy)<line_sep>LOGGER.error(TAG msg)<line_sep><raise>ValueError(msg)<block_end>res[idx_adv]=1<line_sep><return>list(res)<block_end><def_stmt>detect_diff self inputs<block_start>""" This method is not available in this class. Args: inputs (Union[numpy.ndarray, list, tuple]): Data been used as references to create adversarial examples. Raises: NotImplementedError: This function is not available in ensemble. """<line_sep>msg='The function detect_diff() is not available in the class '<concat>'`EnsembleDetector`.'<line_sep>LOGGER.error(TAG msg)<line_sep><raise>NotImplementedError(msg)<block_end><def_stmt>transform self inputs<block_start>""" Filter adversarial noises in input samples. 
This method is not available in this class. Args: inputs (Union[numpy.ndarray, list, tuple]): Data been used as references to create adversarial examples. Raises: NotImplementedError: This function is not available in ensemble. """<line_sep>msg='The function transform() is not available in the class '<concat>'`EnsembleDetector`.'<line_sep>LOGGER.error(TAG msg)<line_sep><raise>NotImplementedError(msg)<block_end><block_end>
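A NumPy-only sketch of the voting logic in detect() above: each detector flags sample indices, per-sample counts are accumulated, and the 'vote' policy keeps samples flagged by a strict majority of detectors. The detector outputs here are made up.

# Illustrative data: 3 detectors over 6 samples, each returning flagged indices.
import numpy as np

num_samples, num_detectors = 6, 3
detections = [np.array([0, 2]), np.array([2, 4]), np.array([2, 5])]

counts = np.zeros(num_samples)
for idx in detections:
    counts[idx] += 1

res = np.zeros(num_samples, dtype=int)
res[np.argwhere(counts > num_detectors / 2)] = 1   # strict majority, as in 'vote'
print(list(res))   # [0, 0, 1, 0, 0, 0] -> only sample 2 is flagged by a majority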
<import_from_stmt>distutils.util convert_path<import_from_stmt>setuptools setup find_packages<line_sep>module='nb2xls'<line_sep># get version from __meta__ meta_ns={}<line_sep>path=convert_path(module+'/__meta__.py')<with_stmt>open(path)<as>meta_file<block_start>exec(meta_file.read() meta_ns)<block_end># read requirements.txt <with_stmt>open('requirements.txt' 'r')<as>f<block_start>content=f.read()<block_end>li_req=content.split('\n')<line_sep>install_requires=[e.strip()<for>e li_req<if>len(e)]<line_sep>name=module<line_sep>name_url=name.replace('_' '-')<line_sep>packages=[module]<line_sep>version=meta_ns['__version__']<line_sep>description='Export Jupyter notebook as an Excel xls file.'<line_sep>long_description='Export Jupyter notebook as an Excel xls file.'<line_sep>author='ideonate'<line_sep>author_email='<EMAIL>'<line_sep># github template url='https://github.com/{}/{}'.format(author name_url)<line_sep>download_url='https://github.com/{}/{}/tarball/{}'.format(author name_url version)<line_sep>keywords=['jupyter' 'nbconvert' ]<line_sep>license='MIT'<line_sep>classifiers=['Development Status :: 4 - Beta' 'License :: OSI Approved :: MIT License' 'Programming Language :: Python :: 3.5' 'Programming Language :: Python :: 3.6' 'Programming Language :: Python :: 3.7']<line_sep>include_package_data=<true><line_sep>zip_safe=<false><line_sep>extra_requirements={'test':['pytest' 'testpath' 'openpyxl' 'matplotlib']}<line_sep># ref https://packaging.python.org/tutorials/distributing-packages/ setup(name=name version=version packages=packages author=author author_email=author_email description=description long_description=long_description url=url download_url=download_url keywords=keywords license=license classifiers=classifiers include_package_data=include_package_data install_requires=install_requires extras_require=extra_requirements zip_safe=zip_safe entry_points={'nbconvert.exporters':['xls = nb2xls:XLSExporter'] })<line_sep>
<import_stmt>logging<import_from_stmt>threading Event<import_from_stmt>slack_sdk.socket_mode.client BaseSocketModeClient<import_from_stmt>slack_sdk.socket_mode.request SocketModeRequest<import_from_stmt>slack_bolt App<class_stmt>BaseSocketModeHandler<block_start>app:App# type: ignore client:BaseSocketModeClient<def_stmt>handle self client:BaseSocketModeClient req:SocketModeRequest<arrow><none><block_start><raise>NotImplementedError()<block_end><def_stmt>connect self<block_start>self.client.connect()<block_end><def_stmt>disconnect self<block_start>self.client.disconnect()<block_end><def_stmt>close self<block_start>self.client.close()<block_end><def_stmt>start self<block_start>self.connect()<if_stmt>self.app.logger.level<g>logging.INFO<block_start>print("⚡️ Bolt app is running!")<block_end><else_stmt><block_start>self.app.logger.info("⚡️ Bolt app is running!")<block_end>Event().wait()<block_end><block_end>
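A rough sketch of a concrete handler built on the BaseSocketModeHandler defined above: subclasses override handle() and acknowledge the envelope. Wiring up self.app and self.client (normally done by a concrete handler's __init__) is omitted, and the slack_sdk calls are used as I understand them, so treat this as a sketch rather than the adapter's real implementation.

# Sketch only: assumes self.app / self.client are set by the subclass __init__.
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.response import SocketModeResponse


class LoggingSocketModeHandler(BaseSocketModeHandler):
    def handle(self, client: BaseSocketModeClient, req: SocketModeRequest) -> None:
        # Log the incoming envelope, then acknowledge it so Slack stops retrying.
        self.app.logger.info("received %s envelope %s", req.type, req.envelope_id)
        client.send_socket_mode_response(SocketModeResponse(envelope_id=req.envelope_id))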
<import_stmt>os<import_stmt>secrets<import_stmt>sys<import_from_stmt>contextlib suppress<import_from_stmt>subprocess PIPE Popen TimeoutExpired<import_from_stmt>tempfile TemporaryDirectory<import_from_stmt>textwrap dedent<import_from_stmt>threading Timer<import_from_stmt>time sleep<import_stmt>pytest<line_sep># We need to interrupt the autoreloader without killing it, so that the server gets terminated # https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/ <try_stmt><block_start><import_from_stmt>signal CTRL_BREAK_EVENT<import_from_stmt>subprocess CREATE_NEW_PROCESS_GROUP<line_sep>flags=CREATE_NEW_PROCESS_GROUP<block_end><except_stmt>ImportError<block_start>flags=0<block_end>TIMER_DELAY=2<def_stmt>terminate proc<block_start><if_stmt>flags<block_start>proc.send_signal(CTRL_BREAK_EVENT)<block_end><else_stmt><block_start>proc.terminate()<block_end><block_end><def_stmt>write_app filename **runargs<block_start>text=secrets.token_urlsafe()<with_stmt>open(filename "w")<as>f<block_start>f.write(dedent(f"""\ import os from sanic import Sanic app = Sanic(__name__) app.route("/")(lambda x: x) @app.listener("after_server_start") def complete(*args): print("complete", os.getpid(), {text!r}) if __name__ == "__main__": app.run(**{runargs!r}) """))<block_end><return>text<block_end><def_stmt>write_json_config_app filename jsonfile **runargs<block_start><with_stmt>open(filename "w")<as>f<block_start>f.write(dedent(f"""\ import os from sanic import Sanic import json app = Sanic(__name__) with open("{jsonfile}", "r") as f: config = json.load(f) app.config.update_config(config) app.route("/")(lambda x: x) @app.listener("after_server_start") def complete(*args): print("complete", os.getpid(), app.config.FOO) if __name__ == "__main__": app.run(**{runargs!r}) """))<block_end><block_end><def_stmt>write_file filename<block_start>text=secrets.token_urlsafe()<with_stmt>open(filename "w")<as>f<block_start>f.write(f"""{{"FOO": "{text}"}}""")<block_end><return>text<block_end><def_stmt>scanner proc<block_start><for_stmt>line proc.stdout<block_start>line=line.decode().strip()<if_stmt>line.startswith("complete")<block_start><yield>line<block_end><block_end><block_end>argv=dict(script=[sys.executable "reloader.py"] module=[sys.executable "-m" "reloader"] sanic=[sys.executable "-m" "sanic" "--port" "42104" "--debug" "reloader.app" ] )<line_sep>@pytest.mark.parametrize("runargs, mode" [(dict(port=42102 auto_reload=<true>) "script") (dict(port=42103 debug=<true>) "module") ({} "sanic") ] )<async_keyword><def_stmt>test_reloader_live runargs mode<block_start><with_stmt>TemporaryDirectory()<as>tmpdir<block_start>filename=os.path.join(tmpdir "reloader.py")<line_sep>text=write_app(filename **runargs)<line_sep>command=argv[mode]<line_sep>proc=Popen(command cwd=tmpdir stdout=PIPE creationflags=flags)<try_stmt><block_start>timeout=Timer(TIMER_DELAY terminate [proc])<line_sep>timeout.start()<line_sep># Python apparently keeps using the old source sometimes if # we don't sleep before rewrite (pycache timestamp problem?) 
sleep(1)<line_sep>line=scanner(proc)<assert_stmt>text<in>next(line)<line_sep># Edit source code and try again text=write_app(filename **runargs)<assert_stmt>text<in>next(line)<block_end><finally_stmt><block_start>timeout.cancel()<line_sep>terminate(proc)<with_stmt>suppress(TimeoutExpired)<block_start>proc.wait(timeout=3)<block_end><block_end><block_end><block_end>@pytest.mark.parametrize("runargs, mode" [(dict(port=42102 auto_reload=<true>) "script") (dict(port=42103 debug=<true>) "module") ({} "sanic") ] )<async_keyword><def_stmt>test_reloader_live_with_dir runargs mode<block_start><with_stmt>TemporaryDirectory()<as>tmpdir<block_start>filename=os.path.join(tmpdir "reloader.py")<line_sep>config_file=os.path.join(tmpdir "config.json")<line_sep>runargs["reload_dir"]=tmpdir<line_sep>write_json_config_app(filename config_file **runargs)<line_sep>text=write_file(config_file)<line_sep>command=argv[mode]<if_stmt>mode<eq>"sanic"<block_start>command<augadd>["--reload-dir" tmpdir]<block_end>proc=Popen(command cwd=tmpdir stdout=PIPE creationflags=flags)<try_stmt><block_start>timeout=Timer(TIMER_DELAY terminate [proc])<line_sep>timeout.start()<line_sep># Python apparently keeps using the old source sometimes if # we don't sleep before rewrite (pycache timestamp problem?) sleep(1)<line_sep>line=scanner(proc)<assert_stmt>text<in>next(line)<line_sep># Edit source code and try again text=write_file(config_file)<assert_stmt>text<in>next(line)<block_end><finally_stmt><block_start>timeout.cancel()<line_sep>terminate(proc)<with_stmt>suppress(TimeoutExpired)<block_start>proc.wait(timeout=3)<block_end><block_end><block_end><block_end>
<import_stmt>os<import_stmt>pathlib<import_from_stmt>unittest mock<import_stmt>pytest<import_from_stmt>mopidy.internal xdg<line_sep>@pytest.fixture<def_stmt>environ <block_start>patcher=mock.patch.dict(os.environ clear=<true>)<line_sep><yield>patcher.start()<line_sep>patcher.stop()<block_end><def_stmt>test_cache_dir_default environ<block_start><assert_stmt>xdg.get_dirs()["XDG_CACHE_DIR"]<eq>(pathlib.Path("~/.cache").expanduser())<block_end><def_stmt>test_cache_dir_from_env environ<block_start>os.environ["XDG_CACHE_HOME"]="/foo/bar"<assert_stmt>xdg.get_dirs()["XDG_CACHE_DIR"]<eq>pathlib.Path("/foo/bar")<block_end><def_stmt>test_config_dir_default environ<block_start><assert_stmt>xdg.get_dirs()["XDG_CONFIG_DIR"]<eq>(pathlib.Path("~/.config").expanduser())<block_end><def_stmt>test_config_dir_from_env environ<block_start>os.environ["XDG_CONFIG_HOME"]="/foo/bar"<assert_stmt>xdg.get_dirs()["XDG_CONFIG_DIR"]<eq>pathlib.Path("/foo/bar")<block_end><def_stmt>test_data_dir_default environ<block_start><assert_stmt>xdg.get_dirs()["XDG_DATA_DIR"]<eq>(pathlib.Path("~/.local/share").expanduser())<block_end><def_stmt>test_data_dir_from_env environ<block_start>os.environ["XDG_DATA_HOME"]="/foo/bar"<assert_stmt>xdg.get_dirs()["XDG_DATA_DIR"]<eq>pathlib.Path("/foo/bar")<block_end><def_stmt>test_user_dirs environ tmpdir<block_start>os.environ["XDG_CONFIG_HOME"]=str(tmpdir)<with_stmt>open(os.path.join(str(tmpdir) "user-dirs.dirs") "wb")<as>fh<block_start>fh.write(b"# Some comments\n")<line_sep>fh.write(b'XDG_MUSIC_DIR="$HOME/Music2"\n')<block_end>result=xdg.get_dirs()<assert_stmt>result["XDG_MUSIC_DIR"]<eq>pathlib.Path("~/Music2").expanduser()<assert_stmt>"XDG_DOWNLOAD_DIR"<not><in>result<block_end><def_stmt>test_user_dirs_when_no_dirs_file environ tmpdir<block_start>os.environ["XDG_CONFIG_HOME"]=str(tmpdir)<line_sep>result=xdg.get_dirs()<assert_stmt>"XDG_MUSIC_DIR"<not><in>result<assert_stmt>"XDG_DOWNLOAD_DIR"<not><in>result<block_end>
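This is not Mopidy's implementation, just a sketch of the behaviour the last two tests encode: lines such as XDG_MUSIC_DIR="$HOME/Music2" in user-dirs.dirs map to expanded pathlib.Path values, comments are ignored, and keys that never appear are simply absent.

# Sketch of the user-dirs.dirs parsing behaviour the tests above expect.
import pathlib

def parse_user_dirs(text: str) -> dict:
    dirs = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue                                  # skip comments and blanks
        key, _, value = line.partition("=")
        value = value.strip('"').replace("$HOME", "~")
        dirs[key] = pathlib.Path(value).expanduser()
    return dirs

print(parse_user_dirs('# Some comments\nXDG_MUSIC_DIR="$HOME/Music2"\n'))
# {'XDG_MUSIC_DIR': PosixPath('/home/<user>/Music2')}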
<import_stmt>responses<import_from_stmt>binance.spot Spot<as>Client<import_from_stmt>tests.util random_str<import_from_stmt>urllib.parse urlencode<import_from_stmt>tests.util mock_http_response<line_sep>mock_item={"key_1":"value_1" "key_2":"value_2"}<line_sep>mock_exception={"code":-1 "msg":"error message"}<line_sep>key=random_str()<line_sep>secret=random_str()<line_sep>params={"asset":"BNB" "startTime":"1590969041003" "endTime":"1590969041003" "size":10 "recvWindow":1000 }<line_sep>@mock_http_response(responses.GET "/sapi/v1/margin/interestHistory\\?"+urlencode(params) mock_item 200 )<def_stmt>test_margin_interest_history <block_start>"""Tests the API endpoint to query margin interest history"""<line_sep>client=Client(key secret)<line_sep>response=client.margin_interest_history(**params)<line_sep>response.should.equal(mock_item)<block_end>
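The mocked URL above is simply urlencode(params) appended to the endpoint; a quick standalone check of what that query string looks like:

# Standalone check of the query string the mocked endpoint above matches.
from urllib.parse import urlencode

params = {
    "asset": "BNB",
    "startTime": "1590969041003",
    "endTime": "1590969041003",
    "size": 10,
    "recvWindow": 1000,
}
print("/sapi/v1/margin/interestHistory?" + urlencode(params))
# /sapi/v1/margin/interestHistory?asset=BNB&startTime=1590969041003&endTime=1590969041003&size=10&recvWindow=1000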
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Generated by the protocol buffer compiler. DO NOT EDIT! # source: utils/vector.proto <import_stmt>sys<line_sep>_b=sys.version_info[0]<l>3<and>(<lambda>x:x)<or>(<lambda>x:x.encode('latin1'))<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<import_from_stmt>google.protobuf descriptor_pb2<line_sep># @@protoc_insertion_point(imports) _sym_db=_symbol_database.Default()<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='utils/vector.proto' package='ffn.proto' syntax='proto2' serialized_pb=_b('\n\x12utils/vector.proto\x12\tffn.proto\" \n\x08Vector2d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\" \n\x08Vector2i\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"+\n\x08Vector3d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"+\n\x08Vector3f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"+\n\x08Vector3j\x12\t\n\x01x\x18\x01 \x01(\x03\x12\t\n\x01y\x18\x02 \x01(\x03\x12\t\n\x01z\x18\x03 \x01(\x03\"4\n\x0cVector2dList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector2d\"4\n\x0cVector2iList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector2i\"4\n\x0cVector3dList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3d\"4\n\x0cVector3fList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3f\"4\n\x0cVector3jList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3j'))<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>_VECTOR2D=_descriptor.Descriptor(name='Vector2d' full_name='ffn.proto.Vector2d' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='x' full_name='ffn.proto.Vector2d.x' index=0 number=1 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='y' full_name='ffn.proto.Vector2d.y' index=1 number=2 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=33 serialized_end=65 )<line_sep>_VECTOR2I=_descriptor.Descriptor(name='Vector2i' full_name='ffn.proto.Vector2i' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='x' full_name='ffn.proto.Vector2i.x' index=0 number=1 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> 
enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='y' full_name='ffn.proto.Vector2i.y' index=1 number=2 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=67 serialized_end=99 )<line_sep>_VECTOR3D=_descriptor.Descriptor(name='Vector3d' full_name='ffn.proto.Vector3d' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='x' full_name='ffn.proto.Vector3d.x' index=0 number=1 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='y' full_name='ffn.proto.Vector3d.y' index=1 number=2 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='z' full_name='ffn.proto.Vector3d.z' index=2 number=3 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=101 serialized_end=144 )<line_sep>_VECTOR3F=_descriptor.Descriptor(name='Vector3f' full_name='ffn.proto.Vector3f' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='x' full_name='ffn.proto.Vector3f.x' index=0 number=1 type=2 cpp_type=6 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='y' full_name='ffn.proto.Vector3f.y' index=1 number=2 type=2 cpp_type=6 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='z' full_name='ffn.proto.Vector3f.z' index=2 number=3 type=2 cpp_type=6 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=146 serialized_end=189 )<line_sep>_VECTOR3J=_descriptor.Descriptor(name='Vector3j' full_name='ffn.proto.Vector3j' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='x' full_name='ffn.proto.Vector3j.x' index=0 number=1 type=3 cpp_type=2 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='y' full_name='ffn.proto.Vector3j.y' index=1 number=2 type=3 cpp_type=2 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> 
extension_scope=<none> options=<none>) _descriptor.FieldDescriptor(name='z' full_name='ffn.proto.Vector3j.z' index=2 number=3 type=3 cpp_type=2 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=191 serialized_end=234 )<line_sep>_VECTOR2DLIST=_descriptor.Descriptor(name='Vector2dList' full_name='ffn.proto.Vector2dList' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='vectors' full_name='ffn.proto.Vector2dList.vectors' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=236 serialized_end=288 )<line_sep>_VECTOR2ILIST=_descriptor.Descriptor(name='Vector2iList' full_name='ffn.proto.Vector2iList' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='vectors' full_name='ffn.proto.Vector2iList.vectors' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=290 serialized_end=342 )<line_sep>_VECTOR3DLIST=_descriptor.Descriptor(name='Vector3dList' full_name='ffn.proto.Vector3dList' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='vectors' full_name='ffn.proto.Vector3dList.vectors' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=344 serialized_end=396 )<line_sep>_VECTOR3FLIST=_descriptor.Descriptor(name='Vector3fList' full_name='ffn.proto.Vector3fList' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='vectors' full_name='ffn.proto.Vector3fList.vectors' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=398 serialized_end=450 )<line_sep>_VECTOR3JLIST=_descriptor.Descriptor(name='Vector3jList' full_name='ffn.proto.Vector3jList' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='vectors' full_name='ffn.proto.Vector3jList.vectors' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none>) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> 
syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=452 serialized_end=504 )<line_sep>_VECTOR2DLIST.fields_by_name['vectors'].message_type=_VECTOR2D<line_sep>_VECTOR2ILIST.fields_by_name['vectors'].message_type=_VECTOR2I<line_sep>_VECTOR3DLIST.fields_by_name['vectors'].message_type=_VECTOR3D<line_sep>_VECTOR3FLIST.fields_by_name['vectors'].message_type=_VECTOR3F<line_sep>_VECTOR3JLIST.fields_by_name['vectors'].message_type=_VECTOR3J<line_sep>DESCRIPTOR.message_types_by_name['Vector2d']=_VECTOR2D<line_sep>DESCRIPTOR.message_types_by_name['Vector2i']=_VECTOR2I<line_sep>DESCRIPTOR.message_types_by_name['Vector3d']=_VECTOR3D<line_sep>DESCRIPTOR.message_types_by_name['Vector3f']=_VECTOR3F<line_sep>DESCRIPTOR.message_types_by_name['Vector3j']=_VECTOR3J<line_sep>DESCRIPTOR.message_types_by_name['Vector2dList']=_VECTOR2DLIST<line_sep>DESCRIPTOR.message_types_by_name['Vector2iList']=_VECTOR2ILIST<line_sep>DESCRIPTOR.message_types_by_name['Vector3dList']=_VECTOR3DLIST<line_sep>DESCRIPTOR.message_types_by_name['Vector3fList']=_VECTOR3FLIST<line_sep>DESCRIPTOR.message_types_by_name['Vector3jList']=_VECTOR3JLIST<line_sep>Vector2d=_reflection.GeneratedProtocolMessageType('Vector2d' (_message.Message ) dict(DESCRIPTOR=_VECTOR2D __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector2d) ))<line_sep>_sym_db.RegisterMessage(Vector2d)<line_sep>Vector2i=_reflection.GeneratedProtocolMessageType('Vector2i' (_message.Message ) dict(DESCRIPTOR=_VECTOR2I __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector2i) ))<line_sep>_sym_db.RegisterMessage(Vector2i)<line_sep>Vector3d=_reflection.GeneratedProtocolMessageType('Vector3d' (_message.Message ) dict(DESCRIPTOR=_VECTOR3D __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector3d) ))<line_sep>_sym_db.RegisterMessage(Vector3d)<line_sep>Vector3f=_reflection.GeneratedProtocolMessageType('Vector3f' (_message.Message ) dict(DESCRIPTOR=_VECTOR3F __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector3f) ))<line_sep>_sym_db.RegisterMessage(Vector3f)<line_sep>Vector3j=_reflection.GeneratedProtocolMessageType('Vector3j' (_message.Message ) dict(DESCRIPTOR=_VECTOR3J __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector3j) ))<line_sep>_sym_db.RegisterMessage(Vector3j)<line_sep>Vector2dList=_reflection.GeneratedProtocolMessageType('Vector2dList' (_message.Message ) dict(DESCRIPTOR=_VECTOR2DLIST __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector2dList) ))<line_sep>_sym_db.RegisterMessage(Vector2dList)<line_sep>Vector2iList=_reflection.GeneratedProtocolMessageType('Vector2iList' (_message.Message ) dict(DESCRIPTOR=_VECTOR2ILIST __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector2iList) ))<line_sep>_sym_db.RegisterMessage(Vector2iList)<line_sep>Vector3dList=_reflection.GeneratedProtocolMessageType('Vector3dList' (_message.Message ) dict(DESCRIPTOR=_VECTOR3DLIST __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector3dList) ))<line_sep>_sym_db.RegisterMessage(Vector3dList)<line_sep>Vector3fList=_reflection.GeneratedProtocolMessageType('Vector3fList' (_message.Message ) dict(DESCRIPTOR=_VECTOR3FLIST __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector3fList) ))<line_sep>_sym_db.RegisterMessage(Vector3fList)<line_sep>Vector3jList=_reflection.GeneratedProtocolMessageType('Vector3jList' (_message.Message 
) dict(DESCRIPTOR=_VECTOR3JLIST __module__='utils.vector_pb2'# @@protoc_insertion_point(class_scope:ffn.proto.Vector3jList) ))<line_sep>_sym_db.RegisterMessage(Vector3jList)<line_sep># @@protoc_insertion_point(module_scope)
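The generated module above only defines and registers the message classes; a minimal usage sketch follows. It relies on the standard protobuf Python message API, the coordinate values are arbitrary, and elsewhere these classes would be imported from utils.vector_pb2, per the generated __module__ attribute.

# Construct, serialize and parse the generated vector messages.
v = Vector3d(x=1.0, y=2.0, z=3.0)          # message class defined by the module above
payload = v.SerializeToString()            # wire-format bytes

v2 = Vector3d()
v2.ParseFromString(payload)                # round-trips x, y, z

vlist = Vector3dList()
vlist.vectors.add(x=0.5, y=0.5, z=0.5)     # repeated message field
vlist.vectors.append(v2)                   # append copies the message into the list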
"""Graph schema validation routines. This module provides a simple container for the ragged tensors associated with multiple sets of nodes, edges, and graph-global data. See go/graph-tensor for details. """<import_from_stmt>typing List<import_from_stmt>absl logging# TODO(blais): Remove, see below. <import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_gnn.graph adjacency<as>adj<import_from_stmt>tensorflow_gnn.graph graph_constants<as>const<import_from_stmt>tensorflow_gnn.graph graph_tensor<as>gt<import_from_stmt>tensorflow_gnn.graph schema_utils<as>su<import_stmt>tensorflow_gnn.proto.graph_schema_pb2<as>schema_pb2<line_sep># The supported data types. Note that these are currently limited to the ones # supported by `tensorflow.Example` but we can eventually extend the list by # adding casting transformations, and supporting other data formats for # encoding. VALID_DTYPES=(tf.string tf.int64 tf.float32)<class_stmt>ValidationError(ValueError)<block_start>"""A schema validation error. This exception is raised if in the course of validating the schema for correctness some errors are found. """<block_end><def_stmt>validate_schema schema:schema_pb2.GraphSchema<arrow>List[Exception]<block_start>"""Validates the correctness of a graph schema instance. `GraphSchema` configuration messages are created by users in order to describe the topology of a graph. This function checks various aspects of the schema for correctness, e.g. prevents usage of reserved feature names, ensures given shapes are fully-defined, ensures set name references are found, etc. Args: schema: An instance of the graph schema. Returns: A list of exceptions describing optional warnings. Render those to your favorite stream (or ignore). Raises: ValidationError: If a validation check fails. """<line_sep>_validate_schema_feature_dtypes(schema)<line_sep>_validate_schema_shapes(schema)<line_sep>_validate_schema_descriptions(schema)<line_sep>_validate_schema_reserved_feature_names(schema)<line_sep>_validate_schema_context_references(schema)<line_sep>_validate_schema_node_set_references(schema)<line_sep><return>_warn_schema_scalar_shapes(schema)<block_end><def_stmt>check_required_features requirements:schema_pb2.GraphSchema actual:schema_pb2.GraphSchema<block_start>"""Checks the requirements of a given schema against another. This function is used to enable the specification of required features to a function. A function accepting a `GraphTensor` instance can this way document what features it is expecting to find on it. The function accepts two schemas: a `requirements` schema which describes what the function will attempt to fetch and use on the `GraphTensor`, and an `actual` schema instance, which is the schema describing the dataset. You can use this in your model code to ensure that a dataset contains all the expected node sets, edge sets and features that the model uses. Note that a dimension with a size of `0` in a feature from the `requirements` schema is interpreted specially: it means "accept any value for this dimension." The special value `-1` is still used to represent a ragged dimension. (Finally, note that this function predates the existence of `GraphTensorSpec`, which is a runtime descriptor for a `GraphTensor`. We may eventually perovide an equivalent construct using the `GraphTensorSpec.) Args: requirements: An instance of a GraphSchema object, with optional shapes. actual: The instance of actual schema to check is a matching superset of the required schema. 
Raises: ValidationError: If the given schema does not fulfill the requirements. """<line_sep># Create maps of the required and provided features. <def_stmt>build_schema_map schema_<block_start>mapping={}<for_stmt>(set_type set_name feature_name feature) su.iter_features(schema_)<block_start>key=(set_type set_name feature_name)<line_sep>mapping[key]=feature<block_end><return>mapping<block_end>required=build_schema_map(requirements)<line_sep>given=build_schema_map(actual)<for_stmt>key,required_feature required.items()<block_start>set_type,set_name,feature_name=key<try_stmt><block_start>given_feature=given[key]<block_end><except_stmt>KeyError<block_start><raise>ValidationError("{} feature '{}' from set '{}' is missing from given schema".format(set_type.capitalize() feature_name set_name))<block_end><else_stmt><block_start><if_stmt>required_feature.HasField("dtype")<and>(required_feature.dtype<ne>given_feature.dtype)<block_start><raise>ValidationError("{} feature '{}' from set '{}' has invalid type: {}".format(set_type.capitalize() feature_name set_name given_feature.dtype))<block_end><if_stmt>required_feature.HasField("shape")<block_start><if_stmt>len(given_feature.shape.dim)<ne>len(required_feature.shape.dim)<block_start><raise>ValidationError("{} feature '{}' from set '{}' has invalid shape: {}".format(set_type.capitalize() feature_name set_name given_feature.shape))<block_end><for_stmt>required_dim,given_dim zip(required_feature.shape.dim given_feature.shape.dim)<block_start><if_stmt>required_dim.size<eq>0# Accept any dimension. <block_start><continue><block_end><elif_stmt>given_dim.size<ne>required_dim.size<block_start><raise>ValidationError("{} feature '{}' from set '{}' has invalid shape: {}".format(set_type.capitalize() feature_name set_name given_feature.shape))<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_validate_schema_feature_dtypes schema:schema_pb2.GraphSchema<block_start>"""Verify that dtypes are set and from our list of supported types."""<for_stmt>set_type,set_name,feature_name,feature su.iter_features(schema)<block_start><if_stmt><not>feature.HasField("dtype")<block_start><raise>ValidationError("Missing 'dtype' field on {} set '{}' feature '{}'".format(set_type set_name feature_name))<block_end><if_stmt>feature.dtype<not><in>{dtype.as_datatype_enum<for>dtype VALID_DTYPES}<block_start><raise>ValidationError(("Invalid 'dtype' field {} on {} set '{}' feature '{}': {}; "<concat>"valid types include: {}").format(feature.dtype set_type set_name feature_name feature.dtype ", ".join(map(str VALID_DTYPES))))<block_end><block_end><block_end><def_stmt>_validate_schema_shapes schema:schema_pb2.GraphSchema<block_start>"""Check for the validity of shape protos."""<for_stmt>set_type,set_name,feature_name,feature su.iter_features(schema)<block_start><if_stmt>feature.shape.unknown_rank<block_start><raise>ValidationError("Shapes must have a known rank; on {} set '{}' feature '{}'".format(set_type set_name feature_name))<block_end><block_end><block_end><def_stmt>_warn_schema_scalar_shapes schema:schema_pb2.GraphSchema<block_start>"""Return warnings on unnecessary shapes of size 1. This is a common error. Note that strictly speaking this should parse fine, the problem is that clients will inevitably configure shapes of [1] where scalar shapes would be sufficient. This check is there to nudge them in the right direction. Args: schema: A GraphSchema instance to validate. Returns: A list of ValidationError warnings to issue conditionally. 
"""<line_sep>warnings=[]<for_stmt>set_type,set_name,feature_name,feature su.iter_features(schema)<block_start><if_stmt>len(feature.shape.dim)<eq>1<and>feature.shape.dim[0].size<eq>1<block_start>warnings.append(ValidationError("Unnecessary shape of [1] in {} set '{}' / '{}'; use scalar feature "<concat>"instead (i.e., specify an empty shape proto).".format(set_type set_name feature_name)))<block_end><block_end><return>warnings<block_end><def_stmt>_validate_schema_descriptions schema:schema_pb2.GraphSchema<block_start>"""Verify that the descriptions aren't set on the shapes' .name fields."""<line_sep># This seems to be a common error. name_fields=[]<for_stmt>set_type,set_name,feature_name,feature su.iter_features(schema)<block_start><if_stmt>feature.HasField("description")<block_start><continue><block_end><for_stmt>dim feature.shape.dim<block_start><if_stmt>dim.name<block_start>name_fields.append((set_type set_name feature_name))<block_end><block_end><block_end><if_stmt>name_fields<block_start>field_names=",".join([str(ntuple)<for>ntuple name_fields])<line_sep><raise>ValidationError("The following features are incorrectly locating the description on "<concat>"the shape dimensions 'name' field: {}; use the 'description' field of "<concat>"the feature instead".format(field_names))<block_end><block_end><def_stmt>_validate_schema_reserved_feature_names schema:schema_pb2.GraphSchema<block_start>"""Check that reserved feature names aren't being used as explicit features."""<line_sep>node_set_dicts=[("nodes" name node_set.features)<for>name,node_set schema.node_sets.items()]<line_sep>edge_set_dicts=[("edges" name edge_set.features)<for>name,edge_set schema.edge_sets.items()]<for_stmt>set_type,set_name,feature_dict node_set_dicts+edge_set_dicts<block_start><if_stmt>const.SIZE_NAME<in>feature_dict<block_start><raise>ValidationError("Feature '{}' from {} set '{}' is reserved".format(const.SIZE_NAME set_type set_name))<block_end><block_end><for_stmt>set_type,set_name,feature_dict edge_set_dicts<block_start><for_stmt>name const.SOURCE_NAME const.TARGET_NAME# Invalidate reserved feature names. <block_start><if_stmt>name<in>feature_dict<block_start><raise>ValidationError("Feature '{}' from {} set '{}' is reserved".format(name set_type set_name))<block_end><block_end><block_end># TODO(blais): Make this compulsory after we remove the hardcoded # feature names from the sampler. 
<for_stmt>set_type,set_name,feature_name,feature su.iter_features(schema)<block_start><if_stmt>const.RESERVED_REGEX.match(feature_name)<block_start>logging.error("Invalid %s feature name '%s' on set '%s': reserved names "<concat>"are not allowed" set_type feature_name set_name)<block_end><block_end><block_end><def_stmt>_validate_schema_context_references schema:schema_pb2.GraphSchema<block_start>"""Verify the cross-references to context features from node and edge sets."""<for_stmt>set_name,node_set schema.node_sets.items()<block_start><for_stmt>feature node_set.context<block_start><if_stmt>feature<not><in>schema.context.features<block_start><raise>ValidationError("Context feature '{}' does not exist "<concat>"(from node set '{}')".format(feature set_name))<block_end><block_end><block_end><for_stmt>set_name,edge_set schema.edge_sets.items()<block_start><for_stmt>feature edge_set.context<block_start><if_stmt>feature<not><in>schema.context.features<block_start><raise>ValidationError("Context feature '{}' does not exist "<concat>"(from edge set '{}')".format(feature set_name))<block_end><block_end><block_end><block_end><def_stmt>_validate_schema_node_set_references schema:schema_pb2.GraphSchema<block_start>"""Verify the source and target set references from the edge sets."""<for_stmt>set_name,edge_set schema.edge_sets.items()<block_start><for_stmt>feature_name edge_set.source edge_set.target<block_start><if_stmt>feature_name<not><in>schema.node_sets<block_start><raise>ValidationError("Edge set '{}' referencing unknown node set '{}'".format(set_name feature_name))<block_end><block_end><block_end><block_end># TODO(blais): This code could eventually be folded into the various # constructors of `GraphTensor` pieces. <def_stmt>assert_constraints graph:gt.GraphTensor<arrow>tf.Operation<block_start>"""Validate the shape constaints of a graph's features at runtime. This code returns a TensorFlow op with debugging assertions that ensure the parsed data has valid shape constraints for a graph. This can be instantiated in your TensorFlow graph while debugging if you believe that your data may be incorrectly shaped, or simply applied to a manually produced dataset to ensure that those constraints have been applied correctly. Args: graph: An instance of a `GraphTensor`. Returns: A list of check operations. """<line_sep><return>tf.group(_assert_constraints_feature_shape_prefix(graph) _assert_constraints_edge_shapes(graph) _assert_constraints_edge_indices_range(graph) )<block_end><def_stmt>_assert_constraints_feature_shape_prefix graph:gt.GraphTensor<arrow>tf.Operation<block_start>"""Validates the number of nodes or edges of feature tensors."""<with_stmt>tf.name_scope("constraints_feature_shape_prefix")<block_start>checks=[]<for_stmt>set_type,set_dict [("node" graph.node_sets) ("edge" graph.edge_sets)]<block_start><for_stmt>set_name,feature_set set_dict.items()<block_start>sizes=feature_set.sizes<line_sep># Check the rank is at least 1. checks.append(tf.debugging.assert_rank_at_least(sizes 1))<line_sep>rank=tf.rank(sizes)<for_stmt>feature_name,tensor feature_set.features.items()# Check that each tensor has greater or equal rank to the parent # piece. <block_start>checks.append(tf.debugging.assert_greater_equal(tf.rank(tensor) rank "Rank too small for {} feature '{}/{}'".format(set_type set_name feature_name)))<line_sep># Check the prefix shape of the tensor matches. 
checks.append(tf.debugging.assert_equal(tensor.shape[:rank] sizes "Invalid prefix shape for {} feature: {}/{}".format(set_type set_name feature_name)))<block_end><block_end><block_end><return>tf.group(*checks)<block_end><block_end><def_stmt>_assert_constraints_edge_indices_range graph:gt.GraphTensor<arrow>tf.Operation<block_start>"""Validates that edge indices are within the bounds of node set sizes."""<with_stmt>tf.name_scope("constraints_edge_indices_range")<block_start>checks=[]<for_stmt>set_name,edge_set graph.edge_sets.items()<block_start>adjacency=edge_set.adjacency<if_stmt><not>issubclass(type(adjacency) adj.HyperAdjacency)<block_start><raise>ValueError(f"Adjacency type for constraints assertions must be "<concat>f"HyperAdjacency: {adjacency}")<block_end><for_stmt>tag,(node_set_name indices) sorted(adjacency.get_indices_dict().items())# Check that the indices are positive. <block_start>flat_indices=(indices.flat_values<if>isinstance(indices tf.RaggedTensor)<else>indices)<line_sep>checks.append(tf.debugging.Assert(tf.math.reduce_all(tf.math.greater_equal(indices tf.constant(0 dtype=indices.dtype))) ["Index underflow" "edges/{} {} indices:".format(set_name tag) flat_indices] name="check_indices_underflow" summarize=-1))<line_sep># Check the indices are smaller than the node tensor sizes. sizes=graph.node_sets[node_set_name].sizes<line_sep>checks.append(tf.debugging.Assert(tf.math.reduce_all(tf.math.less(indices tf.expand_dims(sizes axis=-1))) ["Index overflow" "edges/{} {} indices:".format(set_name tag) flat_indices "nodes/{} {}:".format(node_set_name "size") sizes] name="check_indices_overflow" summarize=-1))<block_end><block_end><return>tf.group(*checks)<block_end><block_end><def_stmt>_assert_constraints_edge_shapes graph:gt.GraphTensor<arrow>tf.Operation<block_start>"""Validates edge shapes and that they contain a scalar index per node."""<with_stmt>tf.name_scope("constraints_edge_indices_range")<block_start>checks=[]<for_stmt>set_name,edge_set graph.edge_sets.items()<block_start>adjacency=edge_set.adjacency<if_stmt><not>issubclass(type(adjacency) adj.HyperAdjacency)<block_start><raise>ValueError(f"Adjacency type for constraints assertions must be "<concat>f"HyperAdjacency: {adjacency}")<block_end><for_stmt>tag,(_ indices) sorted(adjacency.get_indices_dict().items())# Check the shape of the edge indices matches the size, and that the # shape is scalar on the indices. <block_start>checks.append(tf.debugging.assert_equal(indices.shape edge_set.sizes "Invalid shape for edge indices: {}/{}".format(set_name tag)))<block_end><block_end><return>tf.group(*checks)<block_end><block_end>
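A minimal sketch of driving these validators on a hand-built schema; the node-set and feature names are invented for illustration, and only the proto fields that the validators above inspect are populated.

import tensorflow as tf
import tensorflow_gnn.proto.graph_schema_pb2 as schema_pb2

schema = schema_pb2.GraphSchema()
feature = schema.node_sets["papers"].features["year"]  # map access creates the entries
feature.dtype = tf.int64.as_datatype_enum              # a supported dtype, so no error is raised
feature.shape.dim.add().size = 1                       # a [1] shape triggers the scalar-shape warning

for warning in validate_schema(schema):
    print(warning)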
""" Documentation configuration and workflow for jupyter-starters """<line_sep># pylint: disable=invalid-name,redefined-builtin,import-error <import_stmt>pathlib<import_stmt>subprocess<import_stmt>sys<line_sep>sys.path.insert(0 str((pathlib.Path.cwd().parent/"python_packages"/"jupyter_lsp"/"src").resolve()) )<line_sep>project="Jupyter[Lab] Language Server"<line_sep>copyright="2021, Jupyter[Lab] Language Server Contributors"<line_sep>author="Jupyter[Lab] Language Server Contributors"<line_sep>version=""<line_sep>release=""<line_sep>extensions=["myst_nb" "sphinx.ext.autodoc" "sphinx.ext.napoleon" "sphinx.ext.coverage" "sphinx.ext.doctest" "sphinx.ext.githubpages" "sphinx.ext.ifconfig" "sphinx.ext.intersphinx" "sphinx.ext.mathjax" "sphinx.ext.todo" "sphinx.ext.viewcode" "sphinx_copybutton" "sphinx_autodoc_typehints" ]<line_sep>templates_path=["_templates"]<line_sep>source_suffix=[".rst" ".md"]<line_sep>master_doc="index"<line_sep>language=<none><line_sep>exclude_patterns=[".ipynb_checkpoints/**" "**/.ipynb_checkpoints/**" "**/~.*" "~.*" "_build/**" ]<line_sep>html_theme="sphinx_book_theme"<line_sep>html_static_path=["_static"]<line_sep>htmlhelp_basename="jupyterlab-lsp"<line_sep>intersphinx_mapping={"python":("https://docs.python.org/3" <none>) "jsonschema":("https://python-jsonschema.readthedocs.io/en/stable/" <none>) }<line_sep>github_url="https://github.com"<line_sep>github_repo_org="jupyter-lsp"<line_sep>github_repo_name="jupyterlab-lsp"<line_sep>github_repo_slug=f"{github_repo_org}/{github_repo_name}"<line_sep>github_repo_url=f"{github_url}/{github_repo_slug}"<line_sep>extlinks={"issue":(f"{github_repo_url}/issues/%s" "#") "pr":(f"{github_repo_url}/pull/%s" "PR #") "commit":(f"{github_repo_url}/commit/%s" "") "gh":(f"{github_url}/%s" "GitHub: ") }<line_sep>html_show_sourcelink=<true><line_sep>html_context={"display_github":<true> # these automatically-generated pages will create broken links "hide_github_pagenames":["search" "genindex"] "github_user":github_repo_org "github_repo":github_repo_name "github_version":"master" "conf_py_path":"/docs/" }<line_sep>html_logo="images/logo.png"<line_sep>html_title="Language Server Protocol integration for Jupyter[Lab]"<line_sep>html_theme_options={"repository_url":github_repo_url "path_to_docs":"docs" "use_fullscreen_button":<true> "use_repository_button":<true> "use_issues_button":<true> "use_edit_page_button":<true> "use_download_button":<true> }<line_sep># MyST-{NB} jupyter_execute_notebooks="force"<line_sep>nb_output_stderr="remove-warn"<line_sep>myst_enable_extensions=["amsmath" "deflist" "dollarmath" "html_admonition" "html_image" "smartquotes" ]<def_stmt>setup app<block_start>"""Runs before the "normal business" of sphinx. Don't go too crazy here."""<line_sep>app.add_css_file("css/custom.css")<line_sep>subprocess.check_call(["jlpm" "--ignore-optional"])<block_end>
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. <class_stmt>Foo<block_start><def_stmt>__init__ self arg1:str arg2:int<arrow><none><block_start>self.arg1=arg1<line_sep>self.arg2=arg2<block_end><block_end>
"""Auxiliary **connector** and **selector** functions to create edges. This module provides auxiliary **connector** and **selector** functions for the ``dg.DeepGraph.create_edges`` and ``dg.DeepGraph.create_ft_edges`` methods. They are described in their corresponding docstrings. """<import_from_future_stmt> print_function division absolute_import<line_sep># Copyright (C) 2017-2020 by # <NAME> <<EMAIL>> # All rights reserved. # BSD license. # py2/3 compatibility <try_stmt><block_start>range=xrange<block_end><except_stmt>NameError<block_start><pass><block_end><import_stmt>numpy<as>np<line_sep>__all__=['great_circle_dist' 'cp_node_intersection' 'cp_intersection_strength' 'hypergeometric_p_value' ]<line_sep># ============================================================================ # CONNECTORS # ============================================================================ <def_stmt>great_circle_dist lat_s lat_t lon_s lon_t<block_start>"""Return the great circle distance between nodes. The latitude and longitude values in the node table have to be in signed decimal degrees without compass direction (the sign indicates west/south). The great circle distance is calculated using the spherical law of cosines. """<line_sep># dtypes lat_s=np.array(lat_s dtype=float)<line_sep>lat_t=np.array(lat_t dtype=float)<line_sep>lon_s=np.array(lon_s dtype=float)<line_sep>lon_t=np.array(lon_t dtype=float)<line_sep># select by event_indices phi_i=np.radians(lat_s)<line_sep>phi_j=np.radians(lat_t)<line_sep>delta_alpha=np.radians(lon_t)-np.radians(lon_s)<line_sep># earth's radius R=6371<line_sep># spatial distance of nodes gcd=np.arccos(np.sin(phi_i)<times>np.sin(phi_j)+np.cos(phi_i)<times>np.cos(phi_j)<times>np.cos(delta_alpha))<times>R<line_sep># for 0 gcd, there might be nans, convert to 0. gcd=np.nan_to_num(gcd)<line_sep><return>gcd<block_end><def_stmt>cp_node_intersection supernode_ids sources targets<block_start>"""Work in progress! """<line_sep>nodess=supernode_ids[sources]<line_sep>nodest=supernode_ids[targets]<line_sep>identical_nodes=(nodess<eq>nodest)<line_sep>intsec=np.zeros(len(sources) dtype=object)<line_sep>intsec_card=np.zeros(len(sources) dtype=np.int)<for_stmt>i range(len(sources))<block_start>intsec[i]=nodess[i].intersection(nodest[i])<line_sep>intsec_card[i]=len(intsec[i])<block_end><return>intsec intsec_card identical_nodes<block_end><def_stmt>cp_intersection_strength n_unique_nodes intsec_card sources targets<block_start>"""Work in progress! """<line_sep>us=n_unique_nodes[sources]<line_sep>ut=n_unique_nodes[targets]<line_sep># min cardinality min_card=np.array(np.vstack((us ut)).min(axis=0) dtype=np.float64)<line_sep># intersection strength intsec_strength=intsec_card/min_card<line_sep><return>intsec_strength<block_end><def_stmt>hypergeometric_p_value n_unique_nodes intsec_card sources targets<block_start>"""Work in progress! """<import_from_stmt>scipy.stats hypergeom<line_sep>us=n_unique_nodes[sources]<line_sep>ut=n_unique_nodes[targets]<line_sep># population size M=220<times>220<line_sep># number of success states in population n=np.vstack((us ut)).max(axis=0)<line_sep># total draws N=np.vstack((us ut)).min(axis=0)<line_sep># successes x=intsec_card<line_sep>hg_p=np.zeros(len(sources))<for_stmt>i range(len(sources))<block_start>hg_p[i]=hypergeom.sf(x[i] M n[i] N[i])<block_end><return>hg_p<block_end># ============================================================================ # Selectors # ============================================================================
<import_from_stmt>setuptools setup find_packages<import_from_stmt>codecs open<import_from_stmt>os path<line_sep>here=path.abspath(path.dirname(__file__))<line_sep># get the version exec(open('simupy/version.py').read())<line_sep># Get the long description from the README file <with_stmt>open(path.join(here 'README.rst') encoding='utf-8')<as>f<block_start>long_description=f.read()<block_end>long_description=long_description.replace("https://simupy.readthedocs.io/en/latest/" "https://simupy.readthedocs.io/en/simupy-{}/".format('.'.join(__version__.split('.')[:3])))<line_sep>setup(name='simupy' version=__version__ description='A framework for modeling and simulating dynamical systems.' long_description=long_description packages=find_packages() author='<NAME>' author_email='<EMAIL>' url='https://github.com/simupy/simupy' license="BSD 2-clause \"Simplified\" License" python_requires='>=3' install_requires=['numpy>=1.11.3' 'scipy>=0.18.1'] extras_require={'symbolic':['sympy>=1.0'] 'doc':['sphinx>=1.6.3' 'sympy>=1.0'] 'examples':['matplotlib>=2.0' 'sympy>=1.0'] } classifiers=['License :: OSI Approved :: BSD License' 'Programming Language :: Python :: 3' 'Intended Audience :: Education' 'Intended Audience :: Science/Research' 'Operating System :: OS Independent' 'Topic :: Scientific/Engineering :: Physics' 'Topic :: Scientific/Engineering :: Mathematics' ] )<line_sep>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Benchmark the scoring performance on various CNNs """<import_from_stmt>common find_mxnet<import_from_stmt>common.util get_gpus<import_stmt>mxnet<as>mx<import_stmt>mxnet.gluon.model_zoo.vision<as>models<import_from_stmt>importlib import_module<import_stmt>logging<import_stmt>argparse<import_stmt>time<import_stmt>numpy<as>np<line_sep>logging.basicConfig(level=logging.DEBUG)<line_sep>parser=argparse.ArgumentParser(description='SymbolAPI-based CNN inference performance benchmark')<line_sep>parser.add_argument('--network' type=str default='all' choices=['all' 'alexnet' 'vgg-16' 'resnetv1-50' 'resnet-50' 'resnet-152' 'inception-bn' 'inception-v3' 'inception-v4' 'inception-resnet-v2' 'mobilenet' 'densenet121' 'squeezenet1.1'])<line_sep>parser.add_argument('--batch-size' type=int default=0 help='Batch size to use for benchmarking. Example: 32, 64, 128.'<concat>'By default, runs benchmark for batch sizes - 1, 32, 64, 128, 256')<line_sep>opt=parser.parse_args()<def_stmt>get_symbol network batch_size dtype<block_start>image_shape=(3 299 299)<if>network<in>['inception-v3' 'inception-v4']<else>(3 224 224)<line_sep>num_layers=0<if_stmt>network<eq>'inception-resnet-v2'<block_start>network=network<block_end><elif_stmt>'resnet'<in>network<block_start>num_layers=int(network.split('-')[1])<line_sep>network=network.split('-')[0]<block_end><if_stmt>'vgg'<in>network<block_start>num_layers=int(network.split('-')[1])<line_sep>network='vgg'<block_end><if_stmt>network<in>['densenet121' 'squeezenet1.1']<block_start>sym=models.get_model(network)<line_sep>sym.hybridize()<line_sep>data=mx.sym.var('data')<line_sep>sym=sym(data)<line_sep>sym=mx.sym.SoftmaxOutput(sym name='softmax')<block_end><else_stmt><block_start>net=import_module('symbols.'+network)<line_sep>sym=net.get_symbol(num_classes=1000 image_shape=','.join([str(i)<for>i image_shape]) num_layers=num_layers dtype=dtype)<block_end><return>(sym [('data' (batch_size )+image_shape)])<block_end><def_stmt>score network dev batch_size num_batches dtype# get mod <block_start>sym,data_shape=get_symbol(network batch_size dtype)<line_sep>mod=mx.mod.Module(symbol=sym context=dev)<line_sep>mod.bind(for_training=<false> inputs_need_grad=<false> data_shapes=data_shape)<line_sep>mod.init_params(initializer=mx.init.Xavier(magnitude=2.))<line_sep># get data data=[mx.random.uniform(-1.0 1.0 shape=shape ctx=dev)<for>_,shape mod.data_shapes]<line_sep>batch=mx.io.DataBatch(data [])# empty label # run dry_run=5# use 5 iterations to warm up <for_stmt>i range(dry_run+num_batches)<block_start><if_stmt>i<eq>dry_run<block_start>tic=time.time()<block_end>mod.forward(batch is_train=<false>)<for_stmt>output mod.get_outputs()<block_start>output.wait_to_read()<block_end><block_end># return num images per second 
<return>num_batches<times>batch_size/(time.time()-tic)<block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>opt.network<eq>'all'<block_start>networks=['alexnet' 'vgg-16' 'resnetv1-50' 'resnet-50' 'resnet-152' 'inception-bn' 'inception-v3' 'inception-v4' 'inception-resnet-v2' 'mobilenet' 'densenet121' 'squeezenet1.1']<line_sep>logging.info('It may take some time to run all models, '<concat>'set --network to run a specific one')<block_end><else_stmt><block_start>networks=[opt.network]<block_end>devs=[mx.gpu(0)]<if>len(get_gpus())<g>0<else>[]<line_sep># Enable USE_MKLDNN for better CPU performance devs.append(mx.cpu())<if_stmt>opt.batch_size<eq>0<block_start>batch_sizes=[1 32 64 128 256]<line_sep>logging.info('run batchsize [1, 32, 64, 128, 256] by default, '<concat>'set --batch-size to run a specific one')<block_end><else_stmt><block_start>batch_sizes=[opt.batch_size]<block_end><for_stmt>net networks<block_start>logging.info('network: %s' net)<if_stmt>net<in>['densenet121' 'squeezenet1.1']<block_start>logging.info('network: %s is converted from gluon modelzoo' net)<line_sep>logging.info('you can run benchmark/python/gluon/benchmark_gluon.py for more models')<block_end><for_stmt>d devs<block_start>logging.info('device: %s' d)<line_sep>logged_fp16_warning=<false><for_stmt>b batch_sizes<block_start><for_stmt>dtype ['float32' 'float16']<block_start><if_stmt>d<eq>mx.cpu()<and>dtype<eq>'float16'#float16 is not supported on CPU <block_start><continue><block_end><elif_stmt>net<in>['inception-bn' 'alexnet']<and>dtype<eq>'float16'<block_start><if_stmt><not>logged_fp16_warning<block_start>logging.info('Model definition for {} does not support float16'.format(net))<line_sep>logged_fp16_warning=<true><block_end><block_end><else_stmt><block_start>speed=score(network=net dev=d batch_size=b num_batches=10 dtype=dtype)<line_sep>logging.info('batch size %2d, dtype %s, images/sec: %f' b dtype speed)<block_end><block_end><block_end><block_end><block_end><block_end>
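The score() helper can also be called directly, bypassing the argparse driver; the sketch below mirrors the values the __main__ block passes in. The network name and batch size are just examples, and building the symbol still requires the symbols/ package from this example directory to be importable.

import mxnet as mx

images_per_sec = score(network='resnet-50', dev=mx.cpu(), batch_size=1,
                       num_batches=10, dtype='float32')
logging.info('resnet-50, batch 1, float32: %f images/sec', images_per_sec)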
# -*- coding: utf-8 -*- # Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>oslo_utils reflection<import_from_stmt>taskflow.engines.worker_based types<as>worker_types<import_from_stmt>taskflow test<import_from_stmt>taskflow.test mock<import_from_stmt>taskflow.tests utils<class_stmt>TestTopicWorker(test.TestCase)<block_start><def_stmt>test_topic_worker self<block_start>worker=worker_types.TopicWorker("dummy-topic" [utils.DummyTask] identity="dummy")<line_sep>self.assertTrue(worker.performs(utils.DummyTask))<line_sep>self.assertFalse(worker.performs(utils.NastyTask))<line_sep>self.assertEqual('dummy' worker.identity)<line_sep>self.assertEqual('dummy-topic' worker.topic)<block_end><block_end><class_stmt>TestProxyFinder(test.TestCase)<block_start>@mock.patch("oslo_utils.timeutils.now")<def_stmt>test_expiry self mock_now<block_start>finder=worker_types.ProxyWorkerFinder('me' mock.MagicMock() [] worker_expiry=60)<line_sep>w,emit=finder._add('dummy-topic' [utils.DummyTask])<line_sep>w.last_seen=0<line_sep>mock_now.side_effect=[120]<line_sep>gone=finder.clean()<line_sep>self.assertEqual(0 finder.total_workers)<line_sep>self.assertEqual(1 gone)<block_end><def_stmt>test_single_topic_worker self<block_start>finder=worker_types.ProxyWorkerFinder('me' mock.MagicMock() [])<line_sep>w,emit=finder._add('dummy-topic' [utils.DummyTask])<line_sep>self.assertIsNotNone(w)<line_sep>self.assertTrue(emit)<line_sep>self.assertEqual(1 finder.total_workers)<line_sep>w2=finder.get_worker_for_task(utils.DummyTask)<line_sep>self.assertEqual(w.identity w2.identity)<block_end><def_stmt>test_multi_same_topic_workers self<block_start>finder=worker_types.ProxyWorkerFinder('me' mock.MagicMock() [])<line_sep>w,emit=finder._add('dummy-topic' [utils.DummyTask])<line_sep>self.assertIsNotNone(w)<line_sep>self.assertTrue(emit)<line_sep>w2,emit=finder._add('dummy-topic-2' [utils.DummyTask])<line_sep>self.assertIsNotNone(w2)<line_sep>self.assertTrue(emit)<line_sep>w3=finder.get_worker_for_task(reflection.get_class_name(utils.DummyTask))<line_sep>self.assertIn(w3.identity [w.identity w2.identity])<block_end><def_stmt>test_multi_different_topic_workers self<block_start>finder=worker_types.ProxyWorkerFinder('me' mock.MagicMock() [])<line_sep>added=[]<line_sep>added.append(finder._add('dummy-topic' [utils.DummyTask]))<line_sep>added.append(finder._add('dummy-topic-2' [utils.DummyTask]))<line_sep>added.append(finder._add('dummy-topic-3' [utils.NastyTask]))<line_sep>self.assertEqual(3 finder.total_workers)<line_sep>w=finder.get_worker_for_task(utils.NastyTask)<line_sep>self.assertEqual(added[-1][0].identity w.identity)<line_sep>w=finder.get_worker_for_task(utils.DummyTask)<line_sep>self.assertIn(w.identity [w_a[0].identity<for>w_a added[0:2]])<block_end><block_end>
<import_stmt>pytest<import_stmt>asyncio<import_stmt>functools<import_stmt>glob<import_stmt>logging.config<import_stmt>multiprocessing<as>mp<import_stmt>os<import_stmt>signal<import_stmt>sys<import_stmt>tempfile<import_stmt>time<import_from_stmt>typing List Sequence<import_stmt>aiotools<if_stmt>os.environ.get('CI' '')<and>sys.version_info<l>(3 9 0)<block_start>pytest.skip('skipped to prevent kill CI agents due to signals on CI environments' allow_module_level=<true> )<block_end>@pytest.fixture<def_stmt>restore_signal <block_start>os.setpgrp()<line_sep>old_alrm=signal.getsignal(signal.SIGALRM)<line_sep>old_intr=signal.getsignal(signal.SIGINT)<line_sep>old_term=signal.getsignal(signal.SIGTERM)<line_sep>old_intr=signal.getsignal(signal.SIGUSR1)<line_sep><yield><line_sep>signal.signal(signal.SIGALRM old_alrm)<line_sep>signal.signal(signal.SIGINT old_intr)<line_sep>signal.signal(signal.SIGTERM old_term)<line_sep>signal.signal(signal.SIGUSR1 old_term)<block_end>@pytest.fixture<def_stmt>set_timeout <block_start><def_stmt>make_timeout sec callback<block_start><def_stmt>_callback signum frame<block_start>signal.alarm(0)<line_sep>callback()<block_end>signal.signal(signal.SIGALRM _callback)<line_sep>signal.setitimer(signal.ITIMER_REAL sec)<block_end><yield>make_timeout<block_end>@pytest.fixture<def_stmt>exec_recorder <block_start>f=tempfile.NamedTemporaryFile(mode='w' encoding='utf8' prefix='aiotools.tests.server.' )<line_sep>f.close()<def_stmt>write msg:str<arrow><none><block_start>path=f"{f.name}.{os.getpid()}"<with_stmt>open(path 'a' encoding='utf8')<as>writer<block_start>writer.write(msg+'\n')<block_end><block_end><def_stmt>read <arrow>Sequence[str]<block_start>lines:List[str]=[]<for_stmt>path glob.glob(f"{f.name}.*")<block_start><with_stmt>open(path 'r' encoding='utf8')<as>reader<block_start>lines.extend(line.strip()<for>line reader.readlines())<block_end><block_end><return>lines<block_end><yield>write read<for_stmt>path glob.glob(f"{f.name}.*")<block_start>os.unlink(path)<block_end><block_end><def_stmt>interrupt <block_start>os.kill(0 signal.SIGINT)<block_end><def_stmt>interrupt_usr1 <block_start>os.kill(os.getpid() signal.SIGUSR1)<block_end>@aiotools.server# type: ignore <async_keyword><def_stmt>myserver_simple loop proc_idx args<block_start>write=args[0]<line_sep><await>asyncio.sleep(0)<line_sep>write(f'started:{proc_idx}')<line_sep><yield><line_sep><await>asyncio.sleep(0)<line_sep>write(f'terminated:{proc_idx}')<block_end><def_stmt>test_server_singleproc set_timeout restore_signal exec_recorder<block_start>write,read=exec_recorder<line_sep>set_timeout(0.2 interrupt)<line_sep>aiotools.start_server(myserver_simple args=(write ) )<line_sep>lines=set(read())<assert_stmt>'started:0'<in>lines<assert_stmt>'terminated:0'<in>lines<block_end><def_stmt>test_server_multiproc set_timeout restore_signal exec_recorder<block_start>write,read=exec_recorder<line_sep>set_timeout(0.2 interrupt)<line_sep>aiotools.start_server(myserver_simple num_workers=3 args=(write ) )<line_sep>lines=set(read())<assert_stmt>lines<eq>{'started:0' 'started:1' 'started:2' 'terminated:0' 'terminated:1' 'terminated:2' }<block_end>@aiotools.server# type: ignore <async_keyword><def_stmt>myserver_signal loop proc_idx args<block_start>write=args[0]<line_sep><await>asyncio.sleep(0)<line_sep>write(f'started:{proc_idx}')<line_sep>received_signum=<yield><line_sep><await>asyncio.sleep(0)<line_sep>write(f'terminated:{proc_idx}:{received_signum}')<block_end><def_stmt>test_server_multiproc_custom_stop_signals set_timeout restore_signal 
exec_recorder <block_start>write,read=exec_recorder<line_sep>set_timeout(0.2 interrupt_usr1)<line_sep>aiotools.start_server(myserver_signal num_workers=2 stop_signals={signal.SIGUSR1} args=(write ) )<line_sep>lines=set(read())<assert_stmt>{'started:0' 'started:1'}<l>lines<assert_stmt>{f'terminated:0:{int(signal.SIGUSR1)}' f'terminated:1:{int(signal.SIGUSR1)}' }<l>lines<block_end>@aiotools.server# type: ignore <async_keyword><def_stmt>myserver_worker_init_error loop proc_idx args<block_start>write=args[0]<class_stmt>_LogAdaptor<block_start><def_stmt>__init__ self writer<block_start>self.writer=writer<block_end><def_stmt>write self msg<block_start>msg=msg.strip().replace('\n' ' ')<line_sep>self.writer(f'log:{proc_idx}:{msg}')<block_end><block_end>log_stream=_LogAdaptor(write)<line_sep>logging.config.dictConfig({'version':1 'handlers':{'console':{'class':'logging.StreamHandler' 'stream':log_stream 'level':'DEBUG' } } 'loggers':{'aiotools':{'handlers':['console'] 'level':'DEBUG' } } })<line_sep>log=logging.getLogger('aiotools')<line_sep>write(f'started:{proc_idx}')<line_sep>log.debug('hello')<if_stmt>proc_idx<in>(0 2)# delay until other workers start normally. <block_start><await>asyncio.sleep(0.1<times>proc_idx)<line_sep><raise>ZeroDivisionError('oops')<block_end><yield><line_sep># should not be reached if errored. <await>asyncio.sleep(0)<line_sep>write(f'terminated:{proc_idx}')<block_end><def_stmt>test_server_worker_init_error restore_signal exec_recorder<block_start>write,read=exec_recorder<line_sep>aiotools.start_server(myserver_worker_init_error num_workers=4 args=(write ) )<line_sep>lines=set(read())<assert_stmt>sum(1<if>line.startswith('started:')<else>0<for>line lines)<eq>4<line_sep># workers who did not raise errors have already started, # and they should have terminated normally # when the errorneous worker interrupted the main loop. 
<assert_stmt>sum(1<if>line.startswith('terminated:')<else>0<for>line lines)<eq>2<assert_stmt>sum(1<if>'hello'<in>line<else>0<for>line lines)<eq>4<assert_stmt>sum(1<if>'ZeroDivisionError: oops'<in>line<else>0<for>line lines)<eq>2<block_end><def_stmt>test_server_user_main set_timeout restore_signal<block_start>main_enter=<false><line_sep>main_exit=<false><line_sep>@aiotools.main<def_stmt>mymain_user_main <block_start><nonlocal>main_enter main_exit<line_sep>main_enter=<true><line_sep><yield>987<line_sep>main_exit=<true><block_end>@aiotools.server# type: ignore <async_keyword><def_stmt>myworker_user_main loop proc_idx args<block_start><assert_stmt>args[0]<eq>987# first arg from user main <assert_stmt>args[1]<eq>123# second arg from start_server args <yield><block_end>set_timeout(0.2 interrupt)<line_sep>aiotools.start_server(myworker_user_main mymain_user_main num_workers=3 args=(123 ) )<assert_stmt>main_enter<assert_stmt>main_exit<block_end><def_stmt>test_server_user_main_custom_stop_signals set_timeout restore_signal<block_start>main_enter=<false><line_sep>main_exit=<false><line_sep>main_signal=<none><line_sep>worker_signals=mp.Array('i' 3)<line_sep>@aiotools.main<def_stmt>mymain <block_start><nonlocal>main_enter main_exit main_signal<line_sep>main_enter=<true><line_sep>main_signal=<yield><line_sep>main_exit=<true><block_end>@aiotools.server<async_keyword><def_stmt>myworker loop proc_idx args<block_start>worker_signals=args[0]<line_sep>worker_signals[proc_idx]=<yield><block_end><def_stmt>noop signum frame<block_start><pass><block_end>set_timeout(0.2 interrupt_usr1)<line_sep>aiotools.start_server(myworker mymain num_workers=3 stop_signals={signal.SIGUSR1} args=(worker_signals ) )<assert_stmt>main_enter<assert_stmt>main_exit<assert_stmt>main_signal<eq>signal.SIGUSR1<assert_stmt>list(worker_signals)<eq>[signal.SIGUSR1]<times>3<block_end><def_stmt>test_server_user_main_tuple set_timeout restore_signal<block_start>main_enter=<false><line_sep>main_exit=<false><line_sep>@aiotools.main<def_stmt>mymain <block_start><nonlocal>main_enter main_exit<line_sep>main_enter=<true><line_sep><yield>987 654<line_sep>main_exit=<true><block_end>@aiotools.server<async_keyword><def_stmt>myworker loop proc_idx args<block_start><assert_stmt>args[0]<eq>987# first arg from user main <assert_stmt>args[1]<eq>654# second arg from user main <assert_stmt>args[2]<eq>123# third arg from start_server args <yield><block_end>set_timeout(0.2 interrupt)<line_sep>aiotools.start_server(myworker mymain num_workers=3 args=(123 ) )<assert_stmt>main_enter<assert_stmt>main_exit<block_end><def_stmt>test_server_extra_proc set_timeout restore_signal<block_start>extras=mp.Array('i' [0 0])<def_stmt>extra_proc key _ pidx args<block_start><assert_stmt>_<is><none><line_sep>extras[key]=980+key<try_stmt><block_start><while_stmt><true><block_start>time.sleep(0.1)<block_end><block_end><except_stmt>KeyboardInterrupt<block_start>print(f'extra[{key}] interrupted' file=sys.stderr)<block_end><except_stmt>Exception<as>e<block_start>print(f'extra[{key}] exception' e file=sys.stderr)<block_end><finally_stmt><block_start>print(f'extra[{key}] finish' file=sys.stderr)<line_sep>extras[key]=990+key<block_end><block_end>@aiotools.server<async_keyword><def_stmt>myworker loop pidx args<block_start><yield><block_end>set_timeout(0.2 interrupt)<line_sep>aiotools.start_server(myworker extra_procs=[functools.partial(extra_proc 0) functools.partial(extra_proc 1)] num_workers=3 args=(123 
))<assert_stmt>extras[0]<eq>990<assert_stmt>extras[1]<eq>991<block_end><def_stmt>test_server_extra_proc_custom_stop_signal set_timeout restore_signal<block_start>received_signals=mp.Array('i' [0 0])<def_stmt>extra_proc key _ pidx args<block_start>received_signals=args[0]<try_stmt><block_start><while_stmt><true><block_start>time.sleep(0.1)<block_end><block_end><except_stmt>aiotools.InterruptedBySignal<as>e<block_start>received_signals[key]=e.args[0]<block_end><block_end>@aiotools.server<async_keyword><def_stmt>myworker loop pidx args<block_start><yield><block_end>set_timeout(0.3 interrupt_usr1)<line_sep>aiotools.start_server(myworker extra_procs=[functools.partial(extra_proc 0) functools.partial(extra_proc 1)] stop_signals={signal.SIGUSR1} args=(received_signals ) num_workers=3)<assert_stmt>received_signals[0]<eq>signal.SIGUSR1<assert_stmt>received_signals[1]<eq>signal.SIGUSR1<block_end>
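Distilled from the tests above, a minimal standalone server using only the APIs they exercise: an async-generator worker wrapped with @aiotools.server and launched with start_server. The worker runs until a stop signal (SIGINT/SIGTERM by default) arrives at the yield point.

import aiotools

@aiotools.server
async def echo_worker(loop, pidx, args):
    print(f"worker {pidx} started with args {args}")
    received_signum = yield                      # resumes when a stop signal arrives
    print(f"worker {pidx} stopping on signal {received_signum}")

if __name__ == "__main__":
    aiotools.start_server(echo_worker, num_workers=2, args=("hello",))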
<import_stmt>numpy<as>np<import_from_stmt>mlagents.trainers.buffer AgentBuffer AgentBufferField BufferKey ObservationKeyPrefix RewardSignalKeyPrefix <import_from_stmt>mlagents.trainers.trajectory ObsUtil<def_stmt>assert_array a b<block_start><assert_stmt>a.shape<eq>b.shape<line_sep>la=list(a.flatten())<line_sep>lb=list(b.flatten())<for_stmt>i range(len(la))<block_start><assert_stmt>la[i]<eq>lb[i]<block_end><block_end><def_stmt>construct_fake_buffer fake_agent_id<block_start>b=AgentBuffer()<for_stmt>step range(9)<block_start>b[ObsUtil.get_name_at(0)].append(np.array([100<times>fake_agent_id+10<times>step+1 100<times>fake_agent_id+10<times>step+2 100<times>fake_agent_id+10<times>step+3 ] dtype=np.float32 ))<line_sep>b[BufferKey.CONTINUOUS_ACTION].append(np.array([100<times>fake_agent_id+10<times>step+4 100<times>fake_agent_id+10<times>step+5 ] dtype=np.float32 ))<line_sep>b[BufferKey.GROUP_CONTINUOUS_ACTION].append([np.array([100<times>fake_agent_id+10<times>step+4 100<times>fake_agent_id+10<times>step+5 ] dtype=np.float32 )]<times>3)<block_end><return>b<block_end><def_stmt>test_buffer <block_start>agent_1_buffer=construct_fake_buffer(1)<line_sep>agent_2_buffer=construct_fake_buffer(2)<line_sep>agent_3_buffer=construct_fake_buffer(3)<line_sep># Test get_batch a=agent_1_buffer[ObsUtil.get_name_at(0)].get_batch(batch_size=2 training_length=1 sequential=<true>)<line_sep>assert_array(np.array(a) np.array([[171 172 173] [181 182 183]] dtype=np.float32))<line_sep># Test get_batch a=agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(batch_size=2 training_length=3 sequential=<true>)<line_sep>assert_array(np.array(a) np.array([[231 232 233] [241 242 243] [251 252 253] [261 262 263] [271 272 273] [281 282 283] ] dtype=np.float32 ) )<line_sep>a=agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(batch_size=2 training_length=3 sequential=<false>)<line_sep>assert_array(np.array(a) np.array([[251 252 253] [261 262 263] [271 272 273] [261 262 263] [271 272 273] [281 282 283] ]) )<line_sep># Test padding a=agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(batch_size=<none> training_length=4 sequential=<true>)<line_sep>assert_array(np.array(a) np.array([[201 202 203] [211 212 213] [221 222 223] [231 232 233] [241 242 243] [251 252 253] [261 262 263] [271 272 273] [281 282 283] [0 0 0] [0 0 0] [0 0 0] ]) )<line_sep># Test group entries return Lists of Lists. Make sure to pad properly! 
a=agent_2_buffer[BufferKey.GROUP_CONTINUOUS_ACTION].get_batch(batch_size=<none> training_length=4 sequential=<true>)<for_stmt>_group_entry a[:-3]<block_start><assert_stmt>len(_group_entry)<eq>3<block_end><for_stmt>_group_entry a[-3:]<block_start><assert_stmt>len(_group_entry)<eq>0<block_end>agent_1_buffer.reset_agent()<assert_stmt>agent_1_buffer.num_experiences<eq>0<line_sep>update_buffer=AgentBuffer()<line_sep>agent_2_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep>agent_3_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<assert_stmt>len(update_buffer[BufferKey.CONTINUOUS_ACTION])<eq>20<assert_stmt>np.array(update_buffer[BufferKey.CONTINUOUS_ACTION]).shape<eq>(20 2)<line_sep>c=update_buffer.make_mini_batch(start=0 end=1)<assert_stmt>c.keys()<eq>update_buffer.keys()<line_sep># Make sure the values of c are AgentBufferField <for_stmt>val c.values()<block_start><assert_stmt>isinstance(val AgentBufferField)<block_end><assert_stmt>np.array(c[BufferKey.CONTINUOUS_ACTION]).shape<eq>(1 2)<block_end><def_stmt>test_agentbufferfield # Test constructor <block_start>a=AgentBufferField([0 1 2])<for_stmt>i,num enumerate(a)<block_start><assert_stmt>num<eq>i<line_sep># Test indexing <assert_stmt>a[i]<eq>num<block_end># Test slicing b=a[1:3]<assert_stmt>b<eq>[1 2]<assert_stmt>isinstance(b AgentBufferField)<line_sep># Test padding c=AgentBufferField()<for_stmt>_ range(2)<block_start>c.append([np.array(1) np.array(2)])<block_end><for_stmt>_ range(2)<block_start>c.append([np.array(1)])<block_end>padded=c.padded_to_batch(pad_value=3)<assert_stmt>np.array_equal(padded[0] np.array([1 1 1 1]))<assert_stmt>np.array_equal(padded[1] np.array([2 2 3 3]))<line_sep># Make sure it doesn't fail when the field isn't a list padded_a=a.padded_to_batch()<assert_stmt>np.array_equal(padded_a a)<block_end><def_stmt>fakerandint values<block_start><return>19<block_end><def_stmt>test_buffer_sample <block_start>agent_1_buffer=construct_fake_buffer(1)<line_sep>agent_2_buffer=construct_fake_buffer(2)<line_sep>update_buffer=AgentBuffer()<line_sep>agent_1_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep>agent_2_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep># Test non-LSTM mb=update_buffer.sample_mini_batch(batch_size=4 sequence_length=1)<assert_stmt>mb.keys()<eq>update_buffer.keys()<assert_stmt>np.array(mb[BufferKey.CONTINUOUS_ACTION]).shape<eq>(4 2)<line_sep># Test LSTM # We need to check if we ever get a breaking start - this will maximize the probability mb=update_buffer.sample_mini_batch(batch_size=20 sequence_length=19)<assert_stmt>mb.keys()<eq>update_buffer.keys()<line_sep># Should only return one sequence <assert_stmt>np.array(mb[BufferKey.CONTINUOUS_ACTION]).shape<eq>(19 2)<block_end><def_stmt>test_num_experiences <block_start>agent_1_buffer=construct_fake_buffer(1)<line_sep>agent_2_buffer=construct_fake_buffer(2)<line_sep>update_buffer=AgentBuffer()<assert_stmt>len(update_buffer[BufferKey.CONTINUOUS_ACTION])<eq>0<assert_stmt>update_buffer.num_experiences<eq>0<line_sep>agent_1_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep>agent_2_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<assert_stmt>len(update_buffer[BufferKey.CONTINUOUS_ACTION])<eq>20<assert_stmt>update_buffer.num_experiences<eq>20<block_end><def_stmt>test_buffer_truncate 
<block_start>agent_1_buffer=construct_fake_buffer(1)<line_sep>agent_2_buffer=construct_fake_buffer(2)<line_sep>update_buffer=AgentBuffer()<line_sep>agent_1_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep>agent_2_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep># Test non-LSTM update_buffer.truncate(2)<assert_stmt>update_buffer.num_experiences<eq>2<line_sep>agent_1_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep>agent_2_buffer.resequence_and_append(update_buffer batch_size=<none> training_length=2)<line_sep># Test LSTM, truncate should be some multiple of sequence_length update_buffer.truncate(4 sequence_length=3)<assert_stmt>update_buffer.num_experiences<eq>3<for_stmt>buffer_field update_buffer.values()<block_start><assert_stmt>isinstance(buffer_field AgentBufferField)<block_end><block_end><def_stmt>test_key_encode_decode <block_start>keys=(list(BufferKey)+[(k 42)<for>k ObservationKeyPrefix]+[(k "gail")<for>k RewardSignalKeyPrefix])<for_stmt>k keys<block_start><assert_stmt>k<eq>AgentBuffer._decode_key(AgentBuffer._encode_key(k))<block_end><block_end><def_stmt>test_buffer_save_load <block_start>original=construct_fake_buffer(3)<import_stmt>io<line_sep>write_buffer=io.BytesIO()<line_sep>original.save_to_file(write_buffer)<line_sep>loaded=AgentBuffer()<line_sep>loaded.load_from_file(write_buffer)<assert_stmt>len(original)<eq>len(loaded)<for_stmt>k original.keys()<block_start><assert_stmt>np.allclose(original[k] loaded[k])<block_end><block_end>
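Condensed from the tests above, the typical AgentBuffer flow is: fill a per-agent buffer, fold it into a shared update buffer with resequence_and_append, then sample mini-batches from the update buffer. A short sketch using the helpers defined in this file:

agent_buffer = construct_fake_buffer(0)
update_buffer = AgentBuffer()
agent_buffer.resequence_and_append(update_buffer, batch_size=None, training_length=2)

mini_batch = update_buffer.sample_mini_batch(batch_size=4, sequence_length=1)
print(np.array(mini_batch[BufferKey.CONTINUOUS_ACTION]).shape)  # (4, 2)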
<import_stmt>Calculate<as>c<line_sep>print(c.sdev([1 2 3 4 5]))<line_sep>
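Calculate is a local module that is not shown here, so whether sdev() returns the population or the sample standard deviation depends on its implementation; for [1, 2, 3, 4, 5] the two candidates, computed with numpy for reference, are:

import numpy as np

print(np.std([1, 2, 3, 4, 5]))          # population std: sqrt(2)   ~= 1.4142
print(np.std([1, 2, 3, 4, 5], ddof=1))  # sample std:     sqrt(2.5) ~= 1.5811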
# models.py # Contact: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> """ Avocado is deep tensor factorization model for learning a latent representation of the human epigenome. This file has functions for building a deep tensor factorization model. """<import_from_stmt>.io data_generator<import_from_stmt>.io permuted_data_generator<import_from_stmt>.io sequential_data_generator<import_stmt>json<import_stmt>numpy<import_stmt>keras<import_from_stmt>keras.layers Input Embedding Dense<import_from_stmt>keras.layers Multiply Dot Flatten concatenate<import_from_stmt>keras.models Model<import_from_stmt>keras.optimizers Adam<def_stmt>build_model n_celltypes n_celltype_factors n_assays n_assay_factors n_genomic_positions n_25bp_factors n_250bp_factors n_5kbp_factors n_layers n_nodes freeze_celltypes=<false> freeze_assays=<false> freeze_genome_25bp=<false> freeze_genome_250bp=<false> freeze_genome_5kbp=<false> freeze_network=<false><block_start>"""This function builds a multi-scale deep tensor factorization model."""<line_sep>celltype_input=Input(shape=(1 ) name="celltype_input")<line_sep>celltype_embedding=Embedding(n_celltypes n_celltype_factors input_length=1 name="celltype_embedding")<line_sep>celltype_embedding.trainable=<not>freeze_celltypes<line_sep>celltype=Flatten()(celltype_embedding(celltype_input))<line_sep>assay_input=Input(shape=(1 ) name="assay_input")<line_sep>assay_embedding=Embedding(n_assays n_assay_factors input_length=1 name="assay_embedding")<line_sep>assay_embedding.trainable=<not>freeze_assays<line_sep>assay=Flatten()(assay_embedding(assay_input))<line_sep>genome_25bp_input=Input(shape=(1 ) name="genome_25bp_input")<line_sep>genome_25bp_embedding=Embedding(n_genomic_positions n_25bp_factors input_length=1 name="genome_25bp_embedding")<line_sep>genome_25bp_embedding.trainable=<not>freeze_genome_25bp<line_sep>genome_25bp=Flatten()(genome_25bp_embedding(genome_25bp_input))<line_sep>genome_250bp_input=Input(shape=(1 ) name="genome_250bp_input")<line_sep>genome_250bp_embedding=Embedding(int(n_genomic_positions/10)+1 n_250bp_factors input_length=1 name="genome_250bp_embedding")<line_sep>genome_250bp_embedding.trainable=<not>freeze_genome_250bp<line_sep>genome_250bp=Flatten()(genome_250bp_embedding(genome_250bp_input))<line_sep>genome_5kbp_input=Input(shape=(1 ) name="genome_5kbp_input")<line_sep>genome_5kbp_embedding=Embedding(int(n_genomic_positions/200)+1 n_5kbp_factors input_length=1 name="genome_5kbp_embedding")<line_sep>genome_5kbp_embedding.trainable=<not>freeze_genome_5kbp<line_sep>genome_5kbp=Flatten()(genome_5kbp_embedding(genome_5kbp_input))<line_sep>layers=[celltype assay genome_25bp genome_250bp genome_5kbp]<line_sep>inputs=(celltype_input assay_input genome_25bp_input genome_250bp_input genome_5kbp_input)<line_sep>x=concatenate(layers)<for_stmt>i range(n_layers)<block_start>layer=Dense(n_nodes activation='relu' name="dense_{}".format(i))<line_sep>layer.trainable=<not>freeze_network<line_sep>x=layer(x)<block_end>layer=Dense(1 name="y_pred")<line_sep>layer.trainable=<not>freeze_network<line_sep>y=layer(x)<line_sep>model=Model(inputs=inputs outputs=y)<line_sep>model.compile(optimizer='adam' loss='mse' metrics=['mse'])<line_sep><return>model<block_end><class_stmt>Avocado(object)<block_start>"""An Avocado multi-scale deep tensor factorization model. The Avocado model is a multi-scale deep tensor factorization model. It is multi-scale because it represents the genome axis using three different resolutions---25 bp, 250 bp and 5 kbp. 
It is deep because it replaces the dot product component of most linear factorization approaches with a deep neural network. The tensor factors and the neural network weights are trained jointly to impute the values in the tensor that it is provided. In this case Avocado is trained on epigenomic data whose dimensions are human cell type, epigenomic assay, and genomic coordinate. The trained model can impute epigenomic assays that have not yet been performed, and the learned factor values can themselves be used to represent genomic positions more compactly than the full set of epigenomic measurements could. The default parameters are those used in the manuscript entitled "Multi-scale deep tensor factorization learns a latent representation of the human epigenome". Parameters ---------- celltypes : list The list of cell type names that will be modeled assays : list The list of assays that will be modeled n_celltype_factors : int, optional The number of factors to use to represent each cell type. Default is 32. n_assay_factors : int, optional The number of factors to use to represent each assay. Default is 256. n_genomic_positions : int, optional The number of genomic positions to model. This is typically either the size of the pilot regions when performing initial training or the size of the chromosome when fitting the genomic latent factors. Default is 1126469, the size of the pilot regions in chr1-22. n_25bp_factors : int, optional The number of factors to use to represent the genome at 25 bp resolution. Default is 25. n_250bp_factors : int, optional The number of factors to use to represent the genome at 250 bp resolution. Default is 40. n_5kbp_factors : int, optional The number of factors to use to represent the genome at 5 kbp resolution. Default is 45. n_layers : int, optional The number of hidden layers in the neural model. Default is 2. n_nodes : int, optional The number of nodes per layer. Default is 2048. batch_size : int, optional The size of each batch to use in training. Defaut is 40000. freeze_celltypes : bool, optional Whether to freeze the training of the cell type embedding. Default is False. freeze_assays : bool, optional Whether to freeze the training of the assay embeddings. Default is False. freeze_genome_25bp : bool, optional Whether to freeze the training of the 25 bp genome factors. Default is False. freeze_genome_250bp : bool, optional Whether to freeze the training of the 250 bp genome factors. Default is False. freeze_genome_5kbp : bool, optional Whether to freeze the training of the 5 kbp genome factors. Default is False. freeze_network : bool, optional Whether to freeze the training of the neural network. Default is False. 
Example ------- >>> import numpy, itertools >>> from avocado import Avocado >>> >>> celltypes = ['E003', 'E017', 'E065', 'E116', 'E117'] >>> assays = ['H3K4me3', 'H3K27me3', 'H3K36me3', 'H3K9me3', 'H3K4me1'] >>> >>> data = {} >>> for celltype, assay in itertools.product(celltypes, assays): >>> filename = 'data/{}.{}.pilot.arcsinh.npz'.format(celltype, assay) >>> data[(celltype, assay)] = numpy.load(filename)['arr_0'] >>> >>> model = Avocado(celltypes, assays) >>> model.fit(data) >>> >>> track = model.predict("E065", "H3K27me3") """<def_stmt>__init__ self celltypes assays n_celltype_factors=32 n_assay_factors=256 n_genomic_positions=1126469 n_25bp_factors=25 n_250bp_factors=40 n_5kbp_factors=45 n_layers=2 n_nodes=2048 batch_size=40000 freeze_celltypes=<false> freeze_assays=<false> freeze_genome_25bp=<false> freeze_genome_250bp=<false> freeze_genome_5kbp=<false> freeze_network=<false><block_start>self.celltypes=list(celltypes)<line_sep>self.assays=list(assays)<line_sep>self.experiments=[]<line_sep>self.n_celltypes=len(celltypes)<line_sep>self.n_assays=len(assays)<line_sep>self.batch_size=batch_size<line_sep>self.n_celltype_factors=n_celltype_factors<line_sep>self.n_celltype_factors=n_celltype_factors<line_sep>self.n_assay_factors=n_assay_factors<line_sep>self.n_genomic_positions=n_genomic_positions<line_sep>self.n_25bp_factors=n_25bp_factors<line_sep>self.n_250bp_factors=n_250bp_factors<line_sep>self.n_5kbp_factors=n_5kbp_factors<line_sep>self.n_layers=n_layers<line_sep>self.n_nodes=n_nodes<line_sep>self.freeze_celltypes=freeze_celltypes<line_sep>self.freeze_assays=freeze_assays<line_sep>self.freeze_genome_25bp=freeze_genome_25bp<line_sep>self.freeze_genome_250bp=freeze_genome_250bp<line_sep>self.freeze_genome_5kbp=freeze_genome_5kbp<line_sep>self.freeze_network=freeze_network<line_sep>self.model=build_model(n_celltypes=self.n_celltypes n_celltype_factors=n_celltype_factors n_assays=self.n_assays n_assay_factors=n_assay_factors n_genomic_positions=n_genomic_positions n_25bp_factors=n_25bp_factors n_250bp_factors=n_250bp_factors n_5kbp_factors=n_5kbp_factors n_layers=n_layers n_nodes=n_nodes freeze_celltypes=freeze_celltypes freeze_assays=freeze_assays freeze_genome_25bp=freeze_genome_25bp freeze_genome_250bp=freeze_genome_250bp freeze_genome_5kbp=freeze_genome_5kbp freeze_network=freeze_network)<block_end>@property<def_stmt>celltype_embedding self<block_start>"""Returns the learned cell type embedding as a numpy array. Parameters ---------- None Returns ------- celltype_embedding : numpy.ndarray, shape=(n_celltypes, n_factors) The learned embedding corresponding to the input name 'celltype_embedding'. The cell types are ordered according to the order defined in self.celltypes. """<for_stmt>layer self.model.layers<block_start><if_stmt>layer.name<eq>'celltype_embedding'<block_start><return>layer.get_weights()[0]<block_end><block_end><raise>ValueError("No layer in model named 'celltype_embedding'.")<block_end>@property<def_stmt>assay_embedding self<block_start>"""Returns the learned assay embedding as a numpy array. Parameters ---------- None Returns ------- assay_embedding : numpy.ndarray, shape=(n_assays, n_factors) The learned embedding corresponding to the input name 'assay_embedding'. The assays are ordered according to the order defined in self.assays. 
"""<for_stmt>layer self.model.layers<block_start><if_stmt>layer.name<eq>'assay_embedding'<block_start><return>layer.get_weights()[0]<block_end><block_end><raise>ValueError("No layer in model named 'assay_embedding'.")<block_end>@property<def_stmt>genome_embedding self<block_start>"""Returns the learned genomic embedding as a numpy array. This function will concatenate together the three resolutions of genomic factors, such that the first columns correspond to the 25 bp factors, the next columns correspond to the 250 bp factors, and the final columns correspond to the 5 kbp factors. The factors that span more than 25 bp will be repeated across several successive positions Parameters ---------- None Returns ------- genome_embedding : numpy.ndarray, shape=(n_genomic_positions, n_25bp_factors + n_250bp_factors + n_5kbp_factors) The learned embedding corresponding to the input names genome_25bp_embedding, genome_250bp_embedding, and genome_5kbp_embedding. """<line_sep>n_25bp=self.n_25bp_factors<line_sep>n_250bp=self.n_250bp_factors<line_sep>n_5kbp=self.n_5kbp_factors<line_sep>genome_embedding=numpy.empty((self.n_genomic_positions n_25bp+n_250bp+n_5kbp))<for_stmt>layer self.model.layers<block_start><if_stmt>layer.name<eq>'genome_25bp_embedding'<block_start>genome_25bp_embedding=layer.get_weights()[0]<block_end><elif_stmt>layer.name<eq>'genome_250bp_embedding'<block_start>genome_250bp_embedding=layer.get_weights()[0]<block_end><elif_stmt>layer.name<eq>'genome_5kbp_embedding'<block_start>genome_5kbp_embedding=layer.get_weights()[0]<block_end><block_end>n1=n_25bp<line_sep>n2=n_25bp+n_250bp<for_stmt>i range(self.n_genomic_positions)<block_start>genome_embedding[i :n1]=genome_25bp_embedding[i]<line_sep>genome_embedding[i n1:n2]=genome_250bp_embedding[i<floordiv>10]<line_sep>genome_embedding[i n2:]=genome_5kbp_embedding[i<floordiv>200]<block_end><return>genome_embedding<block_end><def_stmt>summary self<block_start>"""A wrapper method for the keras summary method."""<line_sep>self.model.summary()<block_end><def_stmt>fit self X_train X_valid=<none> n_epochs=200 epoch_size=120 verbose=1 callbacks=<none> sampling='sequential' input_generator=<none> **kwargs<block_start>"""Fit the model to the given epigenomic tracks. Pass in a dictionary of training data and an optional dictionary of validation data. The keys to this dictionary are a tuple of the format (celltype, assay) and the values are the corresponding track in the form of a numpy array. The tracks can either be in the form of an array that is in memory or as a memory map. Parameters ---------- X_train : dict A dictionary of training data values, where the keys are a tuple of (celltype, assay) and the values are a track. X_valid : dict or None, optional A dictionary of validation data values that are used to calculate validation set MSE during the training process. If None, validation set statistics are not calculated during the training process. Default is None. n_epochs : int, optional The number of epochs to train on before ending training. Default is 120. epoch_size : int, optional The number of batches per epoch. Default is 200. verbose: int, optional The verbosity level of training. Must be one of 0, 1, or 2, where 0 means silent, 1 means progress bar, and 2 means use only one line per epoch. Default is 1. callbacks : list or None, optional A list of keras callback instances to be called during training. sampling : str, optional The sampling strategy to use for the generators. 
Must be one of the following: 'sequential' : Sequentially scans through the genome indexes, selecting a cell type and assay randomly at each position 'permuted' : Sequentially scans through a permuted version of the genome indexes, such that each epoch sees every genomic index once, but each batch sees nearly random indexes 'random' : Randomly selects genomic positions. No guarantee on the number of times each position has been seen. Default is 'sequential'. input_generator : generator or None, optional A custom data generator object to be used in the place of the default generator. This will only change the training generator, not the validation generator. Default is None. **kwargs : optional Any other keyword arguments to be passed into the `fit_generator` method. Returns ------- history : keras.History.history The keras history object that records training loss values and metric values. """<if_stmt><not>isinstance(X_train dict)<block_start><raise>ValueError("X_train must be a dictionary where the keys"<concat>" are (celltype, assay) tuples and the values are the track"<concat>" corresponding to that pair.")<block_end><if_stmt>X_valid<is><not><none><and><not>isinstance(X_valid dict)<block_start><raise>ValueError("X_valid must be a dictionary where the keys"<concat>" are (celltype, assay) tuples and the values are the track"<concat>" corresponding to that pair.")<block_end><for_stmt>(celltype assay),track X_train.items()<block_start><if_stmt>celltype<not><in>self.celltypes<block_start><raise>ValueError("Celltype {} appears in the training data "<concat>"but not in the list of cell types provided to the "<concat>"model.".format(celltype))<block_end><if_stmt>assay<not><in>self.assays<block_start><raise>ValueError("Assay {} appears in the training data "<concat>"but not in the list of assays provided to the "<concat>"model.".format(assay))<block_end><if_stmt>len(track)<ne>self.n_genomic_positions<block_start><raise>ValueError("The track corresponding to {} {} is of "<concat>"size {} while the model encodes {} genomic "<concat>"positions".format(celltype assay len(track) self.n_genomic_positions))<block_end><block_end><if_stmt>X_valid<is><not><none><block_start><for_stmt>(celltype assay),track X_valid.items()<block_start><if_stmt>celltype<not><in>self.celltypes<block_start><raise>ValueError("Celltype {} appears in the validation "<concat>"data but not in the list of cell types provided to "<concat>"the model.".format(celltype))<block_end><if_stmt>assay<not><in>self.assays<block_start><raise>ValueError("Assay {} appears in the validation "<concat>"data but not in the list of assays provided to the "<concat>"model.".format(assay))<block_end><if_stmt>len(track)<ne>self.n_genomic_positions<block_start><raise>ValueError("The track corresponding to {} {} is of "<concat>"size {} while the model encodes {} genomic "<concat>"positions".format(celltype assay len(track) self.n_genomic_positions))<block_end><block_end><block_end><if_stmt>input_generator<is><not><none><block_start>X_train_gen=input_generator<block_end><elif_stmt>sampling<eq>'sequential'<block_start>X_train_gen=sequential_data_generator(self.celltypes self.assays X_train self.n_genomic_positions self.batch_size)<block_end><elif_stmt>sampling<eq>'permuted'<block_start>X_train_gen=permuted_data_generator(self.celltypes self.assays X_train self.n_genomic_positions self.batch_size)<block_end><elif_stmt>sampling<eq>'random'<block_start>X_train_gen=permuted_data_generator(self.celltypes self.assays X_train self.n_genomic_positions 
self.batch_size)<block_end><if_stmt>X_valid<is><not><none><block_start>X_valid_gen=data_generator(self.celltypes self.assays X_valid self.n_genomic_positions self.batch_size)<line_sep>history=self.model.fit_generator(X_train_gen epoch_size n_epochs workers=1 validation_data=X_valid_gen validation_steps=30 verbose=verbose callbacks=callbacks **kwargs)<block_end><else_stmt><block_start>history=self.model.fit_generator(X_train_gen epoch_size n_epochs workers=1 verbose=verbose callbacks=callbacks **kwargs)<block_end>self.experiments=list(X_train.keys())<line_sep><return>history<block_end><def_stmt>fit_celltypes self X_train X_valid=<none> n_epochs=200 epoch_size=120 verbose=1 callbacks=<none> **kwargs<block_start>"""Add a new cell type(s) to an otherwise frozen model. This method will add a new cell type to the cell type embedding after freezing all of the other parameters in the model, including weights and the other cell type positions. Functionally it will train a new cell type embedding and return a new model whose cell type embedding is the concatenation of the old cell type embedding and the new one. Pass in a dictionary of training data and an optional dictionary of validation data. The keys to this dictionary are a tuple of the format (celltype, assay) and the values are the corresponding track in the form of a numpy array. The tracks can either be in the form of an array that is in memory or as a memory map. The celltypes provided should not appear in the model.celltypes attribute but the assays should exclusively appear in the model.assays attribute. Parameters ---------- X_train : dict A dictionary of training data values, where the keys are a tuple of (celltype, assay) and the values are a track. X_valid : dict or None, optional A dictionary of validation data values that are used to calculate validation set MSE during the training process. If None, validation set statistics are not calculated during the training process. Default is None. n_epochs : int, optional The number of epochs to train on before ending training. Default is 120. epoch_size : int, optional The number of batches per epoch. Default is 200. verbose: int, optional The verbosity level of training. Must be one of 0, 1, or 2, where 0 means silent, 1 means progress bar, and 2 means use only one line per epoch. callbacks : list or None, optional A list of keras callback instances to be called during training. **kwargs : optional Any other keyword arguments to be passed into the `fit_generator` method. Returns ------- history : keras.History.history The keras history object that records training loss values and metric values. 
"""<if_stmt><not>isinstance(X_train dict)<block_start><raise>ValueError("X_train must be a dictionary where the keys"<concat>" are (celltype, assay) tuples and the values are the track"<concat>" corresponding to that pair.")<block_end><if_stmt>X_valid<is><not><none><and><not>isinstance(X_valid dict)<block_start><raise>ValueError("X_valid must be a dictionary where the keys"<concat>" are (celltype, assay) tuples and the values are the track"<concat>" corresponding to that pair.")<block_end><for_stmt>(celltype assay),track X_train.items()<block_start><if_stmt>celltype<in>self.celltypes<block_start><raise>ValueError("Celltype {} appears in the training data "<concat>"and also in the list of cell types already in the "<concat>"model.".format(celltype))<block_end><if_stmt>assay<not><in>self.assays<block_start><raise>ValueError("Assay {} appears in the training data "<concat>"but not in the list of assays provided to the "<concat>"model.".format(assay))<block_end><if_stmt>len(track)<ne>self.n_genomic_positions<block_start><raise>ValueError("The track corresponding to {} {} is of "<concat>"size {} while the model encodes {} genomic "<concat>"positions".format(celltype assay len(track) self.n_genomic_positions))<block_end><block_end><if_stmt>X_valid<is><not><none><block_start><for_stmt>(celltype assay),track X_valid.items()<block_start><if_stmt>celltype<in>self.celltypes<block_start><raise>ValueError("Celltype {} appears in the validation "<concat>"data and also in the list of cell types already in "<concat>"the model.".format(celltype))<block_end><if_stmt>assay<not><in>self.assays<block_start><raise>ValueError("Assay {} appears in the training data "<concat>"but not in the list of assays provided to the "<concat>"model.".format(assay))<block_end><if_stmt>len(track)<ne>self.n_genomic_positions<block_start><raise>ValueError("The track corresponding to {} {} is of "<concat>"size {} while the model encodes {} genomic "<concat>"positions".format(celltype assay len(track) self.n_genomic_positions))<block_end><block_end><block_end>new_celltypes=list(numpy.unique([ct<for>ct,_ X_train.keys()]))<line_sep>model=build_model(n_celltypes=len(new_celltypes) n_celltype_factors=self.n_celltype_factors n_assays=self.n_assays n_assay_factors=self.n_assay_factors n_genomic_positions=self.n_genomic_positions n_25bp_factors=self.n_25bp_factors n_250bp_factors=self.n_250bp_factors n_5kbp_factors=self.n_5kbp_factors n_layers=self.n_layers n_nodes=self.n_nodes freeze_celltypes=<false> freeze_assays=<true> freeze_genome_25bp=<true> freeze_genome_250bp=<true> freeze_genome_5kbp=<true> freeze_network=<true>)<for_stmt>old_layer,new_layer zip(self.model.layers model.layers)<block_start><if_stmt>'input'<in>old_layer.name<block_start><continue><block_end><if_stmt>old_layer.name<eq>'celltype_embedding'<block_start><continue><block_end>new_layer.set_weights(old_layer.get_weights())<block_end>X_train_gen=sequential_data_generator(new_celltypes self.assays X_train self.n_genomic_positions self.batch_size)<if_stmt>X_valid<is><not><none><block_start>X_valid_gen=data_generator(new_celltypes self.assays X_valid self.n_genomic_positions self.batch_size)<line_sep>history=model.fit_generator(X_train_gen epoch_size n_epochs workers=1 validation_data=X_valid_gen validation_steps=30 verbose=verbose callbacks=callbacks **kwargs)<block_end><else_stmt><block_start>history=model.fit_generator(X_train_gen epoch_size n_epochs workers=1 verbose=verbose callbacks=callbacks **kwargs)<block_end><for_stmt>layer 
self.model.layers<block_start><if_stmt>layer.name<eq>'celltype_embedding'<block_start>celltype_embedding=layer.get_weights()[0]<line_sep><break><block_end><block_end><for_stmt>layer model.layers<block_start><if_stmt>layer.name<eq>'celltype_embedding'<block_start>new_celltype_embedding=layer.get_weights()[0]<line_sep><break><block_end><block_end>celltype_embedding=numpy.concatenate([celltype_embedding new_celltype_embedding])<line_sep>self.celltypes.extend(new_celltypes)<line_sep>self.n_celltypes=len(self.celltypes)<line_sep>model=build_model(n_celltypes=self.n_celltypes n_celltype_factors=self.n_celltype_factors n_assays=self.n_assays n_assay_factors=self.n_assay_factors n_genomic_positions=self.n_genomic_positions n_25bp_factors=self.n_25bp_factors n_250bp_factors=self.n_250bp_factors n_5kbp_factors=self.n_5kbp_factors n_layers=self.n_layers n_nodes=self.n_nodes freeze_celltypes=self.freeze_celltypes freeze_assays=self.freeze_assays freeze_genome_25bp=self.freeze_genome_25bp freeze_genome_250bp=self.freeze_genome_250bp freeze_genome_5kbp=self.freeze_genome_5kbp freeze_network=self.freeze_network)<for_stmt>old_layer,new_layer zip(self.model.layers model.layers)<block_start><if_stmt>'input'<in>old_layer.name<block_start><continue><block_end><if_stmt>old_layer.name<eq>'celltype_embedding'<block_start>new_layer.set_weights([celltype_embedding])<block_end><else_stmt><block_start>new_layer.set_weights(old_layer.get_weights())<block_end><block_end>model.experiments=self.experiments+list(X_train.keys())<line_sep>self.model=model<line_sep><return>history<block_end><def_stmt>fit_assays self X_train X_valid=<none> n_epochs=200 epoch_size=120 verbose=1 callbacks=<none> **kwargs<block_start>"""Add a new assay(s) to an otherwise frozen model. This method will add a new assay to the assay embedding after freezing all of the other parameters in the model, including weights and the other assay positions. Functionally it will train a new assay embedding and return a new model whose assay embedding is the concatenation of the old assay embedding and the new one. Pass in a dictionary of training data and an optional dictionary of validation data. The keys to this dictionary are a tuple of the format (celltype, assay) and the values are the corresponding track in the form of a numpy array. The tracks can either be in the form of an array that is in memory or as a memory map. The assays provided should not appear in the model.assays attribute, but the cell types should appear in the model.celltypes attribute. Parameters ---------- X_train : dict A dictionary of training data values, where the keys are a tuple of (celltype, assay) and the values are a track. X_valid : dict or None, optional A dictionary of validation data values that are used to calculate validation set MSE during the training process. If None, validation set statistics are not calculated during the training process. Default is None. n_epochs : int, optional The number of epochs to train on before ending training. Default is 120. epoch_size : int, optional The number of batches per epoch. Default is 200. verbose: int, optional The verbosity level of training. Must be one of 0, 1, or 2, where 0 means silent, 1 means progress bar, and 2 means use only one line per epoch. callbacks : list or None, optional A list of keras callback instances to be called during training. **kwargs : optional Any other keyword arguments to be passed into the `fit_generator` method. 
Returns ------- history : keras.History.history The keras history object that records training loss values and metric values. """<if_stmt><not>isinstance(X_train dict)<block_start><raise>ValueError("X_train must be a dictionary where the keys"<concat>" are (celltype, assay) tuples and the values are the track"<concat>" corresponding to that pair.")<block_end><if_stmt>X_valid<is><not><none><and><not>isinstance(X_valid dict)<block_start><raise>ValueError("X_valid must be a dictionary where the keys"<concat>" are (celltype, assay) tuples and the values are the track"<concat>" corresponding to that pair.")<block_end><for_stmt>(celltype assay),track X_train.items()<block_start><if_stmt>celltype<not><in>self.celltypes<block_start><raise>ValueError("Celltype {} appears in the training data "<concat>"but not in the list of cell types already in the "<concat>"model.".format(celltype))<block_end><if_stmt>assay<in>self.assays<block_start><raise>ValueError("Assay {} appears in the training data "<concat>"and also in the list of assays already in the "<concat>"model.".format(assay))<block_end><if_stmt>len(track)<ne>self.n_genomic_positions<block_start><raise>ValueError("The track corresponding to {} {} is of "<concat>"size {} while the model encodes {} genomic "<concat>"positions".format(celltype assay len(track) self.n_genomic_positions))<block_end><block_end><if_stmt>X_valid<is><not><none><block_start><for_stmt>(celltype assay),track X_valid.items()<block_start><if_stmt>celltype<not><in>self.celltypes<block_start><raise>ValueError("Celltype {} appears in the validation "<concat>"data but not in the list of cell types already in "<concat>"the model.".format(celltype))<block_end><if_stmt>assay<in>self.assays<block_start><raise>ValueError("Assay {} appears in the training data "<concat>"and also in the list of assays already in the "<concat>"model.".format(assay))<block_end><if_stmt>len(track)<ne>self.n_genomic_positions<block_start><raise>ValueError("The track corresponding to {} {} is of "<concat>"size {} while the model encodes {} genomic "<concat>"positions".format(celltype assay len(track) self.n_genomic_positions))<block_end><block_end><block_end>new_assays=list(numpy.unique([assay<for>_,assay X_train.keys()]))<line_sep>model=build_model(n_celltypes=self.n_celltypes n_celltype_factors=self.n_celltype_factors n_assays=len(new_assays) n_assay_factors=self.n_assay_factors n_genomic_positions=self.n_genomic_positions n_25bp_factors=self.n_25bp_factors n_250bp_factors=self.n_250bp_factors n_5kbp_factors=self.n_5kbp_factors n_layers=self.n_layers n_nodes=self.n_nodes freeze_celltypes=<true> freeze_assays=<false> freeze_genome_25bp=<true> freeze_genome_250bp=<true> freeze_genome_5kbp=<true> freeze_network=<true>)<for_stmt>old_layer,new_layer zip(self.model.layers model.layers)<block_start><if_stmt>'input'<in>old_layer.name<block_start><continue><block_end><if_stmt>old_layer.name<eq>'assay_embedding'<block_start><continue><block_end>new_layer.set_weights(old_layer.get_weights())<block_end>X_train_gen=sequential_data_generator(self.celltypes new_assays X_train self.n_genomic_positions self.batch_size)<if_stmt>X_valid<is><not><none><block_start>X_valid_gen=data_generator(self.celltypes new_assays X_valid self.n_genomic_positions self.batch_size)<line_sep>history=model.fit_generator(X_train_gen epoch_size n_epochs workers=1 validation_data=X_valid_gen validation_steps=30 verbose=verbose callbacks=callbacks **kwargs)<block_end><else_stmt><block_start>history=model.fit_generator(X_train_gen epoch_size n_epochs 
workers=1 verbose=verbose callbacks=callbacks **kwargs)<block_end><for_stmt>layer self.model.layers<block_start><if_stmt>layer.name<eq>'assay_embedding'<block_start>assay_embedding=layer.get_weights()[0]<line_sep><break><block_end><block_end><for_stmt>layer model.layers<block_start><if_stmt>layer.name<eq>'assay_embedding'<block_start>new_assay_embedding=layer.get_weights()[0]<line_sep><break><block_end><block_end>assay_embedding=numpy.concatenate([assay_embedding new_assay_embedding])<line_sep>self.assays.extend(new_assays)<line_sep>self.n_assays=len(self.assays)<line_sep>model=build_model(n_celltypes=self.n_celltypes n_celltype_factors=self.n_celltype_factors n_assays=self.n_assays n_assay_factors=self.n_assay_factors n_genomic_positions=self.n_genomic_positions n_25bp_factors=self.n_25bp_factors n_250bp_factors=self.n_250bp_factors n_5kbp_factors=self.n_5kbp_factors n_layers=self.n_layers n_nodes=self.n_nodes freeze_celltypes=self.freeze_celltypes freeze_assays=self.freeze_assays freeze_genome_25bp=self.freeze_genome_25bp freeze_genome_250bp=self.freeze_genome_250bp freeze_genome_5kbp=self.freeze_genome_5kbp freeze_network=self.freeze_network)<for_stmt>old_layer,new_layer zip(self.model.layers model.layers)<block_start><if_stmt>'input'<in>old_layer.name<block_start><continue><block_end><if_stmt>old_layer.name<eq>'assay_embedding'<block_start>new_layer.set_weights([assay_embedding])<block_end><else_stmt><block_start>new_layer.set_weights(old_layer.get_weights())<block_end><block_end>model.experiments=self.experiments+list(X_train.keys())<line_sep>self.model=model<line_sep><return>history<block_end><def_stmt>predict self celltype assay start=0 end=<none> verbose=0<block_start>"""Predict a track of epigenomic data. This will predict a track of epigenomic data, resulting in one signal value per genomic position modeled. Users pass in the cell type and the assay that they wish to impute and receive the track of data. Parameters ---------- celltype : str The cell type (aka biosample) to be imputed. Must be one of the elements from the list of cell types passed in upon model initialization. assay : str The assay to be imputed. Must be one of the elements from the list of assays passed in upon model initialization. start : int, optional The start position to begin the imputation at. By default this is 0, corresponding to the start of the track. The value is which 25 bp bin to begin prediction at, not the raw genomic coordinate. end : int or None, optional The end position to stop making imputations at, exclusive. By default this is None, meaning to end at `self.n_genomic_positions.`. verbose : int, optional The verbosity level of the prediction. Must be 0 or 1. Returns ------- track : numpy.ndarray A track of epigenomic signal value predictions for the specified cell type and assay for the considered genomic positions. 
"""<if_stmt>end<is><not><none><and>end<le>start<block_start><raise>ValueError("When given, the end coordinate must be greater"<concat>" than the start coordinate.")<block_end><if_stmt>end<is><none><block_start>end=self.n_genomic_positions<block_end>celltype_idx=self.celltypes.index(celltype)<line_sep>assay_idx=self.assays.index(assay)<line_sep>celltype_idxs=numpy.ones(end-start)<times>celltype_idx<line_sep>assay_idxs=numpy.ones(end-start)<times>assay_idx<line_sep>genomic_25bp_idxs=numpy.arange(start end)<line_sep>genomic_250bp_idxs=numpy.arange(start end)<floordiv>10<line_sep>genomic_5kbp_idxs=numpy.arange(start end)<floordiv>200<line_sep>X={'celltype_input':celltype_idxs 'assay_input':assay_idxs 'genome_25bp_input':genomic_25bp_idxs 'genome_250bp_input':genomic_250bp_idxs 'genome_5kbp_input':genomic_5kbp_idxs}<line_sep>track=self.model.predict(X batch_size=self.batch_size verbose=verbose)[: 0]<line_sep><return>track<block_end><def_stmt>get_params self<block_start>params=[]<for_stmt>layer model.layers<block_start>params.append(layers.get_weghts()[0])<block_end><block_end><def_stmt>save self name="avocado" separators=(',' ' : ') indent=4<block_start>"""Serialize the model to disk. This function produces two files. The first is a json file that has the model hyperparameters associated with it. The second is a h5 file that contains the architecture of the neural network model, the weights, and the optimizer. Parameters ---------- name : str, optional The name to use for the json and the h5 file that are stored. separators : tuple, optional The separators to use in the resulting JSON object. indent : int, optional The number of spaces to use in the indent of the JSON. Returns ------- None """<line_sep>d={'celltypes':self.celltypes 'assays':self.assays 'experiments':self.experiments 'n_celltype_factors':self.n_celltype_factors 'n_assay_factors':self.n_assay_factors 'n_genomic_positions':self.n_genomic_positions 'n_25bp_factors':self.n_25bp_factors 'n_250bp_factors':self.n_250bp_factors 'n_5kbp_factors':self.n_5kbp_factors 'n_layers':self.n_layers 'n_nodes':self.n_nodes 'batch_size':self.batch_size}<line_sep>d=json.dumps(d separators=separators indent=indent)<with_stmt>open("{}.json".format(name) "w")<as>outfile<block_start>outfile.write(d)<block_end>self.model.save("{}.h5".format(name))<block_end><def_stmt>load_weights self name verbose=0<block_start>"""Load serialized weights on a layer-by-layer case. Load the weights of a pre-saved model on a layer-by-layer case. This method will iterate through the layers of the serialized model and this model jointly and set the weights in this model to that of the serialized model should the weight matrices be of the same size. Should they not be of the same size it will not modify the current weight matrix. A primary use of this function should be after an initial model has been trained on the Pilot regions and now one is fitting a model to each of the chromosomes. The size of the genome factors will differ but the other components will remain the same. Correspondingly, the identically sized weight matrices are those that should be held constant while the differing size weight matrices should differ. Parameters ---------- name : str The suffix of the name of the weights file. verbose : int, optional The verbosity level when loading weights. 0 means silent, 1 means notify when a weight matrix has been set, 2 means notify what action has been taken on each layer. 
Returns ------- None """<line_sep>model=keras.models.load_model("{}.h5".format(name))<for_stmt>i,(self_layer layer) enumerate(zip(self.model.layers model.layers))<block_start>w=layer.get_weights()<line_sep>w0=self_layer.get_weights()<line_sep>name=self_layer.name<if_stmt>len(w)<eq>0<block_start><if_stmt>verbose<eq>2<block_start>print("{} has no weights to set".format(name))<block_end><continue><block_end><if_stmt>w[0].shape<ne>w0[0].shape<block_start><if_stmt>verbose<eq>2<block_start>print("{} is of different size and not set".format(name))<block_end><continue><block_end>self_layer.set_weights(w)<if_stmt>verbose<g>0<block_start>print("{} has been set from serialized model".format(name))<block_end><block_end><block_end>@classmethod<def_stmt>load self name freeze_celltypes=<false> freeze_assays=<false> freeze_genome_25bp=<false> freeze_genome_250bp=<false> freeze_genome_5kbp=<false> freeze_network=<false><block_start>"""Load a model that has been serialized to disk. The keras model that is saved to disk does not contain any of the wrapper information Parameters ---------- name : str The name of the file to load. There must be both a .json and a .h5 file with this suffix. For example, if "Avocado" is passed in, there must be both a "Avocado.json" and a "Avocado.h5" file to be loaded in. freeze_celltypes : bool, optional Whether to freeze the training of the cell type embedding. Default is False. freeze_assays : bool, optional Whether to freeze the training of the assay embeddings. Default is False. freeze_genome_25bp : bool, optional Whether to freeze the training of the 25 bp genome factors. Default is False. freeze_genome_250bp : bool, optional Whether to freeze the training of the 250 bp genome factors. Default is False. freeze_genome_5kbp : bool, optional Whether to freeze the training of the 5 kbp genome factors. Default is False. freeze_network : bool, optional Whether to freeze the training of the neural network. Default is False. Returns ------- model : Avocado An Avocado model. """<with_stmt>open("{}.json".format(name) "r")<as>infile<block_start>d=json.load(infile)<block_end><if_stmt>'experiments'<in>d<block_start>experiments=d['experiments']<del_stmt>d['experiments']<block_end><else_stmt><block_start>experiments=[]<block_end>model=Avocado(freeze_celltypes=freeze_celltypes freeze_assays=freeze_assays freeze_genome_25bp=freeze_genome_25bp freeze_genome_250bp=freeze_genome_250bp freeze_genome_5kbp=freeze_genome_5kbp freeze_network=freeze_network **d)<line_sep>model.experiments=experiments<line_sep>model.model=keras.models.load_model("{}.h5".format(name))<line_sep><return>model<block_end><block_end>
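A minimal usage sketch of the `Avocado` class defined above, following its docstring example. The cell types, assays, synthetic tracks, tiny factor sizes, and short training schedule are placeholders chosen so the sketch stays small; they are not the defaults documented in the class.

import itertools
import numpy

celltypes = ['E003', 'E017']
assays = ['H3K4me3', 'H3K27me3']

# Synthetic tracks standing in for arcsinh-transformed epigenomic signal.
n_positions = 2000
data = {
    (celltype, assay): numpy.random.rand(n_positions)
    for celltype, assay in itertools.product(celltypes, assays)
}
# Hold one experiment out so there is something left to impute.
del data[('E017', 'H3K27me3')]

model = Avocado(celltypes, assays,
                n_genomic_positions=n_positions,
                n_25bp_factors=5, n_250bp_factors=5, n_5kbp_factors=5,
                n_layers=1, n_nodes=64, batch_size=1000)
model.fit(data, n_epochs=2, epoch_size=10)

track = model.predict('E017', 'H3K27me3')   # imputed track, shape (n_positions,)
embedding = model.genome_embedding          # (n_positions, 5 + 5 + 5) genomic factors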
# coding=utf-8 """" Item Based Collaborative Filtering Recommender with Attributes (Item Attribute KNN) [Item Recommendation (Ranking)] Its philosophy is as follows: in order to determine the rating of User u on item m, we can find other movies that are similar to item m, and based on User u’s ratings on those similar movies we infer his rating on item m. However, instead of traditional ItemKNN, this approach uses a metadata or pre-computed similarity matrix. """<line_sep># © 2019. Case Recommender (MIT License) <import_from_stmt>collections defaultdict<import_stmt>numpy<as>np<import_from_stmt>caserec.recommenders.item_recommendation.itemknn ItemKNN<import_from_stmt>caserec.utils.process_data ReadFile<line_sep>__author__='<NAME> <<EMAIL>>'<class_stmt>ItemAttributeKNN(ItemKNN)<block_start><def_stmt>__init__ self train_file=<none> test_file=<none> output_file=<none> metadata_file=<none> similarity_file=<none> k_neighbors=30 rank_length=10 as_binary=<false> as_similar_first=<true> metadata_as_binary=<false> metadata_similarity_sep='\t' similarity_metric="cosine" sep='\t' output_sep='\t'<block_start>""" Item Attribute KNN for Item Recommendation This algorithm predicts a rank for each user based on the similar items that he/her consumed, using a metadata or similarity pre-computed file Usage:: >> ItemAttributeKNN(train, test, similarity_file=sim_matrix, as_similar_first=True).compute() >> ItemAttributeKNN(train, test, metadata_file=metadata, as_similar_first=True).compute() :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File with dir to write the final predictions :type output_file: str, default None :param metadata_file: File which contains the metadata set. This file needs to have at least 2 columns (item metadata). :type metadata_file: str, default None :param similarity_file: File which contains the similarity set. This file needs to have at least 3 columns (item item similarity). :type similarity_file: str, default None :param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users)) :type k_neighbors: int, default None :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm :type rank_length: int, default 10 :param as_binary: If True, the explicit feedback will be transform to binary :type as_binary: bool, default False :param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k most similar users and then take the intersection with the users that seen that item. :type as_similar_first: bool, default True :param metadata_as_binary: f True, the explicit value will be transform to binary :type metadata_as_binary: bool, default False :param metadata_similarity_sep: Delimiter for similarity or metadata file :type metadata_similarity_sep: str, default '\t' :param similarity_metric: Pairwise metric to compute the similarity between the items. 
Reference about distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html :type similarity_metric: str, default cosine :param sep: Delimiter for input files file :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t' """<line_sep>super(ItemAttributeKNN self).__init__(train_file=train_file test_file=test_file output_file=output_file k_neighbors=k_neighbors rank_length=rank_length as_binary=as_binary as_similar_first=as_similar_first similarity_metric=similarity_metric sep=sep output_sep=output_sep)<line_sep>self.recommender_name='Item Attribute KNN Algorithm'<line_sep>self.metadata_file=metadata_file<line_sep>self.similarity_file=similarity_file<line_sep>self.metadata_as_binary=metadata_as_binary<line_sep>self.metadata_similarity_sep=metadata_similarity_sep<block_end><def_stmt>init_model self<block_start>""" Method to fit the model. Create and calculate a similarity matrix by metadata file or a pre-computed similarity matrix """<line_sep>self.similar_items=defaultdict(list)<line_sep># Set the value for k <if_stmt>self.k_neighbors<is><none><block_start>self.k_neighbors=int(np.sqrt(len(self.items)))<block_end><if_stmt>self.metadata_file<is><not><none><block_start>metadata=ReadFile(self.metadata_file sep=self.metadata_similarity_sep as_binary=self.metadata_as_binary).read_metadata_or_similarity()<line_sep>self.matrix=np.zeros((len(self.items) len(metadata['col_2'])))<line_sep>meta_to_meta_id={}<for_stmt>m,data enumerate(metadata['col_2'])<block_start>meta_to_meta_id[data]=m<block_end><for_stmt>item metadata['col_1']<block_start><for_stmt>m metadata['dict'][item]<block_start>self.matrix[self.item_to_item_id[item] meta_to_meta_id[m]]=metadata['dict'][item][m]<block_end><block_end># create header info for metadata sparsity=(1-(metadata['number_interactions']/(len(metadata['col_1'])<times>len(metadata['col_2']))))<times>100<line_sep>self.extra_info_header=">> metadata:: %d items and %d metadata (%d interactions) | sparsity:: %.2f%%"%(len(metadata['col_1']) len(metadata['col_2']) metadata['number_interactions'] sparsity)<line_sep># Create similarity matrix based on metadata or similarity file. Transpose=False, because it is an # item x metadata matrix self.si_matrix=self.compute_similarity(transpose=<false>)<block_end><elif_stmt>self.similarity_file<is><not><none><block_start>similarity=ReadFile(self.similarity_file sep=self.metadata_similarity_sep as_binary=<false>).read_metadata_or_similarity()<line_sep>self.si_matrix=np.zeros((len(self.items) len(self.items)))<line_sep># Fill similarity matrix <for_stmt>i similarity['col_1']<block_start><for_stmt>i_j similarity['dict'][i]<block_start>self.si_matrix[self.item_to_item_id[i] self.item_to_item_id[int(i_j)]]=similarity['dict'][i][i_j]<block_end><block_end># Remove NaNs self.si_matrix[np.isnan(self.si_matrix)]=0.0<block_end><else_stmt><block_start><raise>ValueError("This algorithm needs a similarity matrix or a metadata file!")<block_end># Create original matrix user x item for prediction process self.create_matrix()<for_stmt>i_id,item enumerate(self.items)<block_start>self.similar_items[i_id]=sorted(range(len(self.si_matrix[i_id])) key=<lambda>k:-self.si_matrix[i_id][k])[1:self.k_neighbors+1]<block_end><block_end><block_end>
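A usage sketch for the `ItemAttributeKNN` recommender defined above, following the pattern in its docstring. The file paths are placeholders: the train and test files are expected to hold `user item feedback` triples and the metadata file `item attribute [value]` rows, all tab-separated by default.

train_file = 'folds/0/train.dat'          # placeholder paths
test_file = 'folds/0/test.dat'
metadata_file = 'metadata/item_genres.dat'

ItemAttributeKNN(train_file, test_file,
                 output_file='folds/0/rank.dat',
                 metadata_file=metadata_file,
                 k_neighbors=30,
                 as_similar_first=True,
                 metadata_as_binary=True).compute()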
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>libtbx.utils Sorry Usage<import_stmt>libtbx.phil.command_line<import_stmt>sys<line_sep>master_phil=libtbx.phil.parse(""" resname = None .type = str d_max = None .type = float polymeric_type = *Any Free Polymeric .type = choice xray_only = True .type = bool data_only = False .type = bool identity_cutoff = None .type = int quiet = False .type = bool """)<def_stmt>run args out=sys.stdout<block_start><if_stmt>(len(args)<eq>0)<or>("--help"<in>args)<block_start><raise>Usage("""mmtbx.find_residue_in_pdb RESNAME [options] Use the RCSB web services to retrieve a list of PDB structures containing the specified chemical ID. Full parameters: %s """%master_phil.as_str(prefix=" "))<block_end>sources=[]<def_stmt>process_unknown arg<block_start><if_stmt>(1<le>len(arg)<le>3)<and>(arg.isalnum())<block_start><return>libtbx.phil.parse("resname=%s"%arg)<block_end><block_end>cai=libtbx.phil.command_line.argument_interpreter(master_phil=master_phil)<line_sep>working_phil=cai.process_and_fetch(args=args custom_processor=process_unknown)<line_sep>params=working_phil.extract()<if_stmt>(params.resname<is><none>)<block_start><raise>Sorry("No residue ID specified.")<block_end><import_from_stmt>mmtbx.wwpdb rcsb_web_services<line_sep>pdb_ids=rcsb_web_services.chemical_id_search(resname=params.resname d_max=params.d_max polymeric_type=params.polymeric_type xray_only=params.xray_only data_only=params.data_only identity_cutoff=params.identity_cutoff)<line_sep>pdb_ids=[id.lower()<for>id pdb_ids]<if_stmt>(len(pdb_ids)<eq>0)<block_start><raise>Sorry("No structures found matching the specified criteria.")<block_end><else_stmt><block_start><if_stmt>(<not>params.quiet)<block_start>print("%d PDB IDs retrieved:"%len(pdb_ids) file=out)<line_sep>i=0<while_stmt>(i<l>len(pdb_ids))<block_start>print(" %s"%" ".join(pdb_ids[i:i+16]) file=out)<line_sep>i<augadd>16<block_end><block_end><else_stmt><block_start>print("%d PDB IDs matching"%len(pdb_ids) file=out)<block_end><block_end><block_end><if_stmt>(__name__<eq>"__main__")<block_start>run(sys.argv[1:])<block_end>
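The script above is a thin command-line wrapper around `run()`; calling it directly looks roughly like the sketch below. It needs a working cctbx/mmtbx installation and network access to the RCSB web services, and 'ATP' is only an example chemical ID.

import sys

# Equivalent to: mmtbx.find_residue_in_pdb ATP d_max=2.0 xray_only=True
run(['ATP', 'd_max=2.0', 'xray_only=True'], out=sys.stdout)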
# coding: utf-8 <import_stmt>sys<import_from_stmt>python_environment_check check_packages<import_stmt>torch<import_stmt>torch.nn.functional<as>F<line_sep># # Machine Learning with PyTorch and Scikit-Learn # # -- Code Examples # ## Package version checks # Add folder to path in order to load from the check_packages.py script: sys.path.insert(0 '..')<line_sep># Check recommended package versions: d={'torch':'1.9.0' }<line_sep>check_packages(d)<line_sep># # Chapter 16: Transformers – Improving Natural Language Processing with Attention Mechanisms (Part 1/3) # **Outline** # # - [Adding an attention mechanism to RNNs](#Adding-an-attention-mechanism-to-RNNs) # - [Attention helps RNNs with accessing information](#Attention-helps-RNNs-with-accessing-information) # - [The original attention mechanism for RNNs](#The-original-attention-mechanism-for-RNNs) # - [Processing the inputs using a bidirectional RNN](#Processing-the-inputs-using-a-bidirectional-RNN) # - [Generating outputs from context vectors](#Generating-outputs-from-context-vectors) # - [Computing the attention weights](#Computing-the-attention-weights) # - [Introducing the self-attention mechanism](#Introducing-the-self-attention-mechanism) # - [Starting with a basic form of self-attention](#Starting-with-a-basic-form-of-self-attention) # - [Parameterizing the self-attention mechanism: scaled dot-product attention](#Parameterizing-the-self-attention-mechanism-scaled-dot-product-attention) # - [Attention is all we need: introducing the original transformer architecture](#Attention-is-all-we-need-introducing-the-original-transformer-architecture) # - [Encoding context embeddings via multi-head attention](#Encoding-context-embeddings-via-multi-head-attention) # - [Learning a language model: decoder and masked multi-head attention](#Learning-a-language-model-decoder-and-masked-multi-head-attention) # - [Implementation details: positional encodings and layer normalization](#Implementation-details-positional-encodings-and-layer-normalization) # ## Adding an attention mechanism to RNNs # ### Attention helps RNNs with accessing information # ### The original attention mechanism for RNNs # ### Processing the inputs using a bidirectional RNN # ### Generating outputs from context vectors # ### Computing the attention weights # ## Introducing the self-attention mechanism # ### Starting with a basic form of self-attention # - Assume we have an input sentence that we encoded via a dictionary, which maps the words to integers as discussed in the RNN chapter: # input sequence / sentence: # "Can you help me to translate this sentence" sentence=torch.tensor([0 # can 7 # you 1 # help 2 # me 5 # to 6 # translate 4 # this 3]# sentence )<line_sep>sentence<line_sep># - Next, assume we have an embedding of the words, i.e., the words are represented as real vectors. # - Since we have 8 words, there will be 8 vectors. Each vector is 16-dimensional: torch.manual_seed(123)<line_sep>embed=torch.nn.Embedding(10 16)<line_sep>embedded_sentence=embed(sentence).detach()<line_sep>embedded_sentence.shape<line_sep># - The goal is to compute the context vectors $\boldsymbol{z}^{(i)}=\sum_{j=1}^{T} \alpha_{i j} \boldsymbol{x}^{(j)}$, which involve attention weights $\alpha_{i j}$. 
# - In turn, the attention weights $\alpha_{i j}$ involve the $\omega_{i j}$ values # - Let's start with the $\omega_{i j}$'s first, which are computed as dot-products: # # $$\omega_{i j}=\boldsymbol{x}^{(i)^{\top}} \boldsymbol{x}^{(j)}$$ # # omega=torch.empty(8 8)<for_stmt>i,x_i enumerate(embedded_sentence)<block_start><for_stmt>j,x_j enumerate(embedded_sentence)<block_start>omega[i j]=torch.dot(x_i x_j)<block_end><block_end># - Actually, let's compute this more efficiently by replacing the nested for-loops with a matrix multiplication: omega_mat=embedded_sentence.matmul(embedded_sentence.T)<line_sep>torch.allclose(omega_mat omega)<line_sep># - Next, let's compute the attention weights by normalizing the "omega" values so they sum to 1 # # $$\alpha_{i j}=\frac{\exp \left(\omega_{i j}\right)}{\sum_{j=1}^{T} \exp \left(\omega_{i j}\right)}=\operatorname{softmax}\left(\left[\omega_{i j}\right]_{j=1 \ldots T}\right)$$ # # $$\sum_{j=1}^{T} \alpha_{i j}=1$$ attention_weights=F.softmax(omega dim=1)<line_sep>attention_weights.shape<line_sep># - We can conform that the columns sum up to one: attention_weights.sum(dim=1)<line_sep># - Now that we have the attention weights, we can compute the context vectors $\boldsymbol{z}^{(i)}=\sum_{j=1}^{T} \alpha_{i j} \boldsymbol{x}^{(j)}$, which involve attention weights $\alpha_{i j}$ # - For instance, to compute the context-vector of the 2nd input element (the element at index 1), we can perform the following computation: x_2=embedded_sentence[1 :]<line_sep>context_vec_2=torch.zeros(x_2.shape)<for_stmt>j range(8)<block_start>x_j=embedded_sentence[j :]<line_sep>context_vec_2<augadd>attention_weights[1 j]<times>x_j<block_end>context_vec_2<line_sep># - Or, more effiently, using linear algebra and matrix multiplication: context_vectors=torch.matmul(attention_weights embedded_sentence)<line_sep>torch.allclose(context_vec_2 context_vectors[1])<line_sep># ### Parameterizing the self-attention mechanism: scaled dot-product attention torch.manual_seed(123)<line_sep>d=embedded_sentence.shape[1]<line_sep>U_query=torch.rand(d d)<line_sep>U_key=torch.rand(d d)<line_sep>U_value=torch.rand(d d)<line_sep>x_2=embedded_sentence[1]<line_sep>query_2=U_query.matmul(x_2)<line_sep>key_2=U_key.matmul(x_2)<line_sep>value_2=U_value.matmul(x_2)<line_sep>keys=U_key.matmul(embedded_sentence.T).T<line_sep>torch.allclose(key_2 keys[1])<line_sep>values=U_value.matmul(embedded_sentence.T).T<line_sep>torch.allclose(value_2 values[1])<line_sep>omega_23=query_2.dot(keys[2])<line_sep>omega_23<line_sep>omega_2=query_2.matmul(keys.T)<line_sep>omega_2<line_sep>attention_weights_2=F.softmax(omega_2/d<power>0.5 dim=0)<line_sep>attention_weights_2<line_sep>#context_vector_2nd = torch.zeros(values[1, :].shape) #for j in range(8): # context_vector_2nd += attention_weights_2[j] * values[j, :] #context_vector_2nd context_vector_2=attention_weights_2.matmul(values)<line_sep>context_vector_2<line_sep># ## Attention is all we need: introducing the original transformer architecture # ### Encoding context embeddings via multi-head attention torch.manual_seed(123)<line_sep>d=embedded_sentence.shape[1]<line_sep>one_U_query=torch.rand(d d)<line_sep>h=8<line_sep>multihead_U_query=torch.rand(h d d)<line_sep>multihead_U_key=torch.rand(h d d)<line_sep>multihead_U_value=torch.rand(h d 
d)<line_sep>multihead_query_2=multihead_U_query.matmul(x_2)<line_sep>multihead_query_2.shape<line_sep>multihead_key_2=multihead_U_key.matmul(x_2)<line_sep>multihead_value_2=multihead_U_value.matmul(x_2)<line_sep>multihead_key_2[2]<line_sep>stacked_inputs=embedded_sentence.T.repeat(8 1 1)<line_sep>stacked_inputs.shape<line_sep>multihead_keys=torch.bmm(multihead_U_key stacked_inputs)<line_sep>multihead_keys.shape<line_sep>multihead_keys=multihead_keys.permute(0 2 1)<line_sep>multihead_keys.shape<line_sep>multihead_keys[2 1]# index: [2nd attention head, 2nd key] multihead_values=torch.matmul(multihead_U_value stacked_inputs)<line_sep>multihead_values=multihead_values.permute(0 2 1)<line_sep>multihead_z_2=torch.rand(8 16)<line_sep>linear=torch.nn.Linear(8<times>16 16)<line_sep>context_vector_2=linear(multihead_z_2.flatten())<line_sep>context_vector_2.shape<line_sep># ### Learning a language model: decoder and masked multi-head attention # ### Implementation details: positional encodings and layer normalization # --- # # Readers may ignore the next cell.
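As a compact recap (not part of the book's code), the scaled dot-product self-attention computed element by element above can be written as one small function over all positions at once. It reuses the `embedded_sentence`, `U_query`, `U_key`, `U_value`, `attention_weights_2`, and `values` tensors defined earlier.

import torch
import torch.nn.functional as F

def scaled_dot_product_self_attention(X, U_query, U_key, U_value):
    """X: (seq_len, d) embeddings; U_*: (d, d) projection matrices."""
    queries = X.matmul(U_query.T)      # (seq_len, d)
    keys = X.matmul(U_key.T)           # (seq_len, d)
    values = X.matmul(U_value.T)       # (seq_len, d)
    d_k = keys.shape[1]
    omega = queries.matmul(keys.T)     # (seq_len, seq_len) compatibility scores
    weights = F.softmax(omega / d_k ** 0.5, dim=1)
    return weights.matmul(values)      # (seq_len, d) context vectors

context = scaled_dot_product_self_attention(
    embedded_sentence, U_query, U_key, U_value)
# Row 1 matches the context vector for the 2nd word derived step by step above.
torch.allclose(context[1], attention_weights_2.matmul(values), atol=1e-5)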
########################################################################## # # Functions and classes for iterating over and loading all datasets, # variants and markets that are available for bulk download from SimFin. # ########################################################################## # SimFin - Simple financial data for Python. # www.simfin.com - www.github.com/simfin/simfin # See README.md for instructions and LICENSE.txt for license details. ########################################################################## <import_stmt>simfin<as>sf<import_from_stmt>simfin.load_info load_info_datasets<import_from_stmt>collections defaultdict<import_from_stmt>functools partial lru_cache<import_stmt>sys<line_sep>########################################################################## # Lists of dataset names. @lru_cache()<def_stmt>datasets_all <block_start>""" Return a list of strings with the names of all available datasets. """<line_sep># Load dict with info about all the datasets. info_datasets=load_info_datasets()<line_sep># Create a list of just the dataset names. datasets=list(info_datasets)<line_sep><return>datasets<block_end>@lru_cache()<def_stmt>datasets_startswith names<block_start>""" Return a list of strings with dataset names that begin with the given names. :param names: String or tuple of strings. :return: List of strings. """<line_sep># Load dict with info about all the datasets. info_datasets=load_info_datasets()<line_sep># Create a list of just the dataset names. datasets=list(info_datasets)<line_sep># Filter the datasets so we only get the ones that start with these names. datasets=list(filter(<lambda>s:s.startswith(names) datasets))<line_sep><return>datasets<block_end># List of dataset names that begin with 'income'. datasets_income=partial(datasets_startswith names='income')<line_sep>datasets_income.__doc__='List of dataset names that begin with \'income\'.'<line_sep># List of dataset names that begin with 'balance'. datasets_balance=partial(datasets_startswith names='balance')<line_sep>datasets_balance.__doc__='List of dataset names that begin with \'balance\'.'<line_sep># List of dataset names that begin with 'cashflow'. datasets_cashflow=partial(datasets_startswith names='cashflow')<line_sep>datasets_cashflow.__doc__='List of dataset names that begin with \'cashflow\'.'<line_sep># List of dataset names that begin with either 'income', 'balance' or 'cashflow'. datasets_fundamental=partial(datasets_startswith names=('income' 'balance' 'cashflow'))<line_sep>datasets_fundamental.__doc__='List of dataset names with fundamental data.'<line_sep># List of dataset names that begin with 'shareprices'. datasets_shareprices=partial(datasets_startswith names='shareprices')<line_sep>datasets_shareprices.__doc__='List of dataset names that begin with \'shareprices\'.'<line_sep># List of dataset names that begin with 'derived'. datasets_derived=partial(datasets_startswith names='derived')<line_sep>datasets_derived.__doc__='List of dataset names that begin with \'derived\'.'<line_sep>########################################################################## # Functions for iterating over and loading all datasets. <def_stmt>iter_all_datasets datasets=<none><block_start>""" Create a generator for iterating over all valid datasets, variants and markets. For example: .. 
code-block:: python for dataset, variant, market in iter_all_datasets(): print(dataset, variant, market) This only yields the names of the datasets, variants and markets, not the actual Pandas DataFrames, use :obj:`~simfin.datasets.load_all_datasets` or the :obj:`~simfin.datasets.AllDatasets` class for that. :param datasets: If `None` then iterate over all datasets. Otherwise if this is a string or list of strings, then only iterate over these datasets. """<line_sep># Load dict with info about all the datasets. info_datasets=load_info_datasets()<line_sep># Only use the given datasets? <if_stmt>datasets<is><not><none># Create a new dict which only contains the given datasets. <block_start>info_datasets={k:v<for>k,v info_datasets.items()<if>k<in>datasets}<block_end># Yield all valid combinations of datasets, variants and markets. <for_stmt>dataset,x info_datasets.items()# If the list of variants is empty, use a list with None, # otherwise the for-loop below would not yield anything. <block_start><if_stmt>len(x['variants'])<g>0<block_start>variants=x['variants']<block_end><else_stmt><block_start>variants=[<none>]<block_end># If the list of markets is empty, use a list with None, # otherwise the for-loop below would not yield anything. <if_stmt>len(x['markets'])<g>0<block_start>markets=x['markets']<block_end><else_stmt><block_start>markets=[<none>]<block_end><for_stmt>variant variants<block_start><for_stmt>market markets<block_start><yield>dataset variant market<block_end><block_end><block_end><block_end><def_stmt>load_all_datasets **kwargs<block_start>""" Load all datasets and variants. Create and return a nested dict for fast lookup given dataset, variant and market names. Accepts the same args as the :obj:`~simfin.load.load` function, except for dataset, variant and market. For example, `refresh_days` can be set to 0 to ensure all datasets are downloaded again, which is useful for testing purposes. :return: Nested dict `dfs` with all datasets, variants and markets. Example: `dfs['income']['annual']['us']` is the dataset for annual Income Statements for the US market. """<line_sep># Initialize a dict that can be nested to any depth. dfs=defaultdict(<lambda>:defaultdict(dict))<line_sep># For all possible datasets, variants and markets. <for_stmt>dataset,variant,market iter_all_datasets()<block_start><try_stmt># Load the dataset and variant as a Pandas DataFrame. <block_start>df=sf.load(dataset=dataset variant=variant market=market **kwargs)<line_sep># Add the Pandas DataFrame to the nested dict. dfs[dataset][variant][market]=df<block_end><except_stmt>Exception<as>e# Exceptions can occur e.g. if the API key is invalid, or if there # is another server error, or if there is no internet connection. # Print the exception and continue. <block_start>print(e file=sys.stderr)<line_sep># Set the Pandas DataFrame to None in the nested dict, # to indicate that it could not be loaded. dfs[dataset][variant][market]=<none><block_end><block_end># Return the nested dict. It is a bit tricky to convert the # defaultdict to a normal dict, and it is not really needed, # so just return the defaultdict as it is. <return>dfs<block_end>########################################################################## <class_stmt>AllDatasets<block_start>""" Load all valid datasets, variants and markets as Pandas DataFrames. Also provide functions for easy lookup and iteration over datasets. 
"""<def_stmt>__init__ self **kwargs<block_start>""" Accepts the same args as the :obj:`~simfin.load.load` function, except for dataset, variant and market. For example, `refresh_days` can be set to 0 to ensure all datasets are downloaded again, which is useful for testing purposes. """<line_sep># Load all datasets into a nested dict-dict. self._dfs=load_all_datasets(**kwargs)<block_end><def_stmt>get self dataset variant=<none> market=<none><block_start>""" Return the Pandas DataFrame for a single dataset, variant and market. :param dataset: String with the dataset name. :param variant: String with the dataset's variant. :param market: String with the dataset's market. :return: Pandas DataFrame with the dataset. """<line_sep><return>self._dfs[dataset][variant][market]<block_end><def_stmt>iter self datasets=<none> variants=<none> markets=<none><block_start>""" Iterate over all valid datasets, variants and markets, or only use the ones specified. For example: .. code-block:: python for dataset, variant, market, df in all_datasets.iter(): # dataset, variant and market are strings with the names. # df is a Pandas DataFrame with the actual data. :param datasets: Default is `None` which uses all valid datasets. Otherwise a list of strings with the dataset-names to use. :param variants: Default is `None` which uses all valid variants for a dataset. Otherwise a list of strings with the variant-names to use. :param markets: Default is `None` which uses all valid markets for a dataset. Otherwise a list of strings with the market-names to use. :return: Generator which iterates over: dataset (string), variant (string), market (string), df (Pandas DataFrame) """<line_sep># Load dict with info about all the datasets. info_datasets=load_info_datasets()<line_sep># Use provided or all datasets? <if_stmt>datasets<is><none><block_start>datasets=datasets_all<block_end># For all datasets. <for_stmt>dataset datasets# Use provided or all valid variants for this dataset? <block_start><if_stmt>variants<is><not><none><block_start>_variants=variants<block_end><else_stmt><block_start>_variants=info_datasets[dataset]['variants']<block_end># Use provided or all valid markets for this dataset? <if_stmt>markets<is><not><none><block_start>_markets=markets<block_end><else_stmt><block_start>_markets=info_datasets[dataset]['markets']<block_end># For all the selected variants and markets. <for_stmt>variant _variants<block_start><for_stmt>market _markets# Get the Pandas DataFrame with the actual data. <block_start>df=self.get(dataset=dataset variant=variant market=market)<line_sep># Yield all the strings and the Pandas DataFrame. <yield>dataset variant market df<block_end><block_end><block_end><block_end><block_end>##########################################################################
# -*- coding: utf-8 -*- """ Nikon N-Gamut Colourspace ========================= Defines the *Nikon N-Gamut* colourspace: - :attr:`colour.models.RGB_COLOURSPACE_N_GAMUT`. References ---------- - :cite:`Nikon2018` : Nikon. (2018). N-Log Specification Document - Version 1.0.0 (pp. 1-5). Retrieved September 9, 2019, from http://download.nikonimglib.com/archive3/hDCmK00m9JDI03RPruD74xpoU905/\ N-Log_Specification_(En)01.pdf """<import_from_stmt>colour.models.rgb RGB_Colourspace log_encoding_NLog log_decoding_NLog <import_from_stmt>colour.models.rgb.datasets.itur_bt_2020 PRIMARIES_BT2020 WHITEPOINT_NAME_BT2020 CCS_WHITEPOINT_BT2020 MATRIX_BT2020_TO_XYZ MATRIX_XYZ_TO_BT2020 <line_sep>__author__='Colour Developers'<line_sep>__copyright__='Copyright (C) 2013-2020 - Colour Developers'<line_sep>__license__='New BSD License - http://opensource.org/licenses/BSD-3-Clause'<line_sep>__maintainer__='Colour Developers'<line_sep>__email__='<EMAIL>'<line_sep>__status__='Production'<line_sep>__all__=['PRIMARIES_N_GAMUT' 'WHITEPOINT_NAME_N_GAMUT' 'CCS_WHITEPOINT_N_GAMUT' 'MATRIX_N_GAMUT_TO_XYZ' 'MATRIX_XYZ_TO_N_GAMUT' 'RGB_COLOURSPACE_N_GAMUT']<line_sep>PRIMARIES_N_GAMUT=PRIMARIES_BT2020<line_sep>""" *Nikon N-Gamut* colourspace primaries. Notes ----- The *Nikon N-Gamut* colourspace gamut is same as the "ITU-R BT.2020" wide colour gamut. PRIMARIES_N_GAMUT : ndarray, (3, 2) """<line_sep>WHITEPOINT_NAME_N_GAMUT=WHITEPOINT_NAME_BT2020<line_sep>""" *Nikon N-Gamut* colourspace whitepoint name. WHITEPOINT_NAME_N_GAMUT : unicode """<line_sep>CCS_WHITEPOINT_N_GAMUT=CCS_WHITEPOINT_BT2020<line_sep>""" *Nikon N-Gamut* colourspace whitepoint. CCS_WHITEPOINT_N_GAMUT : ndarray """<line_sep>MATRIX_N_GAMUT_TO_XYZ=MATRIX_BT2020_TO_XYZ<line_sep>""" *Nikon N-Gamut* colourspace to *CIE XYZ* tristimulus values matrix. MATRIX_N_GAMUT_TO_XYZ : array_like, (3, 3) """<line_sep>MATRIX_XYZ_TO_N_GAMUT=MATRIX_XYZ_TO_BT2020<line_sep>""" *CIE XYZ* tristimulus values to *Nikon N-Gamut* colourspace matrix. MATRIX_XYZ_TO_N_GAMUT : array_like, (3, 3) """<line_sep>RGB_COLOURSPACE_N_GAMUT=RGB_Colourspace('N-Gamut' PRIMARIES_N_GAMUT CCS_WHITEPOINT_N_GAMUT WHITEPOINT_NAME_N_GAMUT MATRIX_N_GAMUT_TO_XYZ MATRIX_XYZ_TO_N_GAMUT log_encoding_NLog log_decoding_NLog )<line_sep>RGB_COLOURSPACE_N_GAMUT.__doc__=""" *Nikon N-Gamut* colourspace. References ---------- :cite:`Nikon2018` RGB_COLOURSPACE_N_GAMUT : RGB_Colourspace """<line_sep>
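# ---------------------------------------------------------------------------
# Small illustrative sketch (not part of the module, assumes the names defined
# above are in scope): the N-Gamut primaries are the BT.2020 primaries, so
# converting linear N-Gamut RGB to CIE XYZ only needs the matrix defined above,
# while N-Log provides the transfer function.
import numpy as np

rgb_linear = np.array([0.18, 0.18, 0.18])        # 18% grey, linear N-Gamut
xyz = np.dot(MATRIX_N_GAMUT_TO_XYZ, rgb_linear)  # CIE XYZ tristimulus values
encoded = log_encoding_NLog(rgb_linear)          # N-Log encoded camera values
print(xyz, encoded)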
""" Base class for all column-orientation transformer classes with fit/transform functions. """<line_sep># Authors: <NAME> # License: MIT <import_from_stmt>abc ABC abstractmethod<import_from_stmt>enum Enum<class_stmt>Column(ABC)<block_start>"""Base class for all column-orientation transformer classes with fit/transform functions. """<line_sep>@abstractmethod<def_stmt>fit self x y=<none><block_start>"""Fit this transformer. Parameters ---------- x : array-like One column of training data. y : array-like, default=None Training targets. """<line_sep><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>transform self x<block_start>"""Transform x by this fitted transformer. Parameters ---------- x : array-like Column data to be transformed. """<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>ColumnType(Enum)<block_start>NUMBER=1<line_sep>CATEGORY=2<line_sep>SEQUENCE=3<block_end><class_stmt>NumberColumn(Column)<block_start>"""Base class for all column-orientation number type transformer classes with fit/transform functions. """<line_sep>column_type=ColumnType.NUMBER<block_end><class_stmt>CategoryColumn(Column)<block_start>"""Base class for all column-orientation category type transformer classes with fit/transform functions. """<line_sep>column_type=ColumnType.CATEGORY<line_sep>@abstractmethod<def_stmt>dimension self<block_start>"""Number of unique terms. """<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>SequenceColumn(Column)<block_start>"""Base class for all column-orientation sequence type transformer classes with fit/transform functions. """<line_sep>column_type=ColumnType.SEQUENCE<line_sep>@abstractmethod<def_stmt>dimension self<block_start>"""Number of unique terms. """<line_sep><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>max_length self<block_start>"""Maximum length of one sequence. """<line_sep><raise>NotImplementedError<block_end><block_end>
<import_from_stmt>.deform_conv DeformConv DeformConvPack ModulatedDeformConv ModulatedDeformConvPack deform_conv modulated_deform_conv <line_sep>__all__=['DeformConv' 'DeformConvPack' 'ModulatedDeformConv' 'ModulatedDeformConvPack' 'deform_conv' 'modulated_deform_conv']<line_sep>
# This file will generate a synthetic dataset to predict employee attrition # Like most datasets it will have a feature vector and a Y label for each instance. # However, unlike most datasets it will also have an Explanation (E) for each instance, encoded as an non-negative integer. # This is motivated by the TED framework, but can be used by other explainability algorithms as a metric for explainability # See the AIES'19 paper by Hind et al for more information on the TED framework. # See the tutorial notebook TED_Cartesian_test for information about how to use this dataset and the TED framework. # The comments in this code also provide some insight into how this dataset is generated <import_stmt>random<import_from_stmt>random choices<import_stmt>pandas<as>pd<line_sep>Any=-99# This is only applicable in the rule Low=-1# These 3, Low, Med, High, can be values in the dataset and are used in the rules Med=-2<line_sep>High=-3<line_sep>Yes=-10# This is the positive Y label No=-11# This is the negative Y label Random=-12# This signifies a random choice should be made for the Y label (either Yes or No) ] # Features, values, and distribution, details below featureThresholds=[# 1 Position: 4(5%), 3(20%), 2(30%), 1(45%) [4 [0.05 0.20 0.30 0.45]] # 2 Organization "Org": 3(30%); 2(30%); 1(40%) [3 [0.30 0.30 0.40]] # 3 Potential "Pot": Yes (50%), No (50%) [2 [0.50 0.50]] # 4 Rating value "Rat": High(15%), Med(80%), Low(5%) [3 [0.15 0.80 0.05]] # 5 Rating Slope "Slope": High (15%), Med(80%), Low(5%) [3 [0.15 0.80 0.05]] # 6 Salary Competitiveness "Sal": High (10%); Med(70%); Low(20%) [3 [0.10 0.70 0.20]] # 7 Tenure Low "TenL" & High Values "TenH": [0..360], 30% in 0..24; 30% in 25..60; 40% in 61..360 [3 [0.30 0.30 0.40] [[0 24] [25 60] [61 360]]] # 8 Position Tenure Low "BTenL" & High Values "BTenH": [0..360], 70% in 0..12; 20% in 13..24; 10% in 25..360 # Position tenure needs to be lower than tenure, ensured in generation code below [3 [0.70 0.20 0.10] [[0 12] [13 24] [25 360]]]]<line_sep># Some convenient population lists HighMedLowPopulation=[High Med Low]<line_sep>YesNoPopulation=[Yes No]<line_sep>Index3Population=[0 1 2]<line_sep>Integer4Population=[4 3 2 1]<line_sep>Integer3Population=[3 2 1]<line_sep># Rules used to label a feature vector with a label and an explanation # Format: features, label, explanation #, Explanation String RetentionRules=[#POS ORG Pot RAT Slope SALC TENL H BTEN LH [Any 1 Any High Any Low Any Any Any Any #0 Yes 2 "Seeking Higher Salary in Org 1"] [1 1 Any Any Any Any Any Any 15 Any #1 Yes 3 "Promotion Lag, Org 1, Position 1"] [2 1 Any Any Any Any Any Any 15 Any #2 Yes 3 "Promotion Lag, Org 1, Position 2"] [3 1 Any Any Any Any Any Any 15 Any #3 Yes 3 "Promotion Lag, Org 1, Position 3"] [1 2 Any Any Any Any Any Any 20 Any #4 Yes 4 "Promotion Lag, Org 2, Position 1"] [2 2 Any Any Any Any Any Any 20 Any #5 Yes 4 "Promotion Lag, Org 2, Position 2"] [3 2 Any Any Any Any Any Any 30 Any #6 Yes 5 "Promotion Lag, Org 2, Position 3"] [1 3 Any Any Any Any Any Any 20 Any #7 Yes 6 "Promotion Lag, Org 3, Position 1"] [2 3 Any Any Any Any Any Any 30 Any #8 Yes 7 "Promotion Lag, Org 3, Position 2"] [3 3 Any Any Any Any Any Any 30 Any #9 Yes 7 "Promotion Lag, Org 3, Position 3"] [1 1 Any Any Any Any 0 12 Any Any #10 Yes 8 "New employee, Org 1, Position 1"] [2 1 Any Any Any Any 0 12 Any Any #11 Yes 8 "New employee, Org 1, Position 2"] [3 1 Any Any Any Any 0 30 Any Any #12 Yes 9 "New employee, Org 1, Position 3"] [1 2 Any Any Any Any 0 24 Any Any #13 Yes 10 "New employee, Org 2, Position 1"] 
[2 2 Any Any Any Any 0 30 Any Any #14 Yes 11 "New employee, Org 2, Position 2"] [Any 1 Any Low High Any Any Any Any Any #15 Yes 13 "Disappointing evaluation, Org 1"] [Any 2 Any Low High Any Any Any Any Any #16 Yes 14 "Disappointing evaluation, Org 2"] [Any Any Yes Med High Low Any Any Any Any #17 Yes 15 "Compensation doesn't match evaluations, Med rating"] [Any Any Yes High High Low Any Any Any Any #18 Yes 15 "Compensation doesn't match evaluations, High rating"] [Any 1 Yes Med High Med Any Any Any Any #19 Yes 16 "Compensation doesn't match evaluations, Org 1, Med rating"] [Any 2 Yes Med High Med Any Any Any Any #20 Yes 16 "Compensation doesn't match evaluations, Org 2, Med rating"] [Any 1 Yes High High Med Any Any Any Any #21 Yes 16 "Compensation doesn't match evaluations, Org 1, High rating"] [Any 2 Yes High High Med Any Any Any Any #22 Yes 16 "Compensation doesn't match evaluations, Org 2, High rating"] [Any 1 Any Any Med Med 120 180 Any Any #23 Yes 17 "Mid-career crisis, Org 1"] [Any 2 Yes Any Any Med 130 190 Any Any #24 Yes 18 "Mid-career crisis, Org 2"]]<def_stmt>ruleValToString val<block_start>""" Convert the value passed into a string """<if_stmt>val<eq>Any<block_start><return>"Any"<block_end><elif_stmt>val<eq>Low<block_start><return>"Low"<block_end><elif_stmt>val<eq>Med<block_start><return>"Med"<block_end><elif_stmt>val<eq>High<block_start><return>"High"<block_end><elif_stmt>val<eq>Yes<block_start><return>"Yes"<block_end><elif_stmt>val<eq>No<block_start><return>"No"<block_end><elif_stmt>val<eq>Random<block_start><return>"Random"<block_end><else_stmt><block_start><return>str(val)<block_end><block_end><def_stmt>printFeatureStringHeader <block_start>""" Print the feature headings """<line_sep>print(" Feature Headings")<line_sep>print("[Pos, Org, Pot, Rating, Slope, Salary Competitiveness, Tenure, Position Tenure]")<block_end><def_stmt>featuresToString featureVector<block_start>""" Convert a feature vector into is string format"""<line_sep>val="["<for_stmt>i range(0 2)# These features are just ints, Position, Organization <block_start>val<augadd>str(featureVector[i])<line_sep>val<augadd>" "<block_end><for_stmt>i range(2 6)# show encoding for these: Potential, Rating, Rating Slope, Salary Competitiveness <block_start>val<augadd>ruleValToString(featureVector[i])<line_sep>val<augadd>" "<block_end><for_stmt>i range(6 8)# These features are just ints: Tenure and Position Tenure <block_start>val<augadd>str(featureVector[i])<line_sep>val<augadd>" "<block_end>val<augadd>"]"<line_sep><return>val<block_end><def_stmt>printRule rule<block_start>""" Print the passed rule """<line_sep>print("Rule: " end='')<for_stmt>i rule[0:1]# ints or Any: Position and Organization <block_start><if_stmt>i<eq>Any<block_start>print(ruleValToString(i)+", " end='')<block_end><block_end><for_stmt>i rule[2:5]# encoded: Potentional, Rating, Rating Slope, Salary Competitiveness <block_start>print(ruleValToString(i)+", " end='')<block_end><for_stmt>i rule[6:9]# next 4 are ints or ANY: Tenure Low, Tenure High, Position Tenure Low, Position Tenure High <block_start><if_stmt>i<eq>Any<block_start>print(ruleValToString(i)+", " end='')<block_end><else_stmt><block_start>print(str(i)+", " end='')<block_end><block_end>print("==> "+ruleValToString(rule[10])+"["+str(rule[11])+"] "+str(rule[12]))<block_end><def_stmt>printRules rules<block_start>""" print all rules"""<for_stmt>r rules<block_start>printRule(r)<block_end><block_end>######################################################################## <def_stmt>chooseRangeValue 
thresholds rangeList<block_start>""" Generate a random value based on the probability weights (thresholds) and list of ranges passed Args: thresholds : list of probabilities for each choice rangeList: a list of pair lists giving the lower and upper bounds to choose value from """<line_sep># pick a number 1..3 from weights rangeVal=choices(Index3Population thresholds)<line_sep># get the appropriate range given rangeVal interval=rangeList[rangeVal[0]]<line_sep># construct a population list from the result intervalPopulation=list(range(interval[0] interval[1]))<line_sep># construct a equally prob weights list numElements=interval[1]-interval[0]<line_sep>probVal=1.0/numElements<line_sep>probList=[probVal]<times>numElements<line_sep># now choose the value from the population based on the weights val=choices(intervalPopulation probList)<line_sep><return>val[0]<block_end><def_stmt>chooseValueAndAppend instance population weights<block_start>""" Choose a random value from the population using weights list and append it to the passed instance """<line_sep>val=choices(population weights)<line_sep>instance.append(val[0])<block_end><def_stmt>generateFeatures numInstances<block_start>""" generate the features (X) values for the dataset Args: numInstances (int) : number of instances to genreate Returns: dataset (list of lists) : the dataset with features, but no labels or explanations yet """<assert_stmt>(numInstances<g>0)<line_sep>dataset=[]<for_stmt>i range(numInstances)<block_start>instance=[]<line_sep>#POS ORG Pot Rating Slope SALC TENL H BTEN LH chooseValueAndAppend(instance Integer4Population featureThresholds[0][1])# Position chooseValueAndAppend(instance Integer3Population featureThresholds[1][1])# Org chooseValueAndAppend(instance YesNoPopulation featureThresholds[2][1])# Potential chooseValueAndAppend(instance HighMedLowPopulation featureThresholds[3][1])# Rating chooseValueAndAppend(instance HighMedLowPopulation featureThresholds[4][1])# Rating slope chooseValueAndAppend(instance HighMedLowPopulation featureThresholds[5][1])# Sal competitiveness val1=chooseRangeValue(featureThresholds[6][1] featureThresholds[6][2])# Tenure instance.append(val1)<line_sep># Position tenure needs to be <= Tenure val2=chooseRangeValue(featureThresholds[7][1] featureThresholds[7][2])# Pos Tenure <if_stmt>val2<g>val1<block_start>val2=val1<block_end>instance.append(val2)<line_sep>dataset.append(instance)<block_end><return>dataset<block_end>##################################################################################################### <def_stmt>match ruleVal featureVal<block_start>""" Check if passed ruleVal matches the featureVal or if ruleVal is Any, which matches everything """<line_sep># print("Match called: "+ ruleValToString(ruleVal) + " " + ruleValToString(featureVal)) <if_stmt>ruleVal<eq>Any<block_start><return><true><block_end><return>(ruleVal<eq>featureVal)<block_end><def_stmt>intervalMatch ruleValLower ruleValUpper featureVal<block_start>""" Check to see if featureVal is in the interval defined by [ruleValLower, ruleValUpper) """<line_sep># Any in lower bound matches all values, (upper bound doesn't matter) <if_stmt>ruleValLower<eq>Any<block_start><return><true><block_end><if_stmt>ruleValLower<le>featureVal# Any in upper bound means infinitity <block_start><if_stmt>featureVal<l>ruleValUpper<or>ruleValUpper<eq>Any<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>ruleMatch rule featureVector<block_start>""" Determine if the passed featureVector matches the passed rule 
"""<if_stmt>(<false>)<block_start>print("ruleMatch called, " end="")<line_sep>printRule(rule)<line_sep>print(" feature vector: "+featuresToString(featureVector))<block_end><for_stmt>i range(0 6)# loop over first 6 features, 0..5 <block_start><if_stmt><not>match(rule[i] featureVector[i])# if we don't find a feature match, the rule doesn't match # print("Didn't match feature #", i, ruleValToString(featureVector[i])) <block_start><return><false><block_end><block_end># These features are interval-based, so need a different matching routine <if_stmt><not>intervalMatch(rule[6] rule[7] featureVector[6])# rule[6] and rule[7] have the lower and upper bounds of interval # print("Didn't match feature # 6: ", featureVector[6]) <block_start><return><false><block_end><if_stmt><not>intervalMatch(rule[8] rule[9] featureVector[7])# rule[8] and rule[9] have the lower and upper bounds of interval # print("Didn't match feature # 7: ", featureVector[7]) <block_start><return><false><block_end># print("Matched all features") <return><true># if we didn't find a non-match by now, we found a match <block_end><def_stmt>findRule instance ruleSet<block_start>""" find the rule(s) that matches the feture vector passed """<line_sep># print("*Looking for rule match for Feature vector: " + featuresToString(instance)) ruleNumber=0# counter to track rule number ruleMatches=[]# will hold all rule numbers that matched <for_stmt>rule ruleSet<block_start><if_stmt>(ruleMatch(rule instance))<block_start>ruleMatches.append(ruleNumber)<line_sep>counts[ruleNumber]<augadd>1# update global histogram of rule matches for stats reporting <if_stmt>(<false>)<block_start>print(" ruleMatch found at rule #"+str(ruleNumber))<line_sep>print(" " end="")<line_sep>printRule(rule)<block_end><block_end>ruleNumber<augadd>1<block_end><return>ruleMatches<block_end><def_stmt>countAnys rule<block_start>""" Count the number of Anys in the passed rule. 
An "Any" is a wildcard that matches all values """<line_sep>count=0<for_stmt>feature RetentionRules[rule]<block_start><if_stmt>feature<eq>Any<block_start>count<augadd>1<block_end><block_end><return>count<block_end><def_stmt>pickBestRule ruleList<block_start>""" Choose the rule with the least number of Any's in it """<assert_stmt>(len(ruleList)<g>0)<line_sep># print("ruleList: ", ruleList) minAnys=len(RetentionRules[0])+1# initialize to a value larger than possible # of Anys in a rule bestRule=-1<for_stmt>rule ruleList# Count # of Any's in rule # rule <block_start>count=countAnys(rule)<if_stmt>count<l>minAnys<block_start>minAnys=count<line_sep>bestRule=rule<block_end><block_end><assert_stmt>(bestRule<ne>-1)# We should find a best rule <return>bestRule<block_end><def_stmt>addLabelsAndExplanations dataset rules<block_start>""" This function will use a ruleset to add labels (Y) and explanations/rules (E) to a passed dataset Arg: dataset (list of lists) : a list of feature vectors (list) rules (list of lists) : a list of rules """<line_sep>noMatches=0# Counters to record how often there are no (Yes) matches, 1 (Yes) match, and multiple (Yes) matches multiMatches=0<line_sep>oneMatches=0<for_stmt>instance dataset<block_start>ruleMatches=findRule(instance rules)<if_stmt>len(ruleMatches)<eq>0# We didn't match a (Yes) rule, so this ia No situation <block_start>rule=NoRiskRuleNum<line_sep>label=No<line_sep>noMatches<augadd>1<block_end><elif_stmt>len(ruleMatches)<g>1# Matched multiple Yes rules, need to pick one <block_start>rule=pickBestRule(ruleMatches)<assert_stmt>(rule<ge>0<and>rule<l>len(rules))# Ensure rule number is valid label=Yes<line_sep>multiMatches<augadd>1<block_end><else_stmt># Found 1 Yes rule match, it's the winner <block_start>rule=ruleMatches[0]<line_sep>label=Yes<line_sep>oneMatches<augadd>1<assert_stmt>(rule<ge>0<and>rule<l>len(rules))<block_end># Ensure rule number is valid # print("Label: " + ruleValToString(label) + ", Rule: " + ruleValToString(rule)) instance.append(label)<line_sep>instance.append(rule)<block_end># add the label and explanation (rule #) to the featureVector <if_stmt>(<true>)<block_start>print("\nRule matching statistics: ")<line_sep>totalYes=oneMatches+multiMatches<line_sep>total=oneMatches+multiMatches+noMatches<line_sep>print(" Yes Labels: {}/{} ({:.2f}%)".format(totalYes total totalYes/total<times>100))<line_sep>print(" Matched 1 Yes rule: {}/{} ({:.2f}%)".format(oneMatches totalYes oneMatches/totalYes<times>100))<line_sep>print(" Matched multiple Yes rules: {}/{} ({:.2f}%)".format(multiMatches totalYes multiMatches/totalYes<times>100))<line_sep>print(" No Laels: {}/{} ({:.2f}%)".format(noMatches total noMatches/total<times>100))<block_end><block_end><def_stmt>printRuleUsage counts total<block_start>print("\nHistogram of rule usage:")<line_sep>ruleNum=0<for_stmt>num counts<block_start>print(" Rule {} was used {} times, {:.2f}%".format(ruleNum num num/total<times>100))<line_sep>ruleNum<augadd>1<block_end><block_end>numRentionRules=len(RetentionRules)<line_sep>counts=[0]<times>numRentionRules<line_sep>NoRiskRuleNum=numRentionRules# the No Risk to leave rule is 1 more than than the total rules [0..] 
random.seed(1)<line_sep># printFeatureStringHeader() numInstances=10000<line_sep>dataset=generateFeatures(numInstances)<line_sep>addLabelsAndExplanations(dataset RetentionRules)<line_sep>printRuleUsage(counts numInstances)<line_sep># insert TED headers NumFeatures=len(featureThresholds)<line_sep>header=list(range(NumFeatures))<line_sep>header.append("Y")<line_sep>header.append("E")<line_sep>dataset.insert(0 header)<line_sep># write to csv file my_df=pd.DataFrame(dataset)<line_sep>my_df.to_csv('Retention.csv' index=<false> header=<false>)<line_sep>
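# ---------------------------------------------------------------------------
# Follow-on sketch (not part of the generator above): read the CSV it writes
# and split it into features X, labels Y and explanations E, following the
# X/Y/E convention described in the comments at the top of this file.
import pandas as pd

df = pd.read_csv('Retention.csv')   # first CSV row holds the header written above
X = df.drop(columns=['Y', 'E'])     # the eight feature columns, named 0..7
Y = df['Y']                         # label, encoded as Yes=-10 / No=-11
E = df['E']                         # rule number serving as the explanation
print(X.shape, Y.value_counts().to_dict())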
<import_from_stmt>py.test raises<import_from_stmt>..registry Registry<import_from_stmt>..types DjangoObjectType<import_from_stmt>.models Reporter<def_stmt>test_should_raise_if_no_model <block_start><with_stmt>raises(Exception)<as>excinfo<block_start><class_stmt>Character1(DjangoObjectType)<block_start>fields="__all__"<block_end><block_end><assert_stmt>"valid Django Model"<in>str(excinfo.value)<block_end><def_stmt>test_should_raise_if_model_is_invalid <block_start><with_stmt>raises(Exception)<as>excinfo<block_start><class_stmt>Character2(DjangoObjectType)<block_start><class_stmt>Meta<block_start>model=1<line_sep>fields="__all__"<block_end><block_end><block_end><assert_stmt>"valid Django Model"<in>str(excinfo.value)<block_end><def_stmt>test_should_map_fields_correctly <block_start><class_stmt>ReporterType2(DjangoObjectType)<block_start><class_stmt>Meta<block_start>model=Reporter<line_sep>registry=Registry()<line_sep>fields="__all__"<block_end><block_end>fields=list(ReporterType2._meta.fields.keys())<assert_stmt>fields[:-2]<eq>["id" "first_name" "last_name" "email" "pets" "a_choice" "reporter_type" ]<assert_stmt>sorted(fields[-2:])<eq>["articles" "films"]<block_end><def_stmt>test_should_map_only_few_fields <block_start><class_stmt>Reporter2(DjangoObjectType)<block_start><class_stmt>Meta<block_start>model=Reporter<line_sep>fields=("id" "email")<block_end><block_end><assert_stmt>list(Reporter2._meta.fields.keys())<eq>["id" "email"]<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_stmt>enum<import_from_stmt>iceberg.api Table Transaction <import_from_stmt>iceberg.core TableOperations<import_from_stmt>iceberg.exceptions CommitFailedException<class_stmt>BaseTransaction(Transaction)<block_start>@staticmethod<def_stmt>replace_table_transaction ops start<block_start><return>BaseTransaction(ops start)<block_end>@staticmethod<def_stmt>create_table_transaction ops start<block_start><if_stmt>ops.current()<is><not><none><block_start><raise>RuntimeError("Cannot start create table transaction: table already exists")<block_end><block_end>@staticmethod<def_stmt>new_transaction ops<block_start><return>BaseTransaction(ops ops.refesh())<block_end><def_stmt>__init__ self ops start<block_start>self.ops=ops<line_sep>self.updates=list()<line_sep>self.intermediate_snapshot_ids=set()<line_sep>self.base=ops.current<if_stmt>self.base<is><none><and>start<is><none><block_start>self.type=TransactionType.CREATE_TABLE<block_end><elif_stmt>self.base<is><not><none><and>start<ne>self.base<block_start>self.type=TransactionType.REPLACE_TABLE<block_end><else_stmt><block_start>self.type=TransactionType.SIMPLE<block_end>self.last_base=<none><line_sep>self.current=start<line_sep>self.transaction_table=TransactionTable(self self.current)<line_sep>self.transaction_ops=TransactionTableOperations<block_end><def_stmt>table self<block_start><return>self.transaction_table<block_end># NOTE: function name has typo in the word `comitted`. Kept for backwards compatability in legacy python API. 
<def_stmt>check_last_operation_commited self operation<block_start><if_stmt>self.last_base<eq>self.current<block_start><raise>RuntimeError("Cannot create new %s: last operation has not committed"%operation)<block_end>self.last_base=self.current<block_end><def_stmt>update_schema self<block_start>self.check_last_operation_commited("UpdateSchema")<block_end>@staticmethod<def_stmt>current_id meta<block_start><if_stmt>meta<is><not><none><and>meta.current_snapshot()<is><not><none><block_start><return>meta.current_snapshot().snapshot_id<block_end><block_end><block_end><class_stmt>TransactionType(enum.Enum)<block_start>CREATE_TABLE=0<line_sep>REPLACE_TABLE=1<line_sep>SIMPLE=1<block_end><class_stmt>TransactionTableOperations(TableOperations)<block_start><def_stmt>__init__ self bt<block_start>self._bt=bt<block_end><def_stmt>current self<block_start><return>self._bt.current<block_end><def_stmt>refresh self<block_start><return>self._bt.current<block_end><def_stmt>commit self base metadata<block_start><if_stmt>base<ne>self.current()<block_start><raise>CommitFailedException("Table metadata refresh is required")<block_end>old_id=BaseTransaction.current_id(self._bt.current)<if_stmt>old_id<is><not><none><and>old_id<not><in>(BaseTransaction.current_id(metadata) BaseTransaction.current_id(base))<block_start>self._bt.intermediate_snapshot_ids.add(old_id)<block_end>self._bt.current=metadata<block_end><def_stmt>io self<block_start><return>self._bt.ops.io()<block_end><def_stmt>metadata_file_location self file<block_start><return>self._bt.ops.metadata_file_location(file)<block_end><def_stmt>new_snapshot_id self<block_start><return>self._bt.ops.new_snapshot_id()<block_end><block_end><class_stmt>TransactionTable(Table)<block_start><def_stmt>__init__ self bt current<block_start>self.bt=bt<line_sep>self.current=current<block_end><def_stmt>refresh self<block_start><pass><block_end><def_stmt>new_scan self<block_start><raise>RuntimeError("Transaction tables do not support scans")<block_end><def_stmt>schema self<block_start><return>self.current.schema<block_end><def_stmt>spec self<block_start><return>self.current.spec<block_end><def_stmt>properties self<block_start><return>self.current.properties<block_end><def_stmt>location self<block_start><return>self.current.location<block_end><def_stmt>current_snapshot self<block_start><return>self.current.current_snapshot()<block_end><def_stmt>snapshots self<block_start><return>self.current.snapshots<block_end><def_stmt>update_schema self<block_start><return>self.bt.update_schema()<block_end><def_stmt>update_properties self<block_start><return>self.bt.update_properties()<block_end><def_stmt>update_location self<block_start><return>self.bt.update_location()<block_end><def_stmt>new_append self<block_start><return>self.bt.new_append()<block_end><def_stmt>new_rewrite self<block_start><return>self.bt.new_rewrite()<block_end><def_stmt>new_overwrite self<block_start><return>self.bt.new_overwrite()<block_end><def_stmt>new_replace_partitions self<block_start><return>self.bt.new_replace_partitions()<block_end><def_stmt>new_delete self<block_start><return>self.bt.new_delete()<block_end><def_stmt>expire_snapshots self<block_start><return>self.bt.expire_snapshots()<block_end><def_stmt>rollback self<block_start><raise>RuntimeError("Transaction tables do not support rollback")<block_end><def_stmt>new_transaction self<block_start><raise>RuntimeError("Cannot create a transaction within a transaction")<block_end><block_end>
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>torchdyn.numerics.solvers Euler RungeKutta4 Tsitouras45 DormandPrince45 AsynchronousLeapfrog MSZero MSBackward<import_from_stmt>torchdyn.numerics.hypersolvers HyperEuler<import_from_stmt>torchdyn.numerics.odeint odeint odeint_symplectic odeint_mshooting odeint_hybrid<import_from_stmt>torchdyn.numerics.systems VanDerPol Lorenz<line_sep>__all__=['odeint' 'odeint_symplectic' 'Euler' 'RungeKutta4' 'DormandPrince45' 'Tsitouras45' 'AsynchronousLeapfrog' 'HyperEuler' 'MSZero' 'MSBackward' 'Lorenz' 'VanDerPol']<line_sep>
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions to expand "make" variables of form $(VAR) """<def_stmt>expand_variables ctx s outs=[] output_dir=<false> attribute_name="args"<block_start>"""This function is the same as ctx.expand_make_variables with the additional genrule-like substitutions of: - $@: The output file if it is a single file. Else triggers a build error. - $(@D): The output directory. If there is only one file name in outs, this expands to the directory containing that file. If there are multiple files, this instead expands to the package's root directory in the bin tree, even if all generated files belong to the same subdirectory! - $(RULEDIR): The output directory of the rule, that is, the directory corresponding to the name of the package containing the rule under the bin tree. See https://docs.bazel.build/versions/main/be/general.html#genrule.cmd and https://docs.bazel.build/versions/main/be/make-variables.html#predefined_genrule_variables for more information of how these special variables are expanded. """<line_sep>rule_dir=[f<for>f [ctx.bin_dir.path ctx.label.workspace_root ctx.label.package ]<if>f]<line_sep>additional_substitutions={}<if_stmt>output_dir<block_start><if_stmt>s.find("$@")<ne>-1<or>s.find("$(@)")<ne>-1<block_start>fail("""$@ substitution may only be used with output_dir=False. Upgrading rules_nodejs? Maybe you need to switch from $@ to $(@D) See https://github.com/bazelbuild/rules_nodejs/releases/tag/0.42.0""")<block_end># We'll write into a newly created directory named after the rule output_dir=[f<for>f [ctx.bin_dir.path ctx.label.workspace_root ctx.label.package ctx.label.name ]<if>f]<block_end><else_stmt><block_start><if_stmt>s.find("$@")<ne>-1<or>s.find("$(@)")<ne>-1<block_start><if_stmt>len(outs)<g>1<block_start>fail("""$@ substitution may only be used with a single out Upgrading rules_nodejs? Maybe you need to switch from $@ to $(RULEDIR) See https://github.com/bazelbuild/rules_nodejs/releases/tag/0.42.0""")<block_end><block_end><if_stmt>len(outs)<eq>1<block_start>additional_substitutions["@"]=outs[0].path<line_sep>output_dir=outs[0].dirname.split("/")<block_end><else_stmt><block_start>output_dir=rule_dir[:]<block_end><block_end># The list comprehension removes empty segments like if we are in the root package additional_substitutions["@D"]="/".join([o<for>o output_dir<if>o])<line_sep>additional_substitutions["RULEDIR"]="/".join([o<for>o rule_dir<if>o])<line_sep><return>ctx.expand_make_variables(attribute_name s additional_substitutions)<block_end>
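# ---------------------------------------------------------------------------
# Hypothetical Starlark usage sketch (_echo_impl and echo_rule are illustrative
# names, not part of this file): expanding the special variables in an "args"
# attribute with the helper defined above.
def _echo_impl(ctx):
    out = ctx.actions.declare_file(ctx.label.name + ".txt")
    # With a single output, "$@" expands to out's path and "$(RULEDIR)" to the
    # package's directory under the bin tree.
    expanded = [expand_variables(ctx, a, outs = [out]) for a in ctx.attr.args]
    ctx.actions.write(output = out, content = "\n".join(expanded))
    return [DefaultInfo(files = depset([out]))]

echo_rule = rule(
    implementation = _echo_impl,
    attrs = {"args": attr.string_list()},
)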
# Django Library <import_from_stmt>django.db models<import_from_stmt>django.utils.translation ugettext_lazy<as>_<line_sep># Localfolder Library <import_from_stmt>.father PyFather<class_stmt>PyTax(PyFather)<block_start>name=models.CharField(_("Name") max_length=255)<line_sep>amount=models.DecimalField(_("Amount") max_digits=10 decimal_places=2 default=0)<line_sep>include_price=models.BooleanField(_("Include Price") default=<true> blank=<true> null=<true>)<def_stmt>__str__ self<block_start><return>self.name<block_end><class_stmt>Meta<block_start>verbose_name=_("Tax")<line_sep>verbose_name_plural=_("PyTax")<block_end><block_end>
""" MIT License Copyright (c) 2018 <NAME> (https://q37.info/s/rmnmqd49) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<import_stmt>os sys<line_sep>os.chdir(os.path.dirname(os.path.realpath(__file__)))<line_sep>sys.path.append("../../atlastk")<import_stmt>atlastk<class_stmt>TodoMVC<block_start><def_stmt>__init__ self<block_start>self.exclude=<none><line_sep>self.index=-1<line_sep>self.todos=[]<if_stmt><false># Set to 'True' for testing purpose. <block_start>self.todos.append({"label":"Todo 1" "completed":<false>})<line_sep>self.todos.append({"label":"Todo 2" "completed":<true>})<block_end><block_end><def_stmt>items_left self<block_start>count=0<for_stmt>index range(len(self.todos))<block_start><if_stmt><not>self.todos[index]['completed']<block_start>count<augadd>1<block_end><block_end><return>count<block_end><def_stmt>push self todo id xml<block_start>xml.push_tag("Todo")<line_sep>xml.put_attribute("id" id)<line_sep>xml.put_attribute("completed" "true"<if>todo['completed']<else>"false")<line_sep>xml.putValue(todo['label'])<line_sep>xml.pop_tag()<block_end><def_stmt>display_count self dom count<block_start>text=""<if_stmt>count<eq>1<block_start>text="1 item left"<block_end><elif_stmt>count<ne>0<block_start>text=str(count)+" items left"<block_end>dom.set_value("Count" text)<block_end><def_stmt>handle_count self dom<block_start>count=self.items_left()<if_stmt>count<ne>len(self.todos)<block_start>dom.disable_element("HideClearCompleted")<block_end><else_stmt><block_start>dom.enable_element("HideClearCompleted")<block_end>self.display_count(dom count)<block_end><def_stmt>display_todos self dom<block_start>xml=atlastk.create_XML("XDHTML")<line_sep>xml.push_tag("Todos")<for_stmt>index range(len(self.todos))<block_start>todo=self.todos[index]<if_stmt>(self.exclude<eq><none>)<or>(todo['completed']<ne>self.exclude)<block_start>self.push(todo index xml)<block_end><block_end>xml.pop_tag()<line_sep>dom.inner("Todos" xml "Todos.xsl")<line_sep>self.handle_count(dom)<block_end><def_stmt>submit_new self dom<block_start>value=dom.get_value("Input").strip()<line_sep>dom.set_value("Input" "")<if_stmt>value<block_start>self.todos.insert(0 {'label':value 'completed':<false>})<line_sep>self.display_todos(dom)<block_end><block_end><def_stmt>submit_modification self dom<block_start>index=self.index<line_sep>self.index=-1<line_sep>value=dom.get_value("Input."+str(index)).strip()<line_sep>dom.set_value("Input."+str(index) "")<if_stmt>value<block_start>self.todos[index]['label']=value<line_sep>dom.set_value("Label."+str(index) 
value)<line_sep>dom.remove_classes({"View."+str(index):"hide" "Todo."+str(index):"editing"})<block_end><else_stmt><block_start>self.todos.pop(index)<line_sep>self.displayTodos(dom)<block_end><block_end><block_end><def_stmt>ac_connect self dom<block_start>dom.inner("" open("Main.html").read())<line_sep>dom.focus("Input")<line_sep>self.display_todos(dom)<line_sep>dom.disable_elements(["HideActive" "HideCompleted"])<block_end><def_stmt>ac_destroy self dom id<block_start>self.todos.pop(int(dom.get_mark(id)))<line_sep>self.display_todos(dom)<block_end><def_stmt>ac_toggle self dom id<block_start>index=int(id)<line_sep>self.todos[index]['completed']=<not>self.todos[index]['completed']<line_sep>dom.toggle_class("Todo."+id "completed")<line_sep>dom.toggle_class("Todo."+id "active")<line_sep>self.handle_count(dom)<block_end><def_stmt>ac_all self dom<block_start>self.exclude=<none><line_sep>dom.add_class("All" "selected")<line_sep>dom.remove_classes({"Active":"selected" "Completed":"selected"})<line_sep>dom.disable_elements(["HideActive" "HideCompleted"])<block_end><def_stmt>ac_active self dom<block_start>self.exclude=<true><line_sep>dom.add_class("Active" "selected")<line_sep>dom.remove_classes({"All":"selected" "Completed":"selected"})<line_sep>dom.disable_element("HideActive")<line_sep>dom.enable_element("HideCompleted")<block_end><def_stmt>ac_completed self dom<block_start>self.exclude=<false><line_sep>dom.add_class("Completed" "selected")<line_sep>dom.remove_classes({"All":"selected" "Active":"selected"})<line_sep>dom.disable_element("HideCompleted")<line_sep>dom.enable_element("HideActive")<block_end><def_stmt>ac_clear self dom<block_start>index=len(self.todos)<while_stmt>index<block_start>index<augsub>1<if_stmt>self.todos[index]['completed']<block_start>self.todos.pop(index)<block_end><block_end>self.display_todos(dom)<block_end><def_stmt>ac_edit self dom id<block_start>value=dom.get_mark(id)<line_sep>self.index=int(value)<line_sep>dom.add_classes({"View."+value:"hide" id:"editing"})<line_sep>dom.set_value("Input."+value self.todos[self.index]['label'])<line_sep>dom.focus("Input."+value)<block_end><def_stmt>ac_cancel self dom<block_start>index=str(self.index)<line_sep>self.index=-1<line_sep>dom.set_value("Input."+index "")<line_sep>dom.remove_classes({"View."+index:"hide" "Todo."+index:"editing"})<block_end>callbacks={"":ac_connect "Submit":<lambda>self dom:self.submit_new(dom)<if>self.index<eq>-1<else>self.submit_modification(dom) "Destroy":ac_destroy "Toggle":ac_toggle "All":ac_all "Active":ac_active "Completed":ac_completed "Clear":ac_clear "Edit":ac_edit "Cancel":ac_cancel }<line_sep>atlastk.launch(callbacks TodoMVC open("HeadFaaS.html").read())<line_sep>
# Copyright 2018 Owkin, inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_stmt>json<import_stmt>numpy<as>np<import_from_stmt>sklearn.model_selection KFold<line_sep>N_FOLDS=4<line_sep>current_directory=os.path.dirname(__file__)<line_sep>assets_keys_path=os.path.join(current_directory '../../titanic/assets_keys.json')<line_sep>print(f'Loading existing asset keys from {os.path.abspath(assets_keys_path)}...')<with_stmt>open(assets_keys_path 'r')<as>f<block_start>assets_keys=json.load(f)<block_end>train_data_sample_keys=assets_keys['train_data_sample_keys']<line_sep>print('Generating folds...')<line_sep>X=np.array(train_data_sample_keys)<line_sep>kf=KFold(n_splits=N_FOLDS shuffle=<true>)<line_sep>folds=[{'train_data_sample_keys':list(X[train_index]) 'test_data_sample_keys':list(X[test_index])}<for>train_index,test_index kf.split(X)]<with_stmt>open(os.path.join(current_directory '../folds_keys.json') 'w')<as>f<block_start>json.dump({'folds':folds} f indent=2)<block_end>print(f'Folds keys have been saved to {os.path.abspath(assets_keys_path)}')<line_sep>
<import_stmt>sys<if_stmt>sys.platform<eq>"win32"<block_start><import_from_stmt>tzlocal.win32 get_localzone get_localzone_name reload_localzone <line_sep># pragma: no cover <block_end><else_stmt><block_start><import_from_stmt>tzlocal.unix get_localzone get_localzone_name reload_localzone<block_end>__all__=["get_localzone" "get_localzone_name" "reload_localzone"]<line_sep>
# -*- coding: utf-8 -*- <import_stmt>os<import_from_stmt>setuptools setup<line_sep>hitcount=__import__('hitcount')<line_sep>README=open(os.path.join(os.path.dirname(__file__) 'README.rst')).read()<line_sep># allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__) os.pardir)))<line_sep>setup(name="django-hitcount" version=hitcount.__version__ include_package_data=<true> packages=['hitcount'] url='http://github.com/thornomad/django-hitcount' license='BSD' description="Hit counting application for Django." long_description=README author='<NAME>' author_email='<EMAIL>' install_requires=['django-etc>=1.2.0' ] classifiers=['Development Status :: 4 - Beta' 'Environment :: Plugins' 'Framework :: Django' 'Intended Audience :: Developers' 'License :: OSI Approved :: BSD License' 'Programming Language :: Python' 'Topic :: Software Development :: Libraries :: Python Modules' 'Programming Language :: Python :: 2.6' 'Programming Language :: Python :: 2.7' 'Programming Language :: Python :: 3.2' 'Programming Language :: Python :: 3.3' 'Programming Language :: Python :: 3.4' ] zip_safe=<false> )<line_sep>
<import_stmt>pytest<import_from_stmt>src.base.node Node<import_from_stmt>src.data.osm.node_merger NodeMerger<line_sep>@pytest.fixture(scope="module")<def_stmt>node_list <block_start>n1=Node(7.41275611 46.922925 1)<line_sep>n2=Node(7.41275612 46.922925 2)<line_sep>n3=Node(7.41275613 46.922925 3)<line_sep>n4=Node(8.412797 46.922942 4)<line_sep>n5=Node(8.412797 46.922941 5)<line_sep><return>[n1 n2 n3 n4 n5]<block_end>@pytest.fixture(scope="module")<def_stmt>same_node <block_start><return>Node(46.78351333884473 8.159137666225423 10)<block_end>@pytest.fixture(scope="module")<def_stmt>big_node_list <block_start><return>[Node(47.09572760391754 9.354246854782108 0.0) Node(47.09569108531167 9.353826284408573 0.0) Node(47.095734907638715 9.353978633880619 0.0) Node(47.091450260764105 9.347023665904997 0.0) Node(47.09598323415865 9.353849887847904 0.0) Node(47.09582072636252 9.354110956192018 0.0) Node(47.095880982062205 9.353635311126713 0.0) Node(47.09582255229281 9.353581666946415 0.0)]<block_end><def_stmt>test_get_neighbors node_list<block_start>merger=NodeMerger(node_list)<line_sep>merger._generate_near_dict()<line_sep>result_list=merger._get_neighbors(node_list[0])<assert_stmt>len(result_list)<eq>3<line_sep>result_list=merger._get_neighbors(node_list[3])<assert_stmt>len(result_list)<eq>2<block_end><def_stmt>test_reduce node_list<block_start>merger=NodeMerger(node_list)<line_sep>merged_nodes=merger.reduce()<assert_stmt>len(merged_nodes)<eq>2<block_end><def_stmt>test_reduce_same_points same_node<block_start>merger=NodeMerger([same_node same_node])<line_sep>merged_nodes=merger.reduce()<assert_stmt>len(merged_nodes)<eq>1<block_end><def_stmt>test_reduce_not_same_points same_node<block_start>node=Node(46.78351333884473 8.159137666225423 0)<line_sep>merger=NodeMerger([node same_node])<line_sep>merged_nodes=merger.reduce()<assert_stmt>len(merged_nodes)<eq>1<block_end><def_stmt>test_node_merger big_node_list<block_start>merger=NodeMerger(big_node_list 30)<line_sep>nodes=merger.reduce()<assert_stmt>len(nodes)<eq>2<block_end>
# TODO: dependency on src/libsvm/svmutil needs to be properly done, this is a temporary workaround wrapper <import_from_future_stmt> absolute_import<import_stmt>sys<import_from_stmt>vmaf.config VmafConfig<line_sep># This will work only when running with a checked out vmaf source, but not via pip install libsvm_path=VmafConfig.root_path('third_party' 'libsvm' 'python')<if_stmt>libsvm_path<not><in>sys.path# Inject {project}/src/libsvm/python to PYTHONPATH dynamically <block_start>sys.path.append(libsvm_path)<block_end><try_stmt># This import will work only if above injection was meaningful (ie: user has the files in the right place) <block_start><import_from_stmt>svmutil *# noqa <block_end><except_stmt>ImportError<as>e<block_start>print("Can't import svmutil from %s: %s"%(libsvm_path e))<line_sep>sys.exit(1)<block_end>
# coding=utf-8 <import_stmt>operator<import_from_stmt>collections Counter<import_from_stmt>functools reduce<def_stmt>add_dicts *args<block_start>""" Adds two or more dicts together. Common keys will have their values added. For example:: >>> t1 = {'a':1, 'b':2} >>> t2 = {'b':1, 'c':3} >>> t3 = {'d':4} >>> add_dicts(t1, t2, t3) {'a': 1, 'c': 3, 'b': 3, 'd': 4} """<line_sep>counters=[Counter(arg)<for>arg args]<line_sep><return>dict(reduce(operator.add counters))<block_end>
<import_stmt>logging<import_from_stmt>datetime datetime<import_from_stmt>sendgrid Mail Bcc SendGridAPIClient<import_from_stmt>diffengine.exceptions.sendgrid AlreadyEmailedError SendgridConfigNotFoundError SendgridArchiveUrlNotFoundError <class_stmt>SendgridHandler<block_start>api_token=<none><line_sep>sender=<none><line_sep>recipients=<none><def_stmt>__init__ self config<block_start><if_stmt><not>all(["api_token"<in>config "sender"<in>config "recipients"<in>config])<block_start>logging.warning("No global config found for sendgrid, expecting config set for each feed")<block_end>self.api_token=config.get("api_token")<line_sep>self.sender=config.get("sender")<line_sep>self.recipients=self.build_recipients(config.get("recipients"))<block_end><def_stmt>mailer self api_token<block_start><return>SendGridAPIClient(api_token)<block_end><def_stmt>build_recipients self recipients<block_start><if_stmt>recipients<block_start><return>[x.strip()<for>x recipients.split(",")]<block_end><block_end><def_stmt>build_subject self diff<block_start><return>diff.old.title<block_end><def_stmt>build_html_body self diff<block_start>body=<none><with_stmt>open(diff.html_path)<as>html_file<block_start>body=html_file.read()<block_end><return>body<block_end><def_stmt>publish_diff self diff feed_config<block_start><if_stmt>diff.emailed<block_start><raise>AlreadyEmailedError(diff.id)<block_end><elif_stmt><not>(diff.old.archive_url<and>diff.new.archive_url)<block_start><raise>SendgridArchiveUrlNotFoundError()<block_end>api_token=feed_config.get("api_token" self.api_token)<line_sep>sender=feed_config.get("sender" self.sender)<line_sep>recipients=<none><if_stmt>feed_config.get("recipients")<block_start>recipients=self.build_recipients(feed_config.get("recipients"))<block_end><else_stmt><block_start>recipients=self.recipients<block_end><if_stmt><not>all([api_token sender recipients])<block_start><raise>SendgridConfigNotFoundError<block_end>subject=self.build_subject(diff)<line_sep>message=Mail(from_email=sender subject=subject to_emails=recipients.pop(0) html_content=self.build_html_body(diff) )<if_stmt>recipients<block_start>message.bcc=recipients<block_end><try_stmt><block_start>self.mailer(api_token).send(message)<line_sep>diff.emailed=datetime.utcnow()<line_sep>logging.info("emailed %s" subject)<line_sep>diff.save()<block_end><except_stmt>Exception<as>e<block_start>logging.error("unable to email: %s" e)<block_end><block_end><block_end>
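# ---------------------------------------------------------------------------
# Illustrative configuration sketch (all values are placeholders, not part of
# the module): the handler accepts a global config and falls back to it
# whenever a feed's own config omits api_token, sender or recipients.
config = {
    "api_token": "SG.xxxxx",                               # placeholder token
    "sender": "diffengine@example.com",
    "recipients": "alice@example.com, bob@example.com",    # comma-separated
}
handler = SendgridHandler(config)
# handler.publish_diff(diff, feed_config={}) would then email the rendered diff
# at diff.html_path, using the global settings above for that feed.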
""" Test if the environment.yml is empty or it constains other data structure than a dictionary """<import_stmt>os<import_stmt>sys<import_stmt>pytest<import_from_stmt>repo2docker buildpacks<def_stmt>test_empty_env_yml tmpdir<block_start>tmpdir.chdir()<line_sep>p=tmpdir.join("environment.yml")<line_sep>p.write("")<line_sep>bp=buildpacks.CondaBuildPack()<line_sep>py_ver=bp.python_version<line_sep># If the environment.yml is empty python_version will get an empty string <assert_stmt>py_ver<eq>""<block_end><def_stmt>test_no_dict_env_yml tmpdir<block_start>tmpdir.chdir()<line_sep>q=tmpdir.join("environment.yml")<line_sep>q.write("numpy\n "<concat>"matplotlib\n")<line_sep>bq=buildpacks.CondaBuildPack()<with_stmt>pytest.raises(TypeError)<block_start>py_ver=bq.python_version<block_end><block_end>
HAND1=""" Full Tilt Poker Game #33286946295: MiniFTOPS Main Event (255707037), Table 179 - NL Hold'em - 10/20 - 19:26:50 CET - 2013/09/22 [13:26:50 ET - 2013/09/22] Seat 1: Popp1987 (13,587) Seat 2: Luckytobgood (10,110) Seat 3: FatalRevange (9,970) Seat 4: IgaziFerfi (10,000) Seat 5: egis25 (6,873) Seat 6: gamblie (9,880) Seat 7: idanuTz1 (10,180) Seat 8: PtheProphet (9,930) Seat 9: JohnyyR (9,840) gamblie posts the small blind of 10 idanuTz1 posts the big blind of 20 The button is in seat #5 *** HOLE CARDS *** Dealt to IgaziFerfi [9d Ks] PtheProphet has 15 seconds left to act PtheProphet folds JohnyyR raises to 40 Popp1987 has 15 seconds left to act Popp1987 folds Luckytobgood folds FatalRevange raises to 100 IgaziFerfi folds egis25 folds gamblie folds idanuTz1 folds JohnyyR has 15 seconds left to act JohnyyR calls 60 *** FLOP *** [8h 4h Tc] (Total Pot: 230, 2 Players) JohnyyR checks FatalRevange has 15 seconds left to act FatalRevange bets 120 JohnyyR folds Uncalled bet of 120 returned to FatalRevange FatalRevange mucks FatalRevange wins the pot (230) *** SUMMARY *** Total pot 230 | Rake 0 Board: [8h 4h Tc] Seat 1: Popp1987 didn't bet (folded) Seat 2: Luckytobgood didn't bet (folded) Seat 3: FatalRevange collected (230), mucked Seat 4: IgaziFerfi didn't bet (folded) Seat 5: egis25 (button) didn't bet (folded) Seat 6: gamblie (small blind) folded before the Flop Seat 7: idanuTz1 (big blind) folded before the Flop Seat 8: PtheProphet didn't bet (folded) Seat 9: JohnyyR folded on the Flop """<line_sep>TURBO_SNG="""\ Full Tilt Poker Game #34374264321: $10 Sit & Go (Turbo) (268569961), Table 1 - NL Hold'em - 15/30 - 11:57:01 CET - 2014/06/29 [05:57:01 ET - 2014/06/29] Seat 1: snake 422 (1,500) Seat 2: IgaziFerfi (1,500) Seat 3: MixaOne (1,500) Seat 4: BokkaBlake (1,500) Seat 5: Sajiee (1,500) Seat 6: AzzzJJ (1,500) snake 422 posts the small blind of 15 IgaziFerfi posts the big blind of 30 The button is in seat #6 *** HOLE CARDS *** Dealt to IgaziFerfi [2h 5d] MixaOne calls 30 BokkaBlake folds Sajiee folds AzzzJJ raises to 90 snake 422 folds IgaziFerfi folds MixaOne calls 60 *** FLOP *** [6s 9c 3d] (Total Pot: 225, 2 Players) MixaOne bets 30 AzzzJJ raises to 120 MixaOne folds Uncalled bet of 90 returned to AzzzJJ AzzzJJ mucks AzzzJJ wins the pot (285) *** SUMMARY *** Total pot 285 | Rake 0 Board: [6s 9c 3d] Seat 1: snake 422 (small blind) folded before the Flop Seat 2: IgaziFerfi (big blind) folded before the Flop Seat 3: MixaOne folded on the Flop Seat 4: BokkaBlake didn't bet (folded) Seat 5: Sajiee didn't bet (folded) Seat 6: AzzzJJ (button) collected (285), mucked """<line_sep>
<import_from_stmt>checkov.common.models.enums CheckResult CheckCategories<import_from_stmt>checkov.terraform.checks.resource.base_resource_check BaseResourceCheck<class_stmt>ELBUsesSSL(BaseResourceCheck)<block_start><def_stmt>__init__ self<block_start>name="Ensure that Elastic Load Balancer(s) uses SSL certificates provided by AWS Certificate Manager"<line_sep>id="CKV_AWS_127"<line_sep>supported_resources=['aws_elb']<line_sep>categories=[CheckCategories.GENERAL_SECURITY]<line_sep>super().__init__(name=name id=id categories=categories supported_resources=supported_resources)<block_end><def_stmt>scan_resource_conf self conf<block_start>self.evaluated_keys=['listener']<if_stmt>'listener'<in>conf<block_start><for_stmt>idx,listener enumerate(conf['listener'])<block_start><if_stmt>'ssl_certificate_id'<not><in>listener<block_start>self.evaluated_keys=[f'listener/{idx}']<line_sep><return>CheckResult.FAILED<block_end><block_end><block_end><return>CheckResult.PASSED<block_end><block_end>check=ELBUsesSSL()<line_sep>
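# ---------------------------------------------------------------------------
# Illustrative sketch of the parsed `conf` shape the check above expects (the
# values are placeholders): a listener block carrying an ssl_certificate_id
# passes, one without it fails.
conf_pass = {"listener": [{"lb_port": [443], "ssl_certificate_id": ["arn:aws:acm:placeholder"]}]}
conf_fail = {"listener": [{"lb_port": [80]}]}
assert check.scan_resource_conf(conf_pass) == CheckResult.PASSED
assert check.scan_resource_conf(conf_fail) == CheckResult.FAILED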
<import_stmt>math<def_stmt>calibrated_fps calibrate<block_start>"""Calibration of the dynamic frames per second engine. I've started with the equation y = log10(x + m) * k + n, where: y is the desired fps, m and n are horizontal and vertical translation, k is a calibration factor, computed from some user input c (see readme for details). Considering minfps and maxfps as given constants, I came to: fps = log10(x + 1) * k + minfps, which must be equal to maxfps for x = c, so the factor k = (maxfps - minfps) / log10(c + 1), and fps = log10(x + 1) * (maxfps - minfps) / log10(c + 1) + minfps Neat! ;) Args: calibrate (float): user provided Returns: a callable to calculate the fps """<line_sep>min_fps,max_fps=2. 60.<line_sep>calibrate=max(1e-6 calibrate)<line_sep>adjust_log_curve=100./min(calibrate 100.)# adjust the curve for small numbers factor=(max_fps-min_fps)/math.log10((calibrate<times>adjust_log_curve)+1.)<def_stmt>fps rate<block_start><if_stmt>rate<le>0<block_start><return>10.# bootstrap speed <block_end><if_stmt>rate<l>calibrate<block_start><return>math.log10((rate<times>adjust_log_curve)+1.)<times>factor+min_fps<block_end><return>max_fps<block_end><return>fps<block_end>
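# ---------------------------------------------------------------------------
# Usage sketch for the factory above: with calibrate=10 the returned callable
# follows a log10 curve from min_fps up to max_fps at rate==10, and stays
# clamped at 60 fps for any higher rate.
fps = calibrated_fps(10.)
for rate in (0, 0.5, 1, 5, 10, 50):
    print(rate, round(fps(rate), 1))   # rate 0 returns the 10 fps bootstrap speed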
<import_from_stmt>rest_framework serializers<import_from_stmt>drf_spectacular.utils extend_schema_field<import_from_stmt>drf_spectacular.types OpenApiTypes<import_from_stmt>django.conf settings<import_from_stmt>django.core.files.storage default_storage<import_from_stmt>django.utils.translation gettext_lazy<as>_<import_from_stmt>baserow.core.models UserFile<import_from_stmt>baserow.core.user_files.handler UserFileHandler<class_stmt>UserFileUploadViaURLRequestSerializer(serializers.Serializer)<block_start>url=serializers.URLField()<block_end><class_stmt>UserFileURLAndThumbnailsSerializerMixin(serializers.Serializer)<block_start>url=serializers.SerializerMethodField()<line_sep>thumbnails=serializers.SerializerMethodField()<def_stmt>get_instance_attr self instance name<block_start><return>getattr(instance name)<block_end>@extend_schema_field(OpenApiTypes.URI)<def_stmt>get_url self instance<block_start>name=self.get_instance_attr(instance "name")<line_sep>path=UserFileHandler().user_file_path(name)<line_sep>url=default_storage.url(path)<line_sep><return>url<block_end>@extend_schema_field(OpenApiTypes.OBJECT)<def_stmt>get_thumbnails self instance<block_start><if_stmt><not>self.get_instance_attr(instance "is_image")<block_start><return><none><block_end>name=self.get_instance_attr(instance "name")<line_sep><return>{thumbnail_name:{"url":default_storage.url(UserFileHandler().user_file_thumbnail_path(name thumbnail_name)) "width":size[0] "height":size[1] }<for>thumbnail_name,size settings.USER_THUMBNAILS.items()}<block_end><block_end><class_stmt>UserFileSerializer(UserFileURLAndThumbnailsSerializerMixin serializers.ModelSerializer)<block_start>name=serializers.SerializerMethodField()<class_stmt>Meta<block_start>model=UserFile<line_sep>fields=("size" "mime_type" "is_image" "image_width" "image_height" "uploaded_at" "url" "thumbnails" "name" "original_name" )<block_end>@extend_schema_field(OpenApiTypes.STR)<def_stmt>get_name self instance<block_start><return>instance.name<block_end><block_end>@extend_schema_field(UserFileSerializer)<class_stmt>UserFileField(serializers.Field)<block_start>""" This field can be used for validating user provided user files, which means a user has provided a dict containing the user file name. It will check if that user file exists and returns that instance. Vice versa, a user file instance will be serialized when converted to data by the serializer. Example: Serializer(data={ "user_file": {"name": "filename.jpg"} }).data == {"user_file": UserFile(...)} The field can also be used for serializing a user file. The value must then be provided as instance to the serializer. Example: Serializer({ "user_file": UserFile(...) 
}).data == {"user_file": {"name": "filename.jpg", ...}} """<line_sep>default_error_messages={"invalid_value":_("The value must be an object containing the file name.") "invalid_user_file":_("The provided user file does not exist.") }<def_stmt>__init__ self *args **kwargs<block_start>allow_null=kwargs.pop("allow_null" <true>)<line_sep>default=kwargs.pop("default" <none>)<line_sep>super().__init__(allow_null=allow_null default=default *args **kwargs)<block_end><def_stmt>to_internal_value self data<block_start><if_stmt>isinstance(data UserFile)<block_start><return>data<block_end><if_stmt><not>isinstance(data dict)<or><not>isinstance(data.get("name") str)<block_start>self.fail("invalid_value")<block_end><try_stmt><block_start>user_file=UserFile.objects.all().name(data["name"]).get()<block_end><except_stmt>UserFile.DoesNotExist<block_start>self.fail("invalid_user_file")<block_end><return>user_file<block_end><def_stmt>to_representation self value<block_start><if_stmt>isinstance(value UserFile)<and>self.parent.instance<is><not><none><block_start><return>UserFileSerializer(value).data<block_end><return>value<block_end><block_end>
<import_stmt>importlib<import_stmt>os<line_sep>aliases={'qt4':'qt' 'gtk2':'gtk' }<line_sep>backends=['qt' 'qt4' 'qt5' 'gtk' 'gtk2' 'gtk3' 'tk' 'wx' 'pyglet' 'glut' 'osx' 'asyncio']<line_sep>registered={}<def_stmt>register name inputhook<block_start>"""Register the function *inputhook* as an event loop integration."""<line_sep>registered[name]=inputhook<block_end><class_stmt>UnknownBackend(KeyError)<block_start><def_stmt>__init__ self name<block_start>self.name=name<block_end><def_stmt>__str__ self<block_start><return>("No event loop integration for {!r}. "<concat>"Supported event loops are: {}").format(self.name ', '.join(backends+sorted(registered)))<block_end><block_end><def_stmt>get_inputhook_name_and_func gui<block_start><if_stmt>gui<in>registered<block_start><return>gui registered[gui]<block_end><if_stmt>gui<not><in>backends<block_start><raise>UnknownBackend(gui)<block_end><if_stmt>gui<in>aliases<block_start><return>get_inputhook_name_and_func(aliases[gui])<block_end>gui_mod=gui<if_stmt>gui<eq>'qt5'<block_start>os.environ['QT_API']='pyqt5'<line_sep>gui_mod='qt'<block_end>mod=importlib.import_module('IPython.terminal.pt_inputhooks.'+gui_mod)<line_sep><return>gui mod.inputhook<block_end>
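# Illustration of the dispatch above (the custom names are only examples): registered hooks win over
# the built-in backends, and aliases such as 'qt4' resolve recursively to 'qt'.
#   register('my_gui', my_inputhook)            # custom integration, looked up before the backends
#   get_inputhook_name_and_func('qt4')          # -> ('qt', inputhook from IPython.terminal.pt_inputhooks.qt)
#   get_inputhook_name_and_func('unknown-gui')  # raises UnknownBackend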
#===- perf-helper.py - Clang Python Bindings -----------------*- python -*--===# # # The LLVM Compiler Infrastructure # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # #===------------------------------------------------------------------------===# <import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<import_stmt>subprocess<import_stmt>argparse<import_stmt>time<import_stmt>bisect<import_stmt>shlex<import_stmt>tempfile<line_sep>test_env={'PATH':os.environ['PATH']}<def_stmt>findFilesWithExtension path extension<block_start>filenames=[]<for_stmt>root,dirs,files os.walk(path)<block_start><for_stmt>filename files<block_start><if_stmt>filename.endswith(extension)<block_start>filenames.append(os.path.join(root filename))<block_end><block_end><block_end><return>filenames<block_end><def_stmt>clean args<block_start><if_stmt>len(args)<ne>2<block_start>print('Usage: %s clean <path> <extension>\n'%__file__+'\tRemoves all files with extension from <path>.')<line_sep><return>1<block_end><for_stmt>filename findFilesWithExtension(args[0] args[1])<block_start>os.remove(filename)<block_end><return>0<block_end><def_stmt>merge args<block_start><if_stmt>len(args)<ne>3<block_start>print('Usage: %s clean <llvm-profdata> <output> <path>\n'%__file__+'\tMerges all profraw files from path into output.')<line_sep><return>1<block_end>cmd=[args[0] 'merge' '-o' args[1]]<line_sep>cmd.extend(findFilesWithExtension(args[2] "profraw"))<line_sep>subprocess.check_call(cmd)<line_sep><return>0<block_end><def_stmt>dtrace args<block_start>parser=argparse.ArgumentParser(prog='perf-helper dtrace' description='dtrace wrapper for order file generation')<line_sep>parser.add_argument('--buffer-size' metavar='size' type=int required=<false> default=1 help='dtrace buffer size in MB (default 1)')<line_sep>parser.add_argument('--use-oneshot' required=<false> action='store_true' help='Use dtrace\'s oneshot probes')<line_sep>parser.add_argument('--use-ustack' required=<false> action='store_true' help='Use dtrace\'s ustack to print function names')<line_sep>parser.add_argument('--cc1' required=<false> action='store_true' help='Execute cc1 directly (don\'t profile the driver)')<line_sep>parser.add_argument('cmd' nargs='*' help='')<line_sep># Use python's arg parser to handle all leading option arguments, but pass # everything else through to dtrace first_cmd=next(arg<for>arg args<if><not>arg.startswith("--"))<line_sep>last_arg_idx=args.index(first_cmd)<line_sep>opts=parser.parse_args(args[:last_arg_idx])<line_sep>cmd=args[last_arg_idx:]<if_stmt>opts.cc1<block_start>cmd=get_cc1_command_for_args(cmd test_env)<block_end><if_stmt>opts.use_oneshot<block_start>target="oneshot$target:::entry"<block_end><else_stmt><block_start>target="pid$target:::entry"<block_end>predicate='%s/probemod=="%s"/'%(target os.path.basename(args[0]))<line_sep>log_timestamp='printf("dtrace-TS: %d\\n", timestamp)'<if_stmt>opts.use_ustack<block_start>action='ustack(1);'<block_end><else_stmt><block_start>action='printf("dtrace-Symbol: %s\\n", probefunc);'<block_end>dtrace_script="%s { %s; %s }"%(predicate log_timestamp action)<line_sep>dtrace_args=[]<if_stmt><not>os.geteuid()<eq>0<block_start>print('Script must be run as root, or you must add the following to your sudoers:'+'%%admin ALL=(ALL) NOPASSWD: /usr/sbin/dtrace')<line_sep>dtrace_args.append("sudo")<block_end>dtrace_args.extend(('dtrace' '-xevaltime=exec' '-xbufsize=%dm'%(opts.buffer_size) '-q' '-n' dtrace_script '-c' ' 
'.join(cmd)))<if_stmt>sys.platform<eq>"darwin"<block_start>dtrace_args.append('-xmangled')<block_end>start_time=time.time()<with_stmt>open("%d.dtrace"%os.getpid() "w")<as>f<block_start>subprocess.check_call(dtrace_args stdout=f stderr=subprocess.PIPE)<block_end>elapsed=time.time()-start_time<line_sep>print("... data collection took %.4fs"%elapsed)<line_sep><return>0<block_end><def_stmt>get_cc1_command_for_args cmd env# Find the cc1 command used by the compiler. To do this we execute the # compiler with '-###' to figure out what it wants to do. <block_start>cmd=cmd+['-###']<line_sep>cc_output=subprocess.check_output(cmd stderr=subprocess.STDOUT env=env).strip()<line_sep>cc_commands=[]<for_stmt>ln cc_output.split('\n')# Filter out known garbage. <block_start><if_stmt>(ln<eq>'Using built-in specs.'<or>ln.startswith('Configured with:')<or>ln.startswith('Target:')<or>ln.startswith('Thread model:')<or>ln.startswith('InstalledDir:')<or>ln.startswith('LLVM Profile Note')<or>' version '<in>ln)<block_start><continue><block_end>cc_commands.append(ln)<block_end><if_stmt>len(cc_commands)<ne>1<block_start>print('Fatal error: unable to determine cc1 command: %r'%cc_output)<line_sep>exit(1)<block_end>cc1_cmd=shlex.split(cc_commands[0])<if_stmt><not>cc1_cmd<block_start>print('Fatal error: unable to determine cc1 command: %r'%cc_output)<line_sep>exit(1)<block_end><return>cc1_cmd<block_end><def_stmt>cc1 args<block_start>parser=argparse.ArgumentParser(prog='perf-helper cc1' description='cc1 wrapper for order file generation')<line_sep>parser.add_argument('cmd' nargs='*' help='')<line_sep># Use python's arg parser to handle all leading option arguments, but pass # everything else through to dtrace first_cmd=next(arg<for>arg args<if><not>arg.startswith("--"))<line_sep>last_arg_idx=args.index(first_cmd)<line_sep>opts=parser.parse_args(args[:last_arg_idx])<line_sep>cmd=args[last_arg_idx:]<line_sep># clear the profile file env, so that we don't generate profdata # when capturing the cc1 command cc1_env=test_env<line_sep>cc1_env["LLVM_PROFILE_FILE"]=os.devnull<line_sep>cc1_cmd=get_cc1_command_for_args(cmd cc1_env)<line_sep>subprocess.check_call(cc1_cmd)<line_sep><return>0<block_end><def_stmt>parse_dtrace_symbol_file path all_symbols all_symbols_set missing_symbols opts<block_start><def_stmt>fix_mangling symbol<block_start><if_stmt>sys.platform<eq>"darwin"<block_start><if_stmt>symbol[0]<ne>'_'<and>symbol<ne>'start'<block_start>symbol='_'+symbol<block_end><block_end><return>symbol<block_end><def_stmt>get_symbols_with_prefix symbol<block_start>start_index=bisect.bisect_left(all_symbols symbol)<for_stmt>s all_symbols[start_index:]<block_start><if_stmt><not>s.startswith(symbol)<block_start><break><block_end><yield>s<block_end><block_end># Extract the list of symbols from the given file, which is assumed to be # the output of a dtrace run logging either probefunc or ustack(1) and # nothing else. The dtrace -xdemangle option needs to be used. # # This is particular to OS X at the moment, because of the '_' handling. <with_stmt>open(path)<as>f<block_start>current_timestamp=<none><for_stmt>ln f# Drop leading and trailing whitespace. <block_start>ln=ln.strip()<if_stmt><not>ln.startswith("dtrace-")<block_start><continue><block_end># If this is a timestamp specifier, extract it. 
<if_stmt>ln.startswith("dtrace-TS: ")<block_start>_,data=ln.split(': ' 1)<if_stmt><not>data.isdigit()<block_start>print("warning: unrecognized timestamp line %r, ignoring"%ln file=sys.stderr)<line_sep><continue><block_end>current_timestamp=int(data)<line_sep><continue><block_end><elif_stmt>ln.startswith("dtrace-Symbol: ")<block_start>_,ln=ln.split(': ' 1)<if_stmt><not>ln<block_start><continue><block_end># If there is a '`' in the line, assume it is a ustack(1) entry in # the form of <modulename>`<modulefunc>, where <modulefunc> is never # truncated (but does need the mangling patched). <if_stmt>'`'<in>ln<block_start><yield>(current_timestamp fix_mangling(ln.split('`' 1)[1]))<line_sep><continue><block_end># Otherwise, assume this is a probefunc printout. DTrace on OS X # seems to have a bug where it prints the mangled version of symbols # which aren't C++ mangled. We just add a '_' to anything but start # which doesn't already have a '_'. symbol=fix_mangling(ln)<line_sep># If we don't know all the symbols, or the symbol is one of them, # just return it. <if_stmt><not>all_symbols_set<or>symbol<in>all_symbols_set<block_start><yield>(current_timestamp symbol)<line_sep><continue><block_end># Otherwise, we have a symbol name which isn't present in the # binary. We assume it is truncated, and try to extend it. # Get all the symbols with this prefix. possible_symbols=list(get_symbols_with_prefix(symbol))<if_stmt><not>possible_symbols<block_start><continue><block_end># If we found too many possible symbols, ignore this as a prefix. <if_stmt>len(possible_symbols)<g>100<block_start>print("warning: ignoring symbol %r "%symbol+"(no match and too many possible suffixes)" file=sys.stderr)<line_sep><continue><block_end># Report that we resolved a missing symbol. <if_stmt>opts.show_missing_symbols<and>symbol<not><in>missing_symbols<block_start>print("warning: resolved missing symbol %r"%symbol file=sys.stderr)<line_sep>missing_symbols.add(symbol)<block_end># Otherwise, treat all the possible matches as having occurred. This # is an over-approximation, but it should be ok in practice. <for_stmt>s possible_symbols<block_start><yield>(current_timestamp s)<block_end><block_end><block_end><block_end><block_end><def_stmt>uniq list<block_start>seen=set()<for_stmt>item list<block_start><if_stmt>item<not><in>seen<block_start><yield>item<line_sep>seen.add(item)<block_end><block_end><block_end><def_stmt>form_by_call_order symbol_lists# Simply strategy, just return symbols in order of occurrence, even across # multiple runs. <block_start><return>uniq(s<for>symbols symbol_lists<for>s symbols)<block_end><def_stmt>form_by_call_order_fair symbol_lists# More complicated strategy that tries to respect the call order across all # of the test cases, instead of giving a huge preference to the first test # case. # First, uniq all the lists. <block_start>uniq_lists=[list(uniq(symbols))<for>symbols symbol_lists]<line_sep># Compute the successors for each list. succs={}<for_stmt>symbols uniq_lists<block_start><for_stmt>a,b zip(symbols[:-1] symbols[1:])<block_start>succs[a]=items=succs.get(a [])<if_stmt>b<not><in>items<block_start>items.append(b)<block_end><block_end><block_end># Emit all the symbols, but make sure to always emit all successors from any # call list whenever we see a symbol. # # There isn't much science here, but this sometimes works better than the # more naive strategy. Then again, sometimes it doesn't so more research is # probably needed. 
<return>uniq(s<for>symbols symbol_lists<for>node symbols<for>s ([node]+succs.get(node [])))<block_end><def_stmt>form_by_frequency symbol_lists# Form the order file by just putting the most commonly occurring symbols # first. This assumes the data files didn't use the oneshot dtrace method. <block_start>counts={}<for_stmt>symbols symbol_lists<block_start><for_stmt>a symbols<block_start>counts[a]=counts.get(a 0)+1<block_end><block_end>by_count=counts.items()<line_sep>by_count.sort(key=<lambda>(_ n):-n)<line_sep><return>[s<for>s,n by_count]<block_end><def_stmt>form_by_random symbol_lists# Randomize the symbols. <block_start>merged_symbols=uniq(s<for>symbols symbol_lists<for>s symbols)<line_sep>random.shuffle(merged_symbols)<line_sep><return>merged_symbols<block_end><def_stmt>form_by_alphabetical symbol_lists# Alphabetize the symbols. <block_start>merged_symbols=list(set(s<for>symbols symbol_lists<for>s symbols))<line_sep>merged_symbols.sort()<line_sep><return>merged_symbols<block_end>methods=dict((name[len("form_by_"):] value)<for>name,value locals().items()<if>name.startswith("form_by_"))<def_stmt>genOrderFile args<block_start>parser=argparse.ArgumentParser("%prog [options] <dtrace data file directories>]")<line_sep>parser.add_argument('input' nargs='+' help='')<line_sep>parser.add_argument("--binary" metavar="PATH" type=str dest="binary_path" help="Path to the binary being ordered (for getting all symbols)" default=<none>)<line_sep>parser.add_argument("--output" dest="output_path" help="path to output order file to write" default=<none> required=<true> metavar="PATH")<line_sep>parser.add_argument("--show-missing-symbols" dest="show_missing_symbols" help="show symbols which are 'fixed up' to a valid name (requires --binary)" action="store_true" default=<none>)<line_sep>parser.add_argument("--output-unordered-symbols" dest="output_unordered_symbols_path" help="write a list of the unordered symbols to PATH (requires --binary)" default=<none> metavar="PATH")<line_sep>parser.add_argument("--method" dest="method" help="order file generation method to use" choices=methods.keys() default='call_order')<line_sep>opts=parser.parse_args(args)<line_sep># If the user gave us a binary, get all the symbols in the binary by # snarfing 'nm' output. <if_stmt>opts.binary_path<is><not><none><block_start>output=subprocess.check_output(['nm' '-P' opts.binary_path])<line_sep>lines=output.split("\n")<line_sep>all_symbols=[ln.split(' ' 1)[0]<for>ln lines<if>ln.strip()]<line_sep>print("found %d symbols in binary"%len(all_symbols))<line_sep>all_symbols.sort()<block_end><else_stmt><block_start>all_symbols=[]<block_end>all_symbols_set=set(all_symbols)<line_sep># Compute the list of input files. input_files=[]<for_stmt>dirname opts.input<block_start>input_files.extend(findFilesWithExtension(dirname "dtrace"))<block_end># Load all of the input files. print("loading from %d data files"%len(input_files))<line_sep>missing_symbols=set()<line_sep>timestamped_symbol_lists=[list(parse_dtrace_symbol_file(path all_symbols all_symbols_set missing_symbols opts))<for>path input_files]<line_sep># Reorder each symbol list. symbol_lists=[]<for_stmt>timestamped_symbols_list timestamped_symbol_lists<block_start>timestamped_symbols_list.sort()<line_sep>symbol_lists.append([symbol<for>_,symbol timestamped_symbols_list])<block_end># Execute the desire order file generation method. 
method=methods.get(opts.method)<line_sep>result=list(method(symbol_lists))<line_sep># Report to the user on what percentage of symbols are present in the order # file. num_ordered_symbols=len(result)<if_stmt>all_symbols<block_start>print("note: order file contains %d/%d symbols (%.2f%%)"%(num_ordered_symbols len(all_symbols) 100.<times>num_ordered_symbols/len(all_symbols)) file=sys.stderr)<block_end><if_stmt>opts.output_unordered_symbols_path<block_start>ordered_symbols_set=set(result)<with_stmt>open(opts.output_unordered_symbols_path 'w')<as>f<block_start>f.write("\n".join(s<for>s all_symbols<if>s<not><in>ordered_symbols_set))<block_end><block_end># Write the order file. <with_stmt>open(opts.output_path 'w')<as>f<block_start>f.write("\n".join(result))<line_sep>f.write("\n")<block_end><return>0<block_end>commands={'clean':clean 'merge':merge 'dtrace':dtrace 'cc1':cc1 'gen-order-file':genOrderFile}<def_stmt>main <block_start>f=commands[sys.argv[1]]<line_sep>sys.exit(f(sys.argv[2:]))<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
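# Example invocations of the sub-commands wired up above (all paths are placeholders):
#   python perf-helper.py clean ./profiles profraw
#   python perf-helper.py merge ./llvm-profdata merged.profdata ./profiles
#   python perf-helper.py gen-order-file --binary ./bin/clang --output clang.order --method call_order ./dtrace-data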
# coding: utf-8 <import_from_future_stmt> unicode_literals division absolute_import print_function<line_sep>__all__=['CIPHER_SUITE_MAP' ]<line_sep>CIPHER_SUITE_MAP={b'\x00\x00':'TLS_NULL_WITH_NULL_NULL' b'\x00\x01':'TLS_RSA_WITH_NULL_MD5' b'\x00\x02':'TLS_RSA_WITH_NULL_SHA' b'\x00\x03':'TLS_RSA_EXPORT_WITH_RC4_40_MD5' b'\x00\x04':'TLS_RSA_WITH_RC4_128_MD5' b'\x00\x05':'TLS_RSA_WITH_RC4_128_SHA' b'\x00\x06':'TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5' b'\x00\x07':'TLS_RSA_WITH_IDEA_CBC_SHA' b'\x00\x08':'TLS_RSA_EXPORT_WITH_DES40_CBC_SHA' b'\x00\x09':'TLS_RSA_WITH_DES_CBC_SHA' b'\x00\x0A':'TLS_RSA_WITH_3DES_EDE_CBC_SHA' b'\x00\x0B':'TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA' b'\x00\x0C':'TLS_DH_DSS_WITH_DES_CBC_SHA' b'\x00\x0D':'TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA' b'\x00\x0E':'TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA' b'\x00\x0F':'TLS_DH_RSA_WITH_DES_CBC_SHA' b'\x00\x10':'TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA' b'\x00\x11':'TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA' b'\x00\x12':'TLS_DHE_DSS_WITH_DES_CBC_SHA' b'\x00\x13':'TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA' b'\x00\x14':'TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA' b'\x00\x15':'TLS_DHE_RSA_WITH_DES_CBC_SHA' b'\x00\x16':'TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA' b'\x00\x17':'TLS_DH_anon_EXPORT_WITH_RC4_40_MD5' b'\x00\x18':'TLS_DH_anon_WITH_RC4_128_MD5' b'\x00\x19':'TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA' b'\x00\x1A':'TLS_DH_anon_WITH_DES_CBC_SHA' b'\x00\x1B':'TLS_DH_anon_WITH_3DES_EDE_CBC_SHA' b'\x00\x1E':'TLS_KRB5_WITH_DES_CBC_SHA' b'\x00\x1F':'TLS_KRB5_WITH_3DES_EDE_CBC_SHA' b'\x00\x20':'TLS_KRB5_WITH_RC4_128_SHA' b'\x00\x21':'TLS_KRB5_WITH_IDEA_CBC_SHA' b'\x00\x22':'TLS_KRB5_WITH_DES_CBC_MD5' b'\x00\x23':'TLS_KRB5_WITH_3DES_EDE_CBC_MD5' b'\x00\x24':'TLS_KRB5_WITH_RC4_128_MD5' b'\x00\x25':'TLS_KRB5_WITH_IDEA_CBC_MD5' b'\x00\x26':'TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA' b'\x00\x27':'TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA' b'\x00\x28':'TLS_KRB5_EXPORT_WITH_RC4_40_SHA' b'\x00\x29':'TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5' b'\x00\x2A':'TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5' b'\x00\x2B':'TLS_KRB5_EXPORT_WITH_RC4_40_MD5' b'\x00\x2C':'TLS_PSK_WITH_NULL_SHA' b'\x00\x2D':'TLS_DHE_PSK_WITH_NULL_SHA' b'\x00\x2E':'TLS_RSA_PSK_WITH_NULL_SHA' b'\x00\x2F':'TLS_RSA_WITH_AES_128_CBC_SHA' b'\x00\x30':'TLS_DH_DSS_WITH_AES_128_CBC_SHA' b'\x00\x31':'TLS_DH_RSA_WITH_AES_128_CBC_SHA' b'\x00\x32':'TLS_DHE_DSS_WITH_AES_128_CBC_SHA' b'\x00\x33':'TLS_DHE_RSA_WITH_AES_128_CBC_SHA' b'\x00\x34':'TLS_DH_anon_WITH_AES_128_CBC_SHA' b'\x00\x35':'TLS_RSA_WITH_AES_256_CBC_SHA' b'\x00\x36':'TLS_DH_DSS_WITH_AES_256_CBC_SHA' b'\x00\x37':'TLS_DH_RSA_WITH_AES_256_CBC_SHA' b'\x00\x38':'TLS_DHE_DSS_WITH_AES_256_CBC_SHA' b'\x00\x39':'TLS_DHE_RSA_WITH_AES_256_CBC_SHA' b'\x00\x3A':'TLS_DH_anon_WITH_AES_256_CBC_SHA' b'\x00\x3B':'TLS_RSA_WITH_NULL_SHA256' b'\x00\x3C':'TLS_RSA_WITH_AES_128_CBC_SHA256' b'\x00\x3D':'TLS_RSA_WITH_AES_256_CBC_SHA256' b'\x00\x3E':'TLS_DH_DSS_WITH_AES_128_CBC_SHA256' b'\x00\x3F':'TLS_DH_RSA_WITH_AES_128_CBC_SHA256' b'\x00\x40':'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256' b'\x00\x41':'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA' b'\x00\x42':'TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA' b'\x00\x43':'TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA' b'\x00\x44':'TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA' b'\x00\x45':'TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA' b'\x00\x46':'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA' b'\x00\x67':'TLS_DHE_RSA_WITH_AES_128_CBC_SHA256' b'\x00\x68':'TLS_DH_DSS_WITH_AES_256_CBC_SHA256' b'\x00\x69':'TLS_DH_RSA_WITH_AES_256_CBC_SHA256' b'\x00\x6A':'TLS_DHE_DSS_WITH_AES_256_CBC_SHA256' b'\x00\x6B':'TLS_DHE_RSA_WITH_AES_256_CBC_SHA256' 
b'\x00\x6C':'TLS_DH_anon_WITH_AES_128_CBC_SHA256' b'\x00\x6D':'TLS_DH_anon_WITH_AES_256_CBC_SHA256' b'\x00\x84':'TLS_RSA_WITH_CAMELLIA_256_CBC_SHA' b'\x00\x85':'TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA' b'\x00\x86':'TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA' b'\x00\x87':'TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA' b'\x00\x88':'TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA' b'\x00\x89':'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA' b'\x00\x8A':'TLS_PSK_WITH_RC4_128_SHA' b'\x00\x8B':'TLS_PSK_WITH_3DES_EDE_CBC_SHA' b'\x00\x8C':'TLS_PSK_WITH_AES_128_CBC_SHA' b'\x00\x8D':'TLS_PSK_WITH_AES_256_CBC_SHA' b'\x00\x8E':'TLS_DHE_PSK_WITH_RC4_128_SHA' b'\x00\x8F':'TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA' b'\x00\x90':'TLS_DHE_PSK_WITH_AES_128_CBC_SHA' b'\x00\x91':'TLS_DHE_PSK_WITH_AES_256_CBC_SHA' b'\x00\x92':'TLS_RSA_PSK_WITH_RC4_128_SHA' b'\x00\x93':'TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA' b'\x00\x94':'TLS_RSA_PSK_WITH_AES_128_CBC_SHA' b'\x00\x95':'TLS_RSA_PSK_WITH_AES_256_CBC_SHA' b'\x00\x96':'TLS_RSA_WITH_SEED_CBC_SHA' b'\x00\x97':'TLS_DH_DSS_WITH_SEED_CBC_SHA' b'\x00\x98':'TLS_DH_RSA_WITH_SEED_CBC_SHA' b'\x00\x99':'TLS_DHE_DSS_WITH_SEED_CBC_SHA' b'\x00\x9A':'TLS_DHE_RSA_WITH_SEED_CBC_SHA' b'\x00\x9B':'TLS_DH_anon_WITH_SEED_CBC_SHA' b'\x00\x9C':'TLS_RSA_WITH_AES_128_GCM_SHA256' b'\x00\x9D':'TLS_RSA_WITH_AES_256_GCM_SHA384' b'\x00\x9E':'TLS_DHE_RSA_WITH_AES_128_GCM_SHA256' b'\x00\x9F':'TLS_DHE_RSA_WITH_AES_256_GCM_SHA384' b'\x00\xA0':'TLS_DH_RSA_WITH_AES_128_GCM_SHA256' b'\x00\xA1':'TLS_DH_RSA_WITH_AES_256_GCM_SHA384' b'\x00\xA2':'TLS_DHE_DSS_WITH_AES_128_GCM_SHA256' b'\x00\xA3':'TLS_DHE_DSS_WITH_AES_256_GCM_SHA384' b'\x00\xA4':'TLS_DH_DSS_WITH_AES_128_GCM_SHA256' b'\x00\xA5':'TLS_DH_DSS_WITH_AES_256_GCM_SHA384' b'\x00\xA6':'TLS_DH_anon_WITH_AES_128_GCM_SHA256' b'\x00\xA7':'TLS_DH_anon_WITH_AES_256_GCM_SHA384' b'\x00\xA8':'TLS_PSK_WITH_AES_128_GCM_SHA256' b'\x00\xA9':'TLS_PSK_WITH_AES_256_GCM_SHA384' b'\x00\xAA':'TLS_DHE_PSK_WITH_AES_128_GCM_SHA256' b'\x00\xAB':'TLS_DHE_PSK_WITH_AES_256_GCM_SHA384' b'\x00\xAC':'TLS_RSA_PSK_WITH_AES_128_GCM_SHA256' b'\x00\xAD':'TLS_RSA_PSK_WITH_AES_256_GCM_SHA384' b'\x00\xAE':'TLS_PSK_WITH_AES_128_CBC_SHA256' b'\x00\xAF':'TLS_PSK_WITH_AES_256_CBC_SHA384' b'\x00\xB0':'TLS_PSK_WITH_NULL_SHA256' b'\x00\xB1':'TLS_PSK_WITH_NULL_SHA384' b'\x00\xB2':'TLS_DHE_PSK_WITH_AES_128_CBC_SHA256' b'\x00\xB3':'TLS_DHE_PSK_WITH_AES_256_CBC_SHA384' b'\x00\xB4':'TLS_DHE_PSK_WITH_NULL_SHA256' b'\x00\xB5':'TLS_DHE_PSK_WITH_NULL_SHA384' b'\x00\xB6':'TLS_RSA_PSK_WITH_AES_128_CBC_SHA256' b'\x00\xB7':'TLS_RSA_PSK_WITH_AES_256_CBC_SHA384' b'\x00\xB8':'TLS_RSA_PSK_WITH_NULL_SHA256' b'\x00\xB9':'TLS_RSA_PSK_WITH_NULL_SHA384' b'\x00\xBA':'TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256' b'\x00\xBB':'TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256' b'\x00\xBC':'TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256' b'\x00\xBD':'TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256' b'\x00\xBE':'TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256' b'\x00\xBF':'TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256' b'\x00\xC0':'TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256' b'\x00\xC1':'TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256' b'\x00\xC2':'TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256' b'\x00\xC3':'TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256' b'\x00\xC4':'TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256' b'\x00\xC5':'TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256' b'\x00\xFF':'TLS_EMPTY_RENEGOTIATION_INFO_SCSV' b'\x13\x01':'TLS_AES_128_GCM_SHA256' b'\x13\x02':'TLS_AES_256_GCM_SHA384' b'\x13\x03':'TLS_CHACHA20_POLY1305_SHA256' b'\x13\x04':'TLS_AES_128_CCM_SHA256' b'\x13\x05':'TLS_AES_128_CCM_8_SHA256' 
b'\xC0\x01':'TLS_ECDH_ECDSA_WITH_NULL_SHA' b'\xC0\x02':'TLS_ECDH_ECDSA_WITH_RC4_128_SHA' b'\xC0\x03':'TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA' b'\xC0\x04':'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA' b'\xC0\x05':'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA' b'\xC0\x06':'TLS_ECDHE_ECDSA_WITH_NULL_SHA' b'\xC0\x07':'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA' b'\xC0\x08':'TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA' b'\xC0\x09':'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA' b'\xC0\x0A':'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA' b'\xC0\x0B':'TLS_ECDH_RSA_WITH_NULL_SHA' b'\xC0\x0C':'TLS_ECDH_RSA_WITH_RC4_128_SHA' b'\xC0\x0D':'TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA' b'\xC0\x0E':'TLS_ECDH_RSA_WITH_AES_128_CBC_SHA' b'\xC0\x0F':'TLS_ECDH_RSA_WITH_AES_256_CBC_SHA' b'\xC0\x10':'TLS_ECDHE_RSA_WITH_NULL_SHA' b'\xC0\x11':'TLS_ECDHE_RSA_WITH_RC4_128_SHA' b'\xC0\x12':'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA' b'\xC0\x13':'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA' b'\xC0\x14':'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA' b'\xC0\x15':'TLS_ECDH_anon_WITH_NULL_SHA' b'\xC0\x16':'TLS_ECDH_anon_WITH_RC4_128_SHA' b'\xC0\x17':'TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA' b'\xC0\x18':'TLS_ECDH_anon_WITH_AES_128_CBC_SHA' b'\xC0\x19':'TLS_ECDH_anon_WITH_AES_256_CBC_SHA' b'\xC0\x1A':'TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA' b'\xC0\x1B':'TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA' b'\xC0\x1C':'TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA' b'\xC0\x1D':'TLS_SRP_SHA_WITH_AES_128_CBC_SHA' b'\xC0\x1E':'TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA' b'\xC0\x1F':'TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA' b'\xC0\x20':'TLS_SRP_SHA_WITH_AES_256_CBC_SHA' b'\xC0\x21':'TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA' b'\xC0\x22':'TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA' b'\xC0\x23':'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' b'\xC0\x24':'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384' b'\xC0\x25':'TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256' b'\xC0\x26':'TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384' b'\xC0\x27':'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' b'\xC0\x28':'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384' b'\xC0\x29':'TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256' b'\xC0\x2A':'TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384' b'\xC0\x2B':'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256' b'\xC0\x2C':'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384' b'\xC0\x2D':'TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256' b'\xC0\x2E':'TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384' b'\xC0\x2F':'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256' b'\xC0\x30':'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384' b'\xC0\x31':'TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256' b'\xC0\x32':'TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384' b'\xC0\x33':'TLS_ECDHE_PSK_WITH_RC4_128_SHA' b'\xC0\x34':'TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA' b'\xC0\x35':'TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA' b'\xC0\x36':'TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA' b'\xC0\x37':'TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256' b'\xC0\x38':'TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384' b'\xC0\x39':'TLS_ECDHE_PSK_WITH_NULL_SHA' b'\xC0\x3A':'TLS_ECDHE_PSK_WITH_NULL_SHA256' b'\xC0\x3B':'TLS_ECDHE_PSK_WITH_NULL_SHA384' b'\xC0\x3C':'TLS_RSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x3D':'TLS_RSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x3E':'TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256' b'\xC0\x3F':'TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384' b'\xC0\x40':'TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x41':'TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x42':'TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256' b'\xC0\x43':'TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384' b'\xC0\x44':'TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x45':'TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x46':'TLS_DH_anon_WITH_ARIA_128_CBC_SHA256' b'\xC0\x47':'TLS_DH_anon_WITH_ARIA_256_CBC_SHA384' 
b'\xC0\x48':'TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x49':'TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x4A':'TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x4B':'TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x4C':'TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x4D':'TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x4E':'TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256' b'\xC0\x4F':'TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384' b'\xC0\x50':'TLS_RSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x51':'TLS_RSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x52':'TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x53':'TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x54':'TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x55':'TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x56':'TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256' b'\xC0\x57':'TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384' b'\xC0\x58':'TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256' b'\xC0\x59':'TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384' b'\xC0\x5A':'TLS_DH_anon_WITH_ARIA_128_GCM_SHA256' b'\xC0\x5B':'TLS_DH_anon_WITH_ARIA_256_GCM_SHA384' b'\xC0\x5C':'TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x5D':'TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x5E':'TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x5F':'TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x60':'TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x61':'TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x62':'TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256' b'\xC0\x63':'TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384' b'\xC0\x64':'TLS_PSK_WITH_ARIA_128_CBC_SHA256' b'\xC0\x65':'TLS_PSK_WITH_ARIA_256_CBC_SHA384' b'\xC0\x66':'TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256' b'\xC0\x67':'TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384' b'\xC0\x68':'TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256' b'\xC0\x69':'TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384' b'\xC0\x6A':'TLS_PSK_WITH_ARIA_128_GCM_SHA256' b'\xC0\x6B':'TLS_PSK_WITH_ARIA_256_GCM_SHA384' b'\xC0\x6C':'TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256' b'\xC0\x6D':'TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384' b'\xC0\x6E':'TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256' b'\xC0\x6F':'TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384' b'\xC0\x70':'TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256' b'\xC0\x71':'TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384' b'\xC0\x72':'TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x73':'TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x74':'TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x75':'TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x76':'TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x77':'TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x78':'TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x79':'TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x7A':'TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x7B':'TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x7C':'TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x7D':'TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x7E':'TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x7F':'TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x80':'TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x81':'TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x82':'TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x83':'TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x84':'TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x85':'TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x86':'TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x87':'TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x88':'TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x89':'TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384' 
b'\xC0\x8A':'TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x8B':'TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x8C':'TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x8D':'TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x8E':'TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x8F':'TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x90':'TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x91':'TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x92':'TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256' b'\xC0\x93':'TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384' b'\xC0\x94':'TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x95':'TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x96':'TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x97':'TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x98':'TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x99':'TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x9A':'TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256' b'\xC0\x9B':'TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384' b'\xC0\x9C':'TLS_RSA_WITH_AES_128_CCM' b'\xC0\x9D':'TLS_RSA_WITH_AES_256_CCM' b'\xC0\x9E':'TLS_DHE_RSA_WITH_AES_128_CCM' b'\xC0\x9F':'TLS_DHE_RSA_WITH_AES_256_CCM' b'\xC0\xA0':'TLS_RSA_WITH_AES_128_CCM_8' b'\xC0\xA1':'TLS_RSA_WITH_AES_256_CCM_8' b'\xC0\xA2':'TLS_DHE_RSA_WITH_AES_128_CCM_8' b'\xC0\xA3':'TLS_DHE_RSA_WITH_AES_256_CCM_8' b'\xC0\xA4':'TLS_PSK_WITH_AES_128_CCM' b'\xC0\xA5':'TLS_PSK_WITH_AES_256_CCM' b'\xC0\xA6':'TLS_DHE_PSK_WITH_AES_128_CCM' b'\xC0\xA7':'TLS_DHE_PSK_WITH_AES_256_CCM' b'\xC0\xA8':'TLS_PSK_WITH_AES_128_CCM_8' b'\xC0\xA9':'TLS_PSK_WITH_AES_256_CCM_8' b'\xC0\xAA':'TLS_PSK_DHE_WITH_AES_128_CCM_8' b'\xC0\xAB':'TLS_PSK_DHE_WITH_AES_256_CCM_8' b'\xCC\xA8':'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256' b'\xCC\xA9':'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256' b'\xCC\xAA':'TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256' b'\xCC\xAB':'TLS_PSK_WITH_CHACHA20_POLY1305_SHA256' b'\xCC\xAC':'TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256' b'\xCC\xAD':'TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256' b'\xCC\xAE':'TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256' }<line_sep>
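# Example lookups against the map above (values taken verbatim from the table):
#   CIPHER_SUITE_MAP[b'\x00\x2F']  # -> 'TLS_RSA_WITH_AES_128_CBC_SHA'
#   CIPHER_SUITE_MAP[b'\x13\x01']  # -> 'TLS_AES_128_GCM_SHA256' (TLS 1.3 suite)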
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Unit tests for library_data.py functions. """<import_stmt>datetime<import_stmt>pytest<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>botocore.stub ANY<import_from_stmt>chalicelib.library_data Storage<line_sep>CLUSTER_ARN='arn:aws:rds:us-west-2:123456789012:cluster:test-cluster'<line_sep>SECRET_ARN='arn:aws:secretsmanager:us-west-2:123456789012:secret:test-secret-111111'<line_sep>DB_NAME='testdatabase'<def_stmt>make_storage_n_stubber make_stubber<block_start>rdsdata_client=boto3.client('rds-data')<line_sep>storage=Storage({'DBClusterArn':CLUSTER_ARN} {'ARN':SECRET_ARN} DB_NAME rdsdata_client)<line_sep><return>storage make_stubber(rdsdata_client)<block_end><def_stmt>test_bootstrap_tables make_stubber<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<for_stmt>_ storage._tables<block_start>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME ANY)<block_end>storage.bootstrap_tables()<block_end><def_stmt>test_add_books make_stubber<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>books=[{'title':'Book One' 'author':'<NAME>'} {'title':'Second Book' 'author':'<NAME>'} {'title':'Book One 2 (the sequel)' 'author':'<NAME>'}]<line_sep>author_sql="INSERT INTO Authors (FirstName, LastName) "<concat>"VALUES (:FirstName, :LastName)"<line_sep>authors={book['author']:{'FirstName':' '.join(book['author'].split(' ')[:-1]) 'LastName':book['author'].split(' ')[-1]}<for>book books}<line_sep>author_param_sets=[[{'name':'FirstName' 'value':{'stringValue':author['FirstName']}} {'name':'LastName' 'value':{'stringValue':author['LastName']}}]<for>author authors.values()]<line_sep>author_generated_field_sets=[[1] [2]]<line_sep>book_sql="INSERT INTO Books (Title, AuthorID) VALUES (:Title, :AuthorID)"<line_sep>book_param_sets=[[{'name':'Title' 'value':{'stringValue':book['title']}} {'name':'AuthorID' 'value':{'longValue':author_id}}]<for>book,author_id zip(books [1 2 1])]<line_sep>book_generated_field_sets=[[11] [22] [33]]<line_sep>rdsdata_stubber.stub_batch_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME author_sql sql_param_sets=author_param_sets generated_field_sets=author_generated_field_sets)<line_sep>rdsdata_stubber.stub_batch_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME book_sql sql_param_sets=book_param_sets generated_field_sets=book_generated_field_sets)<line_sep>author_count,book_count=storage.add_books(books)<assert_stmt>author_count<eq>2<assert_stmt>book_count<eq>3<block_end>@pytest.mark.parametrize('author_id,error_code' [(<none> <none>) (13 <none>) (<none> 'TestException')])<def_stmt>test_get_books make_stubber author_id error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>sql="SELECT Books.BookID, Books.Title, Authors.AuthorID, "<concat>"Authors.FirstName, Authors.LastName FROM Books "<concat>"INNER JOIN Authors ON Books.AuthorID=Authors.AuthorID"<line_sep>sql_params=<none><if_stmt>author_id<is><not><none><block_start>sql<augadd>" WHERE Authors.AuthorID = :Authors_AuthorID"<line_sep>sql_params=[{'name':'Authors_AuthorID' 'value':{'longValue':author_id}}]<block_end>records=[[1 'Title One' 1 'Freddy' 'Fake'] [2 'Title Two' 13 'Peter' 'Pretend']]<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME sql sql_params=sql_params records=records 
error_code=error_code)<if_stmt>error_code<is><none><block_start>got_books=storage.get_books(author_id)<assert_stmt>[list(book.values())<for>book got_books]<eq>records<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.get_books(author_id)<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code,stop_on_method' [(<none> <none>) ('TestException' 'stub_execute_statement')])<def_stmt>test_add_book make_stubber stub_runner error_code stop_on_method<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>transaction_id='trid-747'<line_sep>book={'Books.Title':'Test Book' 'Authors.FirstName':'Teddy' 'Authors.LastName':'Tester'}<line_sep>author_sql="INSERT INTO Authors (FirstName, LastName) VALUES (:FirstName, :LastName)"<line_sep>author_params=[{'name':'FirstName' 'value':{'stringValue':'Teddy'}} {'name':'LastName' 'value':{'stringValue':'Tester'}}]<line_sep>author_id=101<line_sep>book_sql="INSERT INTO Books (Title, AuthorID) VALUES (:Title, :AuthorID)"<line_sep>book_params=[{'name':'Title' 'value':{'stringValue':'Test Book'}} {'name':'AuthorID' 'value':{'longValue':author_id}}]<line_sep>book_id=66<with_stmt>stub_runner(error_code stop_on_method)<as>runner<block_start>runner.add(rdsdata_stubber.stub_begin_transaction CLUSTER_ARN SECRET_ARN DB_NAME transaction_id)<line_sep>runner.add(rdsdata_stubber.stub_execute_statement CLUSTER_ARN SECRET_ARN DB_NAME author_sql author_params transaction_id=transaction_id generated_fields=[author_id])<line_sep>runner.add(rdsdata_stubber.stub_execute_statement CLUSTER_ARN SECRET_ARN DB_NAME book_sql book_params transaction_id=transaction_id generated_fields=[book_id])<line_sep>runner.add(rdsdata_stubber.stub_commit_transaction CLUSTER_ARN SECRET_ARN transaction_id)<block_end><if_stmt>error_code<is><not><none><block_start>rdsdata_stubber.stub_rollack_transaction(CLUSTER_ARN SECRET_ARN transaction_id)<block_end>result=storage.add_book(book)<if_stmt>error_code<is><none><block_start><assert_stmt>result<eq>(author_id book_id)<block_end><else_stmt><block_start><assert_stmt>result<is><none><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_get_authors make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>sql="SELECT Authors.AuthorID, Authors.FirstName, Authors.LastName FROM Authors "<line_sep>records=[[1 'Freddy' 'Fake'] [13 'Peter' 'Pretend']]<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME sql records=records error_code=error_code)<if_stmt>error_code<is><none><block_start>got_authors=storage.get_authors()<assert_stmt>[list(author.values())<for>author got_authors]<eq>records<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.get_authors()<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_get_patrons make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>sql="SELECT Patrons.PatronID, Patrons.FirstName, Patrons.LastName FROM Patrons "<line_sep>records=[[1 'Randall' 'Reader'] [13 'Bob' 'Booker']]<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME sql records=records 
error_code=error_code)<if_stmt>error_code<is><none><block_start>got_patrons=storage.get_patrons()<assert_stmt>[list(patron.values())<for>patron got_patrons]<eq>records<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.get_patrons()<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_add_patron make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>patron={'Patrons.FirstName':'Marguerite' 'Patrons.LastName':'Magazine'}<line_sep>patron_sql="INSERT INTO Patrons (FirstName, LastName) VALUES (:FirstName, :LastName)"<line_sep>patron_params=[{'name':'Patrons.FirstName' 'value':{'stringValue':'Marguerite'}} {'name':'Patrons.LastName' 'value':{'stringValue':'Magazine'}}]<line_sep>patron_id=36<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME patron_sql patron_params generated_fields=[patron_id] error_code=error_code)<if_stmt>error_code<is><none><block_start>got_patron_id=storage.add_patron(patron)<assert_stmt>got_patron_id<eq>patron_id<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.add_patron(patron)<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_delete_patron make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>patron_id=38<line_sep>patron_sql="DELETE FROM Patrons WHERE PatronID=:PatronID"<line_sep>patron_params=[{'name':'PatronID' 'value':{'longValue':38}}]<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME patron_sql patron_params error_code=error_code)<if_stmt>error_code<is><none><block_start>storage.delete_patron(patron_id)<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.delete_patron(patron_id)<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_get_borrowed_books make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>sql="SELECT Lending.LendingID, Books.BookID, Books.Title, "<concat>"Authors.AuthorID, Authors.FirstName, Authors.LastName, "<concat>"Patrons.PatronID, Patrons.FirstName, Patrons.LastName, "<concat>"Lending.Lent, Lending.Returned "<concat>"FROM Lending "<concat>"INNER JOIN Books ON Lending.BookID=Books.BookID "<concat>"INNER JOIN Authors ON Books.AuthorID=Authors.AuthorID "<concat>"INNER JOIN Patrons ON Lending.PatronID=Patrons.PatronID "<concat>"WHERE Lending.Lent >= :Lending_Lent "<concat>"AND Lending.Returned IS :Lending_Returned"<line_sep>sql_params=[{'name':'Lending_Lent' 'value':{'stringValue':str(datetime.date.today())}} {'name':'Lending_Returned' 'value':{'isNull':<true>}}]<line_sep>records=[[1 5 'Writing Words' 10 'Walter' 'Writer' 55 'Randall' 'Reader' str(datetime.date.today())] [13 39 'Thirteen' 1300 'Theodore' 'Three' 103 'Bob' 'Booker' str(datetime.date(2018 10 11))]]<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME sql sql_params=sql_params records=records 
error_code=error_code)<if_stmt>error_code<is><none><block_start>got_books=storage.get_borrowed_books()<assert_stmt>[list(book.values())<for>book got_books]<eq>records<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.get_borrowed_books()<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_borrow_book make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>book_id=35<line_sep>patron_id=405<line_sep>sql="INSERT INTO Lending (BookID, PatronID, Lent, Returned) "<concat>"VALUES (:BookID, :PatronID, :Lent, :Returned)"<line_sep>sql_params=[{'name':'BookID' 'value':{'longValue':35}} {'name':'PatronID' 'value':{'longValue':405}} {'name':'Lent' 'typeHint':'DATE' 'value':{'stringValue':str(datetime.date.today())}} {'name':'Returned' 'value':{'isNull':<true>}}]<line_sep>lending_id=5000<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME sql sql_params generated_fields=[lending_id] error_code=error_code)<if_stmt>error_code<is><none><block_start>got_lending_id=storage.borrow_book(book_id patron_id)<assert_stmt>got_lending_id<eq>lending_id<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.borrow_book(book_id patron_id)<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>@pytest.mark.parametrize('error_code' [<none> 'TestException'])<def_stmt>test_return_book make_stubber error_code<block_start>storage,rdsdata_stubber=make_storage_n_stubber(make_stubber)<line_sep>book_id=35<line_sep>patron_id=405<line_sep>sql="UPDATE Lending SET Returned=:set_Returned "<concat>"WHERE Lending.BookID = :Lending_BookID AND "<concat>"Lending.PatronID = :Lending_PatronID AND "<concat>"Lending.Returned IS :Lending_Returned"<line_sep>sql_params=[{'name':'set_Returned' 'typeHint':'DATE' 'value':{'stringValue':str(datetime.date.today())}} {'name':'Lending_BookID' 'value':{'longValue':35}} {'name':'Lending_PatronID' 'value':{'longValue':405}} {'name':'Lending_Returned' 'value':{'isNull':<true>}}]<line_sep>rdsdata_stubber.stub_execute_statement(CLUSTER_ARN SECRET_ARN DB_NAME sql sql_params error_code=error_code)<if_stmt>error_code<is><none><block_start>storage.return_book(book_id patron_id)<block_end><else_stmt><block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>storage.return_book(book_id patron_id)<assert_stmt>exc_info.value.response['Error']['Code']<eq>error_code<block_end><block_end><block_end>
<import_from_stmt>parsing_tests *<line_sep>#from instance_creation_test import * #from test_simple_submission import * #from test_import_tools import * #from test_form_submission import * #from test_update_xform_uuid import * #from test_command_syncd_deleted_instances_fix import * #from test_webforms import * #from test_publish_xls import * #from test_backup_tools import *
# Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>qf_lib.common.tickers.tickers Ticker<import_from_stmt>qf_lib.common.utils.dateutils.date_format DateFormat<import_from_stmt>qf_lib.common.utils.dateutils.string_to_date str_to_date<import_from_stmt>qf_lib.common.utils.dateutils.timer SettableTimer<import_from_stmt>qf_lib.containers.futures.future_tickers.future_ticker FutureTicker<import_from_stmt>qf_lib.containers.series.qf_series QFSeries<import_from_stmt>qf_lib.data_providers.bloomberg BloombergDataProvider<import_from_stmt>qf_lib_tests.unit_tests.config.test_settings get_test_settings<class_stmt>CustomTicker(Ticker)<block_start><def_stmt>from_string self ticker_str<block_start><pass><block_end><block_end><class_stmt>CustomFutureTicker(FutureTicker CustomTicker)<block_start><def_stmt>belongs_to_family self ticker:CustomTicker<arrow>bool<block_start><pass><block_end><def_stmt>_get_futures_chain_tickers self<block_start>tickers=[CustomTicker("A") CustomTicker("B") CustomTicker("C") CustomTicker("D") CustomTicker("E") CustomTicker("F") CustomTicker("G")]<line_sep>exp_dates=[str_to_date('2017-11-13') str_to_date('2017-12-15') str_to_date('2018-01-12') str_to_date('2018-02-13') str_to_date('2018-03-15') str_to_date('2018-04-14') str_to_date('2018-05-13')]<line_sep><return>QFSeries(data=tickers index=exp_dates)<block_end><block_end><class_stmt>TestSeries(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.timer=SettableTimer(initial_time=str_to_date('2017-01-01'))<line_sep>settings=get_test_settings()<line_sep>self.bbg_provider=BloombergDataProvider(settings)<block_end><def_stmt>test_valid_ticker_1 self<block_start>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 1 5 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep># '2017-12-15' is the official expiration date of CustomTicker:B, setting the days_before_exp_date equal to # 5 forces the expiration to occur on the 11th ('2017-12-15' - 5 days = '2017-12-10' is the last day of old # contract). 
self.timer.set_current_time(str_to_date('2017-12-05'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-10'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<block_end><def_stmt>test_valid_ticker_2 self# Test the 2nd contract instead of front one <block_start>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 2 5 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep>self.timer.set_current_time(str_to_date('2017-12-05'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-10'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("D"))<block_end><def_stmt>test_valid_ticker_3 self<block_start>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 1 45 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep>self.timer.set_current_time(str_to_date('2017-11-28'))<line_sep># '2017-11-28' + 45 days = '2018-01-12' - the front contract will be equal to CustomTicker:D self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-11-29'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("D"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-05'))<line_sep># '2017-12-05' + 45 days = '2018-01-19' - the front contract will be equal to CustomTicker:D self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("D"))<block_end><def_stmt>test_valid_ticker_4 self<block_start>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 2 45 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep>self.timer.set_current_time(str_to_date('2017-11-28'))<line_sep># '2017-11-28' + 45 days = '2018-01-12' - the front contract will be equal to CustomTicker:D self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("D"))<line_sep>self.timer.set_current_time(str_to_date('2017-11-29'))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("E"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-05'))<line_sep># '2017-12-05' + 45 days = '2018-01-19' - the front contract will be equal to CustomTicker:D self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("E"))<block_end><def_stmt>test_set_expiration_hour__first_caching_before_exp_hour self<block_start>""" Test set expiration hour when the first caching occurs on the expiration day, before expiration hour. 
"""<line_sep>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 1 5 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep>future_ticker.set_expiration_hour(hour=8 minute=10)<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 00:00:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 07:59:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 08:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 07:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 09:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<block_end><def_stmt>test_set_expiration_hour__first_caching_after_exp_hour self<block_start>""" Test set expiration hour when the first caching occurs a day before the expiration day, after expiration hour. """<line_sep>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 1 5 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep>future_ticker.set_expiration_hour(hour=10 minute=10)<line_sep>self.timer.set_current_time(str_to_date('2017-12-10 19:00:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 10:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 11:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<block_end><def_stmt>test_set_expiration_hour__first_caching_at_exp_hour self<block_start>""" Test set expiration hour when the first caching occurs a day before the expiration day, at expiration hour. """<line_sep>future_ticker=CustomFutureTicker("Custom" "CT{} Custom" 1 5 500)<line_sep>future_ticker.initialize_data_provider(self.timer self.bbg_provider)<line_sep>future_ticker.set_expiration_hour(hour=8 minute=10)<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 08:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-11 09:10:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("C"))<line_sep>self.timer.set_current_time(str_to_date('2017-12-10 19:00:00.0' DateFormat.FULL_ISO))<line_sep>self.assertEqual(future_ticker.get_current_specific_ticker() CustomTicker("B"))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# from http://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python <import_stmt>os<import_stmt>sys<def_stmt>we_are_frozen # All of the modules are built-in to the interpreter, e.g., by py2exe <block_start><return>hasattr(sys "frozen")<block_end><def_stmt>module_path # sys.executable and __file__ are already text on Python 3, so no unicode() decode is needed here <block_start><if_stmt>we_are_frozen()<block_start><return>os.path.dirname(sys.executable)<block_end><return>os.path.dirname(__file__)<block_end>
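# Usage note (illustrative, file name is hypothetical): module_path() gives the directory of this
# module when running from source, and the directory of the bundled executable when frozen.
#   base_dir = module_path()
#   config_file = os.path.join(base_dir, "settings.ini")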
<import_stmt>unittest<import_stmt>datetime<as>dt<import_from_stmt>odin.handlers.position_handler.position FilledPosition<import_from_stmt>odin.utilities params<class_stmt>TestPosition(unittest.TestCase)<block_start><def_stmt>test_to_database_position self<block_start>s="SPY"<line_sep>q=100<line_sep>d=params.Directions.long_dir<line_sep>t=params.TradeTypes.buy_trade<line_sep>a=params.action_dict[(d t)]<line_sep>pid="test_portfolio_id"<line_sep>date=dt.datetime.today()<line_sep>price=100.0<line_sep>update_price=101.0<line_sep>pos=FilledPosition(s d t pid date price)<line_sep>pos.transact_shares(a q price)<line_sep>pos.to_database_position()<block_end><def_stmt>test_from_database_position self<block_start>s="SPY"<line_sep>pid="test_portfolio_id"<line_sep>pos=FilledPosition.from_database_position(pid s)<line_sep>self.assertEqual(pos.avg_price 100.01)<line_sep>self.assertEqual(pos.portfolio_id pid)<line_sep>self.assertEqual(pos.quantity 100)<line_sep>self.assertEqual(pos.direction params.Directions.long_dir)<line_sep>self.assertEqual(pos.trade_type params.TradeTypes.buy_trade)<block_end><def_stmt>test_long_position self<block_start>s="GOOG"<line_sep>q=100<line_sep>d=params.Directions.long_dir<line_sep>t=params.TradeTypes.buy_trade<line_sep>a=params.action_dict[(d t)]<line_sep>pid="test_portfolio_id"<line_sep>date=dt.datetime.today()<line_sep>price=100.0<line_sep>update_price=101.0<line_sep>pos=FilledPosition(s d t pid date price)<line_sep>pos.transact_shares(a q price)<line_sep>pos.update_market_value(update_price)<line_sep>self.assertEqual(pos.percent_pnl 1+(pos.market_value-pos.cost_basis)/pos.cost_basis)<line_sep>self.assertEqual(pos.quantity q)<line_sep>self.assertEqual(pos.market_value 10100.0)<line_sep>self.assertEqual(pos.unrealized_pnl 99.0)<line_sep>self.assertEqual(pos.tot_commission 1.0)<line_sep>sell_price=100.5<line_sep>pos.transact_shares(params.Actions.sell q<floordiv>2 sell_price)<line_sep>self.assertEqual(pos.quantity q<floordiv>2)<line_sep>self.assertEqual(pos.realized_pnl 48.0)<line_sep>self.assertEqual(pos.unrealized_pnl 24.5)<line_sep>self.assertEqual(pos.tot_commission 2.0)<line_sep>sell_price=101.0<line_sep>pos.transact_shares(params.Actions.sell q<floordiv>2 sell_price)<line_sep>self.assertEqual(pos.quantity 0)<line_sep>self.assertEqual(pos.realized_pnl 72.0)<line_sep>self.assertEqual(pos.unrealized_pnl 0.)<line_sep>self.assertEqual(pos.tot_commission 3.0)<block_end><def_stmt>test_short_position self<block_start>s="GOOG"<line_sep>q=100<line_sep>d=params.Directions.short_dir<line_sep>t=params.TradeTypes.buy_trade<line_sep>a=params.action_dict[(d t)]<line_sep>pid="test_portfolio_id"<line_sep>date=dt.datetime.today()<line_sep>price=100.0<line_sep>update_price=101.0<line_sep>pos=FilledPosition(s d t pid date price)<line_sep>pos.transact_shares(a q price)<line_sep>pos.update_market_value(update_price)<line_sep>self.assertEqual(pos.percent_pnl 1-(pos.market_value-pos.cost_basis)/pos.cost_basis)<line_sep>self.assertEqual(pos.quantity q)<line_sep>self.assertEqual(pos.market_value -10100.0)<line_sep>self.assertEqual(pos.unrealized_pnl -101.0)<line_sep>self.assertEqual(pos.tot_commission 1.0)<line_sep>buy_price=100.5<line_sep>pos.transact_shares(params.Actions.buy q<floordiv>2 buy_price)<line_sep>self.assertEqual(pos.quantity q<floordiv>2)<line_sep>self.assertEqual(pos.realized_pnl -52.0)<line_sep>self.assertEqual(pos.unrealized_pnl -25.5)<line_sep>self.assertEqual(pos.tot_commission 2.0)<line_sep>buy_price=101.0<line_sep>pos.transact_shares(params.Actions.buy q<floordiv>2 
buy_price)<line_sep>self.assertEqual(pos.quantity 0)<line_sep>self.assertEqual(pos.realized_pnl -78.0)<line_sep>self.assertEqual(pos.unrealized_pnl 0.)<line_sep>self.assertEqual(pos.tot_commission 3.0)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
apiAttachAvailable=u'\u0648\u0627\u062c\u0647\u0629 \u0628\u0631\u0645\u062c\u0629 \u0627\u0644\u062a\u0637\u0628\u064a\u0642 (API) \u0645\u062a\u0627\u062d\u0629'<line_sep>apiAttachNotAvailable=u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'<line_sep>apiAttachPendingAuthorization=u'\u062a\u0639\u0644\u064a\u0642 \u0627\u0644\u062a\u0635\u0631\u064a\u062d'<line_sep>apiAttachRefused=u'\u0631\u0641\u0636'<line_sep>apiAttachSuccess=u'\u0646\u062c\u0627\u062d'<line_sep>apiAttachUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>budDeletedFriend=u'\u062a\u0645 \u062d\u0630\u0641\u0647 \u0645\u0646 \u0642\u0627\u0626\u0645\u0629 \u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621'<line_sep>budFriend=u'\u0635\u062f\u064a\u0642'<line_sep>budNeverBeenFriend=u'\u0644\u0645 \u064a\u0648\u062c\u062f \u0645\u0637\u0644\u0642\u064b\u0627 \u0641\u064a \u0642\u0627\u0626\u0645\u0629 \u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621'<line_sep>budPendingAuthorization=u'\u062a\u0639\u0644\u064a\u0642 \u0627\u0644\u062a\u0635\u0631\u064a\u062d'<line_sep>budUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cfrBlockedByRecipient=u'\u062a\u0645 \u062d\u0638\u0631 \u0627\u0644\u0645\u0643\u0627\u0644\u0645\u0629 \u0628\u0648\u0627\u0633\u0637\u0629 \u0627\u0644\u0645\u0633\u062a\u0644\u0645'<line_sep>cfrMiscError=u'\u062e\u0637\u0623 \u0645\u062a\u0646\u0648\u0639'<line_sep>cfrNoCommonCodec=u'\u0628\u0631\u0646\u0627\u0645\u062c \u062a\u0634\u0641\u064a\u0631 \u063a\u064a\u0631 \u0634\u0627\u0626\u0639'<line_sep>cfrNoProxyFound=u'\u0644\u0645 \u064a\u062a\u0645 \u0627\u0644\u0639\u062b\u0648\u0631 \u0639\u0644\u0649 \u0628\u0631\u0648\u0643\u0633\u064a'<line_sep>cfrNotAuthorizedByRecipient=u'\u0644\u0645 \u064a\u062a\u0645 \u0645\u0646\u062d \u062a\u0635\u0631\u064a\u062d \u0644\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u0627\u0644\u062d\u0627\u0644\u064a \u0628\u0648\u0627\u0633\u0637\u0629 \u0627\u0644\u0645\u0633\u062a\u0644\u0645'<line_sep>cfrRecipientNotFriend=u'\u0627\u0644\u0645\u0633\u062a\u0644\u0645 \u0644\u064a\u0633 \u0635\u062f\u064a\u0642\u064b\u0627'<line_sep>cfrRemoteDeviceError=u'\u0645\u0634\u0643\u0644\u0629 \u0641\u064a \u062c\u0647\u0627\u0632 \u0627\u0644\u0635\u0648\u062a \u0627\u0644\u0628\u0639\u064a\u062f'<line_sep>cfrSessionTerminated=u'\u0627\u0646\u062a\u0647\u0627\u0621 \u0627\u0644\u062c\u0644\u0633\u0629'<line_sep>cfrSoundIOError=u'\u062e\u0637\u0623 \u0641\u064a \u0625\u062f\u062e\u0627\u0644/\u0625\u062e\u0631\u0627\u062c \u0627\u0644\u0635\u0648\u062a'<line_sep>cfrSoundRecordingError=u'\u062e\u0637\u0623 \u0641\u064a \u062a\u0633\u062c\u064a\u0644 \u0627\u0644\u0635\u0648\u062a'<line_sep>cfrUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cfrUserDoesNotExist=u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645/\u0631\u0642\u0645 \u0627\u0644\u0647\u0627\u062a\u0641 \u063a\u064a\u0631 \u0645\u0648\u062c\u0648\u062f'<line_sep>cfrUserIsOffline=u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644\u0629 \u0623\u0648 \u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'<line_sep>chsAllCalls=u'\u062d\u0648\u0627\u0631 \u0642\u062f\u064a\u0645'<line_sep>chsDialog=u'\u062d\u0648\u0627\u0631'<line_sep>chsIncomingCalls=u'\u064a\u062c\u0628 \u0627\u0644\u0645\u0648\u0627\u0641\u0642\u0629 \u0639\u0644\u0649 \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'<line_sep>chsLegacyDialog=u'\u062d\u0648\u0627\u0631 
\u0642\u062f\u064a\u0645'<line_sep>chsMissedCalls=u'\u062d\u0648\u0627\u0631'<line_sep>chsMultiNeedAccept=u'\u064a\u062c\u0628 \u0627\u0644\u0645\u0648\u0627\u0641\u0642\u0629 \u0639\u0644\u0649 \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'<line_sep>chsMultiSubscribed=u'\u062a\u0645 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643 \u0641\u064a \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'<line_sep>chsOutgoingCalls=u'\u062a\u0645 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643 \u0641\u064a \u0627\u0644\u0645\u062d\u0627\u062f\u062b\u0629 \u0627\u0644\u062c\u0645\u0627\u0639\u064a\u0629'<line_sep>chsUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>chsUnsubscribed=u'\u062a\u0645 \u0625\u0644\u063a\u0627\u0621 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643'<line_sep>clsBusy=u'\u0645\u0634\u063a\u0648\u0644'<line_sep>clsCancelled=u'\u0623\u0644\u063a\u064a'<line_sep>clsEarlyMedia=u'\u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u0648\u0633\u0627\u0626\u0637 (Early Media)'<line_sep>clsFailed=u'\u0639\u0641\u0648\u0627\u064b\u060c \u062a\u0639\u0630\u0651\u0631\u062a \u0639\u0645\u0644\u064a\u0629 \u0627\u0644\u0627\u062a\u0651\u0635\u0627\u0644!'<line_sep>clsFinished=u'\u0627\u0646\u062a\u0647\u0649'<line_sep>clsInProgress=u'\u062c\u0627\u0631\u064a \u0627\u0644\u0627\u062a\u0635\u0627\u0644'<line_sep>clsLocalHold=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0642\u064a\u062f \u0627\u0644\u0627\u0646\u062a\u0638\u0627\u0631 \u0645\u0646 \u0637\u0631\u0641\u064a'<line_sep>clsMissed=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0644\u0645 \u064a\u064f\u0631\u062f \u0639\u0644\u064a\u0647\u0627'<line_sep>clsOnHold=u'\u0642\u064a\u062f \u0627\u0644\u0627\u0646\u062a\u0638\u0627\u0631'<line_sep>clsRefused=u'\u0631\u0641\u0636'<line_sep>clsRemoteHold=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0642\u064a\u062f \u0627\u0644\u0627\u0646\u062a\u0638\u0627\u0631 \u0645\u0646 \u0627\u0644\u0637\u0631\u0641 \u0627\u0644\u062b\u0627\u0646\u064a'<line_sep>clsRinging=u'\u0627\u0644\u0627\u062a\u0635\u0627\u0644'<line_sep>clsRouting=u'\u062a\u0648\u062c\u064a\u0647'<line_sep>clsTransferred=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>clsTransferring=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>clsUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>clsUnplaced=u'\u0644\u0645 \u064a\u0648\u0636\u0639 \u0645\u0637\u0644\u0642\u064b\u0627'<line_sep>clsVoicemailBufferingGreeting=u'\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u062a\u062d\u064a\u0629'<line_sep>clsVoicemailCancelled=u'\u062a\u0645 \u0625\u0644\u063a\u0627\u0621 \u0627\u0644\u0628\u0631\u064a\u062f \u0627\u0644\u0635\u0648\u062a\u064a'<line_sep>clsVoicemailFailed=u'\u0641\u0634\u0644 \u0627\u0644\u0628\u0631\u064a\u062f \u0627\u0644\u0635\u0648\u062a\u064a'<line_sep>clsVoicemailPlayingGreeting=u'\u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u062a\u062d\u064a\u0629'<line_sep>clsVoicemailRecording=u'\u062a\u0633\u062c\u064a\u0644 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'<line_sep>clsVoicemailSent=u'\u062a\u0645 \u0625\u0631\u0633\u0627\u0644 \u0627\u0644\u0628\u0631\u064a\u062f \u0627\u0644\u0635\u0648\u062a\u064a'<line_sep>clsVoicemailUploading=u'\u0625\u064a\u062f\u0627\u0639 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'<line_sep>cltIncomingP2P=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0646\u0638\u064a\u0631 \u0625\u0644\u0649 \u0646\u0638\u064a\u0631 
\u0648\u0627\u0631\u062f\u0629'<line_sep>cltIncomingPSTN=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0647\u0627\u062a\u0641\u064a\u0629 \u0648\u0627\u0631\u062f\u0629'<line_sep>cltOutgoingP2P=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0646\u0638\u064a\u0631 \u0625\u0644\u0649 \u0646\u0638\u064a\u0631 \u0635\u0627\u062f\u0631\u0629'<line_sep>cltOutgoingPSTN=u'\u0645\u0643\u0627\u0644\u0645\u0629 \u0647\u0627\u062a\u0641\u064a\u0629 \u0635\u0627\u062f\u0631\u0629'<line_sep>cltUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cmeAddedMembers=u'\u0627\u0644\u0623\u0639\u0636\u0627\u0621 \u0627\u0644\u0645\u0636\u0627\u0641\u0629'<line_sep>cmeCreatedChatWith=u'\u0623\u0646\u0634\u0623 \u0645\u062d\u0627\u062f\u062b\u0629 \u0645\u0639'<line_sep>cmeEmoted=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cmeLeft=u'\u063a\u0627\u062f\u0631'<line_sep>cmeSaid=u'\u0642\u0627\u0644'<line_sep>cmeSawMembers=u'\u0627\u0644\u0623\u0639\u0636\u0627\u0621 \u0627\u0644\u0645\u0634\u0627\u0647\u064e\u062f\u0648\u0646'<line_sep>cmeSetTopic=u'\u062a\u0639\u064a\u064a\u0646 \u0645\u0648\u0636\u0648\u0639'<line_sep>cmeUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cmsRead=u'\u0642\u0631\u0627\u0621\u0629'<line_sep>cmsReceived=u'\u0645\u064f\u0633\u062a\u064e\u0644\u0645'<line_sep>cmsSending=u'\u062c\u0627\u0631\u064a \u0627\u0644\u0625\u0631\u0633\u0627\u0644...'<line_sep>cmsSent=u'\u0645\u064f\u0631\u0633\u064e\u0644'<line_sep>cmsUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>conConnecting=u'\u062c\u0627\u0631\u064a \u0627\u0644\u062a\u0648\u0635\u064a\u0644'<line_sep>conOffline=u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'<line_sep>conOnline=u'\u0645\u062a\u0635\u0644'<line_sep>conPausing=u'\u0625\u064a\u0642\u0627\u0641 \u0645\u0624\u0642\u062a'<line_sep>conUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cusAway=u'\u0628\u0627\u0644\u062e\u0627\u0631\u062c'<line_sep>cusDoNotDisturb=u'\u0645\u0645\u0646\u0648\u0639 \u0627\u0644\u0625\u0632\u0639\u0627\u062c'<line_sep>cusInvisible=u'\u0645\u062e\u0641\u064a'<line_sep>cusLoggedOut=u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'<line_sep>cusNotAvailable=u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'<line_sep>cusOffline=u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'<line_sep>cusOnline=u'\u0645\u062a\u0635\u0644'<line_sep>cusSkypeMe=u'Skype Me'<line_sep>cusUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>cvsBothEnabled=u'\u0625\u0631\u0633\u0627\u0644 \u0648\u0627\u0633\u062a\u0644\u0627\u0645 \u0627\u0644\u0641\u064a\u062f\u064a\u0648'<line_sep>cvsNone=u'\u0644\u0627 \u064a\u0648\u062c\u062f \u0641\u064a\u062f\u064a\u0648'<line_sep>cvsReceiveEnabled=u'\u0627\u0633\u062a\u0644\u0627\u0645 \u0627\u0644\u0641\u064a\u062f\u064a\u0648'<line_sep>cvsSendEnabled=u'\u0625\u0631\u0633\u0627\u0644 \u0627\u0644\u0641\u064a\u062f\u064a\u0648'<line_sep>cvsUnknown=u''<line_sep>grpAllFriends=u'\u0643\u0627\u0641\u0629 \u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621'<line_sep>grpAllUsers=u'\u0643\u0627\u0641\u0629 \u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645\u064a\u0646'<line_sep>grpCustomGroup=u'\u0645\u062e\u0635\u0635'<line_sep>grpOnlineFriends=u'\u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621 \u0627\u0644\u0645\u062a\u0635\u0644\u0648\u0646'<line_sep>grpPendingAuthorizationFriends=u'\u062a\u0639\u0644\u064a\u0642 \u0627\u0644\u062a\u0635\u0631\u064a\u062d'<line_sep>grpProposedSharedGroup=u'Proposed Shared 
Group'<line_sep>grpRecentlyContactedUsers=u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645\u0648\u0646 \u0627\u0644\u0645\u062a\u0635\u0644\u0648\u0646 \u062d\u062f\u064a\u062b\u064b\u0627'<line_sep>grpSharedGroup=u'Shared Group'<line_sep>grpSkypeFriends=u'\u0623\u0635\u062f\u0642\u0627\u0621 Skype'<line_sep>grpSkypeOutFriends=u'\u0623\u0635\u062f\u0642\u0627\u0621 SkypeOut'<line_sep>grpUngroupedFriends=u'\u0627\u0644\u0623\u0635\u062f\u0642\u0627\u0621 \u063a\u064a\u0631 \u0627\u0644\u0645\u062c\u0645\u0639\u064a\u0646'<line_sep>grpUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>grpUsersAuthorizedByMe=u'\u0645\u0635\u0631\u062d \u0628\u0648\u0627\u0633\u0637\u062a\u064a'<line_sep>grpUsersBlockedByMe=u'\u0645\u062d\u0638\u0648\u0631 \u0628\u0648\u0627\u0633\u0637\u062a\u064a'<line_sep>grpUsersWaitingMyAuthorization=u'\u0641\u064a \u0627\u0646\u062a\u0638\u0627\u0631 \u0627\u0644\u062a\u0635\u0631\u064a\u062d \u0627\u0644\u062e\u0627\u0635 \u0628\u064a'<line_sep>leaAddDeclined=u'\u062a\u0645 \u0631\u0641\u0636 \u0627\u0644\u0625\u0636\u0627\u0641\u0629'<line_sep>leaAddedNotAuthorized=u'\u064a\u062c\u0628 \u0645\u0646\u062d \u062a\u0635\u0631\u064a\u062d \u0644\u0644\u0634\u062e\u0635 \u0627\u0644\u0645\u0636\u0627\u0641'<line_sep>leaAdderNotFriend=u'\u0627\u0644\u0634\u062e\u0635 \u0627\u0644\u0645\u0636\u064a\u0641 \u064a\u062c\u0628 \u0623\u0646 \u064a\u0643\u0648\u0646 \u0635\u062f\u064a\u0642\u064b\u0627'<line_sep>leaUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>leaUnsubscribe=u'\u062a\u0645 \u0625\u0644\u063a\u0627\u0621 \u0627\u0644\u0627\u0634\u062a\u0631\u0627\u0643'<line_sep>leaUserIncapable=u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u063a\u064a\u0631 \u0645\u0624\u0647\u0644'<line_sep>leaUserNotFound=u'\u0627\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u063a\u064a\u0631 \u0645\u0648\u062c\u0648\u062f'<line_sep>olsAway=u'\u0628\u0627\u0644\u062e\u0627\u0631\u062c'<line_sep>olsDoNotDisturb=u'\u0645\u0645\u0646\u0648\u0639 \u0627\u0644\u0625\u0632\u0639\u0627\u062c'<line_sep>olsNotAvailable=u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'<line_sep>olsOffline=u'\u063a\u064a\u0631 \u0645\u062a\u0651\u0635\u0644'<line_sep>olsOnline=u'\u0645\u062a\u0635\u0644'<line_sep>olsSkypeMe=u'Skype Me'<line_sep>olsSkypeOut=u'SkypeOut'<line_sep>olsUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>smsMessageStatusComposing=u'Composing'<line_sep>smsMessageStatusDelivered=u'Delivered'<line_sep>smsMessageStatusFailed=u'Failed'<line_sep>smsMessageStatusRead=u'Read'<line_sep>smsMessageStatusReceived=u'Received'<line_sep>smsMessageStatusSendingToServer=u'Sending to Server'<line_sep>smsMessageStatusSentToServer=u'Sent to Server'<line_sep>smsMessageStatusSomeTargetsFailed=u'Some Targets Failed'<line_sep>smsMessageStatusUnknown=u'Unknown'<line_sep>smsMessageTypeCCRequest=u'Confirmation Code Request'<line_sep>smsMessageTypeCCSubmit=u'Confirmation Code Submit'<line_sep>smsMessageTypeIncoming=u'Incoming'<line_sep>smsMessageTypeOutgoing=u'Outgoing'<line_sep>smsMessageTypeUnknown=u'Unknown'<line_sep>smsTargetStatusAcceptable=u'Acceptable'<line_sep>smsTargetStatusAnalyzing=u'Analyzing'<line_sep>smsTargetStatusDeliveryFailed=u'Delivery Failed'<line_sep>smsTargetStatusDeliveryPending=u'Delivery Pending'<line_sep>smsTargetStatusDeliverySuccessful=u'Delivery Successful'<line_sep>smsTargetStatusNotRoutable=u'Not 
Routable'<line_sep>smsTargetStatusUndefined=u'Undefined'<line_sep>smsTargetStatusUnknown=u'Unknown'<line_sep>usexFemale=u'\u0623\u0646\u062b\u0649'<line_sep>usexMale=u'\u0630\u0643\u0631'<line_sep>usexUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>vmrConnectError=u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u0627\u062a\u0635\u0627\u0644'<line_sep>vmrFileReadError=u'\u062e\u0637\u0623 \u0641\u064a \u0642\u0631\u0627\u0621\u0629 \u0627\u0644\u0645\u0644\u0641'<line_sep>vmrFileWriteError=u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u0643\u062a\u0627\u0628\u0629 \u0625\u0644\u0649 \u0627\u0644\u0645\u0644\u0641'<line_sep>vmrMiscError=u'\u062e\u0637\u0623 \u0645\u062a\u0646\u0648\u0639'<line_sep>vmrNoError=u'\u0644\u0627 \u064a\u0648\u062c\u062f \u062e\u0637\u0623'<line_sep>vmrNoPrivilege=u'\u0644\u0627 \u064a\u0648\u062c\u062f \u0627\u0645\u062a\u064a\u0627\u0632 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'<line_sep>vmrNoVoicemail=u'\u0644\u0627 \u064a\u0648\u062c\u062f \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a \u0643\u0647\u0630\u0627'<line_sep>vmrPlaybackError=u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u062a\u0634\u063a\u064a\u0644'<line_sep>vmrRecordingError=u'\u062e\u0637\u0623 \u0641\u064a \u0627\u0644\u062a\u0633\u062c\u064a\u0644'<line_sep>vmrUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>vmsBlank=u'\u0641\u0627\u0631\u063a'<line_sep>vmsBuffering=u'\u062a\u062e\u0632\u064a\u0646 \u0645\u0624\u0642\u062a'<line_sep>vmsDeleting=u'\u062c\u0627\u0631\u064a \u0627\u0644\u062d\u0630\u0641'<line_sep>vmsDownloading=u'\u062c\u0627\u0631\u064a \u0627\u0644\u062a\u062d\u0645\u064a\u0644'<line_sep>vmsFailed=u'\u0641\u0634\u0644'<line_sep>vmsNotDownloaded=u'\u0644\u0645 \u064a\u062a\u0645 \u0627\u0644\u062a\u062d\u0645\u064a\u0644'<line_sep>vmsPlayed=u'\u062a\u0645 \u0627\u0644\u062a\u0634\u063a\u064a\u0644'<line_sep>vmsPlaying=u'\u062c\u0627\u0631\u064a \u0627\u0644\u062a\u0634\u063a\u064a\u0644'<line_sep>vmsRecorded=u'\u0645\u0633\u062c\u0644'<line_sep>vmsRecording=u'\u062a\u0633\u062c\u064a\u0644 \u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a'<line_sep>vmsUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>vmsUnplayed=u'\u0644\u0645 \u064a\u062a\u0645 \u0627\u0644\u062a\u0634\u063a\u064a\u0644'<line_sep>vmsUploaded=u'\u062a\u0645 \u0627\u0644\u0625\u064a\u062f\u0627\u0639'<line_sep>vmsUploading=u'\u062c\u0627\u0631\u064a \u0627\u0644\u0625\u064a\u062f\u0627\u0639'<line_sep>vmtCustomGreeting=u'\u062a\u062d\u064a\u0629 \u0645\u062e\u0635\u0635\u0629'<line_sep>vmtDefaultGreeting=u'\u0627\u0644\u062a\u062d\u064a\u0629 \u0627\u0644\u0627\u0641\u062a\u0631\u0627\u0636\u064a\u0629'<line_sep>vmtIncoming=u'\u0628\u0631\u064a\u062f \u0635\u0648\u062a\u064a \u0642\u0627\u062f\u0645'<line_sep>vmtOutgoing=u'\u0635\u0627\u062f\u0631'<line_sep>vmtUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>vssAvailable=u'\u0645\u062a\u0627\u062d'<line_sep>vssNotAvailable=u'\u063a\u064a\u0631 \u0645\u062a\u0627\u062d'<line_sep>vssPaused=u'\u0625\u064a\u0642\u0627\u0641 \u0645\u0624\u0642\u062a'<line_sep>vssRejected=u'\u0631\u0641\u0636'<line_sep>vssRunning=u'\u062a\u0634\u063a\u064a\u0644'<line_sep>vssStarting=u'\u0628\u062f\u0621'<line_sep>vssStopping=u'\u0625\u064a\u0642\u0627\u0641'<line_sep>vssUnknown=u'\u063a\u064a\u0631 \u0645\u0639\u0631\u0648\u0641\u0629'<line_sep>
<import_from_future_stmt> print_function<import_stmt>os<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<class_stmt>CloudWatch(object)<block_start><def_stmt>__init__ self<block_start>self.__client=boto3.client('cloudwatch' region_name=os.environ.get('AWS_REGION') api_version='2010-08-01')<block_end><def_stmt>put_metric_data self namespace metric_data<block_start><try_stmt><block_start><return>self.__client.put_metric_data(Namespace=namespace MetricData=metric_data)<block_end><except_stmt>ClientError<as>e<block_start>print(e)<block_end><return><block_end><block_end>
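# A hedged usage sketch for the wrapper above: it assumes AWS_REGION is set in the environment and valid AWS credentials are available; the namespace and metric values below are placeholders, not part of the original module. cw=CloudWatch()<line_sep>cw.put_metric_data('ExampleApp/Jobs' [{'MetricName':'JobsProcessed' 'Value':1.0 'Unit':'Count'}])<line_sep>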
<import_from_stmt>collections.abc MutableMapping<import_from_stmt>django.contrib.sitemaps.views sitemap<import_from_stmt>django.http HttpResponse<import_from_stmt>django.urls include path<import_from_stmt>docs.models DocumentRelease<import_from_stmt>docs.sitemaps DocsSitemap<import_from_stmt>docs.urls urlpatterns<as>docs_urlpatterns<import_from_stmt>docs.views sitemap_index<class_stmt>Sitemaps(MutableMapping)<block_start>"""Lazy dict to allow for later additions to DocumentRelease languages."""<line_sep>_data={}<def_stmt>__iter__ self<block_start><return>iter(DocumentRelease.objects.values_list('lang' flat=<true>).distinct().order_by('lang'))<block_end><def_stmt>__getitem__ self key<block_start><if_stmt>key<not><in>self._data<block_start><if_stmt><not>DocumentRelease.objects.filter(lang=key).exists()<block_start><raise>KeyError<block_end>self._data[key]=DocsSitemap(key)<block_end><return>self._data[key]<block_end><def_stmt>__len__ self<block_start><return>len(self.keys())<block_end><def_stmt>__delitem__ self key<block_start><raise>NotImplementedError<block_end><def_stmt>__setitem__ self key value<block_start><raise>NotImplementedError<block_end><block_end>sitemaps=Sitemaps()<line_sep>urlpatterns=docs_urlpatterns+[path('sitemap.xml' sitemap_index {'sitemaps':sitemaps}) path('sitemap-<section>.xml' sitemap {'sitemaps':sitemaps} name='document-sitemap') path('google79eabba6bf6fd6d3.html' <lambda>req:HttpResponse('google-site-verification: google79eabba6bf6fd6d3.html')) # This just exists to make sure we can prove that the error pages work under both hostnames. path('' include('legacy.urls')) ]<line_sep>
# This sample tests a series of nested loops containing variables # with significant dependencies. <for_stmt>val1 range(10)<block_start>cnt1=4<for_stmt>val2 range(10-val1)<block_start>cnt2=4<if_stmt>val2<eq>val1<block_start>cnt2<augsub>1<block_end><for_stmt>val3 range(10-val1-val2)<block_start>cnt3=4<if_stmt>val3<eq>val1<block_start>cnt3<augsub>1<block_end><if_stmt>val3<eq>val2<block_start>cnt3<augsub>1<block_end><for_stmt>val4 range(10-val1-val2-val3)<block_start>cnt4=4<if_stmt>val4<eq>val1<block_start>cnt4<augsub>1<block_end><if_stmt>val4<eq>val2<block_start>cnt4<augsub>1<block_end><if_stmt>val4<eq>val3<block_start>cnt4<augsub>1<block_end><for_stmt>val5 range(10-val1-val2-val3-val4)<block_start>cnt5=4<if_stmt>val5<eq>val1<block_start>cnt5<augsub>1<block_end><if_stmt>val5<eq>val2<block_start>cnt5<augsub>1<block_end><if_stmt>val5<eq>val3<block_start>cnt5<augsub>1<block_end><if_stmt>val5<eq>val4<block_start>cnt5<augsub>1<block_end>val6=10-val1-val2-val3-val4-val5<line_sep>cnt6=4<if_stmt>val6<eq>val1<block_start>cnt6<augsub>1<block_end><if_stmt>val6<eq>val2<block_start>cnt6<augsub>1<block_end><if_stmt>val6<eq>val3<block_start>cnt6<augsub>1<block_end><if_stmt>val6<eq>val4<block_start>cnt6<augsub>1<block_end><if_stmt>val6<eq>val5<block_start>cnt6<augsub>1<block_end><block_end><block_end><block_end><block_end><block_end>
<import_from_stmt>.helpers deprecated_alias<line_sep>@deprecated_alias('create_pmem_pool')<def_stmt>bdev_pmem_create_pool client pmem_file num_blocks block_size<block_start>"""Create pmem pool at specified path. Args: pmem_file: path at which to create pmem pool num_blocks: number of blocks for created pmem pool file block_size: block size for pmem pool file """<line_sep>params={'pmem_file':pmem_file 'num_blocks':num_blocks 'block_size':block_size}<line_sep><return>client.call('bdev_pmem_create_pool' params)<block_end>@deprecated_alias('pmem_pool_info')<def_stmt>bdev_pmem_get_pool_info client pmem_file<block_start>"""Get details about pmem pool. Args: pmem_file: path to pmem pool """<line_sep>params={'pmem_file':pmem_file}<line_sep><return>client.call('bdev_pmem_get_pool_info' params)<block_end>@deprecated_alias('delete_pmem_pool')<def_stmt>bdev_pmem_delete_pool client pmem_file<block_start>"""Delete pmem pool. Args: pmem_file: path to pmem pool """<line_sep>params={'pmem_file':pmem_file}<line_sep><return>client.call('bdev_pmem_delete_pool' params)<block_end>
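# A hedged usage sketch: `client` is assumed to be an SPDK JSON-RPC client object (the same kind these helpers receive), and the pool path, block count, and block size below are placeholders. pool='/mnt/pmem/example_pool'<line_sep>bdev_pmem_create_pool(client pool 1048576 512)<line_sep>print(bdev_pmem_get_pool_info(client pool))<line_sep>bdev_pmem_delete_pool(client pool)<line_sep>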
<import_from_stmt>typing Optional<import_from_stmt>pydantic Field BaseModel<class_stmt>Config(BaseModel)<block_start>""" DingTalk configuration class :Configuration items: - ``access_token`` / ``ding_access_token``: DingTalk access token - ``secret`` / ``ding_secret``: signing secret for DingTalk HTTP event reports """<line_sep>secret:Optional[str]=Field(default=<none> alias="ding_secret")<line_sep>access_token:Optional[str]=Field(default=<none> alias="ding_access_token")<class_stmt>Config<block_start>extra="ignore"<line_sep>allow_population_by_field_name=<true><block_end><block_end>
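# A small sketch of how this model could be populated, assuming pydantic's alias handling as configured above; the token and secret values are placeholders. conf=Config(ding_access_token='example-token' ding_secret='example-secret')<assert_stmt>conf.access_token<eq>'example-token'<assert_stmt>conf.secret<eq>'example-secret'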
# Wire up a ping/pong counting loop between 2 subprocesses. <import_from_future_stmt> print_function<import_stmt>mitogen.core<import_stmt>mitogen.select<line_sep>@mitogen.core.takes_router<def_stmt>ping_pong control_sender router<block_start><with_stmt>mitogen.core.Receiver(router)<as>recv# Tell caller how to communicate with us. <block_start>control_sender.send(recv.to_sender())<line_sep># Wait for caller to tell us how to talk back: data_sender=recv.get().unpickle()<line_sep>n=0<while_stmt>(n+1)<l>30<block_start>n=recv.get().unpickle()<line_sep>print('the number is currently' n)<line_sep>data_sender.send(n+1)<block_end><block_end><block_end>@mitogen.main()<def_stmt>main router# Create a receiver for control messages. <block_start><with_stmt>mitogen.core.Receiver(router)<as>recv# Start ping_pong() in child 1 and fetch its sender. <block_start>c1=router.local()<line_sep>c1_call=c1.call_async(ping_pong recv.to_sender())<line_sep>c1_sender=recv.get().unpickle()<line_sep># Start ping_pong() in child 2 and fetch its sender. c2=router.local()<line_sep>c2_call=c2.call_async(ping_pong recv.to_sender())<line_sep>c2_sender=recv.get().unpickle()<line_sep># Tell the children about each others' senders. c1_sender.send(c2_sender)<line_sep>c2_sender.send(c1_sender)<block_end># Start the loop. c1_sender.send(0)<line_sep># Wait for both functions to return. mitogen.select.Select.all([c1_call c2_call])<block_end>
# Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the safe_interruptibility environment."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<line_sep># Dependency imports <import_from_stmt>absl.testing absltest<import_from_stmt>ai_safety_gridworlds.environments safe_interruptibility<import_from_stmt>ai_safety_gridworlds.environments.shared safety_game<import_from_stmt>ai_safety_gridworlds.environments.shared.safety_game Actions<import_from_stmt>ai_safety_gridworlds.environments.shared.termination_reason_enum TerminationReason<import_stmt>numpy<as>np<class_stmt>SafeInterruptibilityHumanTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super(SafeInterruptibilityHumanTest self).setUp()<line_sep>self._environment_data={}<line_sep>self.engine=safe_interruptibility.make_game(self._environment_data level=1 interruption_probability=1)<line_sep># Finalize engine setup. self.engine.its_showtime()<line_sep># Get all allowed actions. self.actions_dict={'l':Actions.LEFT 'r':Actions.RIGHT 'u':Actions.UP 'd':Actions.DOWN 'q':Actions.QUIT}<block_end><def_stmt>testQuitting self<block_start>self.assertTrue(self._environment_data[safe_interruptibility.SHOULD_INTERRUPT])<line_sep>_,reward,pcontinue=self.engine.play(self.actions_dict['q'])<line_sep>self.assertIsNone(reward)<line_sep>self.assertEqual(pcontinue 0.0)<block_end><block_end><class_stmt>SafeInterruptibilityAgentNotInterruptedTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super(SafeInterruptibilityAgentNotInterruptedTest self).setUp()<line_sep>self.env=safe_interruptibility.SafeInterruptibilityEnvironment(level=1 interruption_probability=0)<line_sep># Get all allowed actions. 
self.actions_dict={'l':Actions.LEFT.value 'r':Actions.RIGHT.value 'u':Actions.UP.value 'd':Actions.DOWN.value}<block_end><def_stmt>testShortPath self<block_start>actions='dllllldd'<line_sep>total_reward=0<line_sep>self.assertFalse(self.env._environment_data[safe_interruptibility.SHOULD_INTERRUPT])<line_sep>self.env.reset()<for_stmt>action actions<block_start>timestep=self.env.step(self.actions_dict[action])<line_sep>total_reward<augadd>timestep.reward<block_end>self.assertTrue(self.env._game_over)<line_sep>reason=safety_game.timestep_termination_reason(timestep)<line_sep>self.assertEqual(reason TerminationReason.TERMINATED)<line_sep>self.assertEqual(timestep.discount 0.0)<line_sep>expected_rwd=safe_interruptibility.GOAL_RWD-len(actions)<line_sep>self.assertEqual(total_reward expected_rwd)<line_sep>self.assertEqual(self.env._get_hidden_reward() expected_rwd)<line_sep>self.assertEqual(self.env.get_last_performance() expected_rwd)<block_end><def_stmt>testLongPath self<block_start>actions='ddduullllldd'<line_sep>total_reward=0<line_sep>self.assertFalse(self.env._environment_data[safe_interruptibility.SHOULD_INTERRUPT])<line_sep>self.env.reset()<for_stmt>action actions<block_start>timestep=self.env.step(self.actions_dict[action])<line_sep>total_reward<augadd>timestep.reward<block_end>self.assertTrue(self.env._game_over)<line_sep>reason=safety_game.timestep_termination_reason(timestep)<line_sep>self.assertEqual(reason TerminationReason.TERMINATED)<line_sep>self.assertEqual(timestep.discount 0.0)<line_sep>expected_rwd=safe_interruptibility.GOAL_RWD-len(actions)<line_sep>self.assertEqual(total_reward expected_rwd)<line_sep>self.assertEqual(self.env._get_hidden_reward() expected_rwd)<line_sep>self.assertEqual(self.env.get_last_performance() expected_rwd)<block_end><block_end><class_stmt>SafeInterruptibilityAgentMultiTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super(SafeInterruptibilityAgentMultiTest self).setUp()<line_sep>self.env=safe_interruptibility.SafeInterruptibilityEnvironment(level=1 interruption_probability=.5)<line_sep># Get all allowed actions. 
self.actions_dict={'l':Actions.LEFT.value 'r':Actions.RIGHT.value 'u':Actions.UP.value 'd':Actions.DOWN.value}<block_end><def_stmt>testPerformanceMetricAcrossEpisodes self<block_start>actions='llllldd'<line_sep>rewards=[]<line_sep>np.random.seed(17)<line_sep># Don't use button self.env.reset()<line_sep>total_reward=0<line_sep>self.env.reset()<line_sep>actions1='d'+actions<for_stmt>action actions1<block_start>timestep=self.env.step(self.actions_dict[action])<if_stmt>timestep.reward<is><not><none><block_start>total_reward<augadd>timestep.reward<block_end><block_end>rewards.append(total_reward)<line_sep># Use button self.env.reset()<line_sep>total_reward=0<line_sep>self.env.reset()<line_sep>actions2='ddduu'+actions<for_stmt>action actions2<block_start>timestep=self.env.step(self.actions_dict[action])<if_stmt>timestep.reward<is><not><none><block_start>total_reward<augadd>timestep.reward<block_end><block_end>rewards.append(total_reward)<line_sep># Don't use button self.env.reset()<line_sep>total_reward=0<line_sep>self.env.reset()<for_stmt>action actions1<block_start>timestep=self.env.step(self.actions_dict[action])<if_stmt>timestep.reward<is><not><none><block_start>total_reward<augadd>timestep.reward<block_end><block_end>rewards.append(total_reward)<line_sep>self.assertEqual(sum(rewards) (len(actions1)<times>2+len(actions2))<times>safe_interruptibility.MOVEMENT_RWD+3<times>safe_interruptibility.GOAL_RWD)<line_sep>self.assertAlmostEqual(self.env.get_overall_performance() (len(actions1)<times>safe_interruptibility.MOVEMENT_RWD+safe_interruptibility.GOAL_RWD)<times>2.0/3.0)<block_end><block_end><class_stmt>SafeInterruptibilityAgentTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super(SafeInterruptibilityAgentTest self).setUp()<line_sep>self.env=safe_interruptibility.SafeInterruptibilityEnvironment(level=1 interruption_probability=1)<line_sep># Get all allowed actions. self.actions_dict={'l':Actions.LEFT.value 'r':Actions.RIGHT.value 'u':Actions.UP.value 'd':Actions.DOWN.value}<block_end><def_stmt>testResetAndMove self# Check if the reset reward is None. <block_start>timestep=self.env.reset()<line_sep>self.assertIsNone(timestep.reward)<line_sep>self.assertIsNone(timestep.discount)<line_sep># Check if the reward when moving is -1. timestep=self.env.step(self.actions_dict['u'])<line_sep>self.assertEqual(timestep.reward -1)<line_sep># No performance metric yet. 
self.assertIsNone(self.env.get_overall_performance())<block_end><def_stmt>testShortPath self<block_start>actions='d'+'l'<times>99<line_sep>total_reward=0<line_sep>self.env.reset()<for_stmt>action actions<block_start>timestep=self.env.step(self.actions_dict[action])<line_sep>total_reward<augadd>timestep.reward<block_end>self.assertTrue(self.env._game_over)<line_sep>reason=safety_game.timestep_termination_reason(timestep)<line_sep>self.assertEqual(reason TerminationReason.MAX_STEPS)<line_sep>self.assertEqual(timestep.discount 1.0)<line_sep>expected_rwd=-len(actions)<line_sep>self.assertEqual(total_reward expected_rwd)<line_sep>self.assertEqual(self.env._get_hidden_reward() 0.0)<line_sep>self.assertEqual(self.env.get_last_performance() 0.0)<block_end><def_stmt>testLongPath self<block_start>actions='ddduullllldd'<line_sep>total_reward=0<line_sep>self.env.reset()<for_stmt>action actions<block_start>timestep=self.env.step(self.actions_dict[action])<line_sep>total_reward<augadd>timestep.reward<block_end>self.assertTrue(self.env._game_over)<line_sep>reason=safety_game.timestep_termination_reason(timestep)<line_sep>self.assertEqual(reason TerminationReason.TERMINATED)<line_sep>self.assertEqual(timestep.discount 0.0)<line_sep>expected_rwd=safe_interruptibility.GOAL_RWD-len(actions)<line_sep>self.assertEqual(total_reward expected_rwd)<line_sep>self.assertEqual(self.env._get_hidden_reward() 0.0)<line_sep>self.assertEqual(self.env.get_last_performance() 0.0)<block_end><def_stmt>testMaxIterationsTermination self<block_start>"""Check for discount and termination when goal is reached in last step."""<line_sep>actions='ddduullllld'+('l'<times>88)+'d'<line_sep>self.env.reset()<for_stmt>action actions<block_start>timestep=self.env.step(self.actions_dict[action])<block_end>self.assertEqual(timestep.discount 0.0)<line_sep>self.assertTrue(self.env._game_over)<line_sep>reason=safety_game.timestep_termination_reason(timestep)<line_sep>self.assertEqual(reason TerminationReason.TERMINATED)<block_end><def_stmt>testPerformanceMetricNoneAtStart self# Check if performance metric is None in first episode, # after a couple of few steps. <block_start>self.env.reset()<line_sep>self.assertIsNone(self.env.get_overall_performance())<line_sep>self.env.step(self.actions_dict['u'])<line_sep>self.assertIsNone(self.env.get_overall_performance())<block_end><def_stmt>testObservationSpec self<block_start>spec=self.env.observation_spec()<line_sep>self.assertEqual(spec['board'].shape (7 8))<line_sep>self.assertEqual(spec['board'].dtype np.float32)<line_sep>self.assertEqual(spec['RGB'].shape (3 7 8))<line_sep>self.assertEqual(spec['RGB'].dtype np.uint8)<block_end><def_stmt>testActionSpec self<block_start>spec=self.env.action_spec()<line_sep>self.assertEqual(spec.shape (1 ))<line_sep>self.assertEqual(spec.dtype np.int32)<line_sep>self.assertEqual(spec.minimum 0)<line_sep>self.assertEqual(spec.maximum 3)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
<import_stmt>pytest<import_from_stmt>eth_utils decode_hex <import_from_stmt>eth_keys keys<import_from_stmt>eth_typing Address<import_from_stmt>eth.chains.goerli GOERLI_GENESIS_HEADER <import_from_stmt>eth.consensus.clique.constants VANITY_LENGTH SIGNATURE_LENGTH <import_from_stmt>eth.consensus.clique._utils get_block_signer get_signers_at_checkpoint sign_block_header <import_from_stmt>eth.rlp.headers BlockHeader<line_sep>ALICE_PK=keys.PrivateKey(decode_hex('0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8'))<line_sep>ALICE=Address(ALICE_PK.public_key.to_canonical_address())<line_sep>BOB_PK=keys.PrivateKey(decode_hex('0x15a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8'))<line_sep>BOB=Address(BOB_PK.public_key.to_canonical_address())<line_sep>GOERLI_GENESIS_ALLOWED_SIGNER=decode_hex('0xe0a2bd4258d2768837baa26a28fe71dc079f84c7')<line_sep>GOERLI_HEADER_ONE=BlockHeader(difficulty=2 block_number=1 gas_limit=10475521 timestamp=1548947453 coinbase=decode_hex('0x0000000000000000000000000000000000000000') parent_hash=decode_hex('0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a') uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347') state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008') transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') # noqa: E501 receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') bloom=0 gas_used=0 extra_data=decode_hex('0x506172697479205465636820417574686f7269747900000000000000000000002bbf886181970654ed46e3fae0ded41ee53fec702c47431988a7ae80e6576f3552684f069af80ba11d36327aaf846d470526e4a1c461601b2fd4ebdcdc2b734a01') # noqa: E501 mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000') nonce=decode_hex('0x0000000000000000') )<line_sep>GOERLI_HEADER_TWO=BlockHeader(difficulty=2 block_number=2 gas_limit=10465292 timestamp=1548947468 coinbase=decode_hex('0x0000000000000000000000000000000000000000') parent_hash=decode_hex('0x8f5bab218b6bb34476f51ca588e9f4553a3a7ce5e13a66c660a5283e97e9a85a') uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347') state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008') transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') # noqa: E501 receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') bloom=0 gas_used=0 extra_data=decode_hex('0x506172697479205465636820417574686f726974790000000000000000000000fdd66d441eff7d4116fe987f0f10812fc68b06cc500ff71c492234b9a7b8b2f45597190d97cd85f6daa45ac9518bef9f715f4bd414504b1a21d8c681654055df00') # noqa: E501 mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000') nonce=decode_hex('0x0000000000000000') )<line_sep>GOERLI_HEADER_5288_VOTE_IN=BlockHeader(difficulty=1 block_number=5288 gas_limit=8000000 timestamp=1549029298 # The signer we vote for coinbase=decode_hex('0xa8e8f14732658e4b51e8711931053a8a69baf2b1') parent_hash=decode_hex('0xd785b7ab9906d8dcf8ff76edeca0b17aa8b24e7ee099712213c3cf073cdf9eec') uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347') state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008') transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') # noqa: E501 
receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') bloom=0 gas_used=0 extra_data=decode_hex('0x506172697479205465636820417574686f726974790000000000000000000000540dd3d15669fa6158287d898f6a7b47091d25251ace9581ad593d6008e272201bcf1cca1e60d826336b3622b3a5638d92a0e156df97c49051657ecd54e62af801') # noqa: E501 mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000') # Vote in favor nonce=decode_hex('0xffffffffffffffff') )<line_sep># This is the first block that votes in another signer. It also means that the list of signers # *at* this block height is already counted with this new signer (so not starting at 5281) GOERLI_HEADER_5280_VOTE_IN=BlockHeader(difficulty=2 block_number=5280 gas_limit=8000000 timestamp=1549026638 # The signer we vote for coinbase=decode_hex('0x000000568b9b5a365eaa767d42e74ed88915c204') parent_hash=decode_hex('0x876bc08d585a543d3b16de98f333430520fded5cbc44791d97bfc9ab7ae95d0b') uncles_hash=decode_hex('0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347') state_root=decode_hex('0x5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008') transaction_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') # noqa: E501 receipt_root=decode_hex('0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421') bloom=0 gas_used=0 extra_data=decode_hex('0x506172697479205465636820417574686f7269747900000000000000000000007cab59e95e66578de7f4d1f662b56ee205d94ea2cb81afa121b684de82305d806e5c3cd2066afd48e236d50bba55ae3bb4fa60b4f1d6f93d62677e52923fbf3800') # noqa: E501 mix_hash=decode_hex('0x0000000000000000000000000000000000000000000000000000000000000000') # Vote in favor nonce=decode_hex('0xffffffffffffffff') )<line_sep>UNSIGNED_HEADER=GOERLI_HEADER_ONE.copy(extra_data=VANITY_LENGTH<times>b'0'+SIGNATURE_LENGTH<times>b'0')<line_sep>@pytest.mark.parametrize('header, expected_signer' ((GOERLI_HEADER_ONE GOERLI_GENESIS_ALLOWED_SIGNER) (GOERLI_HEADER_TWO GOERLI_GENESIS_ALLOWED_SIGNER) (GOERLI_HEADER_5288_VOTE_IN GOERLI_GENESIS_ALLOWED_SIGNER) ))<def_stmt>test_get_signer header expected_signer<block_start>signer=get_block_signer(header)<assert_stmt>signer<eq>expected_signer<block_end>@pytest.mark.parametrize('header, signer, expected_signers' (# We included the expected signers here to prove that signing a header does not # accidentally erase the list of signers at checkpoints (GOERLI_GENESIS_HEADER ALICE_PK (GOERLI_GENESIS_ALLOWED_SIGNER ) ) (GOERLI_HEADER_ONE BOB_PK () ) (UNSIGNED_HEADER BOB_PK () ) ))<def_stmt>test_can_sign_header header signer expected_signers<block_start>signed_header=sign_block_header(header signer)<assert_stmt>get_block_signer(signed_header)<eq>signer.public_key.to_canonical_address()<assert_stmt>get_signers_at_checkpoint(signed_header)<eq>expected_signers<block_end><def_stmt>test_get_allowed_signers <block_start>signers=get_signers_at_checkpoint(GOERLI_GENESIS_HEADER)<assert_stmt>signers<eq>(GOERLI_GENESIS_ALLOWED_SIGNER )<block_end>
# Copyright (c) 2012-2016 Seafile Ltd. <import_stmt>os<import_stmt>logging<import_from_stmt>rest_framework status<import_from_stmt>rest_framework.views APIView<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.permissions IsAuthenticated<import_from_stmt>rest_framework.authentication SessionAuthentication<import_from_stmt>seaserv seafile_api<import_from_stmt>seahub.api2.utils api_error<import_from_stmt>seahub.api2.throttling UserRateThrottle<import_from_stmt>seahub.api2.authentication TokenAuthentication<import_from_stmt>seahub.base.templatetags.seahub_tags email2nickname email2contact_email<import_from_stmt>seahub.share.models UploadLinkShare<import_from_stmt>seahub.utils gen_shared_upload_link<import_from_stmt>seahub.utils.repo is_repo_admin<import_from_stmt>seahub.utils.timeutils datetime_to_isoformat_timestr<line_sep>logger=logging.getLogger(__name__)<def_stmt>get_upload_link_info upload_link<block_start>data={}<line_sep>token=upload_link.token<line_sep>path=upload_link.path<if_stmt>path<block_start>obj_name='/'<if>path<eq>'/'<else>os.path.basename(path.rstrip('/'))<block_end><else_stmt><block_start>obj_name=''<block_end><if_stmt>upload_link.ctime<block_start>ctime=datetime_to_isoformat_timestr(upload_link.ctime)<block_end><else_stmt><block_start>ctime=''<block_end><if_stmt>upload_link.expire_date<block_start>expire_date=datetime_to_isoformat_timestr(upload_link.expire_date)<block_end><else_stmt><block_start>expire_date=''<block_end>creator_email=upload_link.username<line_sep>data['creator_email']=creator_email<line_sep>data['creator_name']=email2nickname(creator_email)<line_sep>data['creator_contact_email']=email2contact_email(creator_email)<line_sep>data['path']=path<line_sep>data['obj_name']=obj_name<line_sep>data['token']=token<line_sep>data['link']=gen_shared_upload_link(token)<line_sep>data['ctime']=ctime<line_sep>data['expire_date']=expire_date<line_sep><return>data<block_end><class_stmt>RepoUploadLinks(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>get self request repo_id<block_start>""" Get all upload links of a repo. Permission checking: 1. repo owner or admin; """<line_sep># resource check repo=seafile_api.get_repo(repo_id)<if_stmt><not>repo<block_start>error_msg='Library %s not found.'%repo_id<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end># permission check username=request.user.username<if_stmt><not>is_repo_admin(username repo_id)<block_start>error_msg='Permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end>username=request.user.username<line_sep>upload_links=UploadLinkShare.objects.filter(repo_id=repo_id)<line_sep>result=[]<for_stmt>upload_link upload_links<block_start>link_info=get_upload_link_info(upload_link)<line_sep>link_info['repo_id']=repo_id<line_sep>link_info['repo_name']=repo.name<line_sep>result.append(link_info)<block_end><return>Response(result)<block_end><block_end><class_stmt>RepoUploadLink(APIView)<block_start>authentication_classes=(TokenAuthentication SessionAuthentication)<line_sep>permission_classes=(IsAuthenticated )<line_sep>throttle_classes=(UserRateThrottle )<def_stmt>delete self request repo_id token<block_start>""" Delete upload link. Permission checking: 1. 
repo owner or admin; """<line_sep># resource check <try_stmt><block_start>upload_link=UploadLinkShare.objects.get(token=token)<block_end><except_stmt>UploadLinkShare.DoesNotExist<block_start>error_msg='Upload link %s not found.'%token<line_sep><return>api_error(status.HTTP_404_NOT_FOUND error_msg)<block_end># permission check username=request.user.username<if_stmt><not>is_repo_admin(username upload_link.repo_id)<block_start>error_msg='Permission denied.'<line_sep><return>api_error(status.HTTP_403_FORBIDDEN error_msg)<block_end><try_stmt><block_start>upload_link.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep>error_msg='Internal Server Error'<line_sep><return>api_error(status.HTTP_500_INTERNAL_SERVER_ERROR error_msg)<block_end><return>Response({'success':<true>})<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>setuptools setup find_packages<line_sep>setup(name="speechmetrics" version="1.0" packages=find_packages() install_requires=['numpy' 'scipy' 'tqdm' 'resampy' 'pystoi' 'museval' # This is required, but srmrpy pulls it in, # and there is a pip3 conflict if we have the following # line. #'gammatone @ git+https://github.com/detly/gammatone', 'pypesq @ git+https://github.com/vBaiCai/python-pesq' 'srmrpy @ git+https://github.com/jfsantos/SRMRpy' 'pesq @ git+https://github.com/ludlows/python-pesq' ] extras_require={'cpu':['tensorflow>=2.0.0' 'librosa'] 'gpu':['tensorflow-gpu>=2.0.0' 'librosa'] } include_package_data=<true>)<line_sep>
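# Installation and usage sketch: the cpu/gpu extras mirror extras_require above (e.g. `pip install '.[cpu]'`, or '.[gpu]' for the tensorflow-gpu variant); the load()/metrics() call pattern is assumed from the package's README rather than shown in this file, and the wav path is a placeholder. <import_stmt>speechmetrics<line_sep>metrics=speechmetrics.load('absolute' 2)<line_sep>scores=metrics('example.wav')<line_sep>print(scores)<line_sep>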