Dataset columns (string columns list min/max string length; the integer column lists min/max value):

| column | dtype | min | max |
| --- | --- | --- | --- |
| `repo` | string (length) | 8 | 116 |
| `tasks` | string (length) | 8 | 117 |
| `titles` | string (length) | 17 | 302 |
| `dependencies` | string (length) | 5 | 372k |
| `readme` | string (length) | 5 | 4.26k |
| `__index_level_0__` | int64 | 0 | 4.36k |
ETS-Research-Repositories/deep-clustering-toolbox
['semantic segmentation']
['Deep Co-Training for Semi-Supervised Image Segmentation']
deepclustering/meters/_metric.py playground/structured-consistency-loss/main.py test/deepclustering/dataset/classification/test_semi_loaders.py playground/PaNN/toy_example/augment.py deepclustering/utils/yaml_parser.py deepclustering/postprocessing/clip_images.py deepclustering/meters/_meterinterface.py playground/swa_cifar_benchmark/arch/googlenet.py deepclustering/dataset/classification/mnist.py test/deepclustering/meters/test_Averagewith_std.py playground/swa_cifar_benchmark/arch/senet.py deepclustering/meters2/individual_meters/torchnet/meter/averagevaluemeter.py deepclustering/dataset/classification/stl10.py playground/swa_cifar_benchmark/arch/wideresnet.py deepclustering/viewer/batchviewer.py deepclustering/dataloader/_utils/pin_memory.py deepclustering/arch/classification/IIC/net5g.py deepclustering/arch/segmentation/epsnetv2/cnn_utils.py deepclustering/meters/kappa.py playground/IIC_VAT/VATIICTrainer.py playground/subspaceClustering/run.py deepclustering/meters2/individual_meters/iou.py test/deepclustering/model/test_ema.py test/deepclustering/model/test_interface.py deepclustering/dataset/classification/svhn.py deepclustering/meters2/meter_interface.py deepclustering/arch/classification/preresnet.py deepclustering/meters2/individual_meters/torchnet/meter/__init__.py deepclustering/utils/io.py deepclustering/arch/classification/dummy.py deepclustering/writer/dataframedrawer.py deepclustering/writer/draw_csv.py playground/PaNN/toy_example/utils.py test/deepclustering/dataset/test_toyexample.py playground/PaNN/toy_example/trainer.py deepclustering/meters2/historicalContainer/__init__.py deepclustering/schedulers/customized_scheduler.py deepclustering/meters2/individual_meters/kappa.py deepclustering/postprocessing/plot.py deepclustering/model/ema.py deepclustering/dataset/segmentation/_metainfoGenerator.py deepclustering/utils/download_unzip_helper.py test/deepclustering/augment/test_interface.py playground/subspaceClustering/dataset.py deepclustering/arch/segmentation/deeplab/resnet.py deepclustering/loss/loss.py deepclustering/meters2/individual_meters/torchnet/meter/apmeter.py deepclustering/meters2/individual_meters/averagemeter.py deepclustering/schedulers/lr_scheduler.py deepclustering/dataloader/_utils/worker.py playground/swa_cifar_benchmark/arch/mobilenetv2.py test/deepclustering/utils/test_fsmGenerator.py deepclustering/arch/classification/IIC/vgg.py deepclustering/trainer/_hooks.py setup.py test/deepclustering/augment/test_tensor_aug.py deepclustering/decorator/decorator.py deepclustering/schedulers/__init__.py deepclustering/dataset/classification/utils.py playground/swa_cifar_benchmark/arch/__init__.py deepclustering/loss/IMSAT_loss.py deepclustering/dataset/classification/cifar_helper.py deepclustering/meters2/utils/__init__.py deepclustering/postprocessing/utils.py deepclustering/arch/segmentation/attention_unet.py deepclustering/dataset/semi_helper.py deepclustering/decorator/lazy_load_checkpoint.py deepclustering/meters2/individual_meters/torchnet/meter/meter.py deepclustering/trainer/__init__.py playground/IMSAT/IMSATTrainer.py deepclustering/dataset/segmentation/toydataset.py deepclustering/dataset/segmentation/prostate_dataset.py deepclustering/decorator/deprecation_helper.py deepclustering/meters2/individual_meters/torchnet/meter/aucmeter.py deepclustering/arch/classification/vat_network.py deepclustering/dataset/segmentation/toydataset_helper.py deepclustering/viewer/Viewer.py playground/IMSAT/mnist.py playground/PaNN/toy_example/dataset.py 
deepclustering/dataset/segmentation/_medicalSegmentationDataset.py deepclustering/meters2/individual_meters/torchnet/meter/movingaveragevaluemeter.py deepclustering/dataset/segmentation/__init__.py deepclustering/loss/__init__.py playground/subspaceClustering/subspace_trainer.py deepclustering/dataset/segmentation/spleen_dataset.py deepclustering/meters/dicemeter.py deepclustering/model/models.py test/deepclustering/augment/test_sychronized_augmentation.py playground/PaNN/toy_example/toy_example.py test/deepclustering/augment/test_tensorCutout.py test/deepclustering/meters/test_haussdorf.py deepclustering/utils/general.py deepclustering/utils/multiprocessing.py deepclustering/arch/segmentation/__init__.py test/deepclustering/meters/test_new_interface.py deepclustering/augment/sychronized_augment.py deepclustering/arch/classification/IIC/residual.py deepclustering/optim/adabound.py deepclustering/arch/segmentation/deeplab/deeplabv2.py deepclustering/arch/classification/IIC/net5g_multi_head.py deepclustering/viewer/realtime_viewer.py deepclustering/dataset/segmentation/_patient_sampler.py deepclustering/utils/warnings.py deepclustering/dataloader/_utils/fetch.py playground/swa_cifar_benchmark/arch/preact_resnet.py playground/IMSAT/train_IMSAT.py deepclustering/arch/segmentation/network.py deepclustering/arch/segmentation/epsnetv2/SegmentationModel.py playground/swa_cifar_benchmark/arch/resnext.py deepclustering/meters/_utils.py deepclustering/model/convert2apex.py deepclustering/writer/__init__.py deepclustering/meters2/individual_meters/surface_meter.py deepclustering/meters/cache.py deepclustering/meters2/individual_meters/surface_distance.py deepclustering/dataloader/sampler.py deepclustering/arch/segmentation/deeplab/deeplabv3plus.py deepclustering/manager.py deepclustering/meters2/individual_meters/torchnet/meter/msemeter.py playground/swa_cifar_benchmark/trainer.py test/deepclustering/GradualWarmpScheduler/test_scheduler.py test/deepclustering/writer/test_dataframedrawer.py deepclustering/schedulers/polynomiallr.py deepclustering/optim/radam.py playground/subspaceClustering/admm.py playground/MINE/MutualInformationEstmator.py deepclustering/utils/VAT.py deepclustering/arch/segmentation/deeplab/msc.py deepclustering/meters/instance.py playground/swa_cifar_benchmark/arch/large_conv.py deepclustering/postprocessing/report.py deepclustering/viewer/__init__.py script/train_IIC_Twohead.py deepclustering/augment/ndim_transforms.py deepclustering/utils/segmentation/utils.py playground/swa_cifar_benchmark/arch/dpn.py deepclustering/arch/segmentation/deeplab/deeplabv3.py playground/swa_cifar_benchmark/arch/pnasnet.py deepclustering/dataloader/dataloader_helper.py playground/swa_cifar_benchmark/my_scheduler.py playground/swa_cifar_benchmark/arch/lenet.py deepclustering/arch/segmentation/joseent/layers.py deepclustering/meters2/individual_meters/dicemeter.py deepclustering/dataloader/_utils/signal_handling.py deepclustering/augment/tensor_augment.py deepclustering/dataloader/distributed.py deepclustering/dataloader/__init__.py deepclustering/arch/segmentation/deeplab/enet.py deepclustering/dataset/classification/mnist_helper.py playground/MI/main.py deepclustering/dataloader/_utils/collate.py deepclustering/loss/kl_losses.py playground/swa_cifar_benchmark/arch/shufflenetv2.py deepclustering/__init__.py playground/semisup/semisup.py deepclustering/meters2/storage_interface.py deepclustering/meters2/historicalContainer/historical_container.py 
deepclustering/meters2/individual_meters/torchnet/meter/classerrormeter.py deepclustering/meters2/individual_meters/torchnet/meter/timemeter.py deepclustering/arch/__init__.py deepclustering/dataset/clustering_helper.py deepclustering/loss/IID_losses.py playground/MI/__init__.py test/deepclustering/dataset/classification/test_cifar10ClusteringDataloaders.py test/deepclustering/loss/test_dice_loss.py test/deepclustering/loss/test_loss.py deepclustering/dataloader/dataloader.py playground/MI/toy_example.py deepclustering/dataset/segmentation/wMH_dataset.py deepclustering/utils/adversarial_generator.py deepclustering/meters/__init__.py test/deepclustering/dataset/segmentation/test_iSeg2017_loader.py deepclustering/schedulers/warmup_scheduler.py playground/swa_cifar_benchmark/arch/efficientnet.py deepclustering/utils/classification/assignment_mapping.py deepclustering/decorator/cache_decorator.py deepclustering/model/__init__.py test/deepclustering/loss/test_imsat.py deepclustering/trainer/_trainer.py playground/auto_encoder/mnist.py deepclustering/dataset/classification/__init__.py deepclustering/meters2/individual_meters/_metric.py deepclustering/writer/SummaryWriter.py deepclustering/arch/segmentation/enet.py test/deepclustering/augment/test_dataagument.py deepclustering/meters2/individual_meters/cache.py deepclustering/augment/__init__.py playground/IIC_VAT/main.py deepclustering/method/__init__.py playground/swa_cifar_benchmark/arch/vgg.py deepclustering/meters/averagemeter.py deepclustering/arch/classification/IIC/net6c.py deepclustering/arch/segmentation/joseent/networks.py test/deepclustering/dataset/segmentation/test_patient_sampler.py deepclustering/dataset/segmentation/mmwhs_dataset.py test/deepclustering/dataset/segmentation/test_wmh_loader.py deepclustering/decorator/__init__.py playground/swa_cifar_benchmark/swa_main.py playground/swa_cifar_benchmark/arch/resnet.py deepclustering/dataloader/dataset.py deepclustering/meters/confusionmatrix.py deepclustering/meters2/individual_meters/instance.py playground/IMSAT/mnist_helper.py deepclustering/postprocessing/report2.py playground/PaNN/main.py deepclustering/arch/segmentation/deeplab/__init__.py deepclustering/dataset/classification/svhn_helper.py deepclustering/writer/_dataframedrawercallback.py deepclustering/arch/classification/__init__.py deepclustering/meters/iou.py deepclustering/augment/pil_augment.py playground/Mixup/mixup_main.py deepclustering/arch/classification/IIC/baselines/__init__.py playground/subspaceClustering/arch.py playground/subspaceClustering/subclassClustering.py deepclustering/arch/segmentation/joseent/__init__.py playground/swa_cifar_benchmark/arch/mobilenet.py test/deepclustering/dataset/segmentation/test_prostate_dataset.py deepclustering/utils/typecheckconvert.py deepclustering/dataloader/_utils/__init__.py deepclustering/arch/segmentation/epsnetv2/Model.py deepclustering/utils/segmentation/__init__.py deepclustering/arch/segmentation/epsnetv2/__init__.py deepclustering/dataset/classification/stl10_helper.py test/deepclustering/utils/test_yaml_parser.py deepclustering/optim/__init__.py deepclustering/dataset/segmentation/iSeg2017_dataset.py playground/PaNN/trainer.py deepclustering/dataset/classification/cifar.py deepclustering/meters2/__init__.py test/deepclustering/dataset/segmentation/test_acdc_loader.py deepclustering/dataset/segmentation/acdc_dataset.py deepclustering/meters2/individual_meters/hausdorff.py playground/swa_cifar_benchmark/arch/shufflenet.py test/deepclustering/trainer/test_trainer.py 
deepclustering/arch/classification/IIC/baselines/triplets.py deepclustering/arch/classification/IIC/net6c_two_head.py test/deepclustering/viewer/test_viewer.py deepclustering/dataset/classification/vision.py deepclustering/arch/classification/IIC/net5g_two_head.py deepclustering/meters2/individual_meters/torchnet/meter/mapmeter.py playground/MINE/__init__.py deepclustering/utils/__init__.py deepclustering/meters/hausdorff.py deepclustering/meters2/individual_meters/__init__.py deepclustering/arch/segmentation/vnet.py test/deepclustering/meters/test_meters.py deepclustering/meters2/individual_meters/confusionmatrix.py deepclustering/meters2/individual_meters/general_dice_meter.py deepclustering/arch/classification/vgg16_bn.py deepclustering/meters2/individual_meters/torchnet/meter/confusionmeter.py test/deepclustering/arch/test_arch_interface.py deepclustering/arch/classification/IMSAT/imsat.py playground/swa_cifar_benchmark/arch/densenet.py deepclustering/loss/dice_loss.py deepclustering/dataset/__init__.py ConfigManger ModelMode get_arch weights_init PlaceholderNet Dummy PreResNet Bottleneck PreResNet110 PreResNet164 conv3x3 BasicBlock VATNetwork init_weights vgg16_bn ClusterNet5gTrunk ClusterNet5gHead ClusterNet5g ClusterNet5gMultiHead ClusterNet5gMultiHeadHead ClusterNet5gTwoHeadHead ClusterNet5gTwoHead ClusterNet6cHead ClusterNet6c ClusterNet6cTrunk ClusterNet6cTwoHead ClusterNet6cTwoHeadHead ResNet conv3x3 ResNetTrunk BasicBlock VGGNet VGGTrunk TripletsNet5gHead TripletsNet6cHead TripletsNet6c TripletsNet5g IMSATHeader IMSATNet conv_block UNet_Attention Attention_block up_conv Decoder Encoder Enet InitialBlock BottleNeck UNetEnc SegNet PSPNet UNetDec_bn UNet_bn UNetDec UNet UNetEnc_bn FCN16 FCN8 SegNetEnc FCN32 PSPDec DownTransition UpTransition _make_nConv InputTransition passthrough ContBatchNorm3d LUConv VNet ELUCons OutputTransition _ASPPModule DeepLabV2 DeepLabV3 _ASPPModule DeepLabV3Plus UpsamplingBottleneck DownsamplingBottleneck ENet InitialBlock RegularBottleneck MSC _Bottleneck _ResBlock _ConvBatchNormReLU DeepLabV3Plus_ResNet101_MSC DeepLabV2S_ResNet101_MSC init_weights DeepLabV2_ResNet101_MSC DeepLabV3_ResNet101_MSC CDilated CDilatedB PSPModule BR C CBR CB EESP DownSampler EESPNet EESPNet_Seg conv_block convBatch conv_block_3 maxpool conv_block_Asym conv_block_1 conv_block_3_3 upSampleConv conv downSampleConv conv_decod_block Conv_residual_conv BottleNeckDownSampling Dummy ENet BottleNeckNormal_Asym CorstemNet BottleNeckDownSamplingDilatedConvLast weights_init BottleNeckUpSampling BottleNeckDownSamplingDilatedConv BottleNeckNormal Transformer _recover_ignore_index ToTensor blur_boundary GaussianNoise RandomRotate ElasticDeformation RangeNormalize RandomFlip RandomRotate90 get_transformer AbstractLabelToBoundary Normalize StandardLabelToBoundary RandomContrast LabelToAffinities LabelToBoundaryAndAffinities RandomLabelToAffinities Identity ToLabel PILCutout CenterCrop RandomRotation ToTensor SobelProcess RandomVerticalFlip RandomApply Resize RandomCrop RandomChoice RandomHorizontalFlip Identity Img2Tensor RandomTransforms FixRandomSeed SequentialWrapper CenterCrop Compose RandomVerticalFlip Resize RandomCrop RandomHorizontalFlip TensorCutout GaussianNoise TransformInterface _TransformInterface _DatasetKind _InfiniteConstantSampler _MultiProcessingDataLoaderIter _BaseDataLoaderIter DataLoader _SingleProcessDataLoaderIter background DataIter BackgroundGenerator random_split ConcatDataset IterableDataset Subset TensorDataset ChainDataset CombineDataset Dataset 
DistributedSampler _InfiniteRandomIterator SubsetRandomSampler WeightedRandomSampler RandomSampler BatchSampler InfiniteRandomSampler SequentialSampler default_collate default_convert _MapDatasetFetcher _BaseDatasetFetcher _IterableDatasetFetcher pin_memory _pin_memory_loop _set_SIGCHLD_handler ManagerWatchdog _worker_loop get_worker_info WorkerInfo _set_python_exit_flag ClusterDatasetInterface SemiDataSetInterface MedicalDatasetSemiInterface _draw_indices CIFAR100 CIFAR10 Cifar10ClusteringDatasetInterface Cifar10SemiSupervisedDatasetInterface MNIST read_label_file get_int read_image_file MNISTClusteringDatasetInterface MNISTSemiSupervisedDatasetInterface STL10 STL10DatasetInterface SVHN SVHNSemiSupervisedDatasetInterface SVHNClusteringDatasetInterface list_files download_url check_integrity gen_bar_updater list_dir makedir_exist_ok StandardTransform VisionDataset ACDCSemiInterface ACDCDataset ISeg2017SemiInterface ISeg2017Dataset MMWHSDataset MMWHSSemiInterface ProstateSemiInterface ProstateDataset SpleenSemiInterface SpleenDataset Cls_ShapesDataset ShapesDataset Seg_ShapesDataset Ins_ShapesDataset ToyExampleInterFace WMHSemiInterface WMHDataset default_transform MedicalImageSegmentationDataset MedicalImageSegmentationDatasetWithMetaInfo allow_extension classSizeCalulator getImage_GT SubMedicalDatasetBasedOnIndex PatientSampler MultiProcessCache SingleProcessCache FixRandomSeed processed _extract_bn_modules TimeBlock _disable_tracking_bn_stats_pytoch_el_1_1_0 threaded_ threaded timethis _disable_tracking_bn_stats export WaitThreadsEnd SuppressStdout convert_params DeprecationWarning warn_deprecated warn deprecated lazy_load_checkpoint _extract_variable_from_kwargs _extract_variable_from_args GeneralizedDiceLoss MetaDice IIDLoss compute_joint MultualInformaton_IMSAT Perturbation_Loss SimplexCrossEntropyLoss Entropy KL_div _check_reduction_params JSD_div SimplexCrossEntropyLoss Entropy KL_div _check_reduction_params JSD_div AverageValueMeter AveragewithStd Cache ConfusionMatrix BatchDiceMeter _DiceMeter toOneHot SliceDiceMeter numpy_haussdorf HaussdorffDistance InstanceValue IoU KappaMetrics Kappa2Annotator MeterInterface _AggregatedMeter _Metric rename_df_columns MeterInterface MeterInteractMixin _IOMixin Storage HistoricalContainer AverageValueMeter AveragewithStd Cache ConfusionMatrix BatchDiceMeter _DiceMeter toOneHot SliceDiceMeter UniversalDice numpy_haussdorf HaussdorffDistance InstanceValue IoU KappaMetrics Kappa2Annotator average_surface_distance hausdorff_distance mod_hausdorff_distance SurfaceMeter _Metric APMeter AUCMeter AverageValueMeter ClassErrorMeter ConfusionMeter mAPMeter Meter MovingAverageValueMeter MSEMeter TimeMeter rename_df_columns _Method AMPGradientBackwardStep to_Apex EMA_Model DeployModel Model NormalGradientBackwardStep ZeroGradientBackwardStep AdaBoundW AdaBound AdamW RAdam PlainRAdam call_from_cmd get_image_paths get_args main split_images main get_args main get_args call_from_cmd arg_parser extract_path_info extract_value main _butter_lowpass butter_lowpass_filter identical RampScheduler WeightScheduler ConstantScheduler ExponentialLR StepLR CosineAnnealingWarmRestarts LambdaLR MultiStepLR _LRScheduler ReduceLROnPlateau CosineAnnealingLR CyclicLR PolynomialLR GradualWarmupScheduler HookBase HookMixin _Trainer _TrainerHook FSGMGenerator VATGenerator distance kl vat _is_tarxz _is_zip extract_archive _is_tgz _is_tar download_url download_and_extract_archive check_md5 check_integrity makedir_exist_ok gen_bar_updater calculate_md5 _is_targz download _is_gzip map_ 
mmap_ iter_average dict_filter simplex logit2one_hot identical uniq probs2one_hot uncurry eq Vectorize set_nicer intersection dict_merge class2one_hot union Identical set_benchmark one_hot uc_ _register sset set_environment assert_list flatten_dict fix_all_seed probs2class tqdm_ _tqdm id_ extract_from_big_dict nice_dict write_yaml yaml_load path2Path path2str read_img multiprocessing_mapping read_img2 to_float is_single_integer is_float_array is_callable is_tuple_or_list to_torch is_string is_integer_array is_np_array is_single_float is_single_number is_single_bool is_np_scalar to_numpy is_generator is_iterable VATLoss_Multihead _disable_tracking_bn_stats VATLoss _l2_normalize _warnings YAMLArgParser str2bool original_match hungarian_match flat_acc ToLabel compute_iou non_max_suppression ImageViewer2DWidget view_batch ImageSlicingWidget BatchViewer tensor2plotable _is_iterable_tensor _empty_iterator _is_tensor multi_slice_viewer_debug cmap Multi_Slice_Viewer main get_parser Volume DataFrameDrawer arg_parser DrawCSV2 SummaryWriter _create_new_axe _repositioning_axes singleline_plot multipleline_plot _num_axes plot_callback autoencoder MNISTTrainer gradient_difference_loss IMSATIICTrainer IMSATTrainer MNIST read_label_file get_int read_image_file MNISTClusteringDatasetInterface MNISTSemiSupervisedDatasetInterface IMSAT_Enhanced_Trainer IMSAT_Trainer IIC_Trainer IIC_enhanced_Trainer IIC_adv_Trainer Net IMSAT IIC_Trainer Trainer ma MI_Estimator mutual_information sample_batch Mine get_dataloader MixUpTrainer dice_loss SemiSegTrainer inverse_transform_matrix AffineTensorTransform _draw_equal_dataset _override_transformation get_mnist_dataloaders _draw_inequal_dataset show_dataset SemiEntropyTrainer SemiUDATrainer SemiPrimalDualTrainer SemiTrainer convbnrelu_bloc get_prior_from_dataset SimpleNet val Wloss DropPath Dataug Affine test imshow Prediction Transf CIFAR10ID test_val train signout structured_consistency_loss create_gaussian_norm_dataset fix_seed ClusterNet5gTrunk ClusterNet5gHead ClusterNet5g create_gaussian_norm_dataset TensorDataset merge SubSpaceClusteringMethod2 SubSpaceClusteringMethod subspaceTrainer CosineAnnealingLR _LRScheduler CosineAnnealingLR_ SWATrainer SGDTrainer DenseNet201 DenseNet161 DenseNet121 Transition DenseNet Bottleneck densenet_cifar test DenseNet169 DPN Bottleneck test DPN92 DPN26 EfficientNetB0 Block EfficientNet test GoogLeNet Inception test conv_block LargeConvNet GradReverse SimpleNet identical LeNet Block MobileNet test Block test MobileNetV2 PNASNetB PNASNetA SepConv test PNASNet CellB CellA PreActBlock PreActResNet50 PreActResNet PreActResNet18 test PreActResNet152 PreActBottleneck PreActResNet101 PreActResNet34 ResNet ResNet18 ResNet34 Bottleneck ResNet101 test ResNet50 BasicBlock ResNet152 Block ResNeXt29_4x64d ResNeXt ResNeXt29_2x64d test_resnext ResNeXt29_32x4d ResNeXt29_8x64d PreActBlock SENet18 SENet test BasicBlock ShuffleNetG2 ShuffleNet Bottleneck test ShuffleBlock ShuffleNetG3 SplitBlock test DownBlock ShuffleBlock ShuffleNetV2 BasicBlock VGG test BasicBlock NetworkBlock WideResNet get_dataloader Test_arch_interface TestPILCutout TestGrey2tensor TestRandomApply Test_RandomCrop Test_Sobel TestCenterCrop TestInterface Test_Sequential_Wrapper TestTensorCutout TestCasewithSetUp TestResize TensorRandomCrop TestCenterCrop TestTensorAugmentation TestToyExample TestCifar Test_semisupervised_MNIST Test_semisupervised_CIFAR TestDownloadDataset TestDownloadDataset Test_SemiDataloader TestPatientSampler TestDownloadDataset Test_ACDCDataset 
TestDownloadDataset Test_wMHDatasetDataset TestDiceLoss TestIMSATLoss TestKLDiv Test_IIC TestJSDDiv TestCrossEntropyLoss TestDrawAverageWithSTD TestHaussdorffDistance TestBasicInterface TestMeterInterface TestEMA TestModel_ TestTrainer TestAdversarialFSGMGenerator fakenetwork np2torch TestViewer torch2numpy TestDataFrameDrawer xavier_normal_ normal_ data fill_ get lower pop arch_callable list Compose list Compose data isinstance fill_ Conv2d xavier_uniform_ normal_ zero_ BatchNorm2d Linear append LUConv range bias kaiming_normal_ modules weight constant_ BatchNorm2d Sequential Conv2d PReLU BatchNorm2d Sequential Conv2d PReLU BatchNorm2d Sequential Conv2d PReLU BatchNorm2d Sequential Conv2d conv_block BatchNorm2d Sequential Conv2d activ insert append BatchNorm2d layer BatchNorm2d Sequential ConvTranspose2d MaxPool2d gaussian append items Compose tolist Tensor Mapping type isinstance sum isinstance Sequence new type zip _new_shared Tensor Mapping get pin_memory set_device put hasattr isinstance Sequence Tensor Mapping SIGCHLD signal getsignal seed init_fn get fetch set_num_threads close _set_worker_signal_handlers put create_fetcher is_set manual_seed cancel_join_thread ManagerWatchdog is_alive WorkerInfo print int array shuffle tqdm md5 hexdigest makedirs join basename urlretrieve print expanduser makedir_exist_ok expanduser list listdir filter expanduser list listdir filter dcp append hasattr __name__ apply warn list index get t shape unsqueeze sum squeeze class2one_hot softmax probs2one_hot hd list columns map __surface_distances max percentile __surface_distances max initialize torchnet device to optimizer step zero_grad is_apex hasattr glob sorted print mean append crop range len get_image_paths len mkdir save titles folder_path split_images range open parse_args pprint add_argument ArgumentParser str get_args insert check_output main grid Path xrange to_list yrange list stem title ylim savefig legend partial plot close set classes smooth_factor xlim enumerate extend filter out_dir T print to_csv folder pprint to_dict isinstance print add_argument add_mutually_exclusive_group high_better ArgumentParser vars parse_args specific_folders extract_path_info file save_filename rglob save_dir append str split_path enumerate arg_parser butter lfilter _butter_lowpass str Path view backward grad requires_grad_ distance device normalize to network range download_and_extract_archive join basename format print download_url expanduser check_integrity _is_tarxz _is_zip join remove _is_tar dirname _is_gzip md5 print nice print str items seed str manual_seed_all manual_seed seed manual_seed_all manual_seed ones_like type float32 argmax shape class2one_hot probs2class softmax MutableMapping items isinstance extend append items items isinstance isinstance print pprint exists path2Path get T set_num_threads put task_done open open is_tensor is_tensor ones view join warn isinstance float numel int Tensor items zeros_like zeros sum range int items zeros_like Tensor sum range astype delete float32 compute_iou append minimum maximum QApplication show list exec_ argv isinstance concatenate setBatch exit instance BatchViewer Tensor numpy range len isinstance is_tensor ndarray isinstance _is_tensor isinstance Tensor ndarray isinstance show subplots use set_title tensor2plotable reshape mpl_connect axis imshow _is_tensor contour array enumerate arange ListedColormap N get_cmap add_argument_group parse_args add_argument show decode insert img_source Multi_Slice_Viewer gt_folders vars get_parser Volume pprint 
verbose axes set_position Bbox linspace _num_axes enumerate _num_axes add_subplot mean exp log mine_net concatenate range choice ParallelDataLoader sum append shape _inverse_transform_matrix dataset isinstance list sort tolist extend unique range len items list tolist extend __len__ set range value_counts isinstance print indices __repr__ MNIST _draw_equal_dataset _override_transformation _draw_inequal_dataset show_dataset Subset targets DataLoader dcp value_counts show transpose numpy gradit zero_grad whengrad wcriterion max progress_bar transf iter append next range detach_ grad net enumerate criterion backward print repeat stoch step len backward print aug progress_bar zero_grad len mean filter numpy modules item append train step max net enumerate stoch append eval print print name eval mkdir save append seed shuffle zeros range randn net densenet_cifar randn DPN92 EfficientNetB0 shape size GoogLeNet MobileNet MobileNetV2 PNASNetB PreActResNet18 ResNet18 randn print size ResNeXt29_2x64d net SENet18 ShuffleNetG2 ShuffleNetV2 VGG
# deep-clustering-toolbox #### PyTorch Vision toolbox not only for deep-clustering ### Introduction I still use this repo for research purposes. I update some modules frequently to keep the framework flexible. This repo contains the base code for a deep learning framework using `PyTorch` to benchmark algorithms on various datasets. The current version supports `MNIST`, `CIFAR10`, `SVHN` and `STL-10` for semi-supervised and unsupervised learning; `ACDC`, `Promise12`, `WMH` and others are supported as segmentation counterparts. #### Features: >- Powerful command-line parser using the `yaml` module, providing flexible input formats without a predefined argparser. >- Automatic checkpoint management adapting to various settings
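The "powerful cmd parser" feature listed above boils down to: load a YAML config, then let arbitrary dotted `key=value` tokens on the command line override nested entries, so no argparser has to be predefined. The sketch below is a minimal, hedged illustration of that pattern, not the toolbox's own `YAMLArgParser`/`ConfigManger`; `config.yaml` and `Trainer.lr` are hypothetical names.

```python
# Hedged sketch of a YAML config with free-form command-line overrides
# (illustrative only; not the deep-clustering-toolbox implementation).
import sys
import yaml  # PyYAML

def parse_overrides(argv):
    """Turn CLI tokens like 'Trainer.lr=0.01' into a nested dict."""
    merged = {}
    for token in argv:
        keys, _, raw = token.partition("=")
        node = merged
        *parents, leaf = keys.split(".")
        for key in parents:
            node = node.setdefault(key, {})
        node[leaf] = yaml.safe_load(raw)  # reuse YAML typing: ints, floats, bools, lists
    return merged

def deep_merge(base, override):
    """Recursively merge override entries into the base config."""
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_merge(base[key], value)
        else:
            base[key] = value
    return base

if __name__ == "__main__":
    with open("config.yaml") as f:               # hypothetical config file
        config = yaml.safe_load(f) or {}
    config = deep_merge(config, parse_overrides(sys.argv[1:]))
    print(yaml.safe_dump(config, default_flow_style=False))
```

With this pattern, `python main.py Trainer.lr=0.01 Trainer.max_epoch=100` overrides two nested keys without declaring either one up front.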
300
EVavylonis/iSEER
['unity']
['Unity: A General Platform for Intelligent Agents']
ml-agents/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/tests/trainers/test_trainer_controller.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/tests/envs/test_envs.py ml-agents/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/ppo/__init__.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/learn.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/policy.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents/tests/trainers/test_curriculum.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/curriculum.py ml-agents/mlagents/trainers/ppo/models.py ml-agents/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_input_pb2.py gym-unity/gym_unity/__init__.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_type_proto_pb2.py ml-agents/mlagents/envs/socket_communicator.py gym-unity/setup.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents/tests/trainers/test_ppo.py ml-agents/mlagents/envs/brain.py ml-agents/mlagents/trainers/bc/policy.py ml-agents/tests/trainers/test_bc.py ml-agents/tests/mock_communicator.py ml-agents/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents/mlagents/trainers/models.py ml-agents/mlagents/trainers/__init__.py ml-agents/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/tests/trainers/test_buffer.py ml-agents/mlagents/trainers/trainer.py ml-agents/mlagents/envs/communicator.py ml-agents/setup.py ml-agents/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents/mlagents/envs/__init__.py ml-agents/mlagents/trainers/bc/__init__.py gym-unity/tests/test_gym.py ml-agents/mlagents/envs/exception.py ml-agents/mlagents/envs/environment.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/exception.py ml-agents/tests/trainers/test_meta_curriculum.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents/mlagents/envs/communicator_objects/header_pb2.py UnityGymException UnityEnv test_gym_wrapper test_multi_agent BrainInfo BrainParameters Communicator UnityEnvironment UnityException UnityTimeOutException UnityEnvironmentException UnityActionException RpcCommunicator UnityToExternalServicerImplementation SocketCommunicator UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server BufferException Buffer Curriculum CurriculumError MetaCurriculumError TrainerError main run_training MetaCurriculum LearningModel Policy UnityPolicyException UnityTrainerException Trainer TrainerController BehavioralCloningModel BCPolicy BehavioralCloningTrainer 
PPOModel PPOPolicy PPOTrainer get_gae discount_rewards MockCommunicator test_initialization test_reset test_close test_step test_handles_bad_filename test_dc_bc_model test_cc_bc_model test_visual_cc_bc_model test_bc_policy_evaluate dummy_config test_visual_dc_bc_model assert_array test_buffer location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_rl_functions test_ppo_model_dc_vector_curio test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_model_cc_visual_curio test_ppo_model_dc_visual_curio test_ppo_model_cc_vector_curio test_ppo_model_cc_vector test_initialization test_initialize_trainers dummy_bc_config dummy_bad_config dummy_config dummy_start test_load_config sample step MockCommunicator UnityEnv step MockCommunicator UnityEnv method_handlers_generic_handler add_generic_rpc_handlers start_learning int str TrainerController int Process getLogger print start info append randint docopt range size range reversed zeros_like asarray tolist discount_rewards UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator reset_default_graph close reset_default_graph reset_default_graph reset_default_graph reset_default_graph flatten list range len get_batch Buffer assert_array append_update_buffer make_mini_batch append reset_agent array range Curriculum Curriculum Curriculum MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards TrainerController
<img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) of state-of-the-art algorithms to enable game developers and hobbyists to easily train intelligent agents for 2D, 3D and VR/AR games. These trained agents can be
301
EagleW/Writing-editing-Network
['text generation']
['Paper Abstract Writing through Editing Mechanism']
Writing-editing network/plot.py Writing-editing network/seq2seq/attention.py pycocoevalcap/cider/cider.py pycocoevalcap/cider/__init__.py split_data.py pycocoevalcap/rouge/__init__.py Writing-editing network/seq2seq/fb_seq2seq.py pycocoevalcap/cider/cider_scorer.py pycocoevalcap/meteor/__init__.py pycocoevalcap/bleu/bleu.py pycocoevalcap/rouge/rouge.py Writing-editing network/seq2seq/baseRNN.py Writing-editing network/predictor.py pycocoevalcap/__init__.py Writing-editing network/seq2seq/DecoderRNNFB.py Writing-editing network/main.py eval.py pycocoevalcap/bleu/__init__.py pycocoevalcap/bleu/bleu_scorer.py Writing-editing network/seq2seq/EncoderRNN.py pycocoevalcap/meteor/meteor.py Writing-editing network/utils.py Evaluate Bleu precook BleuScorer cook_test cook_refs Cider precook CiderScorer cook_test cook_refs Meteor my_lcs Rouge Config train_batch ConfigTest train_epoches _mask Predictor Vectorizer headline2abstractdataset load_embeddings Attention BaseRNN DecoderRNNFB Gate EncoderRNN FbSeq2seq defaultdict tuple split range len get items precook min append float sum max len items precook max range len size extend ByteTensor eq item append argmax cuda range criterion view model backward size clip_grad_norm_ zero_grad num_exams parameters max_grad_norm item append step range _mask train_batch batch_size print tolist num_exams DataLoader pprint train range enumerate len get zeros array split
# Writing-editing Network: Paper Abstract Writing through Editing Mechanism [Paper Abstract Writing through Editing Mechanism](http://aclanthology.org/P18-2042.pdf) [[Poster]](https://eaglew.github.io/files/Paper_abstract_generation.pdf)[[Fake Handbook*]](https://eaglew.github.io/files/handbook.pdf) *Fake abstracts for the main conference (ACL 2018) Accepted by the 56th Annual Meeting of the Association for Computational Linguistics (ACL 2018) Table of Contents ================= * [Overview](#overview) * [Requirements](#requirements) * [Quickstart](#quickstart) * [Citation](#citation)
302
Ebjerrum/SMILES-enumeration
['data augmentation']
['SMILES Enumeration as Data Augmentation for Neural Network Modeling of Molecules']
SmilesEnumerator.py time_test.py
<div class="cell markdown"> # SMILES enumeration, vectorization and batch generation [![Smiles Enumeration Header](README_files/README_files/f467ed21a7e815cdf8a9c29fa75b3ebc12878a74.png)](https://www.cheminformania.com/useful-information/smiles-enumeration-as-data-augmentation-for-molecular-neural-networks/) </div> <div class="cell markdown"> SMILES enumeration is the process of writing out all possible SMILES forms of a molecule. It's a useful technique for data augmentation before sequence based modeling of molecules. You can read more about the background in this [blog
303
EdoardoCicero/Neural-Networks-Project
['breast cancer detection', 'medical diagnosis']
["Deep Neural Networks Improve Radiologists' Performance in Breast Cancer Screening"]
Train.py code/Augmentation.py code/creaListeDiImmaginiElabelsPerView.py breast_cancer_classifier-master/src/data_loading/augmentations.py code/INbreast_to_project_structure.py code/All_keras_models.py breast_cancer_classifier-master/src/constants.py breast_cancer_classifier-master/src/modeling/run_model_single_keras_2.py breast_cancer_classifier-master/src/modeling/models_keras_2.py breast_cancer_classifier-master/src/data_loading/loading.py breast_cancer_classifier-master/src/utilities/reading_images.py breast_cancer_classifier-master/src/heatmaps/run_producer_project.py breast_cancer_classifier-master/src/modeling/layers_keras_2.py breast_cancer_classifier-master/src/utilities/pickling.py breast_cancer_classifier-master/src/utilities/tools.py breast_cancer_classifier-master/src/utilities/tf_utils.py breast_cancer_classifier-master/src/utilities/saving_images.py code/creaDizionarioScreen2biradAndScreen2label.py breast_cancer_classifier-master/src/heatmaps/models.py AUC_Benign AUC_Malign CustomCross VIEWS MODELMODES LABELS VIEWANGLES simple_resize window_location_at_center_point zero_pad_and_align_window sample_crop_best_center shift_window_inside_image sample_crop random_augmentation_best_center crop_image augment_and_normalize_image load_image_and_heatmaps standard_normalize_single_image load_heatmaps load_image flip_image TFSamePadWrapper ModifiedDenseNet121 ori_image_prepare load_model produce_heatmaps prediction_by_batch probabilities_to_heatmap save_heatmaps stride_list_generator get_all_prob patch_batch_prepare get_image_path main making_heatmap_with_large_minibatch_potential sample_patches_single sample_patches batch_norm conv1x1 output_layer avg_pool_layer conv3x3 conv2d_fixed_padding gaussian_noise_layer basic_block_v2 single_image_breast_model resnet22 construct_single_image_breast_model_match_dict four_view_resnet view_resnet_v2 make_layer process_augment_inputs load_model batch_to_inputs ModelInput main load_inputs run unpickle_from_file pickle_to_file read_image_mat original_save_dicom_image_as_png dicom_png_read read_image_png save_image_as_png save_image_as_hdf5 convert_conv_torch2tf convert_fc_weight_torch2tf get_tf_variables construct_weight_assign_ops partition_batch view_wise_model breast_wise_model image_wise_model joint_model r22mlo r22cc get_output_dict Feeding_Sequence createDictIdBirad2birad createDictScreen2label createDictScreen2birad allScreen2label suddividiInListeImmaginiElabelsPerView suddividiPercorsiImmaginiElabelsInListe creaListaPazientiEsclusi creaDictPazienti2screens int expand_dims resize simple_resize is_mlo window_location_at_center_point zero_pad_and_align_window uniform any round shift_window_inside_image is_cc zeros abs array uniform min round array concatenate sample_crop_best_center sample_crop expand_dims crop_image fliplr is_right is_left read_image_mat read_image_png endswith astype float32 flip_image load_image stack load_image load_heatmaps random_augmentation_best_center standard_normalize_single_image copy sample range transpose standard_normalize_single_image astype shape stride_list_generator load_image append shape expand_dims zeros prediction_by_batch zeros partition_batch enumerate join flip_image save_image_as_hdf5 append sample_patches_single LIST get_image_path ori_image_prepare patch_batch_prepare probabilities_to_heatmap save_heatmaps tqdm get_all_prob sample_patches makedirs format print to ModifiedDenseNet121 load_from_path eval device has_cudnn making_heatmap_with_large_minibatch_potential dict seed load_model 
produce_heatmaps as_list pad conv2d_fixed_padding slice items format replace isinstance convert_conv_torch2tf convert_fc_weight_torch2tf LIST range Graph lower Session unpickle_from_file load_image load_heatmaps augment_and_normalize_image process_augment_inputs RandomState load_model print batch_to_inputs dumps mean partition_batch append load_inputs range parse_args add_argument ArgumentParser run pixel_array print imread original_save_dicom_image_as_png array imread array T File close imwrite File close create_dataset append get_collection TRAINABLE_VARIABLES GLOBAL_VARIABLES append items assign append int concatenate MLO_R22_model CC_R22_model Model stack r22mlo summary r22cc Input int exp concatenate log_softmax MLO_R22_model CC_R22_model Model stack r22mlo summary r22cc Input int exp concatenate log_softmax MLO_R22_model CC_R22_model Model r22mlo summary r22cc Input int exp concatenate log_softmax MLO_R22_model CC_R22_model Model stack r22mlo summary r22cc Input int suddividiPercorsiImmaginiElabelsInListe len astype allScreen2label range append enumerate int str open_workbook sheet_by_index nrows dict zip append cell_value range dict endswith walk int dict items createDictIdBirad2birad createDictScreen2label createDictScreen2birad join replace endswith zip append sep array walk values split append zip dict listdir append list keys
# DNN for Breast Cancer Detection This is a project based on the paper ["Deep Neural Networks Improve Radiologists’ Performance in Breast Cancer Screening"](https://arxiv.org/abs/1903.08297) and the linked GitHub repo https://github.com/nyukat/breast_cancer_classifier. <br/> Here we reproduced the networks presented in that paper in TensorFlow/Keras. <br/> The folder breast_cancer_classifier-master contains the code from the GitHub repo mentioned above that we used for our work.
304
EducationalTestingService/discourse-parsing
['discourse parsing']
['Fast Rhetorical Structure Theory Discourse Parsing']
tests/test_discourseparsing.py rstfinder/make_segmentation_crfpp_template.py rstfinder/version.py tests/test_segmentation_evaluation.py rstfinder/utils/compute_bootstrap_from_predictions.py rstfinder/discourse_segmentation.py rstfinder/io_util.py rstfinder/tune_rst_parser.py rstfinder/tune_segmentation_model.py tests/test_paragraph_splitting.py rstfinder/convert_rst_discourse_tb.py rstfinder/utils/try_head_rules.py tests/test_parse_util.py rstfinder/reformat_rst_trees.py tests/test_rst_parse.py rstfinder/extract_segmentation_features.py rstfinder/make_traindev_split.py rstfinder/discourse_parsing.py rstfinder/rst_parse_batch.py rstfinder/rst_parse.py rstfinder/tree_util.py rstfinder/parse_util.py rstfinder/collapse_rst_labels.py rstfinder/extract_actions_from_trees.py setup.py rstfinder/segment_document.py rstfinder/utils/visualize_rst_tree.py tests/test_tree_util.py rstfinder/rst_eval.py rstfinder/paragraph_splitting.py rstfinder/__init__.py requirements readme main collapse_rst_labels _collapse_rst_label main Parser parse_node_features extract_segmentation_features extract_tagged_doc_edus extract_edus_tokens Segmenter _is_head_of _merge_constituent_end_shifts extract_parse_actions main _extract_parse_actions_helper main read_text_file main make_segmentation_crfpp_template main ParagraphSplitter SyntaxParserWrapper reformat_rst_tree fix_rst_treebank_tree_str _delete_span_leaf_nodes _replace_edu_strings _move_rel2par convert_parens_in_rst_tree_str main predict_rst_trees_for_eval predict_and_evaluate_rst_trees _extract_spans main compute_p_r_f1 compute_rst_eval_results main from_constituency_trees segment_and_parse main batch_process main extract_preterminals convert_parens_to_ptb_format convert_paren_tokens_to_ptb_format convert_ptb_tree collapse_binarized_nodes extract_converted_terminals HeadedParentedTree find_first_common_ancestor main train_and_evaluate_model train_rst_parsing_model prune_model main evaluate_segmentation_output convert_crfpp_output make_score_func main main depth main _convert_tree_json_helper convert_tree_json test_reconstruct_training_examples test_extract_parse_actions test_parse_single_edu test_paragraph_splitting test_syntax_wrapper TestRSTParse test_segmentation_evaluation test_collapse_binarized_nodes_bad_label test_find_first_common_ancestor test_find_first_common_ancestor_order test_collapse_binarized_nodes test_find_first_common_ancestor_separate_trees subtrees label _collapse_rst_label set_label lower search split parse_args add_argument ArgumentParser extract_preterminals fromstring search strip warning rst_discourse_treebank_dir basicConfig sorted basename TreebankWordTokenizer sent_tokenize extract_converted_terminals append replace glob convert_paren_tokens_to_ptb_format tag lower startswith info zip tokenize enumerate load join convert_ptb_tree edit_distance extract_edus_tokens sub penn_treebank_dir head_preterminal parse_node_features treeposition right_sibling parent zip fromstring extend lower find_first_common_ancestor append enumerate len append zip _extract_parse_actions_helper set_label _merge_constituent_end_shifts append ShiftReduceAction pop parent startswith pop _is_head_of parent isinstance ShiftReduceAction append label num_features make_segmentation_crfpp_template output_path seed random_seed shuffle items list sub parent remove extend join remove set_label parent extend leaves lower clear subtrees append isinstance set_label debug _replace_edu_strings _move_rel2par _delete_span_leaf_nodes subtrees leaves add set append len float len 
items sorted list info Counter set zip compute_p_r_f1 enumerate join collapse_rst_labels fromstring extract_edus_tokens info append segment_and_parse predict_rst_trees_for_eval compute_rst_eval_results items load_model getLogger print dumps predict_and_evaluate_rst_trees Parser captureWarnings parsing_model evaluation_set segment_document parse extract_edus_tokens warning append parse_document join segment_document parse fromstring extract_edus_tokens warning append segmentation_model SyntaxParserWrapper list __repr__ Segmenter segment_and_parse debug read_text_file input_paths Parser Segmenter SyntaxParserWrapper load_model max_workers ceil len segment_document input_path parse_document model_path items list strip replace append leaves remove set_label parent isinstance subtrees sub label range len add treeposition parent set pop parent endswith subtrees insert index reversed append join list items prune_model add_section ConfigParser set run_configuration makedirs join set_params ones from_file restrict save fit load_model train_rst_parsing_model predict_and_evaluate_rst_trees Parser info abspath train_file exists collapse_rst_labels eval_file parse partial extract_parse_actions working_path single_process makedirs list recall_score precision_score f1_score chain decode evaluate_segmentation_output template_path parent call convert_crfpp_output split make_score_func format metric_name n_samples alpha predict_rst_trees_for_eval ci array parent _convert_tree_json_helper fromstring Environment embed_d3js convert_tree_json render get_template eq_ extract_parse_actions fromstring label type len eq_ extract_parse_actions fromstring Parser Path load str list load_model eq_ subtrees len leaves Parser label open join eq_ find_paragraphs ParagraphSplitter sub enumerate len SyntaxParserWrapper evaluate_segmentation_output convert_crfpp_output eq_ root eq_ find_first_common_ancestor eq_ find_first_common_ancestor fromstring assert_raises fromstring eq_ collapse_binarized_nodes fromstring assert_raises
![Gitlab CI Status](https://gitlab.com/EducationalTestingService/rstfinder/badges/develop/pipeline.svg) ![Conda Package](https://img.shields.io/conda/v/ets/rstfinder.svg) ![Conda Platform](https://img.shields.io/conda/pn/ets/rstfinder.svg) ![License](https://img.shields.io/github/license/EducationalTestingService/rstfinder) ## Table of Contents * [Introduction](#introduction) * [Installation](#installation) * [Usage](#usage) * [Train models](#train-models) * [Use trained models](#use-trained-models) * [License](#license) ## Introduction This repository contains the code for **RSTFinder** -- a discourse segmenter & shift-reduce parser based on rhetorical structure theory. A detailed system description can be found in this [paper](http://arxiv.org/abs/1505.02425).
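The introduction above names a shift-reduce parsing strategy over discourse units. As a rough illustration of the shift/reduce mechanics only, the toy loop below assembles a left-branching tree from a queue of EDUs using a fixed "reduce whenever two subtrees are stacked" policy; RSTFinder instead chooses SHIFT/REDUCE actions (and relation labels) with a trained classifier, so both the policy and the relation label here are placeholders.

```python
# Toy shift-reduce loop over elementary discourse units (EDUs). The fixed policy
# and the placeholder relation label stand in for RSTFinder's trained classifier.
from collections import deque

def shift_reduce(edus):
    queue, stack = deque(edus), []
    while queue or len(stack) > 1:
        if len(stack) >= 2:                              # REDUCE: merge the top two subtrees
            right, left = stack.pop(), stack.pop()
            stack.append(("elaboration", left, right))   # placeholder relation label
        else:                                            # SHIFT: move the next EDU onto the stack
            stack.append(queue.popleft())
    return stack[0] if stack else None

print(shift_reduce(["Although it rained,", "we went hiking", "and enjoyed it."]))
# ('elaboration', ('elaboration', 'Although it rained,', 'we went hiking'), 'and enjoyed it.')
```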
305
EducationalTestingService/rstfinder
['discourse parsing']
['Fast Rhetorical Structure Theory Discourse Parsing']
tests/test_discourseparsing.py rstfinder/make_segmentation_crfpp_template.py rstfinder/version.py tests/test_segmentation_evaluation.py rstfinder/utils/compute_bootstrap_from_predictions.py rstfinder/discourse_segmentation.py rstfinder/io_util.py rstfinder/tune_rst_parser.py rstfinder/tune_segmentation_model.py tests/test_paragraph_splitting.py rstfinder/convert_rst_discourse_tb.py rstfinder/utils/try_head_rules.py tests/test_parse_util.py rstfinder/reformat_rst_trees.py tests/test_rst_parse.py rstfinder/extract_segmentation_features.py rstfinder/make_traindev_split.py rstfinder/discourse_parsing.py rstfinder/rst_parse_batch.py rstfinder/rst_parse.py rstfinder/tree_util.py rstfinder/parse_util.py rstfinder/collapse_rst_labels.py rstfinder/extract_actions_from_trees.py setup.py rstfinder/segment_document.py rstfinder/utils/visualize_rst_tree.py tests/test_tree_util.py rstfinder/rst_eval.py rstfinder/paragraph_splitting.py rstfinder/__init__.py requirements readme main collapse_rst_labels _collapse_rst_label main Parser parse_node_features extract_segmentation_features extract_tagged_doc_edus extract_edus_tokens Segmenter _is_head_of _merge_constituent_end_shifts extract_parse_actions main _extract_parse_actions_helper main read_text_file main make_segmentation_crfpp_template main ParagraphSplitter SyntaxParserWrapper reformat_rst_tree fix_rst_treebank_tree_str _delete_span_leaf_nodes _replace_edu_strings _move_rel2par convert_parens_in_rst_tree_str main predict_rst_trees_for_eval predict_and_evaluate_rst_trees _extract_spans main compute_p_r_f1 compute_rst_eval_results main from_constituency_trees segment_and_parse main batch_process main extract_preterminals convert_parens_to_ptb_format convert_paren_tokens_to_ptb_format convert_ptb_tree collapse_binarized_nodes extract_converted_terminals HeadedParentedTree find_first_common_ancestor main train_and_evaluate_model train_rst_parsing_model prune_model main evaluate_segmentation_output convert_crfpp_output make_score_func main main depth main _convert_tree_json_helper convert_tree_json test_reconstruct_training_examples test_extract_parse_actions test_parse_single_edu test_paragraph_splitting test_syntax_wrapper TestRSTParse test_segmentation_evaluation test_collapse_binarized_nodes_bad_label test_find_first_common_ancestor test_find_first_common_ancestor_order test_collapse_binarized_nodes test_find_first_common_ancestor_separate_trees subtrees label _collapse_rst_label set_label lower search split parse_args add_argument ArgumentParser extract_preterminals fromstring search strip warning rst_discourse_treebank_dir basicConfig sorted basename TreebankWordTokenizer sent_tokenize extract_converted_terminals append replace glob convert_paren_tokens_to_ptb_format tag lower startswith info zip tokenize enumerate load join convert_ptb_tree edit_distance extract_edus_tokens sub penn_treebank_dir head_preterminal parse_node_features treeposition right_sibling parent zip fromstring extend lower find_first_common_ancestor append enumerate len append zip _extract_parse_actions_helper set_label _merge_constituent_end_shifts append ShiftReduceAction pop parent startswith pop _is_head_of parent isinstance ShiftReduceAction append label num_features make_segmentation_crfpp_template output_path seed random_seed shuffle items list sub parent remove extend join remove set_label parent extend leaves lower clear subtrees append isinstance set_label debug _replace_edu_strings _move_rel2par _delete_span_leaf_nodes subtrees leaves add set append len float len 
items sorted list info Counter set zip compute_p_r_f1 enumerate join collapse_rst_labels fromstring extract_edus_tokens info append segment_and_parse predict_rst_trees_for_eval compute_rst_eval_results items load_model getLogger print dumps predict_and_evaluate_rst_trees Parser captureWarnings parsing_model evaluation_set segment_document parse extract_edus_tokens warning append parse_document join segment_document parse fromstring extract_edus_tokens warning append segmentation_model SyntaxParserWrapper list __repr__ Segmenter segment_and_parse debug read_text_file input_paths Parser Segmenter SyntaxParserWrapper load_model max_workers ceil len segment_document input_path parse_document model_path items list strip replace append leaves remove set_label parent isinstance subtrees sub label range len add treeposition parent set pop parent endswith subtrees insert index reversed append join list items prune_model add_section ConfigParser set run_configuration makedirs join set_params ones from_file restrict save fit load_model train_rst_parsing_model predict_and_evaluate_rst_trees Parser info abspath train_file exists collapse_rst_labels eval_file parse partial extract_parse_actions working_path single_process makedirs list recall_score precision_score f1_score chain decode evaluate_segmentation_output template_path parent call convert_crfpp_output split make_score_func format metric_name n_samples alpha predict_rst_trees_for_eval ci array parent _convert_tree_json_helper fromstring Environment embed_d3js convert_tree_json render get_template eq_ extract_parse_actions fromstring label type len eq_ extract_parse_actions fromstring Parser Path load str list load_model eq_ subtrees len leaves Parser label open join eq_ find_paragraphs ParagraphSplitter sub enumerate len SyntaxParserWrapper evaluate_segmentation_output convert_crfpp_output eq_ root eq_ find_first_common_ancestor eq_ find_first_common_ancestor fromstring assert_raises fromstring eq_ collapse_binarized_nodes fromstring assert_raises
![Gitlab CI Status](https://gitlab.com/EducationalTestingService/rstfinder/badges/develop/pipeline.svg) ![Conda Package](https://img.shields.io/conda/v/ets/rstfinder.svg) ![Conda Platform](https://img.shields.io/conda/pn/ets/rstfinder.svg) ![License](https://img.shields.io/github/license/EducationalTestingService/rstfinder) ## Table of Contents * [Introduction](#introduction) * [Installation](#installation) * [Usage](#usage) * [Train models](#train-models) * [Use trained models](#use-trained-models) * [License](#license) ## Introduction This repository contains the code for **RSTFinder** -- a discourse segmenter & shift-reduce parser based on rhetorical structure theory. A detailed system description can be found in this [paper](http://arxiv.org/abs/1505.02425).
306
EdwardRaff/pyBWMD
['malware classification']
['A New Burrows Wheeler Transform Markov Distance']
setup.py pyBWMD/__init__.py pyBWMD/bwmd.py isFile makeMatrix vectorize read partial bytes_to_raw_vec isinstance isFile close sqrt encode Pool open
# Burrows Wheeler Markov Distance pyBWMD is an implementation of the Burrows Wheeler Markov Distance (BWMD). It is an approach inspired by the Normalized Compression Distance ([NCD](https://en.wikipedia.org/wiki/Normalized_compression_distance)). The basic goal is to use compression as a method of measuring similarity, and this gives us a method we can use for any possible input. Though it is not always the best approach, it is very versatile and especially useful in domains where we do not necessarily know how to extract features, such as malware analysis. BWMD works by using the [Burrows Wheeler Transform (BWT)](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform) and developing a Markov model based on the BWT transform of the input data. This works because the BWT tends to create repetition in the data, allowing simple compression techniques like run-length encoding to become effective. Unique to BWMD is that it converts the input sequence of bytes into a vector in Euclidean space, making it easy to apply to all of your favorite machine learning classification, clustering, and search algorithms. Check the `examples` directory in this repo for some small snippets showing how to get started with BWMD. # Installation To install pyBWMD, you can currently use this syntax with pip: ``` pip install git+git://github.com/EdwardRaff/pyBWMD#egg=pyBWMD ```
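The README above describes BWMD as a BWT followed by a Markov model whose statistics form a vector in Euclidean space. The toy sketch below walks through that pipeline in plain Python/NumPy under simplifying assumptions (naive rotation-sort BWT, a sentinel byte, raw bigram counts); it is not pyBWMD's implementation, which exposes a `vectorize` helper instead.

```python
# Toy sketch of the BWMD idea (not the pyBWMD implementation): apply a naive
# Burrows-Wheeler transform, fit a byte-level Markov (bigram) model on the
# transformed sequence, and flatten it into a Euclidean feature vector.
import numpy as np

def bwt(data: bytes) -> bytes:
    """Naive O(n^2 log n) BWT with a sentinel byte; only suitable for tiny demos."""
    s = data + b"\x00"                       # assumes the sentinel byte is absent from the data
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return bytes(rot[-1] for rot in rotations)

def bwmd_vector(data: bytes) -> np.ndarray:
    """L2-normalized transition frequencies of adjacent bytes in the BWT output."""
    t = bwt(data)
    counts = np.zeros((256, 256), dtype=np.float64)
    for a, b in zip(t, t[1:]):               # iterating bytes yields ints in Python 3
        counts[a, b] += 1.0
    vec = counts.ravel()
    norm = np.linalg.norm(vec)
    return vec / norm if norm > 0 else vec

x = bwmd_vector(b"abracadabra" * 20)
y = bwmd_vector(b"abracadabra" * 19 + b"zzz")
z = bwmd_vector(bytes(range(256)) * 2)
print(np.linalg.norm(x - y), np.linalg.norm(x - z))  # similar inputs should be closer
```

Because each input becomes an ordinary NumPy vector, the resulting features drop straight into standard distance-based tools (k-NN, k-means, nearest-neighbor search), which is the point the README makes about Euclidean embeddings.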
307
EdwinKim3069/XtarNet
['few shot learning']
['XtarNet: Learning to Extract Task-Adaptive Representation for Incremental Few-Shot Learning']
train_lib.py run_pretrain.py fewshot/models/kmeans_utils.py fewshot/data/tiered_imagenet.py fewshot/models/resnet_backbone.py fewshot/models/XtarNet_model_base.py fewshot/utils/logger.py fewshot/data/mini_imagenet.py fewshot/utils/checkpoint.py fewshot/utils/debug.py fewshot/utils/experiment_logger.py run_inc.py fewshot/data/refinement_dataset.py fewshot/models/fc_backbone.py fewshot/configs/train_config_pb2.py fewshot/models/XtarNet_model.py fewshot/configs/protonet_config_pb2.py fewshot/data/data_factory.py fewshot/models/basic_backbone.py fewshot/models/backbone.py fewshot/data/batch_iter.py fewshot/data/concurrent_batch_iter.py fewshot/models/nnlib.py fewshot/configs/experiment_config_pb2.py fewshot/models/MetaModules.py run_proto_exp.py fewshot/data/compress_tiered_imagenet.py fewshot/configs/optimizer_config_pb2.py fewshot/models/multi_task_model.py fewshot/configs/cnn_config_pb2.py fewshot/configs/resnet_config_pb2.py run_exp.py fewshot/configs/transfer_config_pb2.py fewshot/models/resnet_base.py fewshot/configs/config_factory.py fewshot/models/metaCNN_resnet.py fewshot/data/episode.py fewshot/models/model_factory.py get_exp_logger evaluate_a _log_line get_saver traineval top1 evaluate_b topk stderr get_datasets get_iter get_restore_saver save_config get_config preprocess_old_and_new restore_model main train get_model final_log calculate_protos calculate_episode_protos dot cosine main euclidean evaluate_b get_iter get_metadata get_config RegisterConfig BatchIterator IBatchIterator decompress main compress BatchConsumer ConcurrentBatchIterator BatchProducer get_data_folder get_concurrent_iterator get_dataset RegisterDataset Episode MiniImageNetDataset MetaDataset RefinementMetaDataset TieredImageNetDataset Backbone BasicBackbone FCBackbone assign_cluster_soft_mask assign_cluster_radii compute_logits compute_logits_radii assign_cluster compute_logits_cosine update_cluster ResnetBackbone Encoder_r Encoder_h_pre Encoder_h Encoder_g_gamma Encoder_g_beta get_model RegisterModel MultiTaskModel mlp compute_euc batch_norm nullspace_gpu nullspace_signalling layer_norm cnn compute_logits_cosine weight_variable ResnetBackbone ResnetBase XtarNetModel XtarNetModelBase update_add_checkpoint build_checkpoint_with_initializer read_checkpoint clear_checkpoint write_checkpoint build_checkpoint debug_identity ExperimentLogger get Logger Saver join time format makedirs BatchIterator topk eval_step_a tqdm set_postfix xrange top1 zeros next topk eval_step_b stderr tqdm mean set_postfix xrange top1 zeros next num_classes_a flush print evaluate_a learn_rate log evaluate_b run format train_config train_step set_postfix tqdm nepisode old_and_new xrange traineval save run learn_rate next train_step_a log flush max_train_steps read ExperimentConfig Merge int64 float32 placeholder resnet_config get_concurrent_iterator get_batch_idx get_size preprocess_old_and_new get_dataset get_iter list format global_variables zip dict filter Saver info initialize format restore latest_checkpoint info join write info join formatname write close _log_line open config get_exp_logger evaluate_a get_saver dataset Session evaluate_b nclasses_b nshot ntest nclasses_a retest get_restore_saver optimizer_config val format results pretrain save_config get_config test tag restore_model eval info ConfigProto join nepisode_final nepisode get_metadata old_and_new train final_log mean h_a xrange append next enumerate run mean h_a xrange append run dot sqrt T sum expand_dims logical_not cosine run calculate_episode_protos h_a euclidean 
concatenate size astype sqrt float float32 dot std num_classes_a similarity calculate_protos makedirs format info format savez tqdm imdecode zeros enumerate decompress compress join data_root data_folder expand_dims sqrt expand_dims square reduce_sum compute_logits softmax reshape shape expand_dims reduce_sum concat shape compute_logits_radii softmax reshape expand_dims log pi compute_logits softmax reshape shape format info format name truncated_normal_initializer constant_initializer warning info zeros_initializer xavier_initializer uniform_unit_scaling_initializer len int assign_sub batch_normalization moments get_variable len expand_dims nullspace_gpu sqrt reduce_sum svd greater_equal transpose reduce_sum cast int32 conj Logger
# XtarNet: Learning to Extract Task-Adaptive Representation for Incremental Few-Shot Learning This repository contains the code for the following ICML 2020 paper: [**XtarNet: Learning to Extract Task-Adaptive Representation for Incremental Few-Shot Learning**](https://arxiv.org/abs/2003.08561) ## Dependencies This code was tested on the following environment: * Ubuntu 16.04 * python 3.6 * cv2 * numpy * pandas
308
EgorLakomkin/KTSpeechCrawler
['speech recognition']
['KT-Speech-Crawler: Automatic Dataset Construction for Speech Recognition from YouTube Videos']
crawler/youtube_helpers.py webdemo/server.py crawler/utils.py crawler/process.py crawler/filters.py OverlappingSubtitlesRemover SubtitleCaptionTextFilter CaptionRegexMatcher SubtitleMerger GoogleASRCheck GoogleRandomSubsetWERFilter CaptionLeaveOnlyAlphaNumCharacters MinNumberSubtitlesFilter CaptionLengthFilter BaseFilter CaptionDurationFilter Pipeline CaptionNormalizer RESULT get_ts_seconds extract_audio_part_segment parse_ts normalize_numbers get_video_file if_contain_bad_symbols filter_too_close_subtitles get_closest_captions parse_subtitle load_all_subtitles if_phrase_is_bad remove_overlapping_subtitles merge_subtitles _get_transcript_google_web_asr leave_alphanum_characters normalize_subtitle getsize get_hash get_all_subtitles striphtml google_speech_test check_sub_overlap _load_annotations int_to_en timedelta_dt render_random select_random_sample iterate_corpus dump_medatadata_corpus annotate find_files remove communicate terminate devnull exists Popen open walkfiles strptime timedelta time replace timedelta_dt text captions append enumerate format replace move print exists deepcopy total_seconds timedelta_dt append range len format print add set check_sub_overlap range len append enumerate get_ts_seconds findall int int_to_en replace normalize_numbers replace sub normalize IGNORECASE upper lower sub set join remove_overlapping_subtitles format merge_subtitles list str print leave_alphanum_characters normalize_subtitle filter load_all_subtitles get_hash range len print format exists append sample print read strip choice join basename copy append enumerate join replace walkfiles tqdm append iterate_corpus print format
# KT-Speech-Crawler: Automatic Dataset Construction for Speech Recognition from YouTube Videos ## Google Colab https://colab.research.google.com/drive/1JVKzB9N2FIcxlib1kXuGlfeIuudkM9Vr ## Installation ``` git clone https://github.com/EgorLakomkin/KTSpeechCrawler pip install -r requirements.txt ``` ## Running crawler ```
309
EkdeepSLubana/flowandprune
['network pruning']
['A Gradient Flow Framework For Analyzing Network Pruning']
train.py imp_estimator.py pruner.py eval.py models.py config.py main.py format_time progress_bar cal_compression_ratio cal_acc cal_flops create_model cal_importance cal_importance_nvidia_fisher cal_importance_grad_preserve cal_importance_mag cal_importance_fisher cal_importance_biased_loss_tracked cal_grad grasp_data cal_importance_l1 model_grads cal_importance_grasp cal_importance_tfo cal_importance_loss_based cal_importance_biased_loss cal_hg cal_grad_fisher cal_importance_bn model_params create_model shrink_metrics test cal_metrics train cal_acc get_optimizer Block MobileNet ResNet ResNet18 VGG Bottleneck ResNet34 ResNet56 BasicBlock ResNet_cifar constrain_ratios cfg_res_zero res_pruner vgg_size vgg_optimizer cal_size cfg_res_zero_cifar ResNet_p res_order_and_ratios mobile_pruner ResNet_cifar_p BasicBlock_p res_pruner_cifar res_order_and_ratios_cifar vgg_pruner res_cifar_optimizer skip_or_prune cfg_res_cifar res_optimizer mobile_order_and_ratios Block cfg_res mobile_optimizer ResPruned_cifar mobile_size res_size_cifar MobileNet_p cfg_vgg MapLayer cfg_mobile ResPruned vgg_order_and_ratios skip_or_prune_cifar res_size VGG_p train create_model get_optimizer test int time join format_time write append range flush len int eval format create_model print rmtree mkdir save getsize exists ResPruned_cifar ResPruned MobileNet print ResNet18 VGG ResNet34 MobileNet_p DataParallel ResNet56 VGG_p append parameters append parameters dict iter item append next range criterion size zero_grad grad eval to net model_params criterion backward zero_grad grasp_data grad tqdm model_grads eval range net model_params len model_params model_params model_params cal_hg model_params cal_hg model_params model_params model_params criterion size zero_grad grad eval to net model_params model_params model_params model_params model_params cal_importance_nvidia_fisher cal_importance_grad_preserve cal_importance_mag cal_importance_fisher cal_importance_l1 cal_importance_grasp cal_importance_tfo cal_importance_loss_based cal_importance_biased_loss cal_importance_bn cal_importance_biased_loss_tracked parameters SGD cal_importance model_params append items enumerate criterion backward progress_bar zero_grad cal_metrics append step max net enumerate len seed format print eval save append astype enumerate unique where update int sort astype argsort append update int sort astype argsort append range update int sort astype argsort append range update int sort astype argsort append range append array append array range isinstance modules append BatchNorm2d array isinstance modules append BatchNorm2d array append range append int range update range append len update range append len update range append len update range append len sort clone DataParallel cfg_vgg to range len sort clone SGD parameters range len int cfg_mobile sort clone MobileNet_p copy DataParallel range int cfg_mobile sort clone SGD copy parameters range update cfg_res int cfg_res_zero ResPruned sort clone copy DataParallel to range len cfg_res int to range len cfg_res int cfg_res_zero sort len clone SGD copy parameters range enumerate update cfg_res_zero_cifar int ResPruned_cifar sort clone copy DataParallel to cfg_res_cifar range len int to cfg_res_cifar range len cfg_res_zero_cifar int sort len clone SGD copy parameters cfg_res_cifar range enumerate str
# A Gradient Flow Framework for Analyzing Network Pruning Codebase for the paper ["A Gradient Flow Framework for Analyzing Network Pruning"](https://openreview.net/forum?id=rumv7QmLUue) \[ICLR, 2021\]. ## Requirements The code requires: * Python 3.6 or higher * Pytorch 1.4 or higher To install other dependencies, the following command can be used (uses pip): ```setup ./requirements.sh ```
310
ElMehdiBouamama/MBTI-Tweetouilles
['word embeddings']
['Controlled Experiments for Word Embeddings']
PythonBasics/Tests.py PythonBasics/MemoryMonitor.py PythonBasics/Content/Tests/TestWordToVec.py PythonBasics/TestingFile.py PythonBasics/Content/Helpers/text_helper.py PythonBasics/Content/Tests/matplotlib.py PythonBasics/Content/Managers/TwitterManager.py PythonBasics/Content/Models/TweetToType.py PythonBasics/Content/Models/TrainingWord2Vec.py PythonBasics/Content/Managers/DataManager.py PythonBasics/Content/Managers/ConfigManager.py PythonBasics/Content/Helpers/ImportingDataIntoFiles.py PythonBasics/Content/Models/TweetToVec.py PythonBasics/Start.py PythonBasics/Content/Models/Word2VecOptimization.py PythonBasics/Content/Models/Word2VecPCA.py PythonBasics/Content/Managers/DatabaseManager.py PythonBasics/Content/Tests/RegexTraining.py main getProjectPath ConfigurationManager GetTotalConfirmedTweetsCount GetUserIdOfUser GetCountOfConfirmedTweetOfUser GetOtherTweetIdsOfUser GetUserIds GetGenderOfUser GetConfirmedTweetIdsOfUser ReadJsonFile GetNumberOfUsers GetMbtiOfUser GetCountArrayOfOtherTweet GetCountArrayOfConfirmedTweet ReadFiles read_classified_files DataManager GetPostFromTwitter BreadthFirstSearch Verificator ReadFiles Tweet2Type TweetToVec ReadFiles NearestTo add getEmbeding sub KNN DisplayData ExtractDataFromURL ReadFiles set_title plot print add_subplot savetxt savefig Fit Tweet2Type figure SaveModel array append GetUserIds GetUserIds urlopen parse tokenize remove Verificator show format subplots title scatter annotate sum enumerate print format range len embedding_lookup constant dot transpose argsort findall Request add_header
# MBTI-Tweetouilles Author : El Mehdi Bouâmama <p align="center"><u><b>Video showing embedding visualisation PCA/T-SNE before training</b></u></p> <p align="center"><a href="https://youtu.be/sKwr3i8fq6g"><img src ="https://img.youtube.com/vi/sKwr3i8fq6g/0.jpg" /></a></p> This project analyzes user tweets to determine users' personality types according to the MBTI typology. It is also built as a set of classes so it can accept any data / categories and learn to classify many kinds of documents, not only tweets. The current model can extend its learned knowledge to new users / documents and to category classifications it has never seen. The program is based on two models: - First one : Doc2Vec model (Neural Network) - Second one : Logistic regression (Neural Network) on document embeddings Work in progress :
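To make the second stage more concrete, here is a minimal sketch of the general idea (a logistic-regression classifier trained on document embeddings). It uses scikit-learn with random placeholder data rather than this repository's TensorFlow code, so all names and shapes below are illustrative assumptions.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

# Hypothetical inputs: one embedding per user document (e.g., from a Doc2Vec
# model) and one of the 16 MBTI types as the label.
doc_embeddings = np.random.randn(200, 128)          # 200 documents, 128-d embeddings
mbti_labels = np.random.randint(0, 16, size=200)    # placeholder MBTI class ids

# Stage 2 of the described pipeline: a logistic-regression classifier
# trained on the document embeddings.
clf = LogisticRegression(max_iter=1000)
clf.fit(doc_embeddings, mbti_labels)

# Classify a new, previously unseen document from its embedding.
new_doc = np.random.randn(1, 128)
print(clf.predict(new_doc))
```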
311
ElementAI/LCFCN
['object counting']
['Where are the Blobs: Counting by Localization with Point Supervision']
src/models/metrics.py scripts/test_on_image.py src/datasets/__init__.py src/models/lcfcn.py src/models/__init__.py src/datasets/transformers.py src/datasets/trancos.py lcfcn/lcfcn.py src/datasets/shanghai.py exp_configs.py lcfcn/lcfcn_loss.py lcfcn/networks.py trainval.py src/models/base_networks/__init__.py scripts/test_speed.py src/utils.py src/models/base_networks/fcn8_resnet.py src/models/base_networks/fcn8_vgg16.py scripts/trainvis_on_image.py setup.py trainval convert LCFCN transform_image save_tmp blobs2points get_blobs get_tgt_list compute_loss get_points_from_mask compute_game get_random_points watersplit conv1x1 conv3x3 FCN8_VGG16 FCN8_ResNet get_upsampling_weight apply label2rgb combine_image_blobs color_map poly2mask read_text RandomSampler t2n load_pkl save_json compute_loss shrink2roi load_json imread loadmat read_xml Shanghai Trancos ComposeJoint ToLong apply_transform get_dataset LCFCN Meter get_model FCN8 FCN8_VGG16 get_upsampling_weight conv3x3 conv1x1 get_base val_on_loader hash_dict DataLoader DataFrame cuda exists torch_load len get_dataset pprint load_pkl save_json load_state_dict SequentialSampler range update train_on_loader join get_state_dict print delete_and_backup_experiment tail RandomSampler torch_save save_pkl makedirs label2rgb mark_boundaries astype array Compose squeeze view get_tgt_list sum view argmin get_blobs numpy unique argmax watersplit arange astype copy black_tophat sum watershed squeeze astype shape label zeros centroid squeeze astype regionprops shape squeeze range denormalize squeeze softmax save_image array zeros where shape blobs2points squeeze where shape unique label zeros zeros abs load int list format combine_image_blobs ComposeJoint print squeeze map transformer load_state_dict imread cuda imsave to_pil_image min max where Tensor numpy isinstance print format range len polygon zeros label2rgb color_map squeeze shape unique zeros max range len zeros bitget array range ComposeJoint Shanghai Trancos LCFCN FCN8_VGG16 FCN8
*ServiceNow completed its acquisition of Element AI on January 8, 2021. All references to Element AI in the materials that are part of this project should refer to ServiceNow.* # LCFCN - ECCV 2018 [(Try in a Colab)](https://colab.research.google.com/drive/10NxrkOWKq_r0g91T84NrAqqlTakrsD7U?usp=sharing) ## Where are the Blobs: Counting by Localization with Point Supervision [[Paper]](https://arxiv.org/abs/1807.09856)[[Video]](https://youtu.be/DHKD8LGvX6c) Make the segmentation model learn to count and localize objects by adding a single line of code. Instead of applying the cross-entropy loss on dense per-pixel labels, apply the lcfcn loss on point-level annotations. ## Usage ``` pip install git+https://github.com/ElementAI/LCFCN ``` ```python
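# Hedged sketch of the "single line of code" idea described above. It assumes
# lcfcn_loss.compute_loss(points=..., probs=...) from lcfcn/lcfcn_loss.py takes
# per-pixel foreground probabilities and a same-sized point-annotation mask;
# the tensors below are random stand-ins for a real network output and labels.
import torch
from lcfcn import lcfcn_loss

logits = torch.randn(1, 1, 64, 64, requires_grad=True)  # stand-in segmentation output
points = torch.zeros(1, 64, 64)
points[0, 10, 20] = 1                                    # one point-level object annotation

# The point-supervised loss replaces the dense per-pixel cross-entropy loss.
loss = lcfcn_loss.compute_loss(points=points, probs=logits.sigmoid())
loss.backward()
```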
312
ElementAI/wise_ils
['image level supervised instance segmentation', 'instance segmentation', 'semantic segmentation']
['Where are the Masks: Instance Segmentation with Image-level Supervision']
train.py ann_utils.py metrics/ap.py test.py datasets/pascal2012.py models/__init__.py datasets/__init__.py metrics/__init__.py models/mrcnn.py utils.py test_on_image.py models/prm.py clamp_boxes_yxyx poly2mask SharpProposals bbox_yxyx_shape2shape compute_bbox_delta get_image_ids mask2annList load_UpperBound ann2mask bbox2mask annList2propList xyxy2xywh targets2annList_bbox target2annList load_annList validate proposals2annList Polygons maskList2annList bbox_yxyx_dict annList2targets annList2scores_mask annList2BestDice load_ann_json batch2annList load_BestObjectness test_best annList2points load_gtAnnDict pointList2mask load_LCFCNPoints pointList2points annList2maskList pointList2propDict assert_gtAnnDict bbox_yxyx_normalize compute_overlaps_yxyx Mask test_baselines pointList2annList load_trainval valBatch mask2pointList bbox2annList proposals2bbox test probs2blobs dice print_results CombineSeeds dataset2annList blobs2annList annList2scores_points targets2annList pointList2UpperBound cosine_similarity maskBatch2ann get_perSizeResults get_random_indices get_perCategoryResults propDict2seedList create_ann naive yxyx2xywh annList2mask mask2ann annList2bbox pred2annList blobs2BestDice bbox_yxyx_denormalize ann2poly annList2best_objectness apply_delta_on_bbox bbox_xywh_dict intersect_bbox compute_iou_yxyx segm2annList pointList2BestObjectness loop_and_resize main main main train_epoch get_image random_proposal load_txt create_dirs extract_fname ann2mask f2l bo_proposal t2n load_pkl save_json BGR_Transform load_json bgrNormalize save_image collate_fn_0_4 colormap Pascal2012 annList2mask get_gt_pointList load_mask AP per_class_iu AP50_segm annList2cocoDict evaluate_annList computeIoU fast_hist AP50_bbox evaluateImg accumulate mIoU compute_AP_dict ensure_image_list MRCNN _load_file PeakStimulation _median_filter PRM PRMLoss peak_stimulation decode iou squeeze astype encode zeros argmax enumerate len decode squeeze set_trace astype shape pointList2propDict encode zeros enumerate decode format print astype encode range len SharpProposals zeros range enumerate len setdiff1d array unique mask2ann toBbox enumerate SegmentationMask add_field clip_to_image reshape convert tensor len size bbox bbox2annList decode Masker byte segm2annList masker expand resize get_field ones bbox2annList ones size bbox2annList get_field bbox size bbox2annList get_field zeros bbox str encode decode replace extract_fname tolist astype save_json load_json exists array encode dice astype decode decode astype shape encode range decode shape zeros range len int decode replace squeeze asfortranarray unique item encode max range get_perSizeResults dataset2annList load_gtAnnDict compare_annList bbox_yxyx_denormalize yxyx2xywh shape t2n range len bbox_yxyx_denormalize yxyx2xywh t2n float xyxy2xywh range len bbox_yxyx_denormalize yxyx2xywh t2n xyxy2xywh range len bbox_yxyx_denormalize map t2n cpu zeros range clamp stack bbox_yxyx_dict t2n log zeros range compute_iou_yxyx minimum maximum chunk chunk FloatTensor list clamp_boxes_yxyx map FloatTensor float bbox_yxyx_normalize FloatTensor min flatten zeros enumerate len append find_contours flip astype decode frPoly format load_trainval load_LCFCNPoints pointList2UpperBound print DataLoader save_json pointList2BestObjectness enumerate len evaluate COCOeval summarize COCO accumulate loadRes array list evaluate COCOeval COCO accumulate loadRes catIds array values add set get_perSizeResults print save_pkl load_UpperBound load_pkl load_BestObjectness DataFrame load_gtAnnDict test_baselines 
load_best_annList load_trainval get_perSizeResults print load_history DataFrame load_gtAnnDict randint squeeze where get_random_indices randint len range len format get_module_classes path get_functions split decode categories format annList_path load_trainval replace print load_json t2n asfortranarray save_json unique item encode range len sorted replace annList_path load_trainval COCOeval evaluate summarize min choice COCO accumulate getImgIds loadRes array len load format load_trainval print save_pkl DataLoader load_pkl eval load_state_dict cuda exists enumerate len decode astype shape unique encode range decode squeeze astype dice shape unique encode zeros print len DataLoader predict enumerate zeros int zeros sum astype range shape t2n unique unravel_index label argmax max zeros size squeeze where t2n range squeeze shape t2n dice pointList2propDict mask2ann max range enumerate encode shape astype decode int decode astype area shape encode toBbox encode shape astype decode sum ravel norm LongTensor shape zeros long range len zeros isinstance decode zeros range len zeros range len zeros range len load update Pascal2012 print AP50_segm close tqdm enumerate DataLoader eval set_description load_state_dict metric_class add_batch cuda __name__ get_score_dict len collate_fn save_image visualize tail train_epoch save range update time train_step close tqdm isnan set_description train enumerate len Variable numpy isinstance create_dirs dirname makedirs fromarray astype save axis CHAIN_APPROX_NONE clip colormap ones imshow shape Axes range findContours close copy add_axes FigureCanvas RETR_CCOMP get_dpi int set_size_inches Polygon text reshape draw add_patch figure Rectangle get_size_inches len reshape astype float32 isinstance Sequence type to_image_list zip Tensor Mapping iou choice mask2ann zeros array enumerate len iou mask2ann zeros array enumerate len Normalize enumerate fromarray uint8 astype open ann2mask isinstance enumerate update deepcopy list product computeIoU compute_AP_dict add set accumulate linspace round enumerate argsort iou mean range reshape min logical_and argsort logical_or repeat zeros array enumerate len count_nonzero spacing concatenate tolist logical_and map logical_not set argsort astype searchsorted zip zeros array range enumerate len get cache_url import_file PATHS_CATALOG endswith WEIGHT startswith Tensor to_image_list isinstance median size view model float shape multilabel_soft_margin_loss train cuda
*ServiceNow completed its acquisition of Element AI on January 8, 2021. All references to Element AI in the materials that are part of this project should refer to ServiceNow.* # WISE - BMVC 2019 ## Where are the Masks: Instance Segmentation with Image-level Supervision [[Paper]](https://arxiv.org/abs/1907.01430) ## Requirements - Pytorch version 0.4 or higher. ## Description Given a test image, the trained model outputs the instance masks in the image: ![predicted image](results/pred_image.png) ## Checkpoint for the weakly supervised mask rcnn
313
EliasKassapis/CARMSS
['medical image segmentation', 'instance segmentation', 'semantic segmentation']
['Calibrated Adversarial Refinement for Stochastic Semantic Segmentation', 'A Hierarchical Probabilistic U-Net for Modeling Multi-Scale Ambiguities']
models/losses/TotalDiscriminatorLoss.py models/generators/calibration_nets/SegNetCalNet.py data/datasets/cityscapes/preprocessing.py models/generators/calibration_nets/GeneralCalNet.py utils/evaluation_utils.py models/generators/UNetVAEGenerator.py models/general/statistic.py models/losses/PixelLoss.py models/losses/TotalGeneratorLoss.py models/losses/GeneralLoss.py models/generators/calibration_nets/EmptyCalNet.py models/generators/GeneralVAE.py data/__init__.py models/discriminators/EmptyDiscriminator.py utils/architecture_utils.py utils/constants.py data/datasets/cityscapes/preprocessing_config.py models/losses/CalLoss.py models/losses/ComplexityLoss.py models/losses/NonSaturatingGLoss.py data/Cityscapes19.py models/general/data_management.py models/losses/DefaultDLoss.py data/Cityscapes35.py models/generators/EmptyGenerator.py utils/pretrained_utils.py utils/personal_constants.py testing/test.py models/discriminators/PixelDiscriminator.py models/losses/CalNetLoss.py models/generators/calibration_nets/DeepLabV3CalNet.py models/GeneralModel.py models/general/trainer.py main.py utils/data_utils.py utils/general_utils.py data/transformations.py utils/training_helpers.py models/discriminators/GeneralDiscriminator.py models/generators/GeneralGenerator.py models/generators/UNetGenerator.py data/LIDC.py models/generators/calibration_nets/ToyCalNet.py utils/model_utils.py data/datasets/cityscapes/move_bb_preds.py training/train.py main parse load_data Cityscapes19 Cityscapes35 LIDC _process_all RandomScaleCrop Crop _2d_to_1hot RescaleValues _apply_to_all Resize ChangeChannels RandomHorizontalFlip RCrop RandomRescale ClassFlip all_file_paths get_test_set move_bb_preds resample preprocess recursive_mkdir GeneralModel EmptyDiscriminator GeneralDiscriminator PixelDiscriminator DataManager Statistic Trainer EmptyGenerator GeneralGenerator GeneralVAE UNetGenerator UNetVAEGenerator DeepLabV3CalNet EmptyCalNet GeneralCalNet SegNetCalNet _DecoderBlock ToyCalNet CalLoss CalNetLoss ComplexityLoss DefaultDLoss GeneralLoss NonSaturatingGLoss PixelLoss TotalDiscriminatorLoss TotalGeneratorLoss validation test_forward_pass evaluation visualize_results validation_plots TrainingProcess initialize_weights init_weights_unet Noise_injector weights_init truncated_normal_ init_weights_orthogonal_normal UnFlatten Flatten _1hot_2_2d BGR2RGB_pytorch move_color_channel BGR2RGB_numpy denormalize_picture get_transforms_list de_torch _recolour_label save_results get_all_modes calc_energy_distances get_foreground_IoU compute_iou plot_sample_preds get_energy_distance_components count_pixel_modes plot_calibration_figure compute_stats get_confusion_matrix get_mode_statistics plot_comparison_figure compute_pred_class_probs get_IoU load_results plot_calibration save_numpy_arrays get_cs_ignore_mask nanmean compute_gt_class_probs compute_ged load_numpy_arrays get_loss_weights assert_non_empty assert_type setup_directories find_model _read_all_classnames load_models_and_state save_models inceptionv3 deeplabv3_segmentation resnet50_segmentation VGG_19 resnet50 googlenet resnet101_segmentation instance_checker comp_along_dim scheduler calibration_net_forward_pass get_entropy generator_forward_pass discriminator_forward_pass renormalize save_example_images l2_regularisation get_ce refactor_batch tile unpack_batch compute_accuracy torch_comp_along_dim Cityscapes19 class_flip print LIDC Compose DataLoader get_transforms_list test_model_suffix batch_size MultiStepLR pretrained find_model test_models_to_load dataset batch_size_plotting 
models_to_load get_loss_weights Adam pretrained_model_suffix to pretrained_model_date load_models_and_state init print test_model_date TrainingProcess assert_type load_data evaluation train add_argument ArgumentParser fn items sample_setup str move stem rmtree iterdir str stem iterdir move int size float join mkdir join uint8 format list print raw_data_dir resample transpose astype float32 tqdm save out_dir listdir keys recursive_mkdir open save_results get_all_modes where argmax log count_pixel_modes expand shape permute unpack_batch append to mean eval stack item float compute_pred_class_probs enumerate instance_checker print clone save_numpy_arrays get_cs_ignore_mask nanmean tqdm compute_ged visualize_results load_numpy_arrays axis plot_sample_preds plot_manifold show imshow permute gca unpack_batch to close eval plot_comparison_figure float enumerate instance_checker test_forward_pass isinstance clone tqdm figure evaluation validation_plots instance_checker test_forward_pass save_models to print clone get_cs_ignore_mask mean compute_stats plot_calibration_figure plot_comparison_figure save_example_images permute plot_sample_preds unpack_batch float log calibration_net_forward_pass get_entropy where eval sample fill_ isinstance modules zero_ BatchNorm2d weight kaiming_normal squeeze add_ copy_ shape normal_ bias kaiming_normal_ weight truncated_normal_ bias weight orthogonal_ truncated_normal_ data fill_ xavier_uniform_ normal_ __name__ moveaxis empty_like empty_like cpu eval take squeeze argmax OneHotCategorical eval resize isnan clone numpy int shape tensor to sum zeros enumerate len to arange nan tensor to range zeros enumerate tensor arange get_IoU get_foreground_IoU get_confusion_matrix to range len copy nanmean mean get_mode_statistics sum get_foreground_IoU get_IoU get_confusion_matrix n_generator_samples_test calc_energy_distances get_energy_distance_components nanmean mean stack repeat permute item append to keys range len instance_checker get_all_modes compute_iou log nanmean mean stack eval compute_gt_class_probs append tensor compute_pred_class_probs compute_ged range len get_mode_statistics range zeros len argmax where zeros sum range enumerate len append eval enumerate ones where mean shape eval append tensor enumerate str savez test_model_date stamp mkdir Path numpy load str test_model_date Path to stamp str savez test_model_date plot_calibration stamp Path mkdir float numpy array save_image load str list test_model_date Path to stamp _1hot_2_2d xticks yticks subplot print_to_buffer squeeze move_color_channel imshow title permute unpack_batch to range concatenate FigureCanvasAgg close Normalize float tostring_rgb draw clone figure de_torch _recolour_label print_to_buffer subplots arange LongTensor set_title set_xticklabels FigureCanvasAgg draw tostring_rgb close bar set_ylabel set_xticks legend len _1hot_2_2d xticks yticks subplot print_to_buffer squeeze move_color_channel imshow title range FigureCanvasAgg close mean suptitle tostring_rgb draw figure de_torch _recolour_label list print_to_buffer subplots arange set_title set_xticklabels FigureCanvasAgg draw tostring_rgb close bar set_ylabel set_xticks legend numpy len create_dir stamp print listdir import_module getattr isdir save_python_obj to load_state_dict load_python_obj to deeplabv3_resnet101 fcn_resnet50 to to fcn_resnet101 to to to vgg19 to inception_v3 parameters norm save_image mean type DoubleTensor detach to to renormalize clamp log clip log size index_select repeat to dim ones calibration_net shape softmax 
zeros to cat sample cat torch_comp_along_dim shape compute_accuracy discriminator cat
# Calibrated Adversarial Refinement for Stochastic Semantic Segmentation [![Python 3.7](https://img.shields.io/badge/Python-3.7-3776AB.svg?logo=python)](https://www.python.org/) [![PyTorch 1.4](https://img.shields.io/badge/PyTorch-1.4-EE4C2C.svg?logo=pytorch)](https://pytorch.org/docs/1.4.0/) [![Apache](https://img.shields.io/badge/License-Apache-3DA639.svg?logo=open-source-initiative)](LICENSE) Official PyTorch implementation of the Calibrated Adversarial Refinement models described in the paper <a href="https://openaccess.thecvf.com/content/ICCV2021/html/Kassapis_Calibrated_Adversarial_Refinement_for_Stochastic_Semantic_Segmentation_ICCV_2021_paper.html"> Calibrated Adversarial Refinement for Stochastic Semantic Segmentation</a> accepted at ICCV2021. An overview of the model architecture is depicted below. We show ambiguous boundary segmentation as a use case, where blue and red pixels in the input image are separable by different vertical boundaries, resulting in multiple valid labels. <p align="center"> <img src="images/model_overview.jpg" height="250" alt="image"/> </p> Results on the stochastic version of the Cityscapes dataset are shown below. The leftmost column illustrates input images overlaid with ground truth labels, the middle section shows 8 randomly sampled predictions from the refinement network, and the final column shows aleatoric uncertainty maps extracted from the calibration network. <p align="center"> <img src="images/overlaid_labels.jpg" height="175" alt="image"/> <img src="images/samples.jpg" height="175" alt="image"/> <img src="images/aleatoric.jpg" height="175" alt="image"/>
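The snippet below is a generic illustration (not this repository's code) of the two quantities visualized above: several sampled segmentation hypotheses and a per-pixel uncertainty map, here taken as the entropy of the averaged predictive distribution.

```python
import torch

num_samples, num_classes, H, W = 8, 19, 64, 128
# Stand-in for 8 sampled per-pixel class logits from a refinement network.
sampled_logits = torch.randn(num_samples, num_classes, H, W)

# Each draw yields one plausible label map (one of several valid segmentations).
sampled_labels = sampled_logits.argmax(dim=1)              # (8, H, W)

# Average the per-sample class distributions and use their entropy as an
# uncertainty map over the image.
mean_probs = sampled_logits.softmax(dim=1).mean(dim=0)     # (19, H, W)
entropy = -(mean_probs * mean_probs.clamp_min(1e-8).log()).sum(dim=0)  # (H, W)
print(sampled_labels.shape, entropy.shape)
```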
314
ElliottYan/DS_Temporal
['relation extraction']
['Relation Extraction with Temporal Reasoning Based on Memory Augmented Distant Supervision']
transformer/modules/conv_multi_step_attention.py transformer/utils/statistics.py transformer/utils/cnn_factory.py transformer/modules/average_attn.py transformer/utils/parse.py transformer/modules/sparse_activations.py cnn.py transformer/utils/misc.py transformer/decoders/decoder.py cnn_ave.py transformer/utils/loss.py transformer/encoders/transformer_encoder.py transformer/modules/structured_attention.py transformer/utils/logging.py transformer/decoders/transformer_decoder.py transformer/utils/report_manager.py transformer/utils/__init__.py trainer.py process.py transformer/modules/__init__.py pcnn_att.py utils.py cnn_rel_mem.py transformer/utils/rnn_factory.py transformer/encoders/encoder.py transformer/modules/gate.py Dataset.py word_rel_mem.py transformer/modules/global_attention.py transformer/modules/copy_generator.py mem_cnn.py cnn_word_mem.py transformer/modules/sparse_losses.py test.py transformer/modules/multi_headed_attn.py temp_mem.py transformer/modules/position_ffn.py miml_conv.py pcnn_one.py transformer/modules/weight_norm.py transformer/modules/embeddings.py transformer/modules/util_class.py structures.py cnn_one.py cnn_att.py mem_pcnn.py pcnn.py pcnn_word_mem.py transformer/utils/optimizers.py tm_att.py transformer/utils/distributed.py transformer/transformer.py CNN CNN_ATT CONV_AVE CNN_AVE CNN_ONE MIML_CONV_ATT_REL_MEM CNN_WORD_MEM NYT_10 WIKI_TIME position_encoding_init GroupBatchnorm2d AttrProxy MEM_CNN_RIEDEL MEM_CNN_WIKI MEM_PCNN_RIEDEL GroupBatchnorm2d position_encoding_init AttrProxy MIML_CONV_ATT MIML_CONV MIML_CONV_WORD_MEM_ATT PCNN PCNN_ATT PCNN_ONE PCNN_WORD_MEM create_mini_dataset test_dataset construct_dataset create_labels load_wiki_time set_default fn_timer filter_by_dataset read_in_en2id unit_test read_in_vec tokenization main save_wiki_time Normalization read_in_en_vecs clean check_relation separate_datasets time_signature Stack Mention extract TEMP_MEM test TM_ATT FocalLoss parse_config Trainer one_hot precision_recall_compute_multi multi_hot_label compute_average_f1 compute_max_f1 logging_existing_tensor Rel_MEM Word_Rel_MEM Word_MEM split_query_and_memory TRANSFORMER_ENCODER create_embedding_matrix DecoderBase TransformerDecoder TransformerDecoderLayer EncoderBase TransformerEncoder TransformerEncoderLayer AverageAttention ConvMultiStepAttention seq_linear CopyGenerator CopyGeneratorLossCompute collapse_copy_scores CopyGeneratorLoss PositionalEncoding Embeddings SourceContextGate ContextGate context_gate_factory TargetContextGate BothContextGate GlobalAttention MultiHeadedAttention PositionwiseFeedForward SparsemaxFunction LogSparsemax _make_ix_like _threshold_and_support Sparsemax SparsemaxLossFunction SparsemaxLoss MatrixTree Cast Elementwise WeightNormLinear get_var_maybe_avg WeightNormConv2d WeightNormConvTranspose2d get_vars_maybe_avg GatedConv StackedCNN shape_transform all_gather_list all_reduce_and_rescale_tensors is_master multi_init init_logger build_loss_compute shards LossComputeBase filter_shard_state LabelSmoothingLoss NMTLossCompute use_gpu sequence_mask split_corpus set_random_seed generate_relative_positions_matrix fn_args aeq tile relative_matmul Optimizer build_torch_optimizer MultipleOptimizer rsqrt_decay exponential_decay noam_decay make_learning_rate_decay_fn AdaFactor ArgumentParser ReportMgr ReportMgrBase build_report_manager rnn_factory Statistics array cos sin print format print add dict set split len pop Stack relation time_signature push append join iterrows defaultdict list items relation insert sort concat time_signature 
Normalization print append read_csv split join print sort time_signature append check_relation Mention tuple str list defaultdict add append range format product set tag_name time_signature keys join items time save_wiki_time print sort sent dict split check_relation len print format print defaultdict format split strip sub load punctuation text escape lower sub nlp append add_special_case compile join list defaultdict print set_trace shuffle append keys split items list defaultdict product print append split iterrows Normalization add set append read_csv split read_in_en2id list items format print reshape filter_by_dataset save zeros T defaultdict randn len set_trace append normalize array enumerate split load int split tuple set_trace split test_model Trainer add_argument ArgumentParser cuda cuda range len format print reshape argsort append sum array range max range len range len print get_objects append reshape normal FloatTensor concatenate linear size view data size type_as index_select index_fill_ append index_add_ range len size dim arange cumsum sort _make_ix_like unsqueeze gather getattr append get_var_maybe_avg world_size get_rank format init_process_group all_reduce_buffer element_size numel all_reduce zero_ div_ append list bytes tolist dumps get_world_size _out_buffers ByteTensor loads all_gather item append _in_buffer cuda range len setFormatter getLogger addHandler StreamHandler Formatter setLevel INFO FileHandler NLLLoss vocab SparsemaxLoss isinstance CopyGeneratorLoss copy_attn_force copy_loss_by_seqlength NMTLossCompute CopyGeneratorLossCompute copy_attn LabelSmoothingLoss device label_smoothing to len items requires_grad list isinstance clone append Tensor split items list backward filter_shard_state extend dict zip split next numel list view size contiguous range len seed manual_seed clamp transpose unsqueeze arange reshape transpose permute matmul Adagrad optimizers loss_scale Adadelta MultipleOptimizer Adam SGD named_parameters fp16_utils FusedAdam startswith append FP16_Optimizer AdaFactor SummaryWriter tensorboard report_every ReportMgr tensorboard_log_dir SRU
## Intro The repository for the NAACL 2019 paper "Relation Extraction with Temporal Reasoning Based on Memory Augmented Distant Supervision". We use torch == 0.4 for all the experiments. Note that TempMEM used to be called MemCNN; most of the experiments are run with mem_cnn and mem_pcnn. Usage: examples for running experiments can be found in scripts. In my experience, better results come from the settings in the "wiki_temp_mem_pe_epoch_70_lr_5e-3.sh" script. You can explore other choices. If you want to use miml and other bash scripts that use the NYT-10 dataset, please unzip the nyt.zip file in origin_data. Enjoy :) I am still updating the code; if you find any problem, please raise an issue. Thanks! ## Citation If you use the code base, please cite the following paper. ``` @inproceedings{
315
ElternalEnVy/tensorflow_rbm
['density estimation']
['On Compression of Unsupervised Neural Nets by Pruning Weak Connections']
tfrbm/base_rbm.py mnist_classification.py caltech0_trprtr.py mnist_rbm.py ocr_rbm.py caltech0_rbm.py tfrbm/plot_utils.py tfrbm/utils.py caltech_trprtr.py caltech_rbm.py tfrbm/dataset.py tfrbm/rbm.py tfrbm/rbm_ais.py mnist_trprtr.py ocr_trprtr.py ocr_letters_classification.py pruning_experiment pruning_woretrain pruning_iter pruning_wretrainreinit pruning_single_songhan pruning_wretrain pruning_iter_probability pruning_progressive_prob pruning_experiment pruning_woretrain pruning_iter pruning_wretrainreinit pruning_single_songhan pruning_wretrain pruning_iter_probability pruning_progressive_prob unsupervised_pretrain supervised_learn pruning_iter_probability indices_to_one_hot pruning_experiment pruning_woretrain pruning_iter pruning_wretrainreinit pruning_single_songhan pruning_wretrain pruning_iter_probability train pruning_progressive_prob unsupervised_pretrain supervised_learn pruning_iter_probability indices_to_one_hot pruning_experiment pruning_woretrain pruning_iter pruning_wretrainreinit pruning_single_songhan pruning_wretrain pruning_iter_probability pruning_progressive_prob BaseRBM load_mnist load_OCR_letters load_NORB load_cifar10 divide_OCR_letters im_gif im_reshape im_plot plot_confusion_matrix tick_params BernoulliRBM GaussianRBM avg_log_p AIS base_rate eval_logp_custom AIS_custom eval_logp_dbn eval_logp cal_true_logZ np_sample_bernoulli batch_iter mnist_data tf_count assert_len assert_shape logit_mean make_list_from sample_gaussian sample_bernoulli print pruning_weight reset_mask _load_weights reset_mask _load_weights print pruning_weight fit reset_mask _load_weights print reinit_weight pruning_weight fit norm reset_mask _load_weights print fit len placeholder precision _w pruning_weight range run print pruning_weight fit norm print fit reduce_mean _w pruning_weight range run print pruning_weight range fit _sess _hb print close precision pruning_iter_probability _w eval_logp _vb fit reduce_mean reshape Graph exponential_decay argmax random_normal Session run softmax_cross_entropy_with_logits_v2 placeholder add matmul assign_add GradientDescentOptimizer apply_gradients cast global_variables_initializer group softmax compute_gradients equal enumerate Variable float32 sigmoid reduce_mean int32 zeros print sample fit multiply assign _w _mask _load_weights join join zeros len join read T itemsize permutation fromstring repeat zeros range open join list int asarray reader astype append open seed join list asarray str reader ord print write shuffle append array open subplots setdefault reshape axis subplots_adjust imshow range asarray reshape copy zeros range save setdefault FuncAnimation variables_initializer Variable reshape placeholder reduce_sum log shape assign_add zeros range run softplus reduce_max assign linspace max log run exp matmul placeholder reduce_sum shape assign_add cast append range variables_initializer sample_bernoulli np_sample_bernoulli constant Variable reshape sigmoid repeat zeros len softplus reduce_max assign linspace max log run exp matmul placeholder reduce_sum shape assign_add cast range variables_initializer sample_bernoulli np_sample_bernoulli constant Variable reshape sigmoid repeat zeros len variables_initializer softplus Variable placeholder reduce_sum matmul shape assign_add zeros range run variables_initializer max exp softplus Variable reshape reduce_max astype placeholder reduce_sum matmul shape log assign_add zeros array range run AIS cal_true_logZ base_rate avg_log_p cal_true_logZ AIS_custom base_rate avg_log_p mean reshape clip log shape 
getattr len int32 cast equal reduce_sum int array range tqdm print load_mnist astype zeros loadmat range
# tensorflow_rbm ### Dependencies ### Description This is a TensorFlow-based implementation of restricted Boltzmann machines. <br> It also contains the code for pruning in RBMs, Zhiwen Zuo et al. ([https://arxiv.org/abs/1901.07066]) <br> ### Dataset The MNIST, OCR letters, NORB and CalTech 101 Silhouettes datasets can be downloaded by running the shell scripts in the data folder.
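The pruning idea from the referenced paper can be illustrated with a short, generic NumPy sketch (not this repository's TensorFlow API): drop the weakest connections of the RBM weight matrix by magnitude and keep a binary mask so the pruned weights stay at zero during any later retraining.

```python
import numpy as np

# Generic magnitude-pruning sketch for an RBM weight matrix.
rng = np.random.default_rng(0)
W = rng.normal(scale=0.01, size=(784, 500))   # visible x hidden weights, e.g. an MNIST RBM

prune_fraction = 0.9                          # drop the 90% weakest connections
threshold = np.quantile(np.abs(W), prune_fraction)
mask = (np.abs(W) >= threshold).astype(W.dtype)

# Apply the mask; reapplying it after every gradient update keeps pruned
# connections at zero while the remaining weights are retrained.
W_pruned = W * mask
print("remaining connections:", int(mask.sum()), "of", W.size)
```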
316
Embedding/Chinese-Word-Vectors
['word embeddings']
['Analogical Reasoning on Chinese Morphological and Semantic Relations']
evaluation/ana_eval_dense.py evaluation/ana_eval_sparse.py normalize read_vectors read_analogy main guess load_matrix load_vocabulary read_analogy main normalize guess enumerate sqrt sum reciprocal ArgumentParser read_analogy round vectors str list read_vectors analogy topn normalize parse_args float keys guess enumerate T print add_argument dot zeros reciprocal setdiag dok_matrix copy dot todense load_matrix array
# Chinese Word Vectors 中文词向量 [中文](https://github.com/Embedding/Chinese-Word-Vectors/blob/master/README_zh.md) This project provides 100+ Chinese Word Vectors (embeddings) trained with different **representations** (dense and sparse), **context features** (word, ngram, character, and more), and **corpora**. One can easily obtain pre-trained vectors with different properties and use them for downstream tasks. Moreover, we provide a Chinese analogical reasoning dataset **CA8** and an evaluation toolkit for users to evaluate the quality of their word vectors. ## Reference Please cite the paper, if using these embeddings and CA8 dataset. Shen Li, Zhe Zhao, Renfen Hu, Wensi Li, Tao Liu, Xiaoyong Du, <a href="http://aclweb.org/anthology/P18-2023"><em>Analogical Reasoning on Chinese Morphological and Semantic Relations</em></a>, ACL 2018. ``` @InProceedings{P18-2023, author = "Li, Shen
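A minimal way to try the released dense vectors, assuming they are distributed in the standard word2vec text format (a "vocab_size dim" header followed by one word and its vector per line); the file name below is a placeholder for whichever vector file you download.

```python
from gensim.models import KeyedVectors

# Load a downloaded dense vector file (placeholder name, text word2vec format assumed).
wv = KeyedVectors.load_word2vec_format("sgns.weibo.word.txt", binary=False)

# CA8-style analogical reasoning via vector offsets: 男人 : 国王 :: 女人 : ?
print(wv.most_similar(positive=["国王", "女人"], negative=["男人"], topn=5))
```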
317
EmoryMLIP/OT-Flow
['density estimation']
['OT-Flow: Fast and Accurate Continuous Normalizing Flows via Optimal Transport']
trainLargeOTflow.py trainMnistOTflow.py trainToyOTflow.py interpMnist.py evaluateLargeOTflow.py datasets/gas.py src/Autoencoder.py datasets/miniboone.py datasets/power.py lib/toy_data.py src/Phi.py datasets/bsds300.py datasets/mnist.py test/gradTestOTFlowProblem.py datasets/__init__.py src/PhiHC.py lib/dataloader.py src/OTFlowProblem.py test/testPhiGradx.py src/plotTraceComparison.py evaluateToyOTflow.py datasets/hepmass.py lib/transform.py lib/utils.py test/testPhiOpt.py src/mmd.py config.py src/plotter.py compareTrace.py test/gradTestTrHess.py compareTrace getconfig ConfigOT batch_iter load_data compute_loss normpdf compute_loss load_data batch_iter update_lr compute_loss compute_loss compute_loss BSDS300 GAS get_correlation_numbers load_data load_data_and_clean load_data_and_clean_and_split load_data_no_discrete_normalised_as_array HEPMASS load_data_no_discrete load_data_no_discrete_normalised load_data load_data_normalised load_data MINIBOONE getLoader POWER load_data_split_with_noise load_data load_data_normalised add_noise dataloader inf_train_gen logit logit_back ZeroPadding Crop Transpose ToTensor Resize HorizontalFlip AddUniformNoise count_parameters RunningAverageMeter AverageMeter get_logger makedirs Autoencoder trainAE mmd pdist MMDStatistic integrate odefun C stepRK4 stepRK1 OTFlowProblem vec Phi antiderivTanh ResNN derivTanh PhiHC antiderivTanh ResNN derivTanh plotImageGen plot4mnist plot4 plotAutoEnc3D plotAutoEnc plotTraceCompare bootstrap elapsed_time record to sum Event range format synchronize sqrt manual_seed float Phi zeros enumerate norm print trHess Tensor mm len arange randperm cuda is_cuda split OTFlowProblem exp ones pi sqrt zeros sum prod param_groups lr_drop lr read_pickle drop corr sum get_correlation_numbers mean any load_data std drop int values read_csv load_data drop mean std load_data_no_discrete int T Counter load_data_no_discrete_normalised append load int mean vstack load_data std MNIST int random_split Compose exit DataLoader Normalize item int RandomState rand hstack shuffle delete load_data zeros load_data_split_with_noise uniform_ MNIST int random_split print exit DataLoader item arange randn rand cos pi floor vstack linspace exp sin append range normal RandomState astype sqrt stack util_shuffle T reshape repeat randint array sigmoid getLogger addHandler StreamHandler info DEBUG setLevel INFO FileHandler zero_grad view Adam MSELoss load_state_dict range state_dict format d param_groups size eval float net plotAutoEnc join cvt criterion backward print makedirs parameters zeros train step len sum size mm expand t float abs norm float32 to numpy MMDStatistic range mean pad C stepRK4 stepRK1 range odefun pad stepRK4 stepRK1 range zeros abs trHess shape pad unsqueeze pow sum subplots integrate hist2d set_aspect set_title colorbar savefig dirname range d format plot close unique norm set_size_inches suptitle randint numpy makedirs set_aspect int set_size_inches subplots suptitle close subplots_adjust sqrt imshow set_visible savefig dirname numpy range makedirs set_aspect set_size_inches subplots suptitle close subplots_adjust imshow set_visible savefig dirname numpy range makedirs set_aspect int set_size_inches subplots suptitle close subplots_adjust sqrt imshow set_visible savefig dirname numpy range makedirs set_aspect set_size_inches subplots suptitle close colorbar imshow set_visible savefig dirname numpy range makedirs percentile list print resample mean append array range GridSpecFromSubplotSpec add_subplot GridSpec clf tick_params max set_title 
FloatTensor ones semilogy set_xlabel Subplot savefig dirname legend bootstrap plot mean set_size_inches print set_yticks min makedirs subplots_adjust set_xticks set_ylabel figure fill_between numpy set_ylim len
# OT-Flow Pytorch implementation of our continuous normalizing flows regularized with optimal transport. ## Associated Publication OT-Flow: Fast and Accurate Continuous Normalizing Flows via Optimal Transport Paper: https://ojs.aaai.org/index.php/AAAI/article/view/17113 Supplemental: https://arxiv.org/abs/2006.00104 Please cite as @inproceedings{onken2021otflow, title={{OT-Flow}: Fast and Accurate Continuous Normalizing Flows via Optimal Transport},
318
EnchanterXiao/video-style-transfer
['style transfer']
['Arbitrary Style Transfer with Style-Attentional Networks']
model/Net.py model/Decoder.py image_transfer.py viedo_transfer.py model/VGG.py video_train.py dataset/dataset.py model/Transform.py dataset/video_dataset.py model/SANet.py image_train.py adjust_learning_rate test_transform adjust_learning_rate test_transform FlatFolderDataset train_transform train_transform2 InfiniteSamplerWrapper InfiniteSampler Video_dataset Decoder calc_mean_std _calc_feat_flatten_mean_std mean_variance_norm Net calc_mean_std SANet mean_variance_norm Transform VGG param_groups lr_decay lr append Compose ToTensor seed permutation var size view calc_mean_std size expand mean std view
# video-style-transfer This is a PyTorch implementation of video style transfer based on "Arbitrary Style Transfer with Style-Attentional Networks". Official paper: https://arxiv.org/abs/1812.02342v5. Source code: https://github.com/GlebBrykin/SANET ## Dataset: COCO WikiArt Video sequences (60 videos, from https://www.videvo.net/) ## Modify: Added a temporal loss and a spatial smoothing loss for fine-tuning.
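A generic sketch (not this repository's exact code) of the temporal-loss idea: warp the previous stylized frame with optical flow and penalize its difference to the current stylized frame. The flow below is a zero stand-in; in practice it would come from an optical-flow estimator.

```python
import torch
import torch.nn.functional as F

def warp(frame, flow):
    # frame: (B, C, H, W); flow: (B, 2, H, W) with flow[:, 0] = dx, flow[:, 1] = dy in pixels
    B, _, H, W = frame.shape
    xs = torch.arange(W, dtype=frame.dtype).view(1, 1, W).expand(B, H, W)
    ys = torch.arange(H, dtype=frame.dtype).view(1, H, 1).expand(B, H, W)
    # absolute sampling positions, normalized to [-1, 1] for grid_sample
    gx = 2.0 * (xs + flow[:, 0]) / (W - 1) - 1.0
    gy = 2.0 * (ys + flow[:, 1]) / (H - 1) - 1.0
    grid = torch.stack((gx, gy), dim=-1)        # (B, H, W, 2)
    return F.grid_sample(frame, grid, align_corners=True)

stylized_prev = torch.rand(1, 3, 64, 64)
stylized_curr = torch.rand(1, 3, 64, 64)
flow = torch.zeros(1, 2, 64, 64)                # stand-in optical flow

temporal_loss = F.mse_loss(stylized_curr, warp(stylized_prev, flow))
print(temporal_loss.item())
```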
319
EndlessSora/DeeperForensics-1.0
['face swapping']
['DeeperForensics-1.0: A Large-Scale Dataset for Real-World Face Forgery Detection']
perturbation/distortions.py perturbation/add_distortion_to_video.py perturbation/check_video.py distortion_vid write_to_meta_file get_distortion_parameter apply_distortion_log main parse_args get_distortion_function main parse_args get_vid_info block_wise color_contrast bgr2ycbcr gaussian_noise_color video_compression color_saturation jpeg_compression gaussian_blur ycbcr2bgr add_argument ArgumentParser dict dict print VideoCapture CAP_PROP_FRAME_HEIGHT VideoWriter CAP_PROP_FPS CAP_PROP_FRAME_COUNT VideoWriter_fourcc exists release CAP_PROP_FOURCC append get_distortion_function get get_distortion_parameter CAP_PROP_FRAME_WIDTH dist_function int read remove print write system tqdm apply_distortion_log randint makedirs join list items makedirs close write dict splitlines append exists open meta_path distortion_vid write_to_meta_file vid_out level via_xvid parse_args type vid_in get CAP_PROP_FRAME_HEIGHT CAP_PROP_FOURCC CAP_PROP_FPS CAP_PROP_FRAME_COUNT CAP_PROP_FRAME_WIDTH get_vid_info VideoCapture print release astype float32 cvtColor COLOR_BGR2YCR_CB COLOR_YCR_CB2BGR astype float32 cvtColor bgr2ycbcr uint8 astype uint8 astype float32 randint min astype range uint8 randn bgr2ycbcr astype shape sqrt ycbcr2bgr GaussianBlur shape resize system
# DeeperForensics-1.0: A Large-Scale Dataset for Real-World Face Forgery Detection ![firstfigure](supports/first_figure.png) This repository provides the dataset and code for the following paper: **DeeperForensics-1.0: A Large-Scale Dataset for Real-World Face Forgery Detection**<br> [Liming Jiang](https://liming-jiang.com/), [Ren Li](https://liren2515.github.io/page/), [Wayne Wu](http://wywu.github.io), [Chen Qian](https://scholar.google.com/citations?user=AerkT0YAAAAJ&hl=en) and [Chen Change Loy](http://personal.ie.cuhk.edu.hk/~ccloy/)<br> In CVPR 2020.<br> [**Project Page**](https://liming-jiang.com/projects/DrF1/DrF1.html) | [**Paper**](https://arxiv.org/abs/2001.03024) | [**YouTube Demo**](https://www.youtube.com/watch?v=b6iKqkJht38) > **Abstract:** *We present our on-going effort of constructing a large-scale benchmark for face forgery detection. The first version of this benchmark, DeeperForensics-1.0, represents the largest face forgery detection dataset by far, with 60,000 videos constituted by a total of 17.6 million frames, 10 times larger than existing datasets of the same kind. Extensive real-world perturbations are applied to obtain a more challenging benchmark of larger scale and higher diversity. All source videos in DeeperForensics-1.0 are carefully collected, and fake videos are generated by a newly proposed end-to-end face swapping framework. The quality of generated videos outperforms those in existing datasets, validated by user studies. The benchmark features a hidden test set, which contains manipulated videos achieving high deceptive scores in human evaluations. We further contribute a comprehensive study that evaluates five representative detection baselines and make a thorough analysis of different settings.* ![comparison](supports/comparison.png) ## Updates
320
EndyWon/GLStyleNet
['style transfer']
['GLStyleNet: Higher Quality Style Transfer Combining Global and Local Pyramid Features']
GLStyleNet.py norm format_and_norm avg_pooling prepare_mask conv2d Model main extract_target_data build_base_net constant transpose pad bias_add reshape flatten shape append range load join list product concat avg_pooling conv2d dirname range open resize_images int value extract_image_patches reshape concat len transpose matmul range build_base_net concatenate shape sqrt zeros sum range reshape labels_ diag predict fit content resize style add_argument content_mask prepare_mask Model style_mask class_num add_arg ArgumentParser parse_args imread run
# GLStyleNet **[update 1/12/2022]** paper: [GLStyleNet: Exquisite Style Transfer Combining Global and Local Pyramid Features](https://ietresearch.onlinelibrary.wiley.com/doi/pdf/10.1049/iet-cvi.2019.0844), published in [IET Computer Vision 2020](https://digital-library.theiet.org/content/journals/iet-cvi). Arxiv paper: [GLStyleNet: Higher Quality Style Transfer Combining Global and Local Pyramid Features](https://arxiv.org/abs/1811.07260). ### Environment Required: - Python 3.6 - TensorFlow 1.4.0 - CUDA 8.0 ### Getting Started: Step 1: clone this repo
321
Ennosigaeon/automl_benchmark
['automl']
['Benchmark and Survey of Automated Machine Learning Frameworks']
run_cash.py benchmark/ml.py adapter/btb_adapter.py adapter/run_h2o.py test/test_converter.py config/base.py adapter/run_hpsklearn.py adapter/random_search.py adapter/bohb.py evaluation/performance.py util/mean_shift.py adapter/run_atm.py benchmark/synthetic.py evaluation/visualization.py benchmark/open_ml.py adapter/optunity_adapter.py adapter/hyperopt_adapter.py config/util.py comparison_human.py adapter/robo.py benchmark/base.py config/vectorizer.py util/__init__.py adapter/base.py util/logger.py adapter/run_auto_sklearn.py config/__init__.py adapter/run_tpot.py util/multiprocessor.py adapter/run_baseline.py benchmark/__init__.py evaluation/base.py adapter/grid_search.py evaluation/scripts.py run_framework.py config/converter.py adapter/smac.py run run run BenchmarkResult OptimizationStatistic BaseAdapter EvaluationResult HPOlib2Worker BohbAdapter start_worker FixedGP FixedSelector BtbAdapter query_objective_function ObjectiveGridSearch HyperoptAdapter _fun create_pmap pmap logged OptunityAdapter CustomParameterSampler timed_query run_counted_query ObjectiveRandomSearch RoBoAdapter setup load_model skip main load_pipeline setup load_model get_random_search_object_callback skip main load_pipeline main setup skip _createFrame setup load_model _cleanup skip main load_pipeline setup load_model skip main load_pipeline setup load_model skip main load_pipeline query_objective_function SmacAdapter _dict_as_array meta_information Iris create_estimator OpenMLDataManager SantanderBenchmark fix_no_tags OttoBenchmark OpenML100Suite OpenMLCVDataManager OpenMLHoldoutDataManager OpenMLBenchmark OpenMLCSVBenchmark Branin Levy Rosenbrock20D GoldsteinPrice Hartmann3 Rosenbrock10D SinTwo Forrester SinOne Camelback Bohachevsky Hartmann6 MetaConfig MetaConfigCollection ConfigInheritanceGraph ConfigFeature OptunityConverter ConfigSpaceConverter TpotConverter BaseConverter NoopConverter RandomSearchConverter NaiveSearchConverter GPyOptConverter BtbConverter HyperoptConverter RoBoConverter GridSearchConverter ConfigSpace ConfigVectorizer MongoPersistence Persistence print_configurations print_automl_framework_results print_cash_results print_best_incumbent print_synthetic_results print_pipelines comparison_human print_data_set_stats calculate_cash_overfitting calculate_framework_overfitting load_file_results load_atm_results Dataset merge_cash_results plot_cash_incumbent plot_pipeline_similarity plot_overall_performance print_pairwise_performance plot_successive_halving plot_dataset_performance plot_configuration_similarity plot_pairwise_performance plot_cash_overfitting plot_branin plot_framework_overfitting SaneEqualityDist SaneEqualityArray TestConfigSpaceConverter get setup estimate_bandwidth gower_distances CustomMeanShift mean_shift NoDaemonPool NoDaemonProcess flatten print time format SantanderBenchmark SmacAdapter score HyperoptAdapter iterations bohb optunity OptunityAdapter BohbAdapter RoBoAdapter ObjectiveRandomSearch seed BenchmarkResult add_result random_search hyperopt store_new_run get robo Namespace start BtbAdapter info timeout store_results load_all optimize n_jobs ObjectiveGridSearch end grid_search smac btb estimate_grid_size get_result len send folds OpenMLBenchmark get_configuration_space ConfigSpaceConverter run HPOlib2Worker items list value from_dict acquire item append objective_function release get f hasattr put get sorted hasattr cpu_count start Queue CallLog hasattr append from_dict RandomState objective_function get from_dict value RandomState get_configuration_space 
RandomSearchConverter copy acquire append objective_function release rmtree mkdir ATM pid getpgid wait get_best_classifier get_hyperpartition hyperpartition_id max Popen setup getcwd task_id call Model terminate sleep predict format predict_proba mkdir SIGTERM _make_pipeline time print fit to_csv killpg any get_dataruns hyperparameter_values pipeline method column_names append steps __name__ get_smac_output_glob Scenario len show_models get_spawn_classifier AutoSklearnClassifier Process append range start int join fit_ensemble isinstance endswith choice range len fit_transform H2OAutoML asfactor list _createFrame categorical mkdtemp get_params no_progress init train keys len rmtree shutdown append T DataFrame eval lower enumerate append Pipeline load_model fitted_pipeline_ TPOTClassifier _map_algo optimize RandomState ConfigSpaceConverter get_configuration_space SMAC int list items copy import_module getattr get isinstance CustomMeanShift values seed add uniform ConfigVectorizer append update set mean plot_configuration_similarity unique items print cluster_centers_ isnan labels_ silhouette_score vectorize array fit subgraph flatten distance values str list chr DiGraph len append sum plot_pipeline_similarity add_edge format keys add_node enumerate join items print has_node cash_tasks inverse plot_pairwise_performance argmax max list defaultdict std ones OrderedDict algorithm plot_dataset_performance append sum range plot_cash_incumbent format from_json solvers mean start keys pvalue enumerate items load_all best plot_overall_performance print_pairwise_performance setdefault print end wilcoxon zfill dict argsort array OpenMLBenchmark cash_datasets len plot_pairwise_performance argmax max framework_datasets list ones plot_dataset_performance array append range format mean framework_tasks pvalue enumerate plot_overall_performance print_pairwise_performance print wilcoxon zfill argsort zeros std len sum format print score min argmin mean benchmark append abs array range pvalue len load_all print_best_incumbent load_all print MongoPersistence solvers OpenMLBenchmark store_new_run store_results dataset_id get_task sorted name task_id get_dataset NumberOfInstancesWithMissingValues append MinorityClassPercentage NumberOfInstances format replace NumberOfSymbolicFeatures NumberOfNumericFeatures NumberOfMissingValues int print Dataset NumberOfClasses load items join cursor format extend connect join format print set_printoptions append exists items format exists print score OpenMLBenchmark append folds create_estimator fit join format print set_printoptions OpenMLBenchmark init no_progress shutdown exists print format keys mean subplots arange tick_params set_xlabel apply_along_axis savefig legend range update inset_axes plot set_xlim mean mark_inset join set_xscale set_size_inches print set_yticks set_ylabel zeros set_ylim len subplots grid tick_params abs set_xlabel logical_and scatter savefig range format replace plot set_xlim autoscale combinations print set_axisbelow set_ylabel set_ylim len DataFrame print logical_and mean shape sum range len subplots arange prop_cycler set_yticklabels grid Line2D linspace tick_params max set_frame_on tick_right shape scatter array savefig legend append next range set_xlim add_artist mean enumerate set_size_inches set_axisbelow reshape set_yticks min subplots_adjust set_xticks zeros get_legend_handles_labels std len max set_size_inches subplots set_title plot print set_yticklabels set_yticks min hstack grid autoscale set_ylabel savefig boxplot append tick_params 
array update format subplots append arange text tight_layout subplots_adjust set flatten scatter savefig delaxes legend ceil max enumerate len bfs_tree subplots minmax_scale endswith set_ylim max show sorted nodes savefig edges legend append sum range asarray set_xlim items set_size_inches text draw array graphviz_layout len update set_size_inches arange set_title name view_init set_yticks set_xlabel pi plot_surface set_rotate_label set_xticks set_ylabel figure set_zlabel savefig gca meshgrid set_size_inches subplots plot set_xlabel set_xlim set_ylabel set_xticks savefig tick_params set_ylim set_size_inches subplots set_title grid set_ylabel savefig boxplot append tick_params array set_size_inches subplots set_title grid set_ylabel savefig boxplot tick_params setFormatter basicConfig format getLogger addHandler StreamHandler Formatter FileHandler DEBUG setLevel getLogger sum logical_or abs int NearestNeighbors check_random_state check_array len kneighbors gen_batches fit items sorted list estimate_bandwidth print ones len get_bin_seeds flatten shape fill zeros kneighbors array range enumerate fit property
Ennosigaeon/automl_benchmark
322
EnriqueSolarte/GC-Net-tensorflow
['stereo lidar fusion']
['End-to-End Learning of Geometry and Context for Deep Stereo Regression']
readPFM.py image_normalize.py network.py utils/dataset.py preprocess.py utils/reading_data.py config.py main.py utils/gen_image_list.py Config assert_raises imageProcess normalizeRGB test output_dir main parse_args train loss predict _build_resnet conv_3d _get_variable _build_3d_conv stack conv inference deconv_3d bn load_pfm Dataset gen_images_list fn chdir glob makedirs add_argument ArgumentParser reshape multiply float32 reduce_sum stack softmax cast append range zeros_like REGULARIZATION_LOSSES subtract get_collection float32 where reduce_sum div cast add_n abs max min astype range ERROR Dataset set_verbosity zeros_like to_int32 div Saver save Session get_variable run fromarray restore transpose squeeze placeholder reduce_sum cast inference predict asarray latest_checkpoint subtract equal uint8 constant print float32 int32 loss parse_args train test _get_variable conv2d constant_initializer xavier_initializer get_variable _get_variable conv3d constant_initializer xavier_initializer get_variable as_list _get_variable constant_initializer xavier_initializer conv3d_transpose get_variable get_shape list _get_variable assign_moving_average add_to_collection batch_normalization cond moments range len l2_regularizer range Config convert_to_tensor slice reshape squeeze transpose pad stack append range fromarray list rstrip readline reshape close map groups rotate match fromfile float open join str zip print sort len write close open abspath isfile append listdir split
### Reference: https://github.com/MaidouPP/gc_net_stereo **gc_net_stereo** is an implementation of the paper **End-to-End Learning of Geometry and Context for Deep Stereo Regression**, available at https://arxiv.org/abs/1703.04309 1. How to run: CUDA_VISIBLE_DEVICES=n python main.py --gpu n --phase train --max_steps 50000 --learning_rate 0.00005 --output_dir /home/users/shixin.li/segment/gc-net/log/0508 --pretrain true 2. Before running the code on a dataset, you need to generate an image list file that stores the image file paths; gen_image_list.py can generate it. 3. The Scene Flow dataset uses the PFM format; readPFM.py can be used to read it.
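The PFM format mentioned in item 3 is a tiny text header (type, dimensions, scale/endianness) followed by raw float32 samples. As a rough illustration of what a reader such as readPFM.py has to do — a sketch under that assumption, not the repository's implementation, and the name `read_pfm` is a placeholder — the format can be parsed like this:

```python
import numpy as np

def read_pfm(path):
    """Minimal PFM reader: returns (image, scale). Illustrative sketch only."""
    with open(path, "rb") as f:
        header = f.readline().decode("latin-1").rstrip()
        if header == "PF":
            channels = 3                       # colour image
        elif header == "Pf":
            channels = 1                       # greyscale, e.g. a disparity map
        else:
            raise ValueError("Not a PFM file: " + path)

        width, height = map(int, f.readline().decode("latin-1").split())
        scale = float(f.readline().decode("latin-1").rstrip())
        endian = "<" if scale < 0 else ">"     # negative scale means little-endian data

        data = np.fromfile(f, endian + "f")    # raw float32 samples
        shape = (height, width, channels) if channels == 3 else (height, width)
        return np.flipud(data.reshape(shape)), abs(scale)  # rows are stored bottom-up
```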
323
EntropicEffect/dendritic_backprop
['denoising']
['Dendritic error backpropagation in deep cortical microcircuits']
difference_target_prop.py dendritic_backprop.py dendritic_backprop_classes.py dendritic_backprop_hiddenLayer.py dendritic_backprob_shallowLearning.py MSE tot_weight_sum elem_mult output_layer low_pass_filter neuron base_non_linearity hidden_neuron_layer base_voltage_clamp MSE calc_accuracy deriv deriv_softmax softmax gen_one_hot base_non_linearity range len print shape reshape shape max range len zeros reshape tqdm dot softmax argmax range
# dendritic backprop The repository contains the code for a project done as part of the IBRO-Simons Computational Neuroscience 2018. The project was an effort to implement parts of this paper: https://arxiv.org/abs/1801.00062. It's been a while since I looked at the code, so I'm slowly going to document it a bit. TODO: -- Document code -- Make sure the results in the paper can be reliably replicated
324
ErenTuring/SIINet
['semantic segmentation']
['Spatial Information Inference Net: Road Extraction Using Road-Specific Contextual Information']
models/BasicModule.py utils/dataset.py utils/data_enhance.py models/UNet.py eval_roadtracer.py main_road_reval_mit.py models/model_utils.py models/DeeplabV3_plus.py models/SIIS_NET.py models/__init__.py config/opt_rbdd.py utils/evaluation.py main_road_train.py config/opt_mit.py utils/tools.py main_road_reval.py models/SIIS_Kernel.py config/__init__.py utils/visualize.py models/HF_FCN.py models/ContrastNets.py utils/loss.py config/opt_cvpr.py models/resnet.py crops expand_lines certeral_crop val de_merge code_lbls vis_result num_of_break main main val test get_net test_mit val main get_net main val train quick_val DefaultConfig DefaultConfig DefaultConfig get_opt ResNet set_parameter_requires_grad BasicModule ResUnetBlock ResUnet_SIIS Resnet_seg conv3x3 ResUnet DeeplabV3_plus build_model resize ASPP_test ASPP HF_FCN_back_1 HF_FCN build_model resize custom_initialization resize load_weight ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 SIIS_Conv1d SIIS SIIS_Conv3dRNN SegRNNCell BuildPass_4 Deeplab_SIIS Resnet_SIIS Vgg_SIIS upconv2x2 conv1x1 build_model UNet_SIIS UNet UpConv conv3x3 DownConv build_model MyDataset_1 SegData Train_Dataset MyDataset_2 filelist crop_data_smartly data_crop judge RelaxedRoadExtractionScore RoadExtractionScore runingScore fast_hist main jaccard_loss dice_bce_loss load_ckpt net_predict_tta setup_mode colour_code_label TTA vote_combine rename_file compute_class_iou adjust_lr net_predict data_generator net_predict_enhance save_ckpt train_log Visualizer sorted imwrite print len slide_crop tqdm rename_file mkdir resize INTER_NEAREST imread listdir range INTER_LINEAR sorted imwrite tqdm mkdir imread listdir sorted imwrite getStructuringElement MORPH_ELLIPSE mkdir dilate imread listdir sorted listdir imread imwrite addWeighted sorted imwrite tqdm stack mkdir splitext zip imread listdir uint8 threshold CHAIN_APPROX_SIMPLE getStructuringElement findContours RETR_EXTERNAL astype THRESH_BINARY MORPH_ELLIPSE erode dilate len get_scores sorted print add RoadExtractionScore zip print_score imread listdir len sorted imwrite tqdm mkdir imread listdir print expand_lines val colour_code_label argmax enumerate tqdm argmax update colour_code_label squeeze write tqdm numpy keys enumerate open eval to load_ckpt ckpt test mkdir dataset_dir get_net uint8 colour_code_label astype tqdm resize INTER_NEAREST enumerate RelaxedRoadExtractionScore test_mit batch_size model ckpt zero_grad localtime DataLoader save BCEWithLogitsLoss list max_epoch Adam map MSELoss strftime add lr_decay Train_Dataset to mycriterion range size start_epoch lr dataset_dir alpha item basic_criterion enumerate MyDataset_1 AverageValueMeter time backward print quick_val parameters filter reset adjust_lr model_name zeros step train_log eval train MyDataset_1 mean eval DataLoader mkdir dataset_dir zeros range train parameters DeeplabV3_plus HF_FCN data named_modules named_children isinstance fill_ out_channels Conv2d normal_ sqrt modules zero_ BatchNorm2d kaiming_normal_ ResNet load_weight ResNet load_weight ResNet load_weight ResNet load_weight ResNet load_weight load_url update load_state_dict state_dict UNet print eval sum range append range judge copy join sorted remove listdir range len imwrite print copy tqdm crop_data_smartly mkdir filelist imread range len flatten reshape sigmoid size array clone mean append rot90 flip range cat net transpose_ use_gpu tuple transpose vote_combine from_numpy crop_params softmax resize append normalize numpy cuda transpose_ use_gpu rot90 tuple 
transpose vote_combine copy from_numpy crop_params softmax resize append normalize numpy cuda range transpose_ use_gpu tuple transpose vote_combine from_numpy crop_params softmax resize append normalize numpy cuda squeeze repeat zeros array range param_groups print log print load load_state_dict isfile copyfile save print current_device device sorted listdir str splitext findall range len flush write strftime logical_and logical_or zeros float sum range addWeighted uint8 imwrite astype rename_file resize argmax
# Spatial information inference net: Road extraction using road-specific contextual information Code for the paper: "Spatial information inference net: Road extraction using road-specific contextual information" by Chao Tao, Ji Qi, Yansheng Li, Hao Wang and Haifeng Li. ![img](fig/fig_03.png) ## Dependencies ``` python3 pytorch >= 1.1 ``` ## Datasets ### The CVPR dataset
325
EricZgw/PyramidBox
['face detection']
['PyramidBox: A Context-assisted Single Shot Face Detector']
tf_extended/bboxes.py nets/ssd.py tf_extended/__init__.py preprocessing/vgg_preprocessing.py preprocessing/inception_preprocessing.py preprocessing/tf_image.py datasets/pascalvoc_datasets.py tf_utils.py train_model.py preprocessing/preprocessing_factory.py preparedata.py nets/ssd_common.py AnchorSampling.py tf_extended/math.py datasets/dataset_utils.py makedir.py nets/np_methods.py widerface_eval.py tf_extended/tensors.py utility/visualization.py nets/custom_layers.py demo.py tf_extended/metrics.py preprocessing/ssd_vgg_preprocessing.py check_data_io.py datasets/pascalvoc_to_tfrecords.py process_image PrepareData get_init_fn configure_optimizer update_model_scope reshape_list get_variables_to_train print_configuration add_variables_summaries configure_learning_rate TrainModel process_image download_and_uncompress_tarball image_to_tfexample write_label_file int64_feature bytes_feature float_feature read_label_file has_labels get_dataset_info _convert_to_example _add_to_tfrecord _get_output_filename run _get_dataset_filename _process_image abs_smooth_2 pad2d channel_to_last l2_normalization abs_smooth bboxes_nms ssd_bboxes_decode ssd_bboxes_select bboxes_nms_fast bboxes_jaccard bboxes_clip ssd_bboxes_select_layer bboxes_intersection bboxes_resize bboxes_sort PyramidBoxModel tf_ssd_bboxes_select_all_classes tf_ssd_bboxes_select_layer_all_classes tf_ssd_bboxes_encode tf_ssd_bboxes_select_layer tf_ssd_bboxes_encode_layer tf_ssd_bboxes_decode tf_ssd_bboxes_select tf_ssd_bboxes_decode_layer distorted_bounding_box_crop preprocess_for_train preprocess_for_eval preprocess_image distort_color apply_with_random_selector get_preprocessing tf_image_unwhitened distorted_bounding_box_crop np_image_unwhitened tf_summary_image preprocess_for_train preprocess_for_eval preprocess_image tf_image_whitened distort_color apply_with_random_selector _assert _Check3DImage bboxes_crop_or_pad fix_image_flip_shape random_flip_left_right _ImageDimensions _is_tensor resize_image resize_image_bboxes_with_crop_or_pad _aspect_preserving_resize preprocess_for_train _crop _central_crop _smallest_size_at_least _mean_image_subtraction preprocess_for_eval preprocess_image _random_crop bboxes_nms bboxes_matching bboxes_filter_overlap bboxes_filter_labels bboxes_nms_batch bboxes_jaccard bboxes_matching_batch bboxes_sort_all_classes bboxes_clip bboxes_intersection bboxes_filter_center bboxes_resize bboxes_sort safe_divide cummax _create_local streaming_tp_fp_arrays average_precision_voc12 precision_recall_values _safe_div _precision_recall average_precision_voc07 precision_recall _broadcast_weights streaming_precision_recall_arrays get_shape pad_axis bboxes_draw_on_img colors_subselect plt_bboxes draw_rectangle draw_bbox draw_lines fromarray int uint8 bboxes_nms ssd_bboxes_select bboxes_sort bboxes_clip resize ssd_anchors_all_layers bboxes_resize run append list isinstance join print_config makedirs int num_epochs_per_decay batch_size MomentumOptimizer AdagradOptimizer GradientDescentOptimizer AdamOptimizer RMSPropOptimizer AdadeltaOptimizer FtrlOptimizer name get_model_variables histogram append scalar checkpoint_path latest_checkpoint get_model_variables checkpoint_exclude_scopes IsDirectory startswith info append train_dir extend get_collection TRAINABLE_VARIABLES join urlretrieve st_size print extractall stat join join filter index split TFExampleDecoder TFRecordReader join read format parse int encode findall print text getroot append find Example _convert_to_example write SerializeToString _process_image seed join sorted int 
print min shuffle mkdir ceil float listdir _get_dataset_filename range len minimum abs abs square where shape exp reshape zeros_like ssd_bboxes_decode reshape where shape argmax amax concatenate ssd_bboxes_select_layer append range len argsort minimum transpose maximum copy copy minimum transpose maximum minimum transpose maximum ones size logical_and where bboxes_jaccard shape logical_or range append maximum minimum ones while_loop stack zeros log stack exp get_shape dtype reshape reduce_max greater stack cast argmax random_uniform constant constant cast int32 uint8 astype copy tf_image_unwhitened expand_dims image draw_bounding_boxes _is_tensor as_list shape unstack is_fully_defined any with_rank get_shape set_shape pack greater_equal slice to_int32 logical_and with_dependencies Assert shape rank equal greater_equal reshape logical_and with_dependencies extend Assert shape rank random_uniform append range equal len append _crop range split convert_to_tensor to_float to_int32 greater cond convert_to_tensor resize_bilinear squeeze shape set_shape _smallest_size_at_least expand_dims to_float _aspect_preserving_resize random_flip_left_right set_shape random_uniform to_float set_shape _aspect_preserving_resize isinstance isinstance list get_shape keys isinstance keys as_list shape unstack is_fully_defined len append range isinstance len line rectangle FONT_HERSHEY_DUPLEX putText rectangle str FONT_HERSHEY_DUPLEX putText shape rectangle range int str suptitle add_patch dict imshow figure Rectangle range
PyramidBox === This is an unofficial Tensorflow re-implementation of [PyramidBox: A Context-assisted Single Shot Face Detector](https://arxiv.org/abs/1803.07737?context=cs), which achieves superior performance among the state-of-the-art on the two common face detection benchmarks, FDDB and WIDER FACE. ## Note There is still a gap in performance from the paper, which may be caused by several factors: * Data-anchor-sampling is not implemented. * The data augmentation differs from the original. * The batch size in the paper is 16, but 1 is used here because of memory limitations. * Some hyperparameters are not mentioned in the paper.
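On the first bullet: data-anchor-sampling, as described in the PyramidBox paper, randomly resizes a training image so that one of its faces lands near a (usually smaller) anchor scale, balancing the scale distribution of faces. A hedged sketch of that idea — my own reading of the paper, not code from this repository; the anchor scales and the helper name are illustrative — could look like:

```python
import math
import random

ANCHOR_SCALES = [16, 32, 64, 128, 256, 512]

def data_anchor_sampling_ratio(face_w, face_h):
    """Pick an image resize ratio that maps a sampled face towards a
    randomly chosen (mostly smaller) anchor scale. Sketch only."""
    face_size = math.sqrt(face_w * face_h)
    # anchor scale closest to the current face size
    i_anchor = min(range(len(ANCHOR_SCALES)),
                   key=lambda i: abs(ANCHOR_SCALES[i] - face_size))
    # target an anchor index at most one step above the closest one
    i_target = random.randint(0, min(i_anchor + 1, len(ANCHOR_SCALES) - 1))
    # jitter the target size around the chosen anchor scale
    s_target = random.uniform(ANCHOR_SCALES[i_target] / 2.0,
                              ANCHOR_SCALES[i_target] * 2.0)
    return s_target / face_size  # resize the whole image by this factor
```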
326
Ericolony/QAOA
['combinatorial optimization', 'variational monte carlo']
['Natural evolution strategies and variational Monte Carlo']
src/offshelf/maxcut/build/lib/maxcut/_solvers/backend.py src/util/plottings.py maxCutPy/maxcutpy/graphcut.py src/offshelf/maxcut/maxcut/_solvers/_sdp.py maxCutPy/maxcutpy/__init__.py src/offshelf/maxcut/build/lib/maxcut/riemannian/_tcg.py src/train.py src/offshelf/maxcut/maxcut/_solvers/__init__.py src/offshelf/maxcut/build/lib/maxcut/_solvers/__init__.py src/offshelf/maxcut/maxcut/riemannian/stiefel/_stiefel.py src/offshelf/maxcut/setup.py maxCutPy/setup.py src/offshelf/maxcut/build/lib/maxcut/riemannian/stiefel/__init__.py src/util/__init__.py src/offshelf/maxcut/maxcut/_solvers/_bm.py src/presentation.py src/offshelf/maxcut/maxcut/riemannian/stiefel/__init__.py src/util/data_loader.py maxCutPy/maxcutpy/graphtest.py maxCutPy/maxcutpy/maxcut.py src/offshelf/maxcut/build/lib/maxcut/_solvers/_sdp.py src/offshelf/maxcut/maxcut/riemannian/_tcg.py src/objectives/spinglass.py src/offshelf/maxcut/maxcut/riemannian/_rtr.py RL/dqn.py RL/model.py src/offshelf/maxcut/build/lib/maxcut/_graphs.py src/ising_gt.py maxCutPy/max_cut_gt.py RL/train.py src/objectives/max_cut.py main.py src/util/directory.py src/offshelf/maxcut/build/lib/maxcut/riemannian/stiefel/_stiefel.py src/offshelf/maxcut/build/lib/maxcut/__init__.py src/offshelf/maxcut/build/lib/maxcut/_solvers/_bm.py src/util/helper.py src/objectives/__init__.py src/offshelf/maxcut/build/lib/maxcut/riemannian/__init__.py src/offshelf/maxcut/maxcut/_solvers/backend.py src/offshelf/MaxCut.py src/offshelf/maxcut/maxcut/_graphs.py src/util/models.py maxCutPy/maxcutpy/graphgen.py src/offshelf/maxcut/build/lib/maxcut/riemannian/_rtr.py src/offshelf/maxcut/maxcut/__init__.py config.py src/offshelf/maxcut/maxcut/riemannian/__init__.py RL/env.py src/offshelf/manopt_maxcut.py maxCutPy/maxcutpy/graphdraw.py add_argument_group define_args_parser str2bool get_config main are_undecided_nodes is_cut_consistent highest_degree_nodes degree_nodes_sequence binary_cut minority_class init_cut lowest_degree_nodes sign_norm all_possible_cuts integer_to_binary compute_epsilon remove_isolates_nodes cut cut_edges marked_nodes_could_be_cut is_all_isolate pick_random_nodes partition_dictionary two_maximal_independent_set set_partitions could_be_cut edges_beetween get_partitions strong_minority_class draw_custom draw_graphs_list draw_cut_graph merge_dictionaries read_from_file generate_static_graphs dictionary_size generate_crescent_nodes_graphs generate_crescent_edges_graphs write_to_file remove_unconnected_graphs compare_cut_algorithm_results execution_time_and_output test_cut_algorithm execution_time choose_new_candidate brute_force_max_cut TimedOutExc aux_pruning_local_consistent_max_cut f1 largest_eigenvector do_work complete_cut greedy_cut compute_estimated_cut lazy_local_consistent_max_cut first_lemma recursive_spectral_cut aux_lazy_local_consistent_max_cut local_consistent_max_cut soto_function two_threshold_spectral_cut timeout aux_local_consistent_max_cut trevisan_approximation_alg trevisan_function f2 greedy_choice second_lemma DQN Environment Policy train smooth Ising_model ising_ground_truth decode_state presentation run_netket MaxCutEnergy SpinGlassEnergy manopt off_the_shelf load_gset_graph generate_sbm RiemannianTrustRegion TruncatedConjugateGradient symblockdiag stiefel_dimension stiefel_retraction random_from_stiefel froebenius stiefel_projection inner_prod get_partition get_cut_value AbstractMaxCut MaxCutBM is_psd nearest_psd MaxCutSDP load_gset_graph generate_sbm RiemannianTrustRegion TruncatedConjugateGradient symblockdiag stiefel_dimension 
stiefel_retraction random_from_stiefel froebenius stiefel_projection inner_prod get_partition get_cut_value AbstractMaxCut MaxCutBM is_psd nearest_psd MaxCutSDP load_data get_time prepare_dirs_and_logger folder_name_generator compute_edge_weight_cut make_locally_connect record_result evaluate build_model_netket load_model plot_graph laplacian_to_graph plot_train_curve append ArgumentParser parse_known_args int batch_size manopt format print load_data off_the_shelf train run_netket prepare_dirs_and_logger framework fromkeys cut init_cut add nodes set number_of_nodes Graph binary_cut append range nodes cut keys nodes list neighbors len neighbors minority_class keys nodes fromkeys set_node_attributes set_node_attributes number_of_nodes nodes enumerate integer_to_binary minority_class dict strong_minority_class degree degree_nodes_sequence degree_nodes_sequence maximal_independent_set set nodes remove_node nodes sign circular_layout draw_networkx_nodes axis draw_networkx_labels draw_networkx_edge_labels draw_networkx_edges connected_component_subgraphs draw_cut_graph Graph disjoint_union graphviz_layout circular_layout edge_boundary draw_networkx_nodes axis draw_networkx_labels set_node_attributes title PARTITION get_partitions draw_networkx_edges str dump print makedirs close open load print close exit open defaultdict keys extend str defaultdict print is_connected dictionary_size from_numpy_matrix append str is_connected adj_matrix erdos_renyi_graph write_to_file range str defaultdict is_connected adj_matrix log1p erdos_renyi_graph write_to_file range str defaultdict is_connected adj_matrix min erdos_renyi_graph write_to_file max time function str sorted defaultdict BENCHMARKS_FILE_DIR print iterkeys read_from_file compute_epsilon from_numpy_matrix write_to_file execution_time_and_output round __name__ append str sorted defaultdict are_undecided_nodes print cut_alg iterkeys read_from_file compute_epsilon from_numpy_matrix write_to_file __name__ append subgraph BLACK add BLUE cut_edges pop list nodes set add greedy_choice init_cut set_partitions has_edge float number_of_edges cut_edges argmax eig toarray normalized_laplacian_matrix fromkeys dict first_lemma largest_eigenvector sign_norm is_all_isolate edges_beetween set_partitions Graph subgraph add set greedy_cut number_of_edges two_threshold_spectral_cut set_partitions recursive_spectral_cut number_of_nodes Graph binary_cut cut_edges range pop could_be_cut choose_new_candidate list BLACK dict BLUE could_be_cut choose_new_candidate list BLACK neighbors dict BLUE choose_new_candidate fromkeys degree_nodes_sequence UNDECIDED BLUE aux_pruning_local_consistent_max_cut cut aux_local_consistent_max_cut marked_nodes_could_be_cut subgraph add dict set number_of_edges PARTITION set_node_attributes cut_edges could_be_cut choose_new_candidate list BLACK dict BLUE compute_estimated_cut append pop could_be_cut list int BLACK dict BLUE append aux_local_consistent_max_cut get list complete_cut put number_of_nodes highest_degree_nodes degree_nodes_sequence cpu_count put lowest_degree_nodes list complete_cut nodes MARKED append cut range get pick_random_nodes two_maximal_independent_set start Queue pop join UNDECIDED len ones sum eval convolve Environment smooth alg DQN linspace save max partition dir ylabel title savefig append range format plot param_groups int time join print axes xlabel figure epochs array len range time list ising_model Ising_model print graph local_consistent_max_cut draw_cut_graph search nodes close laplacian_to_graph savefig 
execution_time decode_state sum cut_edges range subplots set_size set_major_formatter tick_params abs list std set_yscale set_xlabel set_box_color set_trace savefig twinx legend append expand_dims format ScalarFormatter plot set_xticklabels close tight_layout add_artist mean stack boxplot item enumerate int set_xscale suptitle print set_yticks set_ylabel set_xticks figure fill_between array split build_model_netket RmsProp input_size Momentum run str set_trace Sgd MaxCutEnergy framework MetropolisLocal Vmc laplacian_to_hamiltonian AdaDelta SpinGlassEnergy time AdaMax init_random_parameters get_observable_stats AdaGrad use_sr str time format print system input_size savemat random_seed loadmat framework makedirs time evaluate_cut_size print solve random_cut MaxCutBM laplacian_to_graph goemans_williamson_weighted greedy_max_cut Graph stochastic_block_model set_edge_attributes choice dict edges zip len normal sum sqrt square dot symblockdiag T normal edges spacing norm min identity eigvals real is_psd len cholesky load normal format fill_diagonal ising_ground_truth reshape transpose write close input_size make_locally_connect save randint prod record_result open format learning_rate batch_size num_of_iterations pb_type input_size get_time model_name width append depth optimizer framework setFormatter join format getLogger handlers addHandler pb_type StreamHandler dir Formatter removeHandler folder_name_generator makedirs format print write close save item append abs open mean var find_conn max format print compute_edge_weight_cut random_states next sqrt range zeros int FullyConnected RbmSpinReal tuple SumOutput input_size ACT RbmSpin Lncosh append depth prod range FFNN len load eval zero_grad load_state_dict append add_edges_from range Graph states int list local_consistent_max_cut print check search shape execution_time energies random_seed decode_state cut_edges range log_interval arange plot suptitle xlabel ylabel mean savefig legend fill_between std
# Natural Evolution Strategies and Quantum Approximate Optimization # Tianchen Zhao, Giuseppe Carleo, James Stokes and Shravan Veerapaneni This repository includes the code for the paper "Natural Evolution Strategies and Quantum Approximate Optimization" (https://arxiv.org/pdf/2005.04447.pdf). ## How to Use ## Download this repository. ``` git clone https://github.com/Ericolony/quantum_optimization.git ``` Get into the working directory. ```
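The optimizer behind the paper is a natural-evolution-strategies estimate of the gradient of an expected objective (e.g. a cut value) with respect to the parameters of a sampling distribution. Independent of this repository's own implementation, a minimal antithetic NES gradient estimator — the function and objective names here are placeholders — can be sketched as:

```python
import numpy as np

def nes_gradient(objective, theta, sigma=0.1, n_samples=50, rng=None):
    """Antithetic natural-evolution-strategies gradient estimate for
    maximising `objective` at parameters `theta`. Illustrative sketch."""
    rng = rng or np.random.default_rng()
    eps = rng.standard_normal((n_samples, theta.size))
    r_plus = np.array([objective(theta + sigma * e) for e in eps])
    r_minus = np.array([objective(theta - sigma * e) for e in eps])
    rewards = np.concatenate([r_plus, r_minus])
    rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)  # variance reduction
    grad = ((rewards[:n_samples] - rewards[n_samples:])[:, None] * eps).sum(axis=0)
    return grad / (2.0 * n_samples * sigma)

# usage (hypothetical): theta += learning_rate * nes_gradient(expected_cut_value, theta)
```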
327
ErikKratzCth/ALOCC_Keras_SMILE
['outlier detection', 'one class classifier', 'anomaly detection']
['Adversarially Learned One-Class Classifier for Novelty Detection']
log/prosivic/common_setup_181220/configuration.py baseline_model.py kh_tools.py baseline_configuration.py loss_fix.py log/dreyeve/old/common_setup_181219/configuration.py configuration.py log/mnist/debug2/configuration.py utils.py log/dreyeve/old/good_auroc/configuration.py ops.py log/dreyeve/old/long/configuration.py test.py architecture.py log/dreyeve/common_setup_181220/configuration.py log/dreyeve/old/common_architecture_181213/configuration.py old files/models_with_alternate_training.py log/dreyeve/old/1_dense_layer/configuration.py log/mnist/orig_arch/configuration.py log/dreyeve/old/181202/configuration.py log/dreyeve/old/first_real_experiment/configuration.py log/prosivic/old/common_architecture_181213/configuration.py models.py ae_architecture.py d_architecture.py log/prosivic/old/common_setup_181219/configuration.py AE_Architecture Configuration baseline_model Configuration D_Architecture read_lst_images kh_extractPatches get_image_patches kh_crop get_noisy_data kh_getSliceImages get_patch_video kh_getImages kh_isDirExist read_lst_images_w_noise2 read_image_w_noise kh_extractPatchesOne read_dataset_image_path kh_getSliceImages_simple read_lst_images_w_noise read_dataset_images read_image ALOCC_Model deconv2d export_scores get_image make_gif to_json transform_Slicization save_images transform visualize montage center_crop merge get_image_SlicizationWithShape merge_images kh_make_patches conv_out_size_same inverse_transform imread imsave get_image_Slicization Configuration Configuration Configuration Configuration Configuration Configuration Configuration Configuration Configuration ALOCC_Model append random_noise append join glob random_noise read_image append read_image_w_noise extend get_image_patches read_image_w_noise append extend get_image_patches read_image append join read_image glob read_lst_images get_image_patches format print extend range len append shape array print makedirs basename std kh_crop print min add mean kh_isDirExist dirname resize append zeros range array imsave open basename kh_crop print min kh_isDirExist dirname resize append zeros range array imsave open kh_extractPatchesOne append kh_extractPatches print export_results_dir imread transform_Slicization astype float32 transform_Slicization astype float32 zeros enumerate squeeze merge int round center_crop imresize VideoClip write_gif make_gif int arange save_images batch_size print sampler strftime choice uniform gmtime run xrange ceil zeros tile append enumerate list as_strided Number isinstance tuple ndim strides shape array int isinstance imsave print ones sqrt ceil array range Path get_namelist_from_file
# ALOCC Re-implementation of https://github.com/Tony607/ALOCC_Keras ## How to set up an experiment The experiment settings are defined in ```./configuration.py``` ### Changing the configuration for an implemented dataset Settings are kept separately for each dataset, under corresponding if-statements on the ```dataset``` variable. The dataset is specified as an argument when running a training or testing session. ### Adding a new dataset * Put your data in separate directories: * training data (inliers) * validation data (inliers)
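The per-dataset if-statement pattern described above can be pictured roughly as follows. This is a hypothetical sketch, not the repository's actual configuration.py; the attribute names are made up, and only the dataset names mnist, prosivic and dreyeve are taken from the file listing above:

```python
class Configuration:
    """Hypothetical sketch of the per-dataset settings pattern the README describes."""
    def __init__(self, dataset):
        self.dataset = dataset
        if dataset == "mnist":
            self.input_height = 28
            self.train_dir = "./data/mnist/train/"
            self.val_dir = "./data/mnist/val/"
        elif dataset == "prosivic":
            self.input_height = 256
            self.train_dir = "./data/prosivic/train/"
            self.val_dir = "./data/prosivic/val/"
        elif dataset == "dreyeve":
            self.input_height = 256
            self.train_dir = "./data/dreyeve/train/"
            self.val_dir = "./data/dreyeve/val/"
        else:
            raise ValueError("Unknown dataset: %s" % dataset)
```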
328
EvZissel/Residual-Flow
['out of distribution detection']
['Deep Residual Flow for Out of Distribution Detection']
ADV_Generate_Mahalanobis.py Residual_flow_test_processing.py Residual _flow_train.py models/__init__.py OOD_Regression_Mahalanobis.py lib_generation.py lib_regression.py ADV_Samples_FGSM.py OOD_Generate_Mahalanobis.py OOD_Regression_Residual _flow.py models/densenet.py calculate_log.py Residual_flow_prepare.py OOD_Regression_Mahalanobis_FGSM_validation.py data_loader.py models/resnet.py densenet.py OOD_Regression_Residual_flow_FGSM_validation.py main recursion_change_bn main recursion_change_bn get_curve metric getCIFAR10 getSVHN getNonTargetDataSet getFGSM getCIFAR100 getTargetDataSet DenseNet3 TransitionBlock BottleneckBlock DenseBlock BasicBlock get_Mahalanobis_score get_Mahalanobis_score_adv sample_estimator merge_and_generate_labels get_resflow_score get_LID mle_batch get_resflow_score_FGSM get_posterior block_split_adv_RealNVP detection_performance load_characteristics block_split block_split_RealNVP load_characteristics_RealNVP block_split_adv main recursion_change_bn main main main main Nett make_roc RealNVP Nets Nett_linear test BatchNormStats1d Permutation main train Nets_linear Rescale main recursion_change_bn Nett RealNVP Nets Nett_linear reture_length_hidden BatchNormStats1d Permutation main recursion_change_bn Nets_linear Rescale DenseNet3 TransitionBlock BottleneckBlock DenseBlock BasicBlock PreActBlock ResNet ResNet18 Bottleneck ResNet34 ResNet101 test conv3x3 ResNet50 PreActBottleneck BasicBlock ResNet152 batch_size net_type modules save dataset merge_and_generate_labels cuda dataroot str num_classes set_device sample_estimator load_state_dict range asarray get_Mahalanobis_score_adv concatenate outf Compose size ResNet34 eval mkdir manual_seed setattr recursion_change_bn empty getTargetDataSet enumerate load items DenseNet3 int join print Variable reshape gpu len items isinstance BatchNorm2d enumerate data model zero_grad abs max view add append ge cat format LongTensor float criterion backward clamp index_select cpu index_copy_ makedirs format arange loadtxt sort min argmin dict max range format concatenate print get_curve trapz dict max pop join setdefault DataLoader SVHN append expanduser pop join setdefault DataLoader CIFAR10 append expanduser pop join setdefault DataLoader append expanduser CIFAR100 load join format DataLoader cat getSVHN getCIFAR100 getCIFAR10 join getCIFAR10 ImageFolder DataLoader getSVHN expanduser getCIFAR100 asarray cdist min apply_along_axis len reshape asarray concatenate data cuda view len append range cat format size EmpiricalCovariance feature_list mean eval fill empty Variable print precision_ cpu numpy fit data intermediate_forward max cuda open view add ge range cat format size close mean eval float backward Variable write extend index_select index_copy_ numpy diag data model max cuda open add ge range CrossEntropyLoss format size close eval softmax float criterion backward Variable write index_select index_copy_ data floor intermediate_forward max cuda view add ge range cat size mean eval float int backward Variable extend index_select index_copy_ numpy diag data int view concatenate Variable reshape size extend feature_list mean eval mle_batch floor append cuda range data index_copy_ view backward Variable float size extend add mean ge index_select numpy intermediate_forward range cuda log_prob cat data index_copy_ view backward Variable float size extend add mean ge index_select numpy intermediate_forward range cuda log_prob cat concatenate concatenate int concatenate int concatenate format write close metric range open load join 
concatenate load join format stack amax get_Mahalanobis_score getNonTargetDataSet detection_performance load_characteristics block_split fit coef_ intercept_ block_split_adv block_split_RealNVP load_characteristics_RealNVP block_split_adv_RealNVP DataLoader tensor ones num_iter Adam RealNVP test cuda_index savez min length_hidden zeros train std layer join backward cpu step zero_grad empty numpy mkdir save append to next cuda range state_dict data view extend eval numpy range cuda log_prob cat insert trapz linspace zip append amin float empty range amax len log svd transpose matmul to sum feature_list mean sqrt fill getFGSM T cov numpy diag get_resflow_score get_resflow_score_FGSM reture_length_hidden randn Variable ResNet18 print size net
# Deep Residual Flow for Out of Distribution Detection An implementation of the Residual Flow algorithm for out-of-distribution detection \[[arXiv](https://arxiv.org/abs/2001.05419)\]. Some code was adapted from [deep_Mahalanobis_detector](https://github.com/pokaxpoka/deep_Mahalanobis_detector) and [RealNVP](https://github.com/tensorflow/models/tree/master/research/real_nvp). <p align="center"> <img width="500" src="./figures/Fig_AUROC_vs_iterations.png"> </p> ## Citing Residual Flow for OOD Detection ``` E. Zisselman, A. Tamar. "Deep Residual Flow for Out of Distribution Detection". CVPR 2020.
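The repository builds its residual flows on RealNVP-style layers (the RealNVP, Nets and Nett names in the listing above). As background only — a generic sketch of a RealNVP affine coupling layer, not the repository's class — the core transform and its log-determinant look like:

```python
import torch
import torch.nn as nn

class AffineCoupling(nn.Module):
    """Generic RealNVP-style affine coupling layer (illustrative sketch)."""
    def __init__(self, dim, hidden, mask):
        super().__init__()
        self.register_buffer("mask", mask)  # 0/1 vector selecting the conditioning half
        self.scale = nn.Sequential(nn.Linear(dim, hidden), nn.ReLU(),
                                   nn.Linear(hidden, dim), nn.Tanh())
        self.shift = nn.Sequential(nn.Linear(dim, hidden), nn.ReLU(),
                                   nn.Linear(hidden, dim))

    def forward(self, x):
        x_cond = x * self.mask                       # kept unchanged
        s = self.scale(x_cond) * (1 - self.mask)     # log-scale for the other half
        t = self.shift(x_cond) * (1 - self.mask)     # shift for the other half
        y = x_cond + (1 - self.mask) * (x * torch.exp(s) + t)
        log_det = s.sum(dim=1)                       # log|det J| of the coupling
        return y, log_det
```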
329
EvelynFan/FAU
['semantic correspondence']
['Facial Action Unit Intensity Estimation via Semantic Correspondence Learning with Dynamic Graph Convolution']
train.py lib/base.py demo.py functions.py lib/saver.py lib/data_provider.py lib/utils.py lib/net_utils.py test.py lib/logger.py lib/resnet_utils.py lib/resnet_v1.py lib/serialize.py lib/basemodel.py config.py model_graph.py lib/timer.py datasets.py Config BP4D DISFA Demo main test_net test denormalize_input get_lr generate_batch normalize_input Model_graph main test_net test main ModelDesc Tester Base Trainer colorlogger aggregate_batch average_gradients sum_gradients get_tower_summary_dict get_optimizer Block conv2d_same subsample resnet_arg_scope stack_blocks_dense resnet_v1_152 resnet_v1_101 bottleneck resnet_v1_200 resnet_v1_50 resnet_v1 get_variables_in_checkpoint_file Saver load_model loads_pyarrow loads_msgpack load_pkl dumps_msgpack dumps_pyarrow dump_pkl Timer approx_equal get_rng join DISFA num_AU_points dataset_path BP4D array join join join int epoch set_demo test vis set_vis demo parse_args gpu min append range array test_batch_size generate_batch len test_net load_val_data_with_annot load_weights Demo Model_graph Tester lr_dec_epoch index asarray print astype float32 IMREAD_COLOR IMREAD_IGNORE_ORIENTATION demo imread array sign num_AU_points floor argmax max shape float int time dict unravel_index zeros output_dir evaluation dataset Trainer Model_graph train ABCMeta ABCMeta format print name concat reduce_mean zip append expand_dims format print name concat reduce_sum zip append expand_dims isinstance concat append Tensor range len MomentumOptimizer AdamOptimizer GradientDescentOptimizer concat get_collection reduce_sum dict reduce_mean pad NewCheckpointReader get_variable_to_shape_map pop restore format global_variables print get_variables_in_checkpoint_file Saver int getpid strftime id
## FAU Implementation of the paper: **Facial Action Unit Intensity Estimation via Semantic Correspondence Learning with Dynamic Graph Convolution**. Yingruo Fan, Jacqueline C.K. Lam and Victor O.K. Li. ***AAAI 2020*** [[PDF]](https://aaai.org/Papers/AAAI/2020GB/AAAI-FanY.6827.pdf) A PyTorch version is also available: [Pytorch-FAU](https://github.com/EvelynFan/Pytorch-FAU) ## Overview <p align="center"> <img src="examples/framework.jpg" width="88%" /> </p> ## Environment - Ubuntu 18.04.4
330
EvgeniaAR/foolbox
['adversarial attack']
['Foolbox: A Python toolbox to benchmark the robustness of machine learning models']
foolbox/attacks/iterative_projected_gradient.py foolbox/tests/test_models_theano.py foolbox/tests/test_utils.py foolbox/tests/test_models.py foolbox/tests/test_attacks_pointwise.py foolbox/models/lasagne.py foolbox/attacks/precomputed.py foolbox/attacks/blended_noise.py foolbox/tests/conftest.py foolbox/tests/test_model_wrappers.py foolbox/tests/test_attacks_deepfool.py foolbox/tests/test_attacks_slsqp.py foolbox/models/mxnet.py foolbox/tests/test_attacks_approx_lbfgs.py foolbox/tests/test_models_pytorch.py foolbox/attacks/slsqp.py foolbox/tests/test_attacks_lbfgs.py foolbox/distances.py foolbox/tests/test_attacks_boundary.py foolbox/attacks/saltandpepper.py foolbox/attacks/adef_attack.py foolbox/models/mxnet_gluon.py foolbox/tests/test_attacks.py foolbox/tests/test_attacks_precomputed.py docs/conf.py foolbox/tests/test_attacks_saliency.py foolbox/tests/test_model_preprocessing.py foolbox/models/keras.py foolbox/tests/test_attacks_iterative_projected_gradient.py foolbox/tests/test_criteria.py foolbox/tests/test_models_lasagne.py foolbox/attacks/boundary_attack.py foolbox/models/wrappers.py foolbox/tests/test_attacks_localsearch.py foolbox/attacks/localsearch.py foolbox/tests/test_attacks_adef_attack.py foolbox/attacks/lbfgs.py foolbox/criteria.py foolbox/tests/test_models_mxnet.py foolbox/utils.py foolbox/tests/test_attacks_gradient.py foolbox/gradient_estimators.py foolbox/__init__.py foolbox/models/pytorch.py foolbox/tests/test_attacks_iterative_gradient_sign.py foolbox/models/tensorflow.py foolbox/tests/test_attacks_gradient_sign.py foolbox/tests/test_models_tensorflow.py foolbox/tests/test_adversarial.py foolbox/models/base.py foolbox/attacks/binarization.py foolbox/attacks/blur.py foolbox/attacks/gradient.py foolbox/tests/test_attacks_singlepixel.py foolbox/attacks/pointwise.py foolbox/adversarial.py foolbox/tests/test_attacks_noise.py foolbox/tests/test_attacks_contrast.py foolbox/models/theano.py foolbox/attacks/iterative_gradient.py foolbox/tests/test_attacks_iterative_gradient.py foolbox/attacks/base.py foolbox/attacks/saliency.py foolbox/models/__init__.py foolbox/tests/test_attacks_binarization.py foolbox/tests/test_attacks_blur.py foolbox/tests/test_distances.py setup.py foolbox/attacks/__init__.py foolbox/attacks/additive_noise.py foolbox/tests/test_models_mxnet_gluon.py foolbox/tests/test_models_keras.py foolbox/attacks/contrast.py foolbox/attacks/deepfool.py linkcode_resolve Adversarial CombinedCriteria TargetClassProbability TargetClass Misclassification Criterion ConfidentMisclassification TopKMisclassification OriginalClassProbability Linfinity MeanSquaredDistance MeanAbsoluteDistance L0 Distance CoordinateWiseGradientEstimator EvolutionaryStrategiesGradientEstimator imagenet_example binarize softmax crossentropy onehot_like batch_crossentropy AdditiveNoiseAttack AdditiveGaussianNoiseAttack AdditiveUniformNoiseAttack _transpose_image _create_vec_field _difference_map _compose ADefAttack _re_transpose_image Attack call_decorator BinarizationRefinementAttack BlendedUniformNoiseAttack GaussianBlurAttack BoundaryAttack DummyExecutor ContrastReductionAttack DeepFoolLinfinityAttack DeepFoolAttack DeepFoolL2Attack GradientAttack SingleStepGradientBaseAttack GradientSignAttack IterativeGradientBaseAttack IterativeGradientSignAttack IterativeGradientAttack L2GradientMixin LinfinityClippingMixin L1BasicIterativeAttack RandomStartProjectedGradientDescentAttack L2BasicIterativeAttack ProjectedGradientDescentAttack L1GradientMixin L1DistanceCheckMixin LinfinityBasicIterativeAttack 
IterativeProjectedGradientBaseAttack L2DistanceCheckMixin LinfinityGradientMixin MomentumIterativeAttack L1ClippingMixin L2ClippingMixin LinfinityDistanceCheckMixin LBFGSAttack ApproximateLBFGSAttack LocalSearchAttack SinglePixelAttack PointwiseAttack PrecomputedImagesAttack SaliencyMapAttack SaltAndPepperNoiseAttack SLSQPAttack _create_preprocessing_fn DifferentiableModel Model KerasModel LasagneModel MXNetModel MXNetGluonModel PyTorchModel TensorFlowModel TheanoModel ModelWithEstimatedGradients ModelWithoutGradients ModelWrapper CompositeModel DifferentiableModelWrapper test_attack test_attack_gl bn_targeted_adversarial bn_label binarized2_bn_adversarial model bn_trivial image bn_criterion bn_adversarial_pytorch bn_targeted_criterion binarized2_bn_model binarized_bn_adversarial gl_bn_model bn_adversarial bn_model_pytorch bn_adversarial_linf gl_bn_adversarial bn_targeted_adversarial_pytorch bn_model bn_impossible_criterion bn_trivial_criterion label eg_bn_model_factory bn_image criterion bn_image_pytorch binarized2_bn_label bn_impossible eg_bn_adversarial binarized_bn_label bn_adversarial_mae binarized_bn_model test_inplace test_adversarial test_base_attack test_base_init test_aliases test_attack test_targeted_attack test_attack_impossible test_attack_pytorch test_attack_gl test_targeted_attack_pytorch test_attack test_name test_attack_gl test_attack test_attack_fail test_attack_sp test_attack_noinit test_attack2 test_attack_wrong_arg test_attack test_attack_trivial test_attack_gl test_attack test_attack_parameters2 test_attack_convergence test_attack_impossible test_attack_continue test_attack_non_verbose test_attack_gl test_attack_parameters test_attack_parameters3 test_attack test_attack_gl test_attack test_targeted_attack test_deepfool_auto_linf test_deepfool_auto_p0 test_attack_impossible test_deepfool_auto_mae test_attack_gl test_subsample test_attack test_attack_eg test_attack_eps test_attack_gl test_attack test_attack_eps test_attack_gl test_attack test_attack_gl test_targeted_attack test_attack_l1 test_attack_impossible test_attack_no_binary_search_and_no_return_early test_attack_gl test_attack_l2 test_attack_linf test_attack test_targeted_attack test_attack_with_init_attack test_attack_pytorch test_attack_gl test_targeted_attack_pytorch test_attack test_targeted_attack test_attack_gl test_attack test_attack_impossible test_attack_gl test_attack test_attack_impossible test_attack_gl test_attack_continue test_attack_startingpoint test_attack test_unknown_image test_attack test_targeted_attack test_targeted_attack_slow test_targeted_attack_max test_attack_random_targets test_attack test_attack_gl test_attack test_attack_impossible test_attack_gl test_combined_criteria test_abstract_criterion test_misclassfication test_top_k_misclassfication test_original_class_probability test_target_class_probability test_confident_misclassification test_misclassification_names test_base_criterion test_target_class test_mse test_linf test_mae test_l0 test_mean_squared_distance test_str_repr test_base_distance test_mean_absolute_distance test_abstract_distance test_linfinity test_differentiable_base_model test_abstract_differentiable_model test_base_model test_abstract_model test_keras_backward test_keras_model test_keras_model_preprocess test_keras_model_probs test_keras_model_gradients test_lasagne_model test_lasagne_backward test_lasagne_gradient test_model test_model_gradient test_model_backward test_model test_model_gradient MeanBrightnessNet test_pytorch_backward 
test_pytorch_model_preprocessing_shape_change test_pytorch_model_gradient test_pytorch_model_preprocessing test_pytorch_model test_tensorflow_model test_tensorflow_model_non_diff test_tensorflow_preprocessing test_tensorflow_gradient test_tensorflow_backward test_tensorflow_model_cm test_theano_backward test_theano_model test_theano_gradient test_preprocessing test_composite_model test_context_manager test_wrapping test_diff_wrapper test_estimate_gradient_wrapper test_imagenet_example test_binarize test_crossentropy test_softmax test_imagenet_example_channels_first find_source exp max zeros_like sum exp max log sum exp max log copy join asarray transpose dirname resize open _transpose_image zeros_like _transpose_image arange interpolation empty_like shape RectBivariateSpline meshgrid range clip _transpose_image shape empty sum gaussian_filter asarray all Attack attack Attack attack join asarray dirname open Mock image array mean_brightness_net float32 placeholder Net PyTorchModel contextmanager param seed astype float32 seed astype float32 mean bn_image argmax bn_label bn_image contextmanager bn_label bn_criterion bn_image contextmanager bn_label bn_criterion bn_image contextmanager bn_label bn_criterion bn_targeted_criterion bn_image contextmanager bn_label bn_image contextmanager bn_label bn_criterion bn_image bn_label bn_criterion contextmanager eg_bn_model_factory bn_impossible_criterion bn_image contextmanager bn_label bn_trivial_criterion bn_image contextmanager bn_label bn_image_pytorch bn_label bn_model_pytorch bn_criterion bn_targeted_criterion bn_label bn_model_pytorch bn_image_pytorch mean_brightness_net float32 placeholder binarized_bn_label bn_image contextmanager bn_criterion mean bn_image argmax binarize mean_brightness_net float32 placeholder bn_image contextmanager binarized2_bn_label bn_criterion mean bn_image argmax binarize seed ones_like Adversarial value set_distance_dtype reset_distance_dtype predictions_and_gradient backward float64 Mock astype float32 gradient uniform batch_predictions predictions attack TestAttack FGSM Adversarial attack Attack attack Attack attack Attack attack Attack attack Attack value original_image BinarizationRefinementAttack GradientAttack image assert_allclose BinarizationRefinementAttack GradientAttack attack BinarizationRefinementAttack attack _model original_class value original_image BinarizationRefinementAttack GradientAttack image attack assert_allclose value original_image BinarizationRefinementAttack GradientAttack image attack assert_allclose BinarizationRefinementAttack GradientAttack attack Attack attack BoundaryAttack BoundaryAttack attack value BlendedUniformNoiseAttack BoundaryAttack attack2 attack1 seed dtype original_image BoundaryAttack astype attack BoundaryAttack attack seed dtype original_image BoundaryAttack astype attack BoundaryAttack BoundaryAttack attack1 BoundaryAttack DeepFoolAttack attack2 Attack attack attack DeepFoolAttack DeepFoolAttack DeepFoolAttack Attack attack Attack attack LinfinityBasicIterativeAttack attack Attack attack Attack attack Attack attack Attack attack seed dtype original_image astype Attack attack seed dtype original_image astype Attack attack predictions zeros_like Attack original_image zeros_like Attack attack Attack attack Attack attack TestCriterion len is_adversarial Misclassification range array log OriginalClassProbability Misclassification array Misclassification TopKMisclassification array TopKMisclassification TargetClass array range len array OriginalClassProbability 
TargetClassProbability array range len array range ConfidentMisclassification TestDistance MeanSquaredDistance array MeanAbsoluteDistance array Linfinity array array L0 ones zeros Distance TestModel ones TestModel Sequential astype float32 gradient assert_almost_equal predictions seed astype float32 batch_predictions assert_array_almost_equal max seed astype float32 copy batch_predictions assert_array_almost_equal max seed norm predictions_and_gradient astype float32 assert_array_almost_equal array backward reshape Sequential astype float32 repeat assert_almost_equal mean_brightness_net astype float32 gradient LasagneModel assert_almost_equal tensor4 InputLayer predictions seed norm mean_brightness_net predictions_and_gradient astype float32 LasagneModel assert_array_almost_equal tensor4 InputLayer mean_brightness_net backward reshape astype float32 LasagneModel repeat assert_almost_equal tensor4 InputLayer mean_brightness_net Variable astype float32 gradient MXNetModel assert_almost_equal predictions norm mean_brightness_net predictions_and_gradient Variable astype float32 MXNetModel _loss_fn assert_array_almost_equal mean_brightness_net backward Variable reshape astype float32 MXNetModel repeat assert_almost_equal MeanBrightnessNet MXNetGluonModel MeanBrightnessNet MXNetGluonModel astype float32 gradient Net PyTorchModel assert_almost_equal predictions seed astype float32 copy Net batch_predictions PyTorchModel assert_array_almost_equal max seed norm predictions_and_gradient astype float32 Net PyTorchModel _loss_fn assert_array_almost_equal backward reshape astype float32 Net PyTorchModel repeat assert_almost_equal seed transpose astype float32 gradient Net batch_predictions PyTorchModel assert_array_almost_equal predictions Graph Graph Graph Graph Graph Graph mean_brightness_net TheanoModel astype float32 gradient tensor4 assert_almost_equal predictions seed norm mean_brightness_net predictions_and_gradient TheanoModel astype float32 assert_array_almost_equal tensor4 mean_brightness_net backward reshape TheanoModel astype float32 repeat tensor4 assert_almost_equal _create_preprocessing_fn preprocessing copy backward DifferentiableModelWrapper predictions CompositeModel seed predictions predictions_and_gradient gradient array softmax softmax crossentropy array range len imagenet_example range imagenet_example array binarize
EvgeniaAR/foolbox
331
ExplorerFreda/TreeEnc
['sentiment analysis', 'text classification']
['On Tree-Based Neural Sentence Modeling']
src/train_sentrel_classification.py src/basic.py src/data.py src/models.py src/utils.py src/train_classification.py src/train_generation.py reverse_padded_sequence cos_nd apply_nd sequence_mask affine_nd dot_nd st_gumbel_softmax convert_to_one_hot masked_softmax greedy_select SentenceDataset SentRelDataset Vocab TranslationDataset SentRelModel Classifier BinaryTreeLSTMLayer GumbelTreeLSTMEncoder SentClassModel LinearLSTMEncoder AttnCombiner TreeLSTMEncoder RecursiveTreeLSTMEncoder SentenceDecoder Seq2SeqModel main train main train main train unwrap_scalar_variable generate_balance_masks generate_lifted_balance_masks get_tree_structures generate_right_branch_masks generate_left_branch_masks build_right_branch_tree wrap_with_variable build_left_branch_tree sort_sentences_by_lengths bleu load_glove generate_guided_balance_masks build_balance_tree_from_range fn size view size addmm view expand mv size view mv size sum view data Variable size scatter_ unsqueeze float sum softmax convert_to_one_hot masked_softmax Variable uniform_ float masked_softmax detach max Variable size expand cuda expand_as long is_cuda Variable transpose get_device expand_as gather cuda is_cuda enumerate vocab_path run_iter SentClassModel DataLoader ReduceLROnPlateau save save_dir cuda max FloatTensor max_epoch Vocab load_state_dict getattr anneal_temperature_rate glove CrossEntropyLoss range optimizer_class state_dict format set_ info is_available load_glove optimizer enumerate load join print load_checkpoint from_file fix_word_embedding dict SentenceDataset step len setFormatter addHandler train add_argument system StreamHandler ArgumentParser save parse_args save_dir setLevel exists INFO FileHandler TranslationDataset src_vocab_path tgt_vocab_path Seq2SeqModel SentRelModel SentRelDataset tolist range len Variable cuda is_available isinstance list format enumerate unwrap_scalar_variable range append split str list make_one_hot_gold_mask wrap_with_variable tree_encoding_to_mask_ids append build_balance_tree_from_range pad_mask enumerate list pad_mask min make_one_hot_gold_mask wrap_with_variable tree_encoding_to_mask_ids append range max build_balance_tree_from_range enumerate len list pad_mask min make_one_hot_gold_mask wrap_with_variable filter tree_encoding_to_mask_ids append range max build_balance_tree_from_range enumerate len format range list make_one_hot_gold_mask wrap_with_variable build_left_branch_tree tree_encoding_to_mask_ids append pad_mask enumerate format range list make_one_hot_gold_mask build_right_branch_tree wrap_with_variable tree_encoding_to_mask_ids append pad_mask enumerate dict zeros_like word_to_id sort index_select
# [On Tree-Based Neural Sentence Modeling](https://arxiv.org/pdf/1808.09644.pdf) Authors: [Haoyue Shi](http://explorerfreda.github.io), [Hao Zhou](http://zhouh.github.io), Jiaze Chen and [Lei Li](http://www.cs.cmu.edu/~leili/). This repo includes the implementation of our paper "On Tree-Based Neural Sentence Modeling" at EMNLP 2018 [1]. In this repo, you may find * various kinds of text encoders (contributions for new encoders are always welcome!!); * a unified PyTorch framework which can support three common groups of NLP tasks. ## Overview ![intro.jpg](misc/intro.jpg) We study the problem of sentence encoding on various downstream tasks, which can be grouped into three categories: sentence classification, sentence relation classification and sentence generation.
332
FAKEBOB-adversarial-attack/FAKEBOB
['speech recognition', 'speaker recognition', 'adversarial attack']
['Who is Real Bob? Adversarial Attacks on Speaker Recognition Systems']
gmm_ubm_kaldiHelper.py ivector_PLDA_OSI.py gmm_ubm_CSI.py build_spk_models.py ivector_PLDA_SV.py ivector_PLDA_kaldiHelper.py test.py gmm_ubm_OSI.py FAKEBOB.py gmm_ubm_SV.py ivector_PLDA_CSI.py attackMain.py main loadData load_model FakeBob gmm_CSI gmm_ubm_kaldiHelper gmm_OSI gmm_SV ivector_PLDA_kaldiHelper iv_SV set_threshold gmm_CSI join iv_OSI gmm_SV iv_CSI iv_SV gmm_OSI join read str len range flatten spk_ids zip append listdir array make_decisions makedirs join load_model zip print loadData len write attack FakeBob estimate_threshold makedirs size abs array infty
# FAKEBOB Source code for the paper "Who is real Bob? Adversarial Attacks on Speaker Recognition Systems". Demonstration Website: [FAKEBOB Website](https://sites.google.com/view/fakebob/home "FAKEBOB Website") (including a One-Minute Video Preview) Our paper has been accepted by the [42nd IEEE Symposium on Security and Privacy (**IEEE S&P, Oakland**), 2021](https://www.ieee-security.org/TC/SP2021/program-papers.html). Paper link: [Who is real Bob? Adversarial Attacks on Speaker Recognition Systems](https://arxiv.org/abs/1911.01840). Oakland 2021 Presentation Slide: [Session #5-GuangkeChen-WhoisRealBob](http://guangkechen.site/FAKEBOB/Oakland2021-Session-5-GuangkeChen-WhoisRealBob.pdf) Oakland 2021 Talk Video: [Presentation Video](https://youtu.be/ZRfkcojsUD4) Cite our paper as follows: @INPROCEEDINGS {chen2019real, author = {G. Chen and S. Chen and L. Fan and X. Du and Z. Zhao and F. Song and Y. Liu},
333
FEE-Fair-Embedding-Engine/FEE
['word embeddings']
['Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings']
fee/metrics/indirect_bias.py fee/visualize/gender_cluster_tsne.py fee/reports/biased_neighbours.py fee/embedding/loader.py fee/metrics/__init__.py fee/metrics/sembias.py fee/debias/hard_debias.py fee/visualize/pca_components.py fee/metrics/direct_bias.py fee/reports/word_report.py fee/debias/__init__.py fee/visualize/__init__.py docs/source/conf.py fee/metrics/gipe.py fee/reports/global_report.py fee/debias/hsr_debias.py fee/visualize/neighbour_bias_wordcloud.py fee/metrics/proximity_bias.py fee/utils.py fee/visualize/neighbour_plot.py fee/debias/ran_debias.py fee/reports/__init__.py fee/metrics/weat.py fee/metrics/pmn.py doPCA cosine get_nbs get_pair_idb get_g _hard_neutralize HardDebias HSRDebias get_ns_idb calc_ns_idb get_neighbors_idb_dict RANDebias _get_N_info RANOpt init_vector torch_cosine_similarity ran_objective WE DirectBias GIPE get_ns_idb score get_neighbors_idb_dict prox_bias get_neighbors _gipe IndirectBias _pmb PMN _get_nbs_i _prox_bias _bias_ratio _get_nbs_i ProxBias SemBias weat_association weat_p_value WEAT weat_score unit_vector cos_sim weat_differential_association NeighboursAnalysis GlobalReport WordReport GCT NBWordCloud NeighbourPlot generate_palette color_fader PCAComponents PCA bar append v explained_variance_ratio_ array range fit norm dot v norm isinstance append get_neighbors_idb_dict dict zip dict v get_nbs zip dict get_g zip deepcopy v mean dict v get_nbs zip eps defaultdict get_neighbors_idb_dict prox_bias get_pair_idb append get_neighbors float array enumerate len _get_nbs_i enumerate _get_nbs_i get_pair_idb append float enumerate unit_vector concatenate multiset_permutations weat_differential_association append zeros array len weat_association concatenate weat_p_value mean std to_rgb array
# Fair Embedding Engine Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings. # Abstract Non-contextual word embedding models have been shown to inherit human-like stereotypical biases of gender, race and religion from the training corpora. To counter this issue, a large body of research has emerged which aims to mitigate these biases while keeping the syntactic and semantic utility of embeddings intact. This paper describes Fair Embedding Engine (FEE), a library for analysing and mitigating gender bias in word embeddings. FEE combines various state of the art techniques for quantifying, visualising and mitigating gender bias in word embeddings under a standard abstraction. FEE will aid practitioners in fast track analysis of existing debiasing methods on their embedding models. Further, it will also allow rapid prototyping of new methods by evaluating their performance on a suite of standard metrics. # Modules The core functionality of FEE is governed by five modules, namely *Loader*, *Debias*, *Bias Metrics*, *Visualization*, and *Report*. ``` . |-debias | |-ran_debias.py
334
FFTYYY/RoR_relation_extraction
['relation extraction']
['Relation of the Relations: A New Paradigm of the Relation Extraction Problem']
utils/logger.py utils/others.py dataloader/dataloader_semeval_2018_task7.py utils/watch_time.py train.py dataloader/dataloader_ace05.py models/graph_encoder.py utils/composed_model.py utils/train_util.py generate.py dataloader/base.py dataloader/data_config.py loss/losses.py loss/__init__.py models/__init__.py outputs/analyze/watch.py utils/tmp_file.py models/matrix_transformer.py dataloader/__init__.py test.py models/graph_trans.py main.py outputs/analyze/compare.py utils/scorer.py outputs/analyze/gene.py config.py utils/write_keyfile.py auto_hyperparam get_config before_parse_t2g after_parse_t2g Generator generate_from_pred generate main initialize load_data test get_output get_evaluate before_test before_train train update_batch Data Entity Relation get_file_content numberize tokenize_and_index data_process bertize validize cut get_rel_weights _read_data read_data parse_a_file file_content2data read_data parse_a_text_file parse_a_key_file get_dataloader get_loss_func loss_1 loss_2 get_loss_func LossFunction Encoder Attention FFN Encoder_Layer Model Encoder Attention FFN Encoder_Layer get_model compare gene_golden gene generate_output EnsembleModel TwoPhaseModel Logger intize _get_rels test _get_data _file2matrix get_f1 random_tmp_name pad_sents get_data_from_batch time_str now_time write_keyfile add_argument debug log_print_w_time Logger log_file log auto_hyperparam seed list random_tmp_name set_device device_count nolog range manual_seed_all __dict__ no_fitlog add_hyper debug t2g_seed pformat manual_seed set_log_dir no_log t2g_lr parse_args t2g_batch_size after_parse_t2g int size add_rel range len join softmax range generate_from_pred len train_rels_1 train_text_1 test_text rel_weight_smooth valid_rels dataset rel_weight_norm train_text_2 train_rels_2 test_rels log valid_text loss format get_loss_func Generator no_rel_name pos_only index no_rel_weight log len initialize hasattr ensemble_size add_hyper get_config model_save test log load_data finish cpu train module range append int Module isinstance eval device range len generator model loss_func device to enumerate write_keyfile get_f1 makedirs add_metric set_postfix_str format get_evaluate get_data_from_batch log tqdm set_description_str before_test range int batch_size gpus Adam DataParallel device to len backward model zero_grad loss_func step set_postfix_str get_data_from_batch update_batch epoch_numb before_train log tqdm test set_description_str add_loss range append ents log remove len e log s ents abs tokenize range count u abs convert_tokens_to_ids index type ans v ent_name2id list Counter array sum keys bertize numberize enumerate pop reverse append cut log enumerate from_pretrained data_post_process pop no_rel_name shuffle index log reverse ans append get_rel_weights enumerate join Data Entity replace Relation index loads split append len pop data_process reverse parse_a_file append log enumerate join list remove Entity replace Data name strip filter find append log split append Relation split update parse_a_text_file parse_a_key_file get_file_content view size device zeros to range cross_entropy view size device zeros to range cross_entropy from_pretrained decode beautiful_str list gene_file len dirname append range write_keyfile get_data_from_batch eval get_f1 int print tqdm filter split makedirs from_pretrained decode beautiful_str list gene_file len dirname append range write_keyfile get_data_from_batch eval get_f1 int print tqdm filter split makedirs from_pretrained int decode get_data_from_batch gene_file makedirs 
tqdm dirname append beautiful_str range len from_pretrained decode device beautiful_str list gene_file len dirname append range get_data_from_batch eval set_description_str int Module isinstance tqdm filter split makedirs append int range len sorted list _get_rels recall_score set _get_data precision_score classes_ transform f1_score keys log int format print show_var split append fit LabelEncoder int set_trace split print get_f1 time max range len to pad_sents append ans relations
This repo contains the code for the EMNLP 2020 paper "[Relation of the Relations: A New Paradigm of the Relation Extraction Problem](https://arxiv.org/pdf/2006.03719.pdf)" (Jin et al., 2020). ## How to Run ### Prepare data and environment - SemEval2018 Task 7.2 dataset: You can download the publicly available data with the following command: ``` bash prep_data_and_env.sh ``` This command will also prepare the Python environment for you. It will install all the packages in [requirements.txt](requirements.txt). - ACE2005 dataset: You can download it from its [LDC website](https://catalog.ldc.upenn.edu/LDC2006T06). ### Training
335
FGNN-Author/FGNN
['semantic segmentation']
['Finite Group Equivariant Neural Networks for Games']
U-Net/data.py Checkers/checkers_layers.py Checkers/checkers_display.py Checkers/checkers_models.py U-Net/train_test.py Checkers/checkers_load.py shared/layers.py U-Net/layers.py Checkers/checkers_train.py Checkers/checkers_helpers.py U-Net/training/setup_training.py U-Net/main.py U-Net/model.py SimpleSquareBoardDisplay MoveHeatmapDisplay SimpleBoardDisplay BoardDisplay boardToMat ixToMatPos getDefaultPosition matToBoard makeSubMove makeMove coordToIx oneHotMove GraphConv ConstGraphConv transpose reshape single_mode_dot mixed_mode_dot filter_dot Conv2DSymm dataGenerator makeBatches CheckersGame getData load_games fullyConvModelSymm getModelConv top_3_accuracy stupidGraphModel literallyFuckingStupidFullyConvModel getCheckersAdjMat getModel getModelMaxPool literallyFuckingStupidModel getGoModel top_5_accuracy ConcatSym flipV flipH Drop rot Conv2DSym Lift no_op testGenerator saveResult geneTrainNpy adjustData labelVisualize trainGenerator ConcatSym flipV flipH Drop rot Conv2DSym Lift no_op unet_sym unet zeros zeros ixToMatPos matToBoard makeSubMove zip boardToMat ixToMatPos matToBoard int_shape reshape transpose single_mode_dot is_sparse is_sparse sparse_tensor_dense_matmul is_sparse transpose matmul getDefaultPosition makeSubMove moves copy zip flip oneHotMove enumerate dataGenerator flatten zeros expand_dims load_games nbytes print flatten zeros expand_dims enumerate len zeros conn range compile Lambda Sequential Adam add summary Conv2DSymm Activation range Flatten compile Lambda Sequential Adam add summary Conv2D Activation range Flatten Adam Model addLayer getCheckersAdjMat summary Input range compile compile Sequential Adam add Dense summary Conv2D range Flatten compile Sequential Adam add Dense summary Conv2D range Flatten Sequential SGD add summary Conv2D range compile compile Sequential SGD add Dense summary Conv2D Flatten Dropout compile Sequential add Dense MaxPooling2D summary Conv2D Flatten Dropout zeros range shape ImageDataGenerator adjustData flow_from_directory zip join reshape shape resize imread range join replace glob array append imread adjustData enumerate zeros range shape join imsave enumerate load_weights Model Input compile concatenate Model load_weights Input compile
FGNN-Author/FGNN
336
FLming/CRNN.tf2
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
tools/demo.py crnn/train.py crnn/eval.py crnn/models.py crnn/export.py crnn/decoders.py crnn/metrics.py crnn/dataset_factory.py crnn/losses.py SimpleDataset ICDARDataset MJSynthDataset Dataset DatasetBuilder CTCBeamSearchDecoder CTCDecoder CTCGreedyDecoder CTCLoss SequenceAccuracy EditDistance vgg_style build_model read_img_and_resize postprocess vgg_style preprocess Model load_weights Input float64 shape read_file cast int32 resize decode_jpeg
# Convolutional Recurrent Neural Network for End-to-End Text Recognition - TensorFlow 2 ![TensorFlow version](https://img.shields.io/badge/TensorFlow->=2.3-FF6F00?logo=tensorflow) ![Python version](https://img.shields.io/badge/Python->=3.6-3776AB?logo=python) [![Paper](https://img.shields.io/badge/paper-arXiv:1507.05717-B3181B?logo=arXiv)](https://arxiv.org/abs/1507.05717) [![Zhihu](https://img.shields.io/badge/知乎-文本识别网络CRNN—实现简述-blue?logo=zhihu)](https://zhuanlan.zhihu.com/p/122512498) This is a re-implementation of the CRNN network, built with TensorFlow 2. This repository may help you understand how to build an end-to-end text recognition network easily. Here is the official [repo](https://github.com/bgshih/crnn) implemented by [bgshih](https://github.com/bgshih). ## Abstract This repo aims to build a simple, efficient text recognition network using the various components of TensorFlow 2. The model is built with the Keras API, the data pipeline with `tf.data`, and training is done with `model.fit`, so we can use most of the features provided by TensorFlow 2, such as `TensorBoard`, `Distribution strategy`, and the `TensorFlow Profiler`. ## Installation ```bash
337
FLoosli/CP_GAN
['instance segmentation', 'semantic segmentation']
['Learning to Segment via Cut-and-Paste']
CP_GAN_models.py CP_GAN_train_and_eval.py Input_and_Utils.py paper_gen min_metric super_unet_gen conf_metric paper_disc cut_and_paste double_conv_layer super_paper_gen confidents_loss mask_loss unet_gen_for_gan min_mask_loss train_with_gt Paper_CPGAN get_optim evaluate resize_images get_val_dataset get_cropped_eval get_train_dataset get_image_and_anns cut_and_paste save_image get_cropped_images cut_to_size spatial_2d_padding concatenate Lambda double_conv_layer cut_paste_layer Model summary Input concatenate double_conv_layer Model summary Input Model Input summary Lambda cut_paste_layer Model summary Input Model Input summary get_cropped_eval save_weights save_image annToMask resize_images get_train_dataset exit append range get_image_and_anns super_paper_gen load_weights get_cropped_images compile super_unet_gen time print array fit get_cropped_eval save_image round annToMask paper_gen resize_images squeeze exit unet_gen_for_gan append expand_dims sum predict paper_disc get_image_and_anns astype super_paper_gen load_weights zip get_cropped_images super_unet_gen print get_val_dataset expand_dims pad squeeze get_cropped_images expand_dims squeeze append resize append int pad int print extend stack zip append randint expand_dims range len append int normal getAnnIds reshape shape getImgIds any append getCatIds loadAnns imread range gaussian_filter print squeeze axis imshow savefig COCO COCO
# CP_GAN Implementation of a Cut-and-Paste GAN in Keras, based on the following paper by Tal Remez, Jonathan Huang and Matthew Brown: https://arxiv.org/pdf/1803.06414.pdf Required versions for the project to work: Tensorflow 1.13.1, numpy 1.16.1, keras 2.2.4, skimage 0.14.2, matplotlib 3.0.3
338
FalongShen/styletransfer
['style transfer']
['Meta Networks for Neural Style Transfer']
python/demo.py
[![License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE) Neural Style Transfer ===== This repository is for [Neural Style Transfer Via Meta Networks]. The meta network takes in the style image and generates an image transformation network for neural style transfer. The generated model is only `449KB` and can run in real time on a mobile device. For more details please refer to and cite this paper: @inproceedings{shen2018style, author = {Falong Shen, Shuicheng Yan and Gang Zeng}, title = {Neural Style Transfer Via Meta Networks}, booktitle = {CVPR2018}, year = {2018} }
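Below is a minimal, hypothetical sketch of the weight-generation idea described in that README: a tiny "meta network" maps a style embedding to the parameters of a single convolutional layer, which then transforms the content image. The class name, layer sizes, and the use of just one generated conv layer are illustrative assumptions, not the paper's actual architecture.

```python
# Hedged sketch only: illustrates "generate a transformation network from the style",
# not the paper's real model.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyMetaNet(nn.Module):
    def __init__(self, style_dim=128, out_ch=3, in_ch=3, k=3):
        super().__init__()
        self.out_ch, self.in_ch, self.k = out_ch, in_ch, k
        n_params = out_ch * in_ch * k * k + out_ch  # conv weight + bias
        self.fc = nn.Linear(style_dim, n_params)

    def forward(self, style_embedding, content_image):
        p = self.fc(style_embedding)                         # flat parameter vector
        w_end = self.out_ch * self.in_ch * self.k * self.k
        weight = p[:w_end].view(self.out_ch, self.in_ch, self.k, self.k)
        bias = p[w_end:]
        # The generated conv stands in for the image transformation network.
        return F.conv2d(content_image, weight, bias, padding=self.k // 2)

meta = TinyMetaNet()
style_emb = torch.randn(128)            # placeholder style embedding
content = torch.randn(1, 3, 64, 64)     # placeholder content image
stylized = meta(style_emb, content)
print(stylized.shape)                   # torch.Size([1, 3, 64, 64])
```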
339
Fangyh09/Autoregressive-Convolutional-Neural-Networks.Pytorch
['time series']
['Autoregressive Convolutional Neural Networks for Asynchronous Time Series']
model_tf.py main.py model.py SignificanceCNN EncoderCNN OffsetCNN ARKernel
# Autoregressive-Convolutional-Neural-Networks https://arxiv.org/pdf/1703.04122.pdf <!-- $\hat { y } _ { n } = \sum _ { m = 1 } ^ { M } W _ { \cdot , m } \otimes \left( \operatorname { off } \left( x _ { n - m } \right) + x _ { n - m } ^ { I } \right) \otimes \sigma \left( S _ { \cdot , m } \left( \mathbf { x } _ { n } ^ { - M } \right) \right) $ --> <!-- <img src="https://latex.codecogs.com/gif.latex?\hat { y } _ { n } = \sum _ { m = 1 } ^ { M } W _ { \cdot , m } \otimes \left( \operatorname { off } \left( x _ { n - m } \right) + x _ { n - m } ^ { I } \right) \otimes \sigma \left( S _ { \cdot , m } \left( \mathbf { x } _ { n } ^ { - M } \right) \right)" /> --> ## Model ![](model.png) ## Run ```python python main.py
340
FengleiFan/IndependentEvaluation
['medical diagnosis']
['On Interpretability of Artificial Neural Networks: A Survey']
Code For Figure 3/influence/logisticRegressionWithLBFGS.py Code For Figure 16/PIPO-FAN-master/pipo_fan/train_sf_partial.py Code For Figure 5/main.py Code For Figure 16/PIPO-FAN-master/pipo_fan/segment_sf_partial.py Code For Figure 16/model/unet.py Code For Figure 3/influence/nlprocessor.py Code For Figure 16/PIPO-FAN-master/pipo_fan/dataset/dataset_liverCT_2D.py Code For Figure 16/PIPO-FAN-master/pipo_fan/dataset/dataset_muor_2D.py Code For Figure 3/influence/dataset.py Code For Figure 9/main_lime.py Code For Figure 3/influence/imagenet_utils.py Code For Figure 7/main_rule_extraction.py Code For Figure 10/main_neural_ode.py Code For Figure 3/influence/inceptionModel.py Code For Figure 17/main_BioLearning.py Code For Figure 16/model/denseu_net.py Code For Figure 3/influence/image_utils.py Code For Figure 6/Gradient_Sundararajan.py Code For Figure 16/PIPO-FAN-master/pipo_fan/resample.py Code For Figure 3/influence/smooth_hinge.py Code For Figure 16/PIPO-FAN-master/pipo_fan/train_concave0.py Code For Figure 3/influence/hessians.py Code For Figure 3/main.py Code For Figure 3/influence/all_CNN_c.py Code For Figure 6/Gradient_DeepTaylorLRP.py Code For Figure 6/Gradient_Simonyan.py Code For Figure 6/Gradient_Smilkov.py Code For Figure 6/ShowSaliency.py Code For Figure 10/main_adjointMethods.py Code For Figure 3/influence/experiments.py Code For Figure 3/influence/dataset_poisoning.py Code For Figure 3/influence/inception_v3.py Code For Figure 6/lrp.py Code For Figure 3/influence/binaryLogisticRegressionWithLBFGS.py Code For Figure 16/model/concave_dps.py Code For Figure 16/model/resu_net.py Code For Figure 16/model/concave_dps_w.py Code For Figure 3/influence/genericNeuralNet.py Lambda compute_gradients_and_update plot_trajectory NeuralODE euler_update euler_step defun_neural_ode rk2_step zip_map rk4_step outconv ResUNet up one_conv double_conv down res_conv inconv ResUNet attention outconv DenseUNet up_out up_in upblock _DenseLayer _DenseBlock _Transition outconv ResUNet up one_conv double_conv down inconv outconv up double_conv UNet down inconv ResampleBySize_view SimpleITKAsNibabel compute_dice SimpleITKAsNibabelHeader make_affine load_network load_image construct_volume extract_volume Nifti_from_numpy validate LoCeLoss2d CrossEntropyLoss2d compute_length AverageMeter LovaszLoss2d adjust_learning_rate save_checkpoint HybridLoss2d DiceLoss FocalLoss2d dice_similarity train validate visualize_val1 CrossEntropyLoss2d visualize_train1 AverageMeter dice_similarity_u adjust_learning_rate save_checkpoint visualize_val dice_similarity train visualize_train ToTensor Clip RandomVerticalFlip RandomCrop Normalize RandomHorizontalFlip get_composed_transform LiverCTDataset ToTensor Clip RandomVerticalFlip RandomCrop Normalize RandomHorizontalFlip get_composed_transform LiverCTDataset draw_weights load_cifar100 fill_feed_dict_with_some_ex get_test_grad_loss_no_reg_val fill_feed_dict_with_batch fill_feed_dict_with_all_ex update_feed_dict_with_v_placeholder get_inverse_hvp_lissa get_influence_on_test_loss fill_feed_dict_with_all_but_one_ex conv2d All_CNN_C softplus BinaryLogisticRegressionWithLBFGS filter_dataset find_distances DataSet generate_inception_features iterative_attack select_examples_to_attack get_projection_to_box_around_orig_point poison_with_influence_proj_gradient_step hessian_vector_product _AsList hessians preprocess_input decode_predictions plot_flat_bwimage plot_flat_colorgrad plot_flat_colorimage plot_flat_bwgrad InceptionV3 conv2d_bn preprocess_input NLProcessor get_lrp_im fprop_first lrp 
fprop_conv fprop_pool get_traversed traverse fprop fprop_conv_first visualize bias_variable weight_variable plot xlabel axis ylabel figure legend apply_gradients gradient zip backward euler_update func euler_update func forward_odeint defun forward backward SetOutputSpacing asarray GetSpacing Execute GetSize print ResampleImageFilter sitkLinear tolist WriteImage AffineTransform SetTransform ReadImage SetSize SetReferenceImage SetInterpolator load load ResUNet format print load_state_dict isfile cpu cuda sum concatenate transpose matmul array diag append range ones shape div cuda append zeros sum max range cat len float sum update data time format criterion model backward print AverageMeter size zero_grad dice_similarity float step cuda enumerate len update data time format criterion model print size AverageMeter eval dice_similarity float cuda enumerate len param_groups copyfile join format save view squeeze mean conv2d unsqueeze float sum size view dice_similarity clone shape zeros range join transpose numpy imsave join numpy imsave join transpose numpy imsave join numpy imsave dice_similarity_u cat long clamp clone clone dice_similarity_u long Compose zeros reshape draw axis absolute colorbar imshow clf randint range amax norm time arange concatenate print dot get_inverse_hvp_lissa get_test_grad_loss_no_reg_val zeros run int fill_feed_dict_with_some_ex min run ceil range len array print update_feed_dict_with_v_placeholder fill_feed_dict_with_batch range run zip astype reshape minimum ones_like maximum abs sum print project_fn sign int DataSet num_examples fill_feed_dict_with_batch inception_features reset_batch run xrange Datasets train append len arange savez print copyfile model_name update_train_x_y range len gradients len ndims _AsList enumerate image_dim_ordering get_file load append open reshape reshape abs max reshape reshape _obtain_input_shape get_file concatenate get_source_inputs warn Model conv2d_bn load_weights convert_all_kernels_in_model Input range fprop_first fprop_conv fprop_pool get_traversed fprop append fprop_conv_first get_tensor_by_name append name input transpose matmul transpose maximum matmul minimum maximum conv2d shape conv2d_backprop_input as_list reshape maximum conv2d shape conv2d_backprop_input as_list reshape max_pool_grad max_pool range minimum subplot percentile format norm show print maximum mean imshow title figure max range len truncated_normal constant
This is the code of our independent evaluation of some existing interpretability methods. The evaluation accompanies our review paper "On Interpretability of Artificial Neural Networks: A Survey" (https://arxiv.org/abs/2001.02522). The required environment is TensorFlow 1.XX. You may have to download the necessary packages before you run this code. You can run most of the code directly without warnings or errors, except for the following cases: 1. For the PIPO-FAN code, the saved model named "resunet_checkpoint_final.pth" was not uploaded due to its large size. 2. For Figure 17, you need to download the CIFAR-100 dataset first. The complete file can be obtained from Google Drive: https://drive.google.com/drive/folders/1836f_sruRBG19BVEp-905CR6JNS5o5cs?usp=sharing
341
FengleiFan/SparseShortcutTopology
['gaussian processes']
['On Exact Computation with an Infinitely Wide Neural Net']
generalizability experiment/TinyImageNet/utils.py generalizability experiment/TinyImageNet/Tiny_EfficientNet.py generalizability experiment/TinyImageNet/se_resnet.py generalizability experiment/TinyImageNet/scalenet.py generalizability experiment/TinyImageNet/Tiny_S3Net.py generalizability experiment/TinyImageNet/RandomWiredModel.py generalizability experiment/TinyImageNet/octconv.py interpretability experiment/full_grad.py generalizability experiment/ImageNet/train.py generalizability experiment/TinyImageNet/ghost_net.py generalizability experiment/cifar100/s3model.py generalizability experiment/TinyImageNet/Tiny_GhostNet.py generalizability experiment/cifar100/train_S3Net.py generalizability experiment/ImageNet/S3Net.py generalizability experiment/TinyImageNet/se_module.py generalizability experiment/cifar100/optim.py generalizability experiment/TinyImageNet/Tiny_ScaleNet.py generalizability experiment/ImageNet/folder2lmdb.py generalizability experiment/TinyImageNet/oct_resnet.py generalizability experiment/TinyImageNet/LambdaNet.py generalizability experiment/TinyImageNet/Tiny_RandomlyWired.py generalizability experiment/ImageNet/validate_10crop.py interpretability experiment/S3Net.py generalizability experiment/TinyImageNet/Tiny_LambdaNet.py generalizability experiment/TinyImageNet/Tiny_OctResNet.py generalizability experiment/TinyImageNet/Tiny_SEResNet.py generalizability experiment/TinyImageNet/SparseShortcutTopologyModels.py interpretability experiment/Plot_segmentation_map.py interpretability experiment/Interpret_fullgradient.py generalizability experiment/TinyImageNet/Tiny_MobileNetV2.py ScheduledOptim _DenseBlock _Transition DenseNet _DenseBLayer train evaluate dataLoader folder2lmdb ImageFolderLMDB raw_reader dumps_pyarrow s3net_new_big2 _load_state_dict s3net_new1 S3Net _S3Block s3net_new2 s3net65 _S3Layer _s3net s3net_new_big _Transition validate AverageMeter accuracy save_checkpoint ProgressMeter adjust_learning_rate main_worker main train validate AverageMeter accuracy save_checkpoint ProgressMeter adjust_learning_rate main_worker main GhostNet GhostBottleneck _make_divisible ghost_net depthwise_conv SELayer GhostModule LambdaResNet350 LambdaConv LambdaResNet152 LambdaResNet420 ResNet LambdaResNet18 ResNet18 check_params Bottleneck LambdaBottleneck LambdaResNet270 ResNet50 get_n_params LambdaResNet200 LambdaResNet50 OctaveConv Conv_BN Conv_BN_ACT oct_resnet101 oct_resnet50 Bottleneck oct_resnet200 oct_resnet26 oct_resnet152 OctResNet CNN depthwise_separable_conv_3x3 Triplet_unit StageBlock Node_OP scalenet101 scalenet50 SABlock scalenet152 ScaleNet SELayer CifarSEResNet se_resnet56 se_resnet34 se_resnet18 CifarSEPreActResNet SEBottleneck se_preactresnet32 se_resnet20 se_resnet152 conv3x3 SEBasicBlock se_resnet50 CifarSEBasicBlock se_preactresnet20 se_resnet32 se_preactresnet56 se_resnet101 _load_state_dict S3Net _S3Block s3net_new _S3Layer _s3net _Transition train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model 
test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses train_model test_model adjust_learning_rate load_allimages TImgNetDataset parseClasses load_graph save_graph get_graph_info build_graph FullGrad load preprocess deprocess load get_segmentation_for_model compute_dice preprocess deprocess get_dice_for_model get_saliency_map _load_state_dict s3net_large S3Net _S3Block s3net65 _S3Layer _s3net _Transition DataLoader Compose CIFAR100 eval densenet criterion tqdm densenet criterion backward zero_grad tqdm step begin commit sync isdir print close put ImageFolder DataLoader encode expanduser dumps_pyarrow enumerate open list group match load_state_dict load_state_dict_from_url keys compile _load_state_dict S3Net seed int world_size spawn multiprocessing_distributed warn device_count manual_seed main_worker parse_args gpu workers validate batch_size multiprocessing_distributed SGD pretrained DataParallel DistributedDataParallel ImageFolderLMDB DataLoader adjust_learning_rate save_checkpoint features cuda max open set_device DistributedSampler rank load_state_dict to sum range val format init_process_group get_model_complexity_info Compose close start_epoch distributed lr resume Normalize load int s3net_new_big2 evaluate print set_epoch parameters isfile zeros train epochs gpu update time model display size AverageMeter accuracy ProgressMeter item cuda gpu enumerate len len eval AverageMeter ProgressMeter copyfile save param_groups lr s3net_new1 int max size list parameters LambdaResNet350 LambdaResNet420 LambdaResNet152 print ResNet18 get_n_params ResNet50 LambdaResNet270 LambdaResNet200 LambdaResNet50 OctResNet OctResNet OctResNet OctResNet OctResNet load read loads load_state_dict ScaleNet load read loads load_state_dict ScaleNet load read loads load_state_dict ScaleNet ResNet AdaptiveAvgPool2d ResNet AdaptiveAvgPool2d ResNet AdaptiveAvgPool2d load_state_dict load_state_dict_from_url ResNet AdaptiveAvgPool2d ResNet AdaptiveAvgPool2d CifarSEResNet CifarSEResNet CifarSEResNet CifarSEPreActResNet CifarSEPreActResNet CifarSEPreActResNet append len range split join sorted exit has_file_allowed_extension append walk print zero_grad adjust_learning_rate save device str to double range state_dict format size eval item flush enumerate deepcopy time print makedirs train step len time format print size zero_grad eval item device to double enumerate flush len list number_of_nodes sort neighbors Node append range write_yaml format print SGD s3net_new parameters DataParallel lr resume load_state_dict isfile sum CrossEntropyLoss Compose Compose percentile sum checkCompleteness numpy FullGrad saliency load vgg19 arange compute_dice densenet169 print resnet50 parameters shape zeros get_saliency_map squeezenet1_0 load percentile vgg19 densenet169 resnet50 parameters get_saliency_map squeezenet1_0
<!-- ABOUT THE PROJECT --> # SparseShortcutTopology This repository contains the code for the paper [On a Sparse Shortcut Topology of Artificial Neural Networks](https://arxiv.org/abs/1811.09003), whose main contribution is to present a promising sparse shortcut topology for deep learning. Besides the theoretical analyses, we conduct comprehensive prediction and classification experiments to show the superiority of the proposed topology. <p align="center"> <img width="800" src="https://github.com/FengleiFan/SparseShortcutTopology/blob/main/topology.png" alt="Material Bread logo"> </p> <p align="center"> Figure 1. The proposed topology </p> ### Structure
342
FilipMiscevic/random_walk
['language acquisition']
['Predicting and Explaining Human Semantic Search in a Cognitive Model']
gen_graphs.py precision_recall.py starter.py adj_rand.py generateCorpus.py final_analyze_methods.py create_learner_af.py frequency.py omega.py plotting.py animals.py generate_animal_graphs.py create_learner.py swc.py clustering.py rw.py test_avg_adj_rand test_get_cluster_cat_labels fit_hdbscan get_hdbscan_fscore_unweighted get_hdbscan_adj_rand_idx get_hdbscan_clusters get_hdbscan_fscore get_hdbscan_fscore_unweighted_all get_hdbscan_fscore_all count_occurences compare_features adjacency_names comparative_adjacency_csv plot_scatter plot_curve plot_scatter2 observed_walk firstHits get_walk_length genX has_irt_pattern random_walk_am random_walk_possible a_len annotate_curve_IRTs random_walk_trial path_from_walk create_irt_graph random_walk write_summary plot_irt mean gen_random_walk update_position irt_self_longterm_avg get_fluid_cat_switches test_avg_adj_rand test_get_cluster_cat_labels fit_hdbscan get_hdbscan_fscore_unweighted get_hdbscan_adj_rand_idx get_hdbscan_clusters get_hdbscan_fscore get_hdbscan_fscore_unweighted_all get_hdbscan_fscore_all count_occurences compare_features adjacency_names comparative_adjacency_csv plot_scatter plot_curve plot_scatter2 observed_walk firstHits get_walk_length genX has_irt_pattern random_walk_am random_walk_possible a_len annotate_curve_IRTs random_walk_trial path_from_walk create_irt_graph random_walk write_summary plot_irt mean gen_random_walk update_position irt_self_longterm_avg get_fluid_cat_switches append index get_cluster shortest_paths_dijkstra HDBSCAN array fit_predict append pop labels_ enumerate fit_hdbscan precision_recall get_hdbscan_clusters fit_hdbscan precision_recall_with_singletons get_hdbscan_clusters fit_hdbscan get_hdbscan_clusters test_get_cluster_cat_labels sort tuple ecount target source range griddata set_xlabel map set_ylabel savefig figure set_zlabel gca plot_surface set_title set_xlabel add_subplot scatter set_ylabel figure set_zlabel set_title set_xlabel add_subplot scatter set_ylabel figure set_zlabel get_cluster append join range gen_random_walk lower genX get_fluid_cat_switches get_cluster remove divide set choice binomial append sum range len remove neighbors index set choice binomial append vcount sum range append path_from_walk observed_walk index append path_from_walk append list list observed_walk random_walk get enumerate append intersection get sem index append float enumerate len get sem index mean append float enumerate len mean items sem str errorbar set_title plot set_xticklabels set_xlabel add_axes irt_self_longterm_avg bar set_xticks set_ylabel figure mean savefig array range join zip mpl_connect set_xlim get_proj read_png add_artist OffsetImage AnnotationBbox append proj_transform draggable set_ylim draw get_proj renderer proj_transform update_positions
# random_walk Scripts to compare random walks on semantic networks generated by cognitive models of word learning. Publications resulting from this work include a poster at CogSci 2016 (https://mindmodeling.org/cogsci2016/papers/0234/index.html) and an oral presentation at CMCL 2018 (https://aclweb.org/anthology/papers/W/W18/W18-0105/).
343
FireBERT-author/FireBERT
['adversarial attack']
['FireBERT: Hardening BERT-based classifiers against adversarial attack']
firebert_fct.py firebert_fve.py switch.py firebert_base.py randomsearchIMDB.py bert_base_model.py processors.py firebert_fse.py randomsearchMNLI.py test LightningBertForSequenceClassification elapsed_time FireBERT_base test_FireBERT_base test_FireBERT_FCT FireBERT_FCT test_iter_FireBERT_FSE FireBERT_FSE test_param_FireBERT_FSE FireBERT_FVE elapsed_time test_FireBERT_FVE _read_corpus test MnliProcessor ImdbProcessor _clean_str elapsed_time load_examples elapsed_time load_examples get_stopwords load_cos_sim debug load_vocab USE load_cos_nn test_SWITCH clean_str get_pos SWITCH set_test_dataset tqdm_metrics get_processor print Trainer LightningBertForSequenceClassification load_and_cache_examples elapsed_time set_test_dataset tqdm_metrics print close MnliProcessor Trainer test ImdbProcessor load_and_cache_examples FireBERT_base open print time round set_test_dataset tqdm_metrics get_processor print FireBERT_FCT MnliProcessor Trainer test load_and_cache_examples str FireBERT_FSE set_test_dataset tqdm_metrics get_processor print write close test Trainer MnliProcessor ImdbProcessor load_and_cache_examples open get_processor MnliProcessor Trainer ImdbProcessor getrandbits open str enable close test FireBERT_FSE set_test_dataset tqdm_metrics collect print write randint bool load_and_cache_examples elapsed_time MnliProcessor Trainer ImdbProcessor FireBERT_FVE open str update_hparams tokenizer set_tokenizer range close test choice items set_test_dataset tqdm_metrics print load_and_cache_examples sub shuffle list range len set_train_dataset set_val_dataset ModelCheckpoint TensorBoardLogger fit load tensor TensorDataset load load_cos_sim collect print load_vocab save append array enumerate print sub load T norm format print dot save array pos_tag zip from_pretrained print text_to_list clean_str generate_candidates SWITCH make_single_example_with_features FireBERT_FSE generate_candidates_from_example print get_important_indices_from_example update_hparams top_n
# FireBERT Hardening BERT classifiers against adversarial attack Gunnar Mein, UC Berkeley MIDS Program ([email protected])\ Kevin Hartman, UC Berkeley MIDS Program ([email protected])\ Andrew Morris, UC Berkeley MIDS Program ([email protected]) With many thanks to our advisors: Mike Tamir, Daniel Cer and Mark Butler for their guidance on this research. And to our significant others as the three of us hunkered down over the three month project. *Note: This repo used to be anonymous while the paper was in blind review. ## Paper Please read our paper: [FireBERT 1.0](https://github.com/FireBERT-author/FireBERT/blob/master/FireBERT.pdf). When citing our work, please include a link to this repository.
344
FishSmile-syx/ITAE-Pytorch-Anomaly_Detection
['anomaly detection']
['Attribute Restoration Framework for Anomaly Detection']
train.py config/config_cifar10_bn_relu.py config/config_mnist.py data_tools/data.py data_tools/utils.py model/ITAE.py main.py config/config_cifar10.py parse_args test_epoch train_epoch get_cifar_anomaly_dataset collate_ITAE_eval get_mnist2_anomaly_dataset load_data collate_ITAE get_mnist_anomaly_dataset evaluate auprc AverageMeter roc L1_measure log_txt Up Model DoubleConv Down add_argument ArgumentParser log_train model zero_grad save cuda strftime work_dir update format size avg item enumerate join time criterion backward print AverageMeter now mkdir_or_exist train step len best_auroc log_test eval transformations ToPILImage ToTensor rotate unloader mean stack IntTensor append loader range len ToPILImage ToTensor len rotate unloader mean stack IntTensor append loader range enumerate MNIST normal_class int format get_cifar_anomaly_dataset Compose get_mnist2_anomaly_dataset CIFAR10 dataset get_mnist_anomaly_dataset seed int arange concatenate shuffle copy array len seed int arange clone shuffle from_numpy cat len from_numpy clone cat manual_seed join plot xlabel roc_curve close ylabel dict ylim title savefig figure brentq legend cpu xlim auc average_precision_score size view
# ITAE-Pytorch-Anomaly_Detection An **unofficial, reproduced** implementation of 'Inverse-Transform AutoEncoder for Anomaly Detection'; see the paper at https://arxiv.org/abs/1911.10676 ## requirements * python3 * pytorch 1.0 or higher * mmcv * torchvision * tqdm ## how to use * run *python main.py config/config_mnist* to train and test on the MNIST dataset
345
FlashTek/mask-rcnn-edge-agreement-loss
['instance segmentation', 'edge detection', 'semantic segmentation']
['Faster Training of Mask R-CNN by Focusing on Instance Boundaries']
samples/cityscapes/cityscapes.py samples/cityscapes/cityscapesscripts/helpers/labels_cityPersons.py samples/cityscapes/cityscapesscripts/evaluation/instances2dict.py samples/cityscapes/cityscapesscripts/viewer/cityscapesViewer.py samples/cityscapes/cityscapesscripts/preparation/createTrainIdInstanceImgs.py samples/cityscapes/cityscapesscripts/annotation/cityscapesLabelTool.py mrcnn/visualize.py samples/cityscapes/cityscapesscripts/helpers/labels.py samples/cityscapes/cityscapesscripts/preparation/json2instanceImg.py pycocotools/mask.py pycocotools/coco.py samples/cityscapes/cityscapesscripts/helpers/annotation.py mrcnn/config.py samples/cityscapes/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py samples/cityscapes/cityscapesscripts/preparation/json2labelImg.py samples/cityscapes/cityscapesscripts/evaluation/instance.py pycocotools/__init__.py samples/coco/visualize_coco.py samples/cityscapes/cityscapesscripts/preparation/createTrainIdLabelImgs.py mrcnn/parallel_model.py pycocotools/cocoeval.py mrcnn/utils.py samples/cityscapes/cityscapesscripts/helpers/csHelpers.py mrcnn/model.py samples/coco/coco.py samples/cityscapes/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py Config fpn_classifier_graph MaskRCNN mrcnn_mask_edge_loss_graph compose_image_meta rpn_bbox_loss_graph norm_boxes_graph compute_backbone_shapes rpn_class_loss_graph log DetectionTargetLayer trim_zeros_graph log2_graph parse_image_meta parse_image_meta_graph data_generator rpn_graph identity_block BatchNorm build_fpn_mask_graph load_image_gt build_rpn_targets resnet_graph unmold_image PyramidROIAlign apply_box_deltas_graph denorm_boxes_graph generate_random_rois detection_targets_graph build_detection_targets overlaps_graph mrcnn_bbox_loss_graph conv_block batch_pack_graph ProposalLayer smooth_l1_loss clip_boxes_graph mrcnn_class_loss_graph mrcnn_mask_loss_graph mold_image build_rpn_model DetectionLayer refine_detections_graph ParallelModel build_model compute_ap norm_boxes compute_recall apply_box_deltas compute_overlaps compute_iou resize_image box_refinement_graph generate_pyramid_anchors mold_mask generate_anchors compute_ap_range compute_overlaps_masks denorm_boxes unmold_mask download_trained_weights non_max_suppression minimize_mask resize_mask extract_bboxes trim_zeros compute_matches batch_slice expand_mask box_refinement Dataset display_differences draw_box display_images draw_rois draw_boxes apply_mask random_colors display_instances display_table display_weight_stats plot_overlaps plot_precision_recall display_top_masks COCO Params COCOeval encode decode area toBbox CityscapesDataset CityscapesConfig parse_eval_line COCOWrapper build_coco_results evaluate_coco write_eval_to_csv CorrectionBox configuration CityscapesLabelTool main enum evaluateMatches readPredInfo CArgs computeAverages prepareJSONDataForResults assignGt2Preds getGtInstances setInstanceLabels filterGtInstances readGTImage getPrediction evaluateImgLists matchGtWithPreds main printResults getPrediction getPrior evaluatePair printClassScores getInstanceIouScoreForCategory getIouScoreForLabel generateInstanceStats evaluateImgLists printCategoryScores main CArgs generateMatrix getMatrixFieldValue printConfMatrix getIouScoreForCategory writeJSONFile createResultDict getScoreAverage getInstanceIouScoreForLabel Instance main instances2dict CsPoly CsObjectType CsBbox CsObject Annotation getCoreImageFileName printError getCsFileInfo getColorEntry getDirectory ensurePath writeDict2JSON colors assureSingleInstanceName main main 
printHelp createInstanceImage printError main json2instanceImg printHelp printError json2labelImg main createLabelImage main CityscapesViewer CocoDataset parse_eval_line build_coco_results evaluate_coco CocoConfig write_eval_to_csv compare_instance_masks load_model trim2 trim visualize_feature_map main fig2data get_ax visualize_masks prepare_coco array ljust print BACKBONE callable str str conv_block identity_block range stack minimum concat maximum set_shape split minimum reshape maximum tile expand_dims split concat reduce_max boolean_mask MASK_SHAPE crop_and_resize gather box_refinement_graph round trim_zeros_graph ROI_POSITIVE_RATIO transpose squeeze pad cast expand_dims range USE_MINI_MASK overlaps_graph cond int TRAIN_ROIS_PER_IMAGE float32 greater maximum int32 split minimum apply_box_deltas_graph reshape clip_boxes_graph concat gather map_fn DETECTION_MAX_INSTANCES stack gather_nd DETECTION_MIN_CONFIDENCE pad set_intersection expand_dims argmax BBOX_STD_DEV Input rpn_graph int_shape int range log less abs cast switch constant not_equal squeeze where mean sparse_categorical_crossentropy gather_nd cast int32 equal IMAGES_PER_GPU batch_pack_graph switch constant abs squeeze where mean gather_nd cast int32 less sum equal reduce_sum sparse_softmax_cross_entropy_with_logits cast gather argmax switch constant reshape smooth_l1_loss mean int64 stack cast gather_nd gather switch constant reshape transpose mean shape int64 stack cast gather_nd gather binary_crossentropy switch lp_loss constant exp reshape concat squeeze mean conv2d expand_dims binary_crossentropy uint8 minimize_mask compose_image_meta extract_bboxes load_mask zeros astype randint resize_image shape warning resize_mask MINI_MASK_SHAPE load_image bool fliplr augment_image to_deterministic int ROI_POSITIVE_RATIO concatenate resize astype TRAIN_ROIS_PER_IMAGE compute_iou choice MASK_SHAPE int32 box_refinement USE_MINI_MASK zeros argmax range sum zip ones compute_overlaps choice RPN_TRAIN_ANCHORS_PER_IMAGE zeros argmax amax len int sort min hstack randint zeros max range split image_ids arange IMAGE_SHAPE compute_backbone_shapes RPN_ANCHOR_RATIOS generate_pyramid_anchors BACKBONE_STRIDES MAX_GT_INSTANCES shape expand_dims load_image_gt build_rpn_targets astype shuffle copy choice generate_random_rois build_detection_targets RPN_ANCHOR_SCALES mold_image RPN_ANCHOR_STRIDE float32 extend zeros len list array boolean_mask reduce_sum cast bool abs append range constant concat float32 cast split constant concat float32 cast split reset_default_graph Input zeros array range minimum maximum zeros range compute_iou T astype float32 dot sum astype delete float32 compute_iou append astype float32 stack cast float32 log astype float32 log dtype min pad resize randint max pad astype resize zeros bool range astype resize zeros bool range zeros bool astype resize arange concatenate reshape flatten sqrt meshgrid array append generate_anchors range len ones trim_zeros compute_overlaps_masks range len arange concatenate cumsum compute_matches astype float32 maximum sum range len compute_ap format print mean append compute_overlaps set argmax max len list graph_fn zip append range len print array array show subplot uint8 axis astype imshow title figure zip len shuffle list map range where subplots axis apply_mask imshow find_contours range set_xlim astype tight_layout copy zeros uint8 Polygon print text add_patch Rectangle randint fliplr set_ylim compute_matches display_instances concatenate len subplots arange rand axis Line2D unmold_mask shape 
title apply_mask imshow format set_xlim astype copy enumerate add_line print text add_patch Rectangle int32 set_ylim len format arange display_images unique append sum range format subplots set_title plot set_xlim set_ylim format arange product yticks text xlabel tight_layout ylabel imshow figure xticks max range len subplots axis Line2D random_colors set_title apply_mask imshow find_contours range set_xlim astype copy zeros add_line uint8 Polygon text add_patch Rectangle int32 randint fliplr set_ylim HTML display get_trainable_layers name weights display_table append get_weights enumerate shape print around range append join iouType format dirname summarize load_mask loadRes load_image format astype accumulate COCOWrapper enumerate time uint8 evaluate COCOeval print extend tqdm build_coco_results write_eval_to_csv len enum exec_ argv exit CityscapesLabelTool QApplication join predictionWalk format printError getCsFileInfo predictionPath realpath filter city sequenceNb dirname abspath append frameNb walk append name format printError gtInstancesFile print instances2dict isfile writeDict2JSON readPredInfo format print assignGt2Preds filterGtInstances readGTImage abspath zip flush len count_nonzero deepcopy int instLabels name reshape convert logical_and id copy shape ignoreInEval append array enumerate open instLabels cumsum max printError ones append minRegionSizes copy unique zip float empty enumerate distanceConfs convolve distanceThs min overlaps argsort dot zeros len instLabels distanceAvailable distanceThs overlaps where nanmean average argmax isclose enumerate format instLabels distanceAvailable print enumerate tolist instLabels evaluateMatches computeAverages prepareJSONDataForResults exportFile getGtInstances JSONOutput setInstanceLabels matchGtWithPreds dirname ensurePath writeDict2JSON printResults format groundTruthSearch printError gtInstancesFile glob print getPrediction evaluateImgLists append append evalLabels max id append id sum normalized sum longlong ignoreInEval name sum longlong ignoreInEval sum longlong sum longlong evalLabels tolist getPrior getScoreAverage ensurePath writeDict2JSON exportFile dirname format nocol getMatrixFieldValue print name evalLabels getPrior getColorEntry range len str format nocol print name evalLabels getColorEntry quiet ignoreInEval bold format nocol all print getColorEntry quiet bold printError name evalLabels len printClassScores sum range format nocol getInstanceIouScoreForCategory getIouScoreForLabel generateInstanceStats getColorEntry printCategoryScores keys flush generateMatrix printConfMatrix print getIouScoreForCategory writeJSONFile createResultDict getScoreAverage getInstanceIouScoreForLabel open count_nonzero evalPixelAccuracy printError evalLabels logical_and evalInstLevelScore shape ignoreInEval format replace astype unique zip float cEvaluatePair int reshape category int32 array format toDict print Instance len unique abspath append array flush open instances2dict print str exit format basename printError CsFile split len getCsFileInfo dirname makedirs join replace sort realpath dirname json2instanceImg flush len json2labelImg print basename format format printHelp hasInstances format trainId Draw deleted printError print new id objects polygon label createInstanceImage Annotation fromJsonFile save printHelp getopt format trainId Draw deleted printError print new id color objects polygon label createLabelImage Annotation fromJsonFile save CityscapesViewer subplots getpixel size new add difference getbbox mode array 
get_width_height tostring_argb fromstring draw tight_layout roll display CocoDataset prepare load_coco CocoConfig InferenceConfig print load_weights load_image_gt unmold_image join format print display_images transpose imshow savefig figure run_graph load_image_gt join format class_names print trim image_reference detect display_instances savefig save get_ax log open load_image_gt int join format class_names zeros_like concatenate print compute_matches astype image_reference display_instances savefig abs range len coco_dir compare_instance_masks image_ids load_model model_checkpoint output_dir visualize_feature_map visualize_masks prepare_coco
# Faster Training of Mask R-CNN by Focusing on Instance Boundaries ![Instance Mask Visualizations](https://github.com/FlashTek/mask-rcnn-edge-agreement-loss/raw/master/instance_mask_visualizations.jpg) This is an implementation of the improved training scheme [Faster Training of Mask R-CNN by Focusing on Instance Boundaries](https://arxiv.org/abs/1809.07069) on Python 3, Keras, and TensorFlow. The code is an extension of the existing implementation of [Mask R-CNN by Matterport](https://github.com/matterport/Mask_RCNN). It can be seen as a fork of the original repository based on [commit cbff80f](https://github.com/matterport/Mask_RCNN/commit/cbff80f3e3f653a9eeee43d0d383a0385aba546b). The model generates bounding boxes and segmentation masks for each instance of an object in the image. It's based on Feature Pyramid Network (FPN) and a ResNet101 backbone. The training speed has been increased by introducing an auxiliary objective. Adding the new auxillary task can be done using this simple network head: <p align="center"> <img src="https://github.com/FlashTek/mask-rcnn-edge-agreement-loss/raw/master/edge_agreement_head_small.png" alt="Architecture" height="250"> </p> The choice of the edge detection filter influences the convergence speed up; as written in the paper, the best results were obtained using the `Sobel` filter. <p align="center"> <img src="https://github.com/FlashTek/mask-rcnn-edge-agreement-loss/raw/master/mask_loss.jpg" alt="Loss curves" height="350">
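As a rough illustration of the auxiliary objective described in that README, the following hedged sketch compares Sobel edge maps of predicted and ground-truth instance masks with a squared-error penalty. It is a simplification of the repository's `mrcnn_mask_edge_loss_graph` (visible in the function dump above), not the exact implementation, and the mask shapes are made up.

```python
# Hedged sketch of an edge-agreement term, not the repo's exact loss.
import tensorflow as tf

def edge_agreement_loss(gt_masks, pred_masks):
    """gt_masks, pred_masks: float tensors of shape [batch, H, W, 1] in [0, 1]."""
    gt_edges = tf.image.sobel_edges(gt_masks)      # [batch, H, W, 1, 2] (dy, dx)
    pred_edges = tf.image.sobel_edges(pred_masks)
    return tf.reduce_mean(tf.square(gt_edges - pred_edges))

# Example with random placeholder masks:
gt = tf.cast(tf.random.uniform([2, 28, 28, 1]) > 0.5, tf.float32)
pred = tf.random.uniform([2, 28, 28, 1])
print(edge_agreement_loss(gt, pred))
```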
346
FlorianKrey/DNC
['data augmentation']
['Discriminative Neural Clustering for Speaker Diarisation']
datapreperation/gen_augment_data.py scoring/gen_rttm.py datapreperation/utils.py scoring/split_rttm.py scoring/score_rttm.py scoring/utils.py scoring/run_spectralclustering.py utils.py datapreperation/gen_dvecdict.py set_script_step_id print_debug print_error get_dir_name get_abs_path change_dir cache_command check_exists get_script_step_id get_file_name_base make_dirs check_is_file get_rel_path check_output_file write_one_text_file check_output_dir Abspath get_env_var_value set_env_var_value print_message check_is_dir get_base_name join_paths setup prepare_data get_label_from_spk mlf_to_dict get_maxlen augment_meetings augment_single_meeting get_startidx main filter_encompassed_segments l2_normalise_matrix setup prepare_data mlf_to_dict concatenate_list_in_dict generate_dvecdict main get_dvector_dict_from_meetings split_segments filter_encompassed_segments two_level_to_single_level_dict set_script_step_id print_debug print_error get_dir_name get_abs_path change_dir cache_command check_exists get_script_step_id get_file_name_base make_dirs check_is_file get_rel_path check_output_file write_one_text_file check_output_dir Abspath get_env_var_value set_env_var_value print_message check_is_dir get_base_name join_paths filter_encompassed_segments setup read_json match_meetings load_scp main write_rttm setup evaluate_spectralclustering write_results_dict permutation_invariant_seqmatch main do_spectral_clustering main segment_rttm main get_time_boundary load_rttm set_script_step_id print_debug print_error get_dir_name get_abs_path change_dir cache_command check_exists get_script_step_id get_file_name_base make_dirs check_is_file get_rel_path check_output_file write_one_text_file check_output_dir Abspath get_env_var_value set_env_var_value print_message check_is_dir get_base_name join_paths error exit info debug set_env_var_value get_file_name_base print_error make_dirs isinstance print_error check_exists isinstance print_error get_dir_name check_output_dir isfile join copy get_file_name_base write_one_text_file get_rel_path join_paths print_message chdir print_error print_debug check_exists isinstance print_error makedirs check_is_file append join_paths split isdir str print_debug write close check_output_file open inscps evensplit inmlfs cache_command argv print_error randomspeaker change_dir add_argument outdir ArgumentParser check_output_dir parse_args get_abs_path enumerate remove sort isdisjoint append enumerate evensplit int min uniform ceil maxlen filtEncomp get_abs_path values sorted list load_mat OrderedDict get_label_from_spk append next get_startidx range concatenate randomspeaker set mean get_maxlen sqrt choice item augment keys join norm isinstance sort array filter_encompassed_segments len load Process sorted list join items update tqdm dict dvectordict OrderedDict Manager start append keys enumerate len randint len lstrip rstrip len inscps int join items inmlfs mlf_to_dict augment_meetings mkdir abspath zip append prepare_data setup append items load_mat filter_encompassed_segments items segLenConstraint array_split extend floor append max append items extend isinstance concatenate keys isinstance print_error items l2_normalise_matrix concatenate get_dvector_dict_from_meetings split_segments array savez generate_dvecdict js_dir output_dir sorted join range input_scp js_num read_json load_scp output_dir sorted append popleft sum js_name js_dir deque rttm_name min_len enumerate join filter_encompassed_segments print write_rttm match_meetings dirname output_json SpectralClusterer 
permutations arange set append sum max len items list print load_mat tqdm permutation_invariant_seqmatch do_spectral_clustering split join items output_json write_results_dict evaluate_spectralclustering add_argument call realpath dirname ArgumentParser parse_args change_dir items min append max split get_time_boundary load_rttm input_rttm submeeting_rttm output_rttm segment_rttm
# Discriminative Neural Clustering (DNC) for Speaker Diarisation This repository contains the code used in our paper: >**[Discriminative Neural Clustering for Speaker Diarisation](https://arxiv.org/abs/1910.09703)** > >*Qiujia Li\*, Florian Kreyssig\*, Chao Zhang, Phil Woodland* (\* indicates equal contribution) ## Overview We propose to use encoder-decoder models for supervised clustering. This repository contains: * a submodule for spectral clustering, a modified version of [this repository by Google](https://github.com/wq2012/SpectralCluster) (see the clustering sketch below) * a submodule for DNC using Transformers, implemented in [ESPnet](https://github.com/espnet/espnet) * data processing procedures for data augmentation & curriculum learning in our paper
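For orientation, here is a minimal sketch of the spectral-clustering baseline idea: cluster per-segment speaker embeddings (d-vectors) into speaker labels. It uses scikit-learn's `SpectralClustering` as a stand-in for the modified Google spectral clusterer that the repository actually vendors, and the embedding dimension, neighbour count and number of speakers are illustrative assumptions.

```python
# Hedged sketch: generic spectral clustering of d-vectors, not the repo's clusterer.
import numpy as np
from sklearn.cluster import SpectralClustering

rng = np.random.default_rng(0)
# 200 segments x 128-dim d-vectors (fake data standing in for real embeddings)
d_vectors = rng.normal(size=(200, 128))

clusterer = SpectralClustering(n_clusters=4, affinity="nearest_neighbors",
                               n_neighbors=10, assign_labels="kmeans",
                               random_state=0)
speaker_labels = clusterer.fit_predict(d_vectors)  # one speaker id per segment
print(speaker_labels[:10])
```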
347
Forethought-Technologies/ieee-dsmp-2018-paper
['text classification']
['Automated labeling of bugs and tickets using attention-based mechanisms in recurrent neural networks']
ind_rnn.py attention_layer.py naive_bayes.py convert_chromium.py text_processing.py fasttext_classify.py grid_search.py scrape_bugzilla.py HA.py tfidf_classify.py utils.py our.py rnn_based.py dot_product AttentionWithContext fasttext_classify grid_search map_sentence map_doc make_model read_chrome build_sentences read_linux remove_stopwords_from_sent report clean_str IndRNNCell IndRNN naive_bayes_classify map_sentence map_doc make_model read_chrome build_sentences read_linux remove_stopwords_from_sent report clean_str bidir_rnn_classify scrape_bugzilla scrape_page create_csv remove_short_words remove_long_words remove_stopwords stemm_text remove_rare_words strip_punctuations cast_to_lowercase remove_linux_garbage tfidf_classify load_chromium_bug_data load_linux_bug_data merge_title_and_message str print shuffle to_csv map test split append f1_score train_supervised len str list format product print dict zip keys method remove_linux_garbage read_csv merge_title_and_message map read_csv merge_title_and_message sub translate append append list map sent_tokenize empty enumerate empty map_sentence enumerate print f1_score accuracy_score argmax predict f1_score str print accuracy set split append NBC max range len to_categorical Input argmax str list len RMSprop Model append range predict vocab shuffle set empty compile enumerate print index history Word2Vec zeros fit str read join text strip BeautifulSoup split str create_csv print append scrape_page range len punctuation map maketrans LancasterStemmer PorterStemmer SnowballStemmer map map map map map punctuation words map set maketrans proc_word iterrows map MLPClassifier str print fit SVC shuffle TfidfVectorizer transform f1_score sum fit_transform predict len drop remove_linux_garbage strip_punctuations read_csv merge_title_and_message strip_punctuations read_csv merge_title_and_message
# "Automated labeling of bugs and tickets using attention-based mechanisms in recurrent neural networks" paper for DSMP'2018: 2018 IEEE Second International Conference on Data Stream Mining &amp; Processing This repository contains the code and datasets used in the paper. Following, is the list of contents. Datasets: * linux_bugs_usage_ready.csv - dataset of Arch linux bugs. * chromium.csv - dataset of Chromium bugs. Both datasets are tab-separated with first row being a header.
348
Francis515/SYNTHIA-PANO
['semantic segmentation']
['Semantic Segmentation of Panoramic Images Using a Synthetic Dataset']
pano_cylindrcal_projection.py cvt2color.py main.py pano_synthia.py cvt2color load_label cylind_prj synthia_prj pano_stitch zeros uint8 astype range imread int zeros_like fy fx range cylind_prj zeros uint8 astype
# SYNTHIA-PANO ## Description SYNTHIA-PANO is the panoramic version of the SYNTHIA dataset. Five sequences are included: Seqs02-summer, Seqs02-fall, Seqs04-summer, Seqs04-fall and Seqs05-summer. Panoramic images with fine annotations for semantic segmentation. ![Label](pics/label.png) ![Examples](pics/dataset5.png) Video: [![Video](https://img.youtube.com/vi/--Mhldpd6nI/0.jpg)](https://youtu.be/--Mhldpd6nI) ## Paper [SYNTHIA](https://ieeexplore.ieee.org/document/7780721)
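The panoramas are produced by cylindrically projecting and stitching the original SYNTHIA views (cf. cylind_prj in pano_cylindrcal_projection.py). The NumPy sketch below shows one common form of the inverse cylindrical mapping under assumed focal lengths fx and fy; it is not the repository's exact implementation.

```python
# Sketch of a cylindrical warp with nearest-neighbour sampling (assumptions: fx, fy).
import numpy as np

def cylindrical_project(img, fx, fy):
    h, w = img.shape[:2]
    cx, cy = w / 2.0, h / 2.0
    ys, xs = np.mgrid[0:h, 0:w]
    theta = (xs - cx) / fx                    # angle around the cylinder axis
    height = (ys - cy) / fy                   # height on the cylinder
    x_src = fx * np.tan(theta) + cx           # corresponding planar coordinates
    y_src = fy * height / np.cos(theta) + cy
    out = np.zeros_like(img)
    valid = (x_src >= 0) & (x_src < w) & (y_src >= 0) & (y_src < h)
    out[ys[valid], xs[valid]] = img[y_src[valid].astype(int), x_src[valid].astype(int)]
    return out
```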
349
FrankCAN/GAPNet
['graph attention']
['GAPNet: Graph Attention based Point Neural Network for Exploiting Local Feature of Point Cloud']
train.py part_seg/train_multi_gpu.py provider.py utils/data_prep_util.py utils/eulerangles.py utils/tf_util.py models/network.py utils/plyfile.py utils/pc_util.py evaluate.py part_seg/part_seg_model.py part_seg/test.py models/gat_layers.py models/transform_nets.py eval_one_epoch log_string evaluate rotate_point_cloud load_h5_data_label_seg loadDataFile getDataFiles load_h5 rotate_point_cloud_by_angle shuffle_data jitter_point_cloud rotate_perturbation_point_cloud shift_point_cloud random_scale_point_cloud get_learning_rate eval_one_epoch log_string train_one_epoch train get_bn_decay attn_feature get_model get_loss placeholder_inputs input_transform_net get_model get_loss pc_normalize convert_label_to_one_hot printout load_pts_seg_files output_color_point_cloud_red_blue placeholder_inputs pc_augment_to_point_num predict output_color_point_cloud average_gradients train convert_label_to_one_hot printout load_ply_normal pad_arr_rows batch_mkdir save_h5_data_label_normal load_h5_data_label_normal load_h5_data_label_seg get_sampling_command load_h5 get_category_names save_h5 load_ply_data get_obj_filenames export_ply quat2euler euler2quat mat2euler angle_axis2euler euler2angle_axis euler2mat write_ply pyplot_draw_point_cloud draw_point_cloud read_ply point_cloud_three_views_demo point_cloud_to_volume pyplot_draw_volume point_cloud_to_volume_batch point_cloud_three_views volume_to_point_cloud _open_stream _lookup_type PlyData _split_line PlyProperty PlyParseError make2d PlyListProperty PlyElement batch_norm_template get_neighbors dropout pairwise_distance fully_connected batch_norm_for_conv2d batch_norm_for_fc knn conv2d conv2d_nobias batch_norm_dist_template max_pool2d _variable_with_weight_decay _variable_on_cpu print write flush restore eval_one_epoch log_string ConfigProto Session pi rotate_point_cloud_by_angle argmax run open str squeeze shape sum imsave range log_string mean float zeros enumerate join print write loadDataFile point_cloud_three_views array len arange shuffle len reshape cos pi dot shape uniform sin zeros array range reshape cos dot shape sin zeros array range randn reshape dot shape zeros range array clip shape clip randn shape uniform range shape uniform range File File exponential_decay maximum minimum exponential_decay arange jitter_point_cloud random_scale_point_cloud argmax run str squeeze shift_point_cloud sum range log_string shuffle rotate_perturbation_point_cloud float rotate_point_cloud loadDataFile shuffle_data add_summary len append value leaky_relu squeeze transpose matmul activation conv2d_nobias conv2d softmax tile get_neighbors expand_dims bias_add int32 float32 placeholder value attn_feature dropout pairwise_distance reshape concat reduce_max fully_connected matmul knn conv2d append range reduce_mean one_hot softmax_cross_entropy value reshape concat fully_connected conv2d max_pool2d tile max_pool2d argmax sparse_softmax_cross_entropy_with_logits print write mean sqrt sum max array concatenate zeros range ConfigProto Saver concat reduce_mean zip append expand_dims zeros PlyData write range join print join len join mkdir File close create_dataset File close create_dataset File read array read array append array cos sin eps asarray atan2 sqrt flat cos sin angle_axis2mat squeeze point_cloud_to_volume flatten append expand_dims range zeros float astype append vstack array range data read array write array describe int exp abs transpose min mean sqrt argsort round argwhere zeros sum max range euler2mat concatenate draw_point_cloud fromarray uint8 read_ply save 
point_cloud_three_views set_xlabel add_subplot scatter set_ylabel figure set_zlabel pyplot_draw_point_cloud volume_to_point_cloud append split dtype len property hasattr property property property multiply add_to_collection xavier_initializer _variable_on_cpu l2_loss truncated_normal_initializer squeeze transpose square reduce_sum matmul expand_dims top_k get_shape value reshape squeeze gather expand_dims range
# GAPNet: Graph Attention based Point Neural Network for Exploiting Local Feature of Point Cloud created by Can Chen, Luca Zanotti Fragonara, Antonios Tsourdos from Cranfield University [[Paper]](https://arxiv.org/abs/1905.08705) # Overview We propose a graph attention based point neural network, named GAPNet, to learn shape representations for point clouds. Experiments show state-of-the-art performance in shape classification and semantic part segmentation tasks. In this repository, we release code for training a GAPNet classification network on the ModelNet40 dataset and a part segmentation network on the ShapeNet part dataset. # Requirements * [TensorFlow](https://www.tensorflow.org/) # Point Cloud Classification * Run the training script:
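GAPNet builds a local neighbourhood graph for each point before applying attention (cf. pairwise_distance and knn in utils/tf_util.py). A NumPy sketch of that neighbour lookup is shown below; the value of k and the array shapes are assumptions for illustration.

```python
# k-nearest-neighbour indices for a point cloud, via squared pairwise distances.
import numpy as np

def knn_indices(points, k=20):
    """points: (N, 3) point cloud; returns (N, k) indices of nearest neighbours."""
    sq_norms = np.sum(points ** 2, axis=1)
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    dists = sq_norms[:, None] - 2.0 * points @ points.T + sq_norms[None, :]
    return np.argsort(dists, axis=1)[:, 1:k + 1]   # drop index 0 (the point itself)

neighbours = knn_indices(np.random.rand(1024, 3), k=20)
```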
350
Fraunhofer-AISEC/A3
['semi supervised anomaly detection', 'anomaly detection']
['$\\text{A}^3$: Activation Anomaly Analysis']
libs/DataHandler.py do_mnist_manually.py libs/architecture.py evaluate_results.py parameter_mnist.py utils.py do_nsl_kdd.py libs/NeuralNet.py do_mnist.py libs/A3.py libs/ExperimentWrapper.py libs/Metrics.py libs/DataTypes.py do_mnist_emnist.py do_creditcard.py evaluate_models.py libs/regulariser.py keras_mnist.py do_ids.py roc_to_threshold roc_to_pandas list_subdirs open_and_combine A3 conv_vae alarm_net RandomNoise VariationalAutoEncoder conv_ae Decoder Encoder Sampling dense_ae MNIST NSL_KDD IDS DataLabels EMNIST CreditCard AutoencoderLayers Metrics ExperimentWrapper ExperimentData ExperimentConfig thresh_pred evaluate_multiple evaluate NeuralNet ShiftedL1L2 argmax concat round drop_duplicates iterdir extend groupby concat index Sequential add Dense add_symmetric_autoencoder Dropout Input Input Sequential add Dense add_dense Dropout auto copy thresh_pred recall_score precision_score f1_score predict evaluate extend predict
This project is not maintained. It has been published as part of the following conference paper at ECML-PKDD 2020: # Activation Anomaly Analysis ## by Philip Sperl*, Jan-Philipp Schulze* and Konstantin Böttinger \* Philip Sperl and Jan-Philipp Schulze are co-first authors. Inspired by recent advances in coverage-guided analysis of neural networks, we propose a novel anomaly detection method. We show that the hidden activation values contain information useful to distinguish between normal and anomalous samples. Our approach combines three neural networks in a purely data-driven end-to-end model. Based on the activation values in the target network, the alarm network decides if the given sample is normal. Thanks to the anomaly network, our method even works in strict semi-supervised settings. Strong anomaly detection results are achieved on common data sets surpassing current baseline methods.
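A conceptual Keras sketch of the core idea follows; it is not the repository's exact classes, the layer sizes are assumptions, and the anomaly network that supplies synthetic counterexamples is omitted. The sketch only shows how hidden activations of a frozen target network can be exposed and scored by an alarm network.

```python
# Conceptual sketch: tap hidden activations of a frozen target net, score with an alarm net.
import tensorflow as tf
from tensorflow.keras import layers, models

def build_alarm_model(target: tf.keras.Model) -> tf.keras.Model:
    target.trainable = False                                   # target network stays fixed
    taps = [l.output for l in target.layers if isinstance(l, layers.Dense)][:-1]
    flats = [layers.Flatten()(t) for t in taps]
    acts = layers.Concatenate()(flats) if len(flats) > 1 else flats[0]
    x = layers.Dense(100, activation="relu")(acts)
    x = layers.Dense(20, activation="relu")(x)
    score = layers.Dense(1, activation="sigmoid")(x)           # 1 = anomalous
    alarm = models.Model(inputs=target.input, outputs=score)
    alarm.compile(optimizer="adam", loss="binary_crossentropy")
    return alarm
```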
351
Fraunhofer-AISEC/towards-resistant-audio-adversarial-examples
['speech recognition', 'adversarial attack']
['Towards Resistant Audio Adversarial Examples']
add_silence_to_start.py xdg.py tf_logits.py plot.py attack.py classify.py score.py Attack main convert_mp3 main output_plot calculate_edit_distance compute_mfcc get_logits BaseDirectory write export from_mp3 round array clip pop rstrip replace add_argument write target ArgumentParser parse_args ConfigProto round array clip reset_default_graph input_files show save_as savefig join str sum format replace print group editops append listdir len as_list hamming rfft eps T arange concat float32 square reduce_sum matmul pi stack cast sin abs array log compute_mfcc initialize_globals reshape concat create_flags BiRNN stack append zeros
[Accompanying Website](https://tom-doerr.github.io/website_towards_resistant_audio_adversarial_examples/) To generate adversarial examples for your own files, follow the process below and modify the arguments to attack.py. Ensure that the file is sampled at 16 kHz and uses signed 16-bit ints as the data type. You may want to modify the number of iterations that the attack algorithm is allowed to run. ## Setup 1. Install Docker. On Ubuntu/Debian/Linux-Mint etc.: ``` sudo apt-get install docker.io
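Not part of the repository: one way to convert an arbitrary audio file into the expected format (16 kHz sample rate, signed 16-bit PCM) before passing it to attack.py, using librosa and scipy as assumed dependencies.

```python
# Resample to 16 kHz and write signed 16-bit PCM (illustrative helper, not from the repo).
import numpy as np
import librosa
from scipy.io import wavfile

def to_16khz_int16(in_path, out_path):
    audio, _ = librosa.load(in_path, sr=16000, mono=True)    # float32 in [-1, 1]
    pcm = (np.clip(audio, -1.0, 1.0) * 32767).astype(np.int16)
    wavfile.write(out_path, 16000, pcm)

to_16khz_int16("sample.wav", "sample_16k.wav")                # file names are placeholders
```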
352
Fraunhofer-AISEC/towards_resistant_audio_adversarial_examples
['speech recognition', 'adversarial attack']
['Towards Resistant Audio Adversarial Examples']
add_silence_to_start.py xdg.py tf_logits.py plot.py attack.py classify.py score.py Attack main convert_mp3 main output_plot calculate_edit_distance compute_mfcc get_logits BaseDirectory write export from_mp3 round array clip pop rstrip replace add_argument write target ArgumentParser parse_args ConfigProto round array clip reset_default_graph input_files show save_as savefig join str sum format replace print group editops append listdir len as_list hamming rfft eps T arange concat float32 square reduce_sum matmul pi stack cast sin abs array log compute_mfcc initialize_globals reshape concat create_flags BiRNN stack append zeros
[Accompanying Website](https://tom-doerr.github.io/website_towards_resistant_audio_adversarial_examples/) To generate adversarial examples for your own files, follow the process below and modify the arguments to attack.py. Ensure that the file is sampled at 16 kHz and uses signed 16-bit ints as the data type. You may want to modify the number of iterations that the attack algorithm is allowed to run. ## Setup 1. Install Docker. On Ubuntu/Debian/Linux-Mint etc.: ``` sudo apt-get install docker.io
353
FredericGodin/ContextualDecomposition-NLP
['morphological tagging']
['Explaining Character-Aware Neural Networks for Word-Level Prediction: Do They Discover Linguistic Rules?']
morpho_tagging/data_iterator.py contextual_decomposition/evaluate_segmentation.py morpho_tagging/train.py morpho_tagging/networks.py contextual_decomposition/cd_conv.py contextual_decomposition/example.py CDConv Bunch remove_markers segment_line read_gold_segments generate_consecutive_gram_dict generate_all_gram_dict gram_is_part_of calculate_gram_indexes Morph calculate_gram_indexes CharacterGramVocab read_tags load_morphdata_ud DataIterator Tag Tagger init_ortho join index split append get_tag_index Morph defaultdict read_tags append tuple arange range combinations list range append readlines close lower open Tag enumerate split language join parse_corpus CharacterGramVocab read_tags data_path_ud data hasattr orthogonal_ filter getattr _all_weights uniform_ constant_
# Introduction This project contains the necessary files to reproduce the paper: ["Explaining Character-Aware Neural Networks for Word-Level Prediction: Do They Discover Linguistic Rules?"](https://arxiv.org/abs/1808.09551). This [paper](http://aclweb.org/anthology/D18-1365) was presented at [EMNLP 2018](https://aclanthology.coli.uni-saarland.de/events/emnlp-2018). An example is given in the figure below. It shows the individual character contributions of the Spanish adjective económicas. The character a has the highest positive (red) contribution for predicting the label Gender=Fem, and the character s for predicting the label Number=Plur. This coincides with our linguistic knowledge of Spanish. ![alt text](images/example_spanish.png "Example of contextual decomposition for a Spanish word.") # Framework All code was implemented in Python 3.5. We used PyTorch 0.4.0.
354
FudanNLP/Capsule4TextClassification
['sentiment analysis', 'text classification']
['Information Aggregation via Dynamic Routing for Sequence Encoding']
caps_attn_flatten/utils.py caps_attn_flatten/nest.py caps_attn_hierarchical/Config.py caps_attn_hierarchical/data_iterator.py caps_attn_hierarchical/train_test.py caps_attn_flatten/TfUtils.py caps_attn_hierarchical/dataprocess/dataprocess_sentence.py caps_attn_flatten/train_test.py caps_attn_flatten/model.py caps_attn_hierarchical/utils.py caps_attn_flatten/dataprocess/vocab.py caps_attn_hierarchical/nest.py caps_attn_hierarchical/TfUtils.py caps_attn_hierarchical/dataprocess/vocab.py caps_attn_hierarchical/model.py caps_attn_flatten/Config.py caps_attn_flatten/data_iterator.py caps_attn_hierarchical/Capsule_masked.py caps_attn_flatten/dataprocess/dataprocess_sentence.py caps_attn_flatten/dataprocess/dataprocess.py caps_attn_hierarchical/dataprocess/dataprocess.py caps_attn_flatten/Capsule_masked.py masked_reverse_routing_iter Capusule _squash shared_routing_uhat margin_loss masked_routing_iter Config preparedata TextIterator paddata model _sequence_like _sorted _yield_flat_nest _yield_value is_sequence flatten_dict_items _packed_nest_with_indices assert_same_structure flatten map_structure_up_to flatten_up_to map_structure get_traverse_shallow_structure pack_sequence_as assert_shallow_structure _recursive_assert_same_structure _warn_once _yield_flat_up_to mkMask linear masked_softmax reduce_sum entry_stop_gradients embed_lookup_last_dim reduce_avg last_dim_linear get_available_gpus debug Train calculate_confusion_single mkEmbedMatrix calculate_accuracy_single load_objs prepare_data write_status save_objs pad read_status readEmbedding pred_from_prob_single print_confusion_single valid_entry extract constructLabel_dict readEmbedding buildEmbedding loadLabel_dict extract constructLabel_dict readEmbedding buildEmbedding loadLabel_dict Vocab masked_reverse_routing_iter Capusule _squash shared_routing_uhat margin_loss masked_routing_iter Config preparedata TextIterator paddata model _sequence_like _sorted _yield_flat_nest _yield_value is_sequence flatten_dict_items _packed_nest_with_indices assert_same_structure flatten map_structure_up_to flatten_up_to map_structure get_traverse_shallow_structure pack_sequence_as assert_shallow_structure _recursive_assert_same_structure _warn_once _yield_flat_up_to mkMask linear masked_softmax reduce_sum entry_stop_gradients embed_lookup_last_dim reduce_avg last_dim_linear get_available_gpus debug Train calculate_confusion_single mkEmbedMatrix calculate_accuracy_single load_objs prepare_data write_status save_objs pad read_status readEmbedding pred_from_prob_single print_confusion_single valid_entry extract constructLabel_dict readEmbedding buildEmbedding loadLabel_dict extract constructLabel_dict readEmbedding buildEmbedding loadLabel_dict Vocab int ones_like mkMask squeeze _squash where reduce_sum cast softmax zeros expand_dims range equal int ones_like mkMask squeeze _squash where reduce_sum softmax tile zeros expand_dims range equal reduce_any is_nan maximum square logical_not Assert is_inf sqrt square reduce_sum put array paddata min array zeros max enumerate len dict _sorted isinstance zip _sorted isinstance _yield_value is_sequence _warn_once isinstance is_sequence isinstance is_sequence iterkeys set zip type _recursive_assert_same_structure flatten iteritems zip _sequence_like _yield_value is_sequence append flatten _packed_nest_with_indices assert_same_structure zip zip _yield_value is_sequence is_sequence zip assert_shallow_structure assert_shallow_structure _yield_value isinstance traverse_fn zip append assert_shallow_structure shape concat reshape 
sequence_mask to_float get_shape mkMask reshape concat reduce_sum shape cast len get_shape mkMask reshape concat shape cast len dtype pack constant while_loop shape unpack _create_ta stop_gradient cast logical_not int linear reshape concat shape ones_like mkMask where softmax equal list_local_devices print str time close write flush open time strip close float open len astype zip max enumerate get items ones astype float32 max values len argmax mean equal astype float32 zeros float sum array range len print items sorted info list defaultdict dict zip float sum keys values len readEmbedding map
# Capsule4TextClassification Implementation of our paper ["Information Aggregation via Dynamic Routing for Sequence Encoding"](https://arxiv.org/pdf/1806.01501.pdf) # System Requirements OS: Linux (Ubuntu 14.04+) Python: v3.6 Tensorflow: v1.4.0 Numpy: v1.14 CUDA: v8.0 CUDNN: v6.0
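The aggregation is based on capsule-style dynamic routing (cf. _squash and masked_routing_iter in Capsule_masked.py). Below is a NumPy sketch of the squash nonlinearity and one routing loop with simplified shapes; masking and the masked/reverse variants from the repository are omitted.

```python
# NumPy sketch of the squash nonlinearity and a basic dynamic-routing loop.
import numpy as np

def squash(s, axis=-1, eps=1e-8):
    sq_norm = np.sum(s ** 2, axis=axis, keepdims=True)
    return (sq_norm / (1.0 + sq_norm)) * s / np.sqrt(sq_norm + eps)

def dynamic_routing(u_hat, num_iters=3):
    """u_hat: (num_in, num_out, dim) vote vectors from input to output capsules."""
    num_in, num_out, _ = u_hat.shape
    b = np.zeros((num_in, num_out))                            # routing logits
    for _ in range(num_iters):
        c = np.exp(b) / np.exp(b).sum(axis=1, keepdims=True)   # softmax over output capsules
        s = np.einsum("io,iod->od", c, u_hat)                  # weighted sum of votes
        v = squash(s)                                          # output capsules
        b = b + np.einsum("iod,od->io", u_hat, v)              # agreement update
    return v

v = dynamic_routing(np.random.randn(10, 4, 8))
```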
355
FulminisH/Style-Transfer
['style transfer']
['A Neural Algorithm of Artistic Style']
gramian.py imshow.py main.py load.py extract_features.py extract_features gramian imshow load list items to layer t shape mm view squeeze transpose array clip detach size Compose convert unsqueeze max
# Style-Transfer Contains an implementation of neural style transfer in PyTorch. Style transfer was first implemented by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge in their paper 'A Neural Algorithm of Artistic Style'. https://arxiv.org/abs/1508.06576 The model uses pretrained VGGNet layers, replacing max pooling with average pooling, to extract features from the content image and the style image. It then applies the artistic intricacies of the style image to the content image to produce a target image that mixes both style and content.
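In this approach the style representation is built from Gram matrices of the extracted feature maps (cf. gramian.py in the file list). A minimal PyTorch sketch of that computation follows; the normalisation by layer size is a common convention and an assumption here, not necessarily the repository's exact choice.

```python
# Gram-matrix style representation and a style loss between two feature maps.
import torch

def gram_matrix(features: torch.Tensor) -> torch.Tensor:
    """features: (batch, channels, height, width) activations from a VGG layer."""
    b, c, h, w = features.size()
    flat = features.view(b, c, h * w)
    gram = torch.bmm(flat, flat.transpose(1, 2))    # channel-by-channel correlations
    return gram / (c * h * w)                       # normalise by layer size (assumed)

style_loss = torch.nn.functional.mse_loss(
    gram_matrix(torch.randn(1, 64, 32, 32)),        # target-image features (placeholder)
    gram_matrix(torch.randn(1, 64, 32, 32)),        # style-image features (placeholder)
)
```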
356
GA-17a/PSENet
['optical character recognition', 'scene text detection', 'curved text detection']
['Shape Robust Text Detection with Progressive Scale Expansion Network', 'Shape Robust Text Detection with Progressive Scale Expansion Network']
util/statistic.py util/tf.py pypse.py util/event.py train_ic15.py train.py metrics.py util/feature.py util/proc.py test_ctw1500.py util/rand.py util/t.py util/neighbour.py util/caffe_.py models/__init__.py util/dtype.py dataset/icdar2015_loader.py util/ml.py util/str_.py util/test.py util/log.py test_ic15.py pse/.ycm_extra_conf.py pse/__init__.py util/misc.py test.py eval/ic15/rrc_evaluation_funcs_v2.py pse/__main__.py util/__init__.py util/logger.py dataset/ctw1500_test_loader.py util/mask.py util/url.py eval/ic15/rrc_evaluation_funcs_v1.py dataset/ctw1500_loader.py eval/ctw1500/eval_ctw1500.py eval/ctw1500/file_util.py util/thread_.py models/fpn_resnet.py util/np.py util/dec.py util/io_.py util/mod.py train_ctw1500.py dataset/icdar2015_test_loader.py eval/ic15/rrc_evaluation_funcs.py eval/ic15/script.py util/cmd.py util/img.py dataset/__init__.py util/plt.py eval/ic15/file_util.py runningScore pse cal_kernel_score dice_loss ohem_batch adjust_learning_rate save_checkpoint ohem_single main cal_text_score train cal_kernel_score dice_loss ohem_batch adjust_learning_rate save_checkpoint ohem_single main cal_text_score train cal_kernel_score dice_loss ohem_batch adjust_learning_rate save_checkpoint ohem_single main cal_text_score train write_file_not_cover write_file read_file read_dir write_file_not_cover write_file read_file read_dir ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 GetCompilationInfoForFile IsHeaderFile MakeRelativePathsInFlagsAbsolute FlagsForFile DirectoryOfThisScript pse cmd print_calling_in_short_for_tf timeit print_calling print_test print_calling_in_short is_tuple int is_number is_str cast is_list double wait_key hog get_contour_min_area_box blur imwrite get_rect_iou black get_value put_text bgr2rgb get_roi render_points bgr2gray get_contour_region_iou resize convex_hull draw_contours get_contour_rect_box get_shape set_value is_in_contour is_valid_jpg move_win get_contour_region_in_rect fill_bbox imshow apply_mask random_color_3 imread bilateral_blur find_contours points_to_contours maximize_win rect_area rgb2bgr contour_to_points ds_size points_to_contour eq_color find_two_level_contours get_wh average_blur rgb2gray get_contour_region_in_min_area_rect rotate_point_by_90 white filter2D is_white translate get_contour_area rectangle min_area_rect rect_perimeter get_rect_points rotate_about_center gaussian_blur circle get_dir search is_dir dump_mat read_h5_attrs exists get_filename cd join_path load_mat get_file_size get_absolute_path cat dir_mat dump_json create_h5 dump pwd copy is_path mkdir ls open_h5 load remove write_lines read_h5 make_parent_dir find_files read_lines get_date_str init_logger find_black_components find_white_components init_params AverageMeter mkdir_p get_mean_and_std kmeans try_import_by_name add_ancester_dir_to_path import_by_name is_main get_mod_by_name add_to_path load_mod_from_path n2 _in_image count_neighbours get_neighbours n1 n1_count n8 n2_count n4 norm2_squared smooth flatten empty_list norm2 sum_all angle_with_x has_infty sin arcsin norm1 eu_dist iterable is_2D shuffle cos_dist chi_squared_dist clone is_empty has_nan has_nan_or_infty show plot_solver_data line show_images get_random_line_style to_ROI draw imshow rectangle hist maximize_figure set_subtitle save_image shuffle normal randint sample D E join index_of find_all is_none_or_empty ends_with remove_all remove_invisible to_lowercase starts_with is_str int_array_to_str contains to_uppercase split replace_all add_noise crop_into get_latest_ckpt 
get_init_fn gpu_config Print is_gpu_available min_area_rect focal_loss_layer_initializer get_variable_names_in_checkpoint sum_gradients get_all_ckpts get_update_op get_variables_to_train focal_loss get_iter get_available_gpus wait_for_checkpoint download argv cit get_count exit sit runningScore pse cal_kernel_score dice_loss ohem_batch adjust_learning_rate save_checkpoint ohem_single main cal_text_score train cal_kernel_score dice_loss ohem_batch adjust_learning_rate save_checkpoint ohem_single main cal_text_score train cal_kernel_score dice_loss ohem_batch adjust_learning_rate save_checkpoint ohem_single main cal_text_score train write_file_not_cover write_file read_file read_dir write_file_not_cover write_file read_file read_dir ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 GetCompilationInfoForFile IsHeaderFile MakeRelativePathsInFlagsAbsolute FlagsForFile DirectoryOfThisScript pse cmd print_calling_in_short_for_tf timeit print_calling print_test print_calling_in_short is_tuple int is_number is_str cast is_list double wait_key hog get_contour_min_area_box blur imwrite get_rect_iou black get_value put_text bgr2rgb get_roi render_points bgr2gray get_contour_region_iou resize convex_hull draw_contours get_contour_rect_box get_shape set_value is_in_contour is_valid_jpg move_win get_contour_region_in_rect fill_bbox imshow apply_mask random_color_3 imread bilateral_blur find_contours points_to_contours maximize_win rect_area rgb2bgr contour_to_points ds_size points_to_contour eq_color find_two_level_contours get_wh average_blur rgb2gray get_contour_region_in_min_area_rect rotate_point_by_90 white filter2D is_white translate get_contour_area rectangle min_area_rect rect_perimeter get_rect_points rotate_about_center gaussian_blur circle get_dir search is_dir dump_mat read_h5_attrs exists get_filename cd join_path load_mat get_file_size get_absolute_path cat dir_mat dump_json create_h5 dump pwd copy is_path mkdir ls open_h5 load remove write_lines read_h5 make_parent_dir find_files read_lines get_date_str init_logger find_black_components find_white_components init_params AverageMeter mkdir_p get_mean_and_std kmeans try_import_by_name add_ancester_dir_to_path import_by_name is_main get_mod_by_name add_to_path load_mod_from_path n2 _in_image count_neighbours get_neighbours n1 n1_count n8 n2_count n4 norm2_squared smooth flatten empty_list norm2 sum_all angle_with_x has_infty sin arcsin norm1 eu_dist iterable is_2D shuffle cos_dist chi_squared_dist clone is_empty has_nan has_nan_or_infty show plot_solver_data line show_images get_random_line_style to_ROI draw imshow rectangle hist maximize_figure set_subtitle save_image shuffle normal randint sample D E join index_of find_all is_none_or_empty ends_with remove_all remove_invisible to_lowercase starts_with is_str int_array_to_str contains to_uppercase split replace_all add_noise crop_into get_latest_ckpt get_init_fn gpu_config Print is_gpu_available min_area_rect focal_loss_layer_initializer get_variable_names_in_checkpoint sum_gradients get_all_ckpts get_update_op get_variables_to_train focal_loss get_iter get_available_gpus wait_for_checkpoint download argv cit get_count exit sit connectedComponents get transpose copy put shape Queue zeros range len int sort min astype sum concatenate ohem_single append float numpy range sigmoid sum view mean update astype int32 get_scores numpy update astype int32 get_scores numpy model zero_grad runningScore cuda cal_kernel_score step append cal_text_score sum range update 
format size astype ohem_batch item float flush enumerate time criterion backward Variable print AverageMeter numpy len param_groups lr join save SGD DataLoader adjust_learning_rate save_checkpoint Logger resnet152 cuda hasattr load_state_dict resnet101 append module range IC15Loader pretrain resnet50 close schedule lr resume flush optimizer checkpoint join load print parameters n_epoch train set_names makedirs CTW1500Loader append sort walk replace read close open join makedirs close write open join makedirs close write open load_url ResNet load_state_dict load_url ResNet load_state_dict ResNet load_url load_state_dict keys state_dict ResNet load_url load_state_dict keys state_dict ResNet load_url load_state_dict keys state_dict append join startswith IsHeaderFile compiler_flags_ exists compiler_flags_ GetCompilationInfoForFile compiler_working_dir_ MakeRelativePathsInFlagsAbsolute DirectoryOfThisScript cpse array isinstance debug waitKey ord bgr2rgb get_absolute_path wait_key namedWindow isinstance destroyAllWindows move_win rgb2bgr WINDOW_NORMAL imread maximize_win get_absolute_path rgb2bgr make_parent_dir moveWindow setWindowProperty WND_PROP_FULLSCREEN enumerate get_shape get_shape min max drawContours boundingRect get_contour_rect_box minAreaRect BoxPoints int0 get_shape get_contour_rect_box warpAffine int BoxPoints transpose hstack getRotationMatrix2D dot get_roi minAreaRect points_to_contour black draw_contours shape to_contours draw_contours assert_equal asarray range GaussianBlur bilateralFilter putText int32 FONT_HERSHEY_SIMPLEX get_shape int tuple warpAffine get_wh float32 cos deg2rad getRotationMatrix2D dot sin abs array _get_area transpose _get_inter zeros range len findContours asarray copy findContours copy pointPolygonTest convexHull randint asarray xrange zip minAreaRect empty points_to_contour get_absolute_path makedirs get_dir mkdir get_absolute_path get_dir mkdir get_absolute_path get_absolute_path get_absolute_path is_dir get_absolute_path expanduser startswith chdir get_absolute_path append listdir get_absolute_path ends_with get_absolute_path open get_absolute_path make_parent_dir get_absolute_path get_absolute_path get_absolute_path savemat make_parent_dir get_absolute_path getsize get_absolute_path get_absolute_path make_parent_dir get_absolute_path get_absolute_path get_absolute_path join_path extend ls is_dir get_absolute_path find_files append get_absolute_path make_parent_dir now setFormatter basicConfig print join_path addHandler make_parent_dir StreamHandler get_date_str Formatter setLevel pop black set_root insert copy get_neighbours N4 shape get_new_root get_root xrange append set_visited is_visited print DataLoader div_ zeros range len normal constant isinstance kaiming_normal Conv2d bias modules BatchNorm2d weight Linear makedirs asarray warn flatten append enumerate insert join_path add_to_path get_dir __import__ import_by_name get_absolute_path get_filename append _in_image append _in_image append _in_image append _in_image norm2 zip shape asarray extend len pi asarray asarray reshape flatten sqrt shape xrange shape asarray has_infty has_nan enumerate len show asarray join_path flatten linspace figure save_image load show val_accuracies plot training_losses val_losses training_accuracies figure legend range len Rectangle add_patch full_screen_toggle get_current_fig_manager add_line Line2D linspace show_images show set_title set_subtitle axis colorbar bgr2rgb imshow maximize_figure subplot2grid append save_image enumerate get_absolute_path savefig imsave 
make_parent_dir set_xlim set_ylim suptitle maximize_figure randint len flatten flatten append pop list tuple to_lowercase is_str enumerate list tuple to_lowercase is_str enumerate to_lowercase findall replace replace_all binomial list_local_devices get_checkpoint_state is_dir get_absolute_path model_checkpoint_path get_checkpoint_state all_model_checkpoint_paths int get_latest_ckpt is_none_or_empty latest_checkpoint print get_model_variables startswith info append extend get_collection TRAINABLE_VARIABLES get_latest_ckpt NewCheckpointReader get_variable_to_shape_map dtype set_shape py_func ConfigProto ones_like zeros_like sigmoid_cross_entropy_with_logits float32 where reduce_sum sigmoid pow cast stop_gradient name reduce_mean add_n histogram zip append scalar UPDATE_OPS get_collection print urlretrieve st_size stat show_images get_count imwrite get_count imwrite asarray
# Shape Robust Text Detection with Progressive Scale Expansion Network --- Used on Camera-captured Invoice Images ## Requirements * Python 2.7 * PyTorch v0.4.1+ * OpenCV 3.4 (for the C++ version of pse) * opencv-python 4.1.0.25 ## Introduction Progressive Scale Expansion Network (PSENet) is a text detector that can reliably detect arbitrarily shaped text in natural scenes. We apply it here to camera-captured invoice images. ## Training
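Training optimises a dice loss over the predicted text and kernel maps (cf. dice_loss in train.py). A simplified PyTorch sketch is given below; the masking and online hard example mining (OHEM) used in the repository are left out.

```python
# Simplified dice loss over predicted score maps (mask handling and OHEM omitted).
import torch

def dice_loss(pred: torch.Tensor, target: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    """pred, target: (batch, H, W) score maps in [0, 1]."""
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    inter = (pred * target).sum(dim=1)
    union = (pred * pred).sum(dim=1) + (target * target).sum(dim=1) + eps
    return 1.0 - (2.0 * inter / union).mean()

loss = dice_loss(torch.rand(2, 160, 160), torch.randint(0, 2, (2, 160, 160)).float())
```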
357
GAMES-UChile/BayesianSpectralEstimation
['time series']
['Bayesian Nonparametric Spectral Estimation']
bnse.py time_freq_SM_im time_freq_covariances bse outersum time_freq_SM_re freq_covariances Spec_Mix_spectral Spec_Mix Spec_Mix_sine sqrt pi Spec_Mix_spectral eye len pi pi time_freq_SM_re time_freq_SM_im
# Bayesian nonparametric spectral estimation This repository hosts the code for F. Tobar. _Bayesian nonparametric spectral estimation_, Advances in Neural Information Processing Systems, 2018. Proceedings link: https://papers.nips.cc/paper/8216-bayesian-nonparametric-spectral-estimation
358
GGPGLDS/GGP_GLDS
['time series']
['Graph Gamma Process Generalized Linear Dynamical Systems']
readingdata.py
# HGPLDS
359
GMU-vision-robotics/mapping_navigation
['semantic segmentation']
['Simultaneous Mapping and Target Driven Navigation']
train_MapNet.py test_NavNet.py helper.py IL_Net.py train_NavNet.py data_helper.py dataloader.py test_MapNet.py baseline_rotate_and_see.py mapNet.py visualize_episodes.py baseline_random_walker.py parameters.py baseline_utils.py read_cached_data ActiveVisionDatasetEnv minus_theta_fn read_all_poses load_structs get_pose project_pixels_to_world_coords cameraPose2currentPose readDepthImage AVD AVD_IL AVD_online create_scene_graph load_depth_file get_scene_target_graphs get_state_action_cost absolute_poses getCamera get_im_pose load_scene_info getImageData load_detections get_image_poses get_sseg convert_image_by_pixformat_normalize discretize_coords invert_pose generate_detection_image build_p_gt read_label_map depth_to_3D get_det_mask candidate_targets relative_poses save_params plot_loss save_model load_model ILNet Encoder MapNet Parameters_IL Parameters ParametersMapNet evaluate_MapNet undo_discretization get_pose evaluate_NavNet softmax prepare_mapNet_input get_minibatch get_minibatch run_mapNet unroll_policy select_minibatch plot_step_loc get_images visualize_nav visualize_loc get_pcloud plot_step_nav minus_theta_fn range atan2 format asarray astype float32 stack Reader resize append asDirect ones dot append empty array range len join loadmat range join item cameraPose2currentPose join loadmat load join hstack loadmat open dot inv zeros zeros mod discretize_coords pi floor orientations zeros range reduce astype where intersect1d floor zeros zeros reshape where float transpose astype array open add_edge DiGraph range add_node len decode load_scene_info create_scene_graph keys append keys zeros asarray range len append shortest_path len resize convert_image_by_pixformat_normalize depth_to_3D load_depth_file resize getCamera imread isfile readlines len split append float range open int readlines close open split zeros range int read_label_map det_dir_path label_map_path item label_index_path generate_detection_image __getattribute__ dir str list asarray plot xlabel reshape axis ylabel mean title clf savefig range makedirs print str save load str print eval train cuda view where orientations numpy max cell_size print exp asarray append unsqueeze print batch_size dets_nClasses seq_len append zeros range cuda exp EPS_START random EPS_DECAY EPS_END build_p_gt mapNet clone print zeros read shape zeros imread range len set_aspect show str plot axis imshow scatter savefig set_aspect show str plot axis scatter savefig str load_scene_info asarray get_images get_image_poses subplots set_title len get_pcloud scatter clf item range plot_step_nav makedirs str load_scene_info asarray get_images subplots absolute_poses len get_pcloud scatter clf range plot_step_loc makedirs
## Simultaneous Mapping and Target Driven Navigation [[pdf]](https://arxiv.org/pdf/1911.07980.pdf) G. Georgakis, Y. Li, J. Kosecka, arXiv 2019 This is Python code for training and testing both mapnet and navigation components. ### Requirements - Python 3.7 - Torch 0.4.1 #### Python Packages ``` networkx torchvision 0.2.1
360
GPrathap/OpenBCIPython
['dynamic time warping']
['Near Real-Time Data Labeling Using a Depth Sensor for EMG Based Prosthetic Arms']
utils/utils.py try/ablone_model.py neuralnet/utils/variables.py py_qt/compat.py plugins/csv_collect.py neuralnet/net/model3/try_0.py utils/feature_extractor.py py_qt/sharedmem.py features/mfcc.py neuralnet/utils/scopes.py open_bci_v3.py try/try_rnnn.py plugins/sample_rate.py plugins/streamer_osc.py neuralnet/net/cnn/model1/convolutional_network.py plugin_interface.py preprocessing/noise_reducer.py lib/hmm.py preprocessing/preprocessing.py try/rnn_test.py test_log.py py_qt/kernels.py try/peakdetect.py preprocessing/server.py py_qt/kernel_smoothing.py neuralnet/net/cnn/model2/inception_resnet_v2.py features/generic_type.py visualization/LocalPolynomialRegression.py preprocessing/test_udp_client.py py_qt/cyth.py visualization/panda_try.py preprocessing/plot_kernel_regression.py py_qt/pyqt_fit1d.py visualization/draw_graph_channels_pattern.py plugins/print.py py_qt/utils.py features/mean.py py_qt/kde_methods.py py_qt/py_binning.py preprocessing/logistic_regression.py scripts/socket_client.py scripts/stream_data.py try/rnn.py plugins/streamer_lsl.py open_bci_v_ganglion.py try/classification.py try/reader.py plugins/csv_collect_and_publish.py plugins/udp_server.py visualization/try_eog.py preprocessing/RingBuffer.py preprocessing/pattern_detection.py features/zcr.py py_qt/loader.py plugins/tmp/new_server.py visualization/plot_visualize_raw.py visualization/draw_graph_23_15.py try/try_maltivatiate.py utils/data_types.py plugins/tmp/client.py lib/dtw.py py_qt/py_local_linear.py scripts/simple_serial.py features/fft.py externals/mne_openbci.py visualization/draw_graph_channels.py scripts/udp_client.py neuralnet/net/model3/try_1.py preprocessing/manager.py plugins/noise_test.py preprocessing/ssa.py visualization/draw_graph_kinect_angles.py utils/data_types_utils.py neuralnet/utils/cnn_common.py visualization/plot_interpolate_bad_channels.py py_qt/_kernels_py.py py_qt/kde.py py_qt/npr_methods.py build/filterout_dataset.py try/05_nonlinear_svm.py scripts/udp_server.py preprocessing/tmp.py try/text_classification_character_rnn.py preprocessing/init_buffer.py visualization/gvgvg.py csv_collect_and_publish_test.py py_qt/curve_fitting.py features/spectral.py py_qt/bootstrap.py preprocessing/processor.py manager.py test_net.py analyzer.py try/logistic_regression.py user.py neuralnet/net/model3/logistic_regression_train_rnn.py try/text_classification.py utils/dataset_writer_utils.py neuralnet/utils/losses.py scripts/test.py features/energy.py py_qt/nonparam_regression.py try/contrib_learn.py lib/forward_backword.py try/polynomial.py utils/Audio.py preprocessing/czxc.py visualization/draw_graph.py py_qt/binning.py preprocessing/analyzer.py visualization/plot_compute_raw_data_spectrum.py py_qt/bootstrap_workers.py preprocessing/kernel_regression.py try/estimator_test.py visualization/plot_objects_from_arrays.py open_bci_ganglion.py processing/dtw_testing.py analyzer/tmp.py py_qt/plot_fit.py processor.py utils/dataset_reader_utils.py loader.py py_qt/kde_bandwidth.py dep_analyzer.py plugins/streamer_tcp_server.py SignalAnalyzer DataFeeder plot_single_clip add_subplot_axes plot_clip_overview plot_single_feature_all_clips generate_feature_summary view_clip_overview plot_single_feature_one_clip plot_single_feature_aggregate conv18bitToInt32 GanglionDelegate decompressDeltas19Bit decompressDeltas18Bit OpenBCIBoard conv24bitsToInt conv8bitToInt8 conv19bitToInt32 OpenBCISample OpenBCISample OpenBCIBoard OpenBCISample OpenBCIBoard Clip runTest cleanUp read_raw_openbci RawOpenBCI EMG CNNModel1 CNNModel2 Deviatev1 
Deviatev1 one_hot_encoding batch_norm dropout fc max_pool _two_element_tuple conv2d flatten repeat_op avg_pool l2_regularizer cross_entropy_loss l1_loss l1_l2_regularizer l1_regularizer l2_loss _current_arg_scope _add_op add_arg_scope _get_arg_stack arg_scope has_arg_scope variable_device global_step variable get_unique_variable get_variables_by_name add_variable get_variables_to_restore VariableDeviceChooser get_variables MainThread PluginCSVCollectAndPublish animate frames RegrMagic KernelRegression normalize_cols NoiseReducer f normalize_cols PreProcessor Clip RingBuffer UDPServer SingularSpectrumAnalysis SubplotAnimation usePython useCython percentile bootstrap_regression getCIs test profile bootstrap_residuals bootstrap bootstrap_result initialize_shared initialize unicode_csv_writer unicode_csv_reader CurveFitting addFlags KDE1D botev_bandwidth silverman_covariance scotts_covariance variance_bandwidth _botev_fixed_point _inverse compute_bandwidth transform_distribution LinearCombinationMethod _fakeKDE transformKDE1D generate_grid KDE1DMethod TransformKDE1DMethod ReflectionMethod RenormalizationMethod CyclicMethod create_transform normal_order4 normal_kernel1d Epanechnikov_order4 useCython Kernel1D tricube usePython Epanechnikov normal_kernel LocalPolynomialKernel SpatialAverage useCython LocalPolynomialKernel1D PolynomialDesignMatrix1D usePython LocalLinearKernel1D PolynomialDesignMatrix load load_module NonParamRegression compute_bandwidth RegressionKernelMethod LocalPolynomialKernel SpatialAverage useCython LocalPolynomialKernel1D PolynomialDesignMatrix1D usePython LocalLinearKernel1D PolynomialDesignMatrix qqplot scaled_location_plot fit_evaluation write1d plot1d plot_residuals residual_measures plot_residual_tests plot_dist_residuals QtFitDlg get_args ParametersModel main find fast_bin local_linear_1d _dummy ones SharedArray _shmem_as_ndarray _allocate_raw_array _get_ctype_size zeros array approx_jacobian numpy_trans namedtuple numpy_trans_idx finite make_ufunc numpy_method_idx epanechnikov_pm2 epanechnikov_pm1 norm1d_pm2 epanechnikov_o4_pm1 tricube_pm1 normal_o4_pm2 normal_o4_cdf tricube_pm2 normal_o4_pm1 normal_o4_pdf tricube_pdf norm1d_pm1 tricube_cdf epanechnikov_cdf norm1d_cdf norm1d_pdf epanechnikov_o4_pdf epanechnikov_o4_cdf epanechnikov_pdf epanechnikov_o4_pm2 main model_fn maybe_download main SequenceClassification lazy_property main extract _model_fn_ops EstimatorModelFnTest iris_input_fn boston_eval_fn iris_input_fn_labels_dict linear_model_fn_with_model_fn_ops linear_model_fn _build_estimator_for_export_tests InferRealValuedColumnsTest ReplicaDeviceSetterTest _make_input_fn CheckCallsMonitor EstimatorTest boston_input_fn linear_model_params_fn logistic_model_no_mode_fn _build_estimator_for_resource_export_test my_model peakdet ptb_producer _file_to_word_ids ptb_raw_data _read_words _build_vocab train_network rnn_cell plot_learning_curve gen_data gen_epochs gen_batch SmallConfig main_op get_config import_data data_type csv_to_numpy_array nomalize_signal PTBInput PTBModel run_epoch rnn_model main bag_of_words_model main char_rnn_model Audio load_dataset_from_ogg read_tf_recode draw_sample_plot_and_save read_and_decode create_sample_from_data create_sample_from_image _int64_feature _bytes_feature _float_feature _int64_feature _bytes_feature _float_feature _get_frame_array _get_frame get_label f normalize_cols get_position transform set_visible add_subplot_axes show list set_frame_on format add_subplot_axes boxplot set_xlim category add_axes set_visible title figure 
filename DataFrame despine set_title distplot despine set_title boxplot despine set_title distplot show format concatenate set_xlim plot_single_feature_all_clips category subplots_adjust subplot2grid figure filename plot_single_feature_one_clip DataFrame plot_single_feature_aggregate range get_zcr len show subplots plot_clip_overview subplots_adjust range pack fromhex bytes conv19bitToInt32 conv18bitToInt32 y_target merge_all Saver argmax equal scalar disconnect print deactivate RawOpenBCI get_shape TensorShape isinstance num_elements pop get_shape assert_is_compatible_with add_to_collection get_collection _get_arg_stack add update append copy isinstance _add_op append add_to_collection get_collection GLOBAL_VARIABLES name NodeDef callable device GLOBAL_STEP get_collection append list set append min max sort floor len add_residual subtract len fit kde add fct randint NonParamRegression array residuals randint len percentile tuple izip enumerate izip cpu_count getCIs np Pool initialize apply_async bootstrap_result irange getattr append shuffle_method asarray close copy empty y_fit join initialize_shared zeros array fit show format arange plot randn print rand curve_fit title quadratic clf figure legend array bootstrap Stats run initialize np update locals izip irange getattr new_fit fit reader join atleast_2d cov shape atleast_2d shape atleast_2d exp arange large_float pi sqrt irange sum prod bandwidth xdata min upper lower cut max xdata sqrt float covariance_function bandwidth_function _inverse abs multiply Dinv hasattr isinstance inv Dinv __call__ inf tricube_width epanechnikov_width inf find_module update find_functions getmodule namebase files add set dirname abspath load_module atleast_2d sqrtm real arange plot xlabel min ylabel pdf title hist max arange plot xlabel min ylabel av ylim title NonParamRegression max fit arange plot xlabel set_yticks min ylabel sqrt av title NonParamRegression abs max gamma fit xlabel title ylabel popt residual_measures res CIs fit erfinv arange argsort sqrt std len izip xdata xname yopts param_names res_name _asdict CI ylabel title legend update format yname normq join fct_desc xlabel plot_residual_tests res figure scaled_res sorted_yopts qqplot subplot scaled_location_plot asarray format suptitle squeeze plot_residuals residual_measures figure plot_dist_residuals property show QtFitDlg raise_ astype sum exp get _dummy addressof tuple _type_ sizeof len get dtype property dtype asarray tuple SharedArray _allocate_raw_array shape prod len _allocate_raw_array prod SharedArray _allocate_raw_array prod SharedArray get _iskeyword list isdigit join isinstance print tuple len exec map set add dict startswith enumerate split asarray asfarray func zeros type range len exp multiply atleast_1d shape empty divide erf multiply exp shape isfinite divide erf multiply abs multiply multiply multiply multiply multiply multiply multiply norm1d_pdf norm1d_cdf isfinite norm1d_pdf power power power power power urlretrieve name print close NamedTemporaryFile relu linear reshape optimize_loss mean_squared_error evaluate print fit Estimator load_csv_without_header test_data train_data predict_data enumerate predict maybe_download __name__ optimize format global_variables_initializer error float32 placeholder Mnist shape sample SequenceClassification range Session run load_csv_with_header read list DNNClassifier data constant reshape load_boston target limit_epochs data constant reshape load_iris target data reshape load_iris constant data constant reshape target load_boston len 
isinstance extract linear_regression_zero_init get_global_step optimize_loss extract items get_global_step isinstance linear_regression_zero_init optimize_loss linear_regression_zero_init get_global_step optimize_loss extract logistic_regression_zero_init get_global_step one_hot optimize_loss build_parsing_serving_input_fn fit create_feature_spec_for_parsing LinearRegressor build_parsing_serving_input_fn create_feature_spec_for_parsing Estimator fit tuple logistic_regression get_global_step one_hot fully_connected optimize_loss stack asarray arange exit append len items list sorted Counter dict zip _read_words range len _read_words join _file_to_word_ids _build_vocab len append array range choice zeros range len range setup_graph train_network plot get_default_graph reset_default_graph RNN_config mean int list choice set nomalize_signal dropna round array range len time initial_state print epoch_size h c range enumerate run get_config bow_encoder get_global_step one_hot fully_connected optimize_loss softmax_cross_entropy get_global_step one_hot static_rnn fully_connected optimize_loss unstack embed_sequence GRUCell softmax_cross_entropy VocabularyProcessor Series vocabulary_ target array load_dataset transform bow_model accuracy_score fit_transform len get_global_step one_hot static_rnn fully_connected optimize_loss unstack GRUCell softmax_cross_entropy ByteProcessor sorted format print Clip append listdir int reshape fromstring Example ParseFromString tf_record_iterator append tostring int asarray ANTIALIAS flatten tostring Example resize open int inverted add_subplot axis close transformed savefig figure append tick_params specshow array int read TFRecordReader decode_raw reshape float32 int8 int64 cast parse_single_example zeros
OpenBCI_Python ============== The Python software library designed to work with OpenBCI hardware. Please direct any questions, suggestions and bug reports to the GitHub repo at: https://github.com/OpenBCI/OpenBCI_Python ## Dependencies: * Python 2.7 or later (https://www.python.org/download/releases/2.7/) * Numpy 1.7 or later (http://www.numpy.org/) * Yapsy -- if using plugins (http://yapsy.sourceforge.net/) * librosa -- (https://librosa.github.io/librosa/) * matplotlib -- (https://matplotlib.org/)
361
GWUvision/Hotels-50K
['data augmentation']
['Hotels-50K: A Global Hotel Recognition Dataset']
baseline_implementation/extract_features.py baseline_implementation/classfile.py evaluate/log_loss.py baseline_implementation/feats_to_csv.py evaluate/utils.py evaluate/convert_knn_to_probabilities.py download_train.py evaluate/retrieval.py main url_to_image AppURLopener download_and_resize NonTripletSet BatchAllSet main save_h5 main load_h5 main main main id_to_class_parser class_to_chain_parser read asarray bytearray imdecode IMREAD_UNCHANGED open join str imwrite print round resize url_to_image makedirs join reader cpu_count apply_async close append next Pool range open File close create_dataset l2_normalize Saver save_h5 Session run str restore squeeze placeholder dirname glob NonTripletSet ConfigProto keys int print getBatchFromImageList float32 zeros array makedirs GpuIndexFlatIP GpuIndexFlatConfig add load_h5 StandardGpuResources expand_dims id_to_class_parser unique values mean class_to_chain_parser len
# Hotels-50K <p align="center"> <img width=50% src="https://www2.seas.gwu.edu/~astylianou/images/hotels50k/trafficking_hotel_recognition.png"> </p> The Hotels-50K dataset was created to encourage work in hotel recognition, the task of identifying the hotel in images taken in hotel rooms. This task is particularly important as many photographs of human trafficking victims are captured in hotel rooms, and identifying which hotels the victims were photographed in is a top priority for trafficking investigators. The Hotels-50K dataset (introduced in https://www2.seas.gwu.edu/~pless/papers/Hotels50k.pdf) consists of over 1 million images from 50,000 different hotels around the world. These images come from both travel websites, as well as the TraffickCam mobile application, which allows every day travelers to submit images of their hotel room in order to help combat trafficking. The TraffickCam images are more visually similar to images from trafficking investigations than the images from travel websites. The training dataset includes 1,027,871 images from 50,000 hotels, and 92 major hotel chains. Of the 50,000 hotels, 13,900 include user contributed images from the TraffickCam application (a total of 55,061 TraffickCam images are included in the training set). The test dataset includes 17,954 TraffickCam images from 5,000 different hotels (as well as versions of the test images that have medium and large occlusions to replicate the occlusions seen in real world trafficking victim photographs). (UPDATE: A large number of the hyperlinks provided in the original dataset were moved after the fact by the imagery providers. We have updated the dataset to have valid imagery as of April 27, 2020. The above values are roughly correct but need to be updated to reflect the new dataset.) ## Dependencies
362
Gabsha/ssbr
['anomaly detection']
['Unsupervised Body Part Regression via Spatially Self-ordering Convolutional Neural Networks']
ssbr/runners/train.py ssbr/datasets/ircad.py ssbr/datasets/ops.py ssbr/main.py ssbr/datasets/utils.py tests/test_dataset.py ssbr/model.py ssbr/loss.py setup.py tests/test_model.py ssbr/visuals.py ssbr/__init__.py tests/test_loss.py ssbr/runners/evaluate.py smoothL1 loss_distance loss_order loss_ssbr train cli ssbr_model vgg16_features make_tiles IrcadData DowloadProgress maybe_unzip maybe_download load_dicom rescale grey2rgb resize image2np compose SSBRDataset cyclic_shuffler DicomVolumeStore batcher stack_sampler slice_sampler TrainConfig train_experiment test_shuffler test_batcher test_slice_sampler test_dicom_store test_order_loss_equidistant np_smooth_l1 loss_order_func test_distance_loss_equidistant loss_huber_func loss_distance_func sigmoid test_huber_loss test_order_loss test_distance_loss test_model test_fit switch abs smoothL1 sum load train_experiment VGG16 RMSprop Model loss_ssbr vgg16_features vgg Input compile shape ceil zeros range len info info Execute ImageSeriesReader SetFileNames GetGDCMSeriesFileNames shape zeros asarray shape append randint range chain islice iter list shuffle cyclic_shuffler list keys batcher str ssbr_model asdict valid SSBRDataset DicomVolumeStore makedirs File fit_generator TrainConfig Path ModelCheckpoint train IrcadData next batcher cyclic_shuffler sort append next range rand slice_sampler str asarray DicomVolumeStore rand File assert_equal zeros zeros abs where shape Input loss_order smoothL1 placeholder loss_distance Input loss_order_func rand assert_almost_equal list loss_order_func tile assert_almost_equal array range np_smooth_l1 loss_huber_func rand assert_almost_equal sum np_smooth_l1 rand loss_distance_func assert_almost_equal sum list np_smooth_l1 loss_distance_func tile assert_almost_equal sum array range ssbr_model rand predict_on_batch ssbr_model rand zeros fit
# SSBR Implementation of self-supervised body part regression. Reference: Unsupervised body part regression using convolutional neural network (Ke Yan, Le Lu, Ronald M. Summers) [https://arxiv.org/pdf/1707.03891.pdf] ## Setup ```bash virtualenv -p python3 venv source venv/bin/activate
363
GaganNarula/Bootstrap_particle_filter
['time series']
['Stochastic Gradient MCMC for Nonlinear State Space Models']
modelGradfuncs.py ParticleFilter.py prior_grads_linearproposal3 get_grads_linear_proposal3 prior_grads_linearproposal4 linear_proposal2 emission_meanfunc_proposal3 get_grads_linear_proposal2 loggauss_prior_grad loggamma_prior_grad ursino_proposal prior_grads_linearproposal2 linear_proposal4 emission_pdf_gauss emission_meanfunc_proposal4 get_grads_linear_proposal4 linear_proposal3 get_grads_ursino_proposal SGLD_step BootstrapPF_withLLgradients randn randn randn randn sqrt emission_meanfunc exp pi loggamma_prior_grad loggauss_prior_grad loggamma_prior_grad loggauss_prior_grad loggamma_prior_grad
# Bootstrap_particle_filter Bootstrap particle filter with ancestor resampling and learning with Stochastic Gradient Langevin. Inference: bootstrap particle filtering with ancestor resampling (Andrieu et al., 2010) https://rss.onlinelibrary.wiley.com/doi/10.1111/j.1467-9868.2009.00736.x Learning: Stochastic Gradient Langevin (Welling and Teh, 2011). Also useful: Aicher et al., 2019 https://arxiv.org/pdf/1901.10568.pdf
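A generic NumPy sketch of the bootstrap particle filter loop (propagate, weight, resample) is given below. The linear-Gaussian transition and emission models are placeholders rather than the repository's proposal models, and the ancestor bookkeeping and gradient terms needed for SGLD learning are omitted.

```python
# Generic bootstrap particle filter returning the log marginal likelihood estimate.
import numpy as np

def bootstrap_pf(observations, num_particles=500, sigma_x=0.5, sigma_y=0.5, seed=0):
    rng = np.random.default_rng(seed)
    particles = rng.normal(0.0, 1.0, size=num_particles)
    log_lik = 0.0
    for y in observations:
        # Propagate through a placeholder transition x_t = 0.9 * x_{t-1} + noise.
        particles = 0.9 * particles + rng.normal(0.0, sigma_x, size=num_particles)
        # Weight by a placeholder Gaussian emission density p(y_t | x_t).
        logw = -0.5 * np.log(2 * np.pi * sigma_y**2) - 0.5 * ((y - particles) / sigma_y) ** 2
        m = logw.max()
        w = np.exp(logw - m)
        log_lik += m + np.log(w.mean())                 # incremental marginal likelihood
        # Multinomial resampling.
        idx = rng.choice(num_particles, size=num_particles, p=w / w.sum())
        particles = particles[idx]
    return log_lik

print(bootstrap_pf(np.sin(np.linspace(0, 3, 30))))
```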
364
GalaxyCruiser/SR-SAN
['session based recommendations']
['Session-based Recommendation with Self-Attention Networks']
utils.py main.py datasets/preprocess.py model.py main trans_to_cuda train_test trans_to_cpu forward SelfAttentionNetwork data_masks split_validation Data process_seqs obtian_tra obtian_tes load validation Data trans_to_cuda time epoch print train_test valid_portion split_validation dataset range SelfAttentionNetwork open is_available is_available trans_to_cuda get_slice model stack float long arange batch_size zero_grad numpy forward mask append isin generate_batch mean eval zip long trans_to_cuda backward print now loss_function train step len max int arange shuffle round len print range zip len
# SR-SAN Implementation for the paper entitled "[Session-based Recommendation with Self-Attention Networks](https://arxiv.org/abs/2102.01922)" You can download the datasets used in our paper from the following links. Then put them in the folder `datasets/`: - YOOCHOOSE: <http://2015.recsyschallenge.com/challenge.html> - DIGINETICA: <http://cikm2016.cs.iupui.edu/cikm-cup> or <https://competitions.codalab.org/competitions/11161> After you download the YOOCHOOSE dataset, add a header line with `session_id,timestamp,item_id,category` to yoochoose-clicks.dat. ## Usage Run the file `datasets/preprocess.py` to preprocess the data before training the model. For example: `cd datasets; python preprocess.py --dataset=yoochoose` ```bash
365
GalaxyFox/DS-GA-3001-Deep_Kalman_Filter
['time series', 'counterfactual inference']
['Deep Kalman Filters']
healing_mnist_penalty.py healing_mnist_indep.py vae_mnist.py HealingMNIST apply_square binarize apply_noise get_rotations heal_image HealingMNIST apply_square binarize apply_noise get_rotations heal_image HealingMNIST apply_square binarize apply_noise get_rotations heal_image array array rotate apply_square random apply_noise get_rotations append randint enumerate append len range
# DS-GA-3001-Deep_Kalman_Filter This is a re-implementation and evaluation of the paper Deep Kalman Filters: https://arxiv.org/pdf/1511.05121.pdf
366
Gandor26/covid-open
['time series']
['Inter-Series Attention Model for COVID-19 Forecasting']
data/__init__.py data/constants.py run.py data/dw.py data/demo.py hosps.py data/delphi.py expsmooth.py case.py base.py data/cdc.py attention.py death.py XSeriesAttention GlobalLocalModel CaseModel load_data load_data CausalRegressor DeathModel ExpSmooth HospModel CausalRegressor load_data inference train load_cdc_truth load_case_baselines load_hosp_baselines load_death_baselines Epidata load_demograph_data load_world_covid_dataset load_us_covid_dataset load_bed_and_population_data load_census_embedding load_mobility_data load_hospitalized_data deepcopy min copy mean rename std fillna T bfill load_hospitalized_data update model backward zero_grad close tqdm eval set_postfix save float step range state_dict load eval load_state_dict sum to_datetime index Timedelta DataFrame read_csv groupby reset_index set_index concat apply append read_csv groupby set_index concat append read_csv groupby set_index concat append read_csv joinpath columns read_csv concat groupby list to_datetime concat index set zip append Timedelta date sum fillna groupby to_datetime concat index append Timedelta date sum fillna values groupby list format set_index sort_index to_datetime rename append Timedelta DataFrame keys fillna sum read_csv split list to_datetime concat zip Timedelta read_csv load joinpath DataFrame
# Attention Crossing Time Series for COVID-19 forecasting The repository contains scripts and outputs of COVID-19 forecasting developed by the University of California, Santa Barbara. ## Introduction We employ a purely data-driven model named ACTS to forecast COVID-19-related data, e.g. confirmed cases, hospitalizations and deaths, over time. We assume that the development of the pandemic in the current region will be highly similar to that of another region that showed similar patterns a few months earlier. We use an attention mechanism to compare and match such patterns and generate forecasts. We also leverage additional features such as demographic data and medical resources to measure the similarity between regions more precisely. ## Architecture ![arch](figs/architecture.png) ## Evaluation and Sample forecasts ### Past forecasting accuracy as of Aug 31 ![sample](figs/sample.png) For more details about our methodology, previous forecasts and comparison with other models, please refer to our [manuscript](https://arxiv.org/abs/2010.13006) on arXiv.
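As a rough, generic illustration of the pattern-matching idea described above (not the repository's XSeriesAttention module), scaled dot-product attention over candidate history segments can be sketched like this, with all names being assumptions:

```python
import numpy as np

def attend_over_segments(query_seg, history_segs):
    """Match one query window against historical windows via attention weights.

    query_seg: (d,) recent window of the target region.
    history_segs: (n, d) windows from other regions / earlier periods.
    Returns softmax attention weights over the n candidate segments.
    """
    scores = history_segs @ query_seg / np.sqrt(len(query_seg))  # similarity scores
    weights = np.exp(scores - scores.max())                      # stable softmax
    return weights / weights.sum()
```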
367
Gaopeng-Bai/MANN_model
['one shot learning']
['One-shot Learning with Memory-Augmented Neural Networks']
Train.py utlis/preprocessing_module.py dataLoader.py module/model.py ntm/ntm_cell.py ntm/mann_cell.py check_duplicated_dict get_File_size dataLoader main train predict NTMOneShotLearningModel MANNCell NTMCell one_hot_encode one_hot_decode preprocessing unicode getsize float dict len range zip add_argument ArgumentParser parse_args train predict pre tensorboard_dir ConfigProto NTMOneShotLearningModel save_dir GPUOptions makedirs zeros iternext nditer shape
# Meta-learning for next song recommendation ## Introduction In this topic, [Meta-learning](https://github.com/Gaopeng-Bai/Meta-Learning-Papers) with a [Memory-Augmented Neural Network](https://arxiv.org/pdf/1605.06065.pdf) is used to recommend the next song a user is likely to listen to. From the user's existing playlist, the algorithm predicts the next song to recommend. By letting the user give feedback on the current song, the playlist is adjusted to recommend the next new song. User feedback determines whether the song currently being listened to is added to the next predicted list; the algorithm then predicts the next song the user may prefer based on this list. Model: ![avatar](images/models.png) ## Requirements 1. Please install the packages specified in requirements before running this program. 2. This model requires the [spotify datasets](https://research.spotify.com/datasets). ## Problem Description The sequence of songs is treated as a special language: just as a sentence or paragraph carries a specific meaning, a song list carries its own meaning. The goal therefore becomes a task comparable to natural language processing, where a neural network model is needed to identify the meaning of the context. Moreover, due to the diversity of song sequences, the model must also be able to predict sequences that have never been seen while maintaining high output accuracy.
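The file list above includes `one_hot_encode`/`one_hot_decode` helpers; a minimal sketch of what such helpers typically do (song indices to one-hot rows and back), with illustrative names only, is:

```python
import numpy as np

def one_hot_encode(indices, num_items):
    """Encode a sequence of item (song) indices as one-hot rows."""
    out = np.zeros((len(indices), num_items), dtype=np.float32)
    out[np.arange(len(indices)), indices] = 1.0
    return out

def one_hot_decode(one_hot):
    """Recover item indices from one-hot (or probability) rows."""
    return one_hot.argmax(axis=1)
```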
368
GarfieldLyu/OCR_POST_DE
['optical character recognition']
['Neural OCR Post-Hoc Correction of Historical Corpora']
keras_implement/nlc_preprocess.py keras_implement/cross_validation.py keras_implement/gated_cnn.py torch_implement/Model.py torch_implement/pipeline.py torch_implement/dataset.py create_data/parseText.py create_data/dataScrapy.py torch_implement/main.py keras_implement/networks.py keras_implement/statistic.py torch_implement/nlc_preprocess.py CRF/word_segment.py torch_implement/statistic.py create_data/sentenceAlignment.py torch_implement/seq2seq.py CRF/ngramWiki.py keras_implement/ocr_corrector.py getContentForManifest resolve_content_book parse getTxtByID text2Token defineCharacter removePunctuation tokenLengthWrapup tokenLength tokenLengthStatistic skipNgramTokenize clean hasDigit gen_grams make_pairs_for_book refer_query read_file wrapup_lsh_books gen_pairs target_lsh refine_pair parseWikisource read getWikiPureText trainNGramCharLM parseText ngramLM text2lines wrapupTrainNGramCharLM generate_letter parseWikiSamples get_all_training_corpus getNewsDTA17 create_char_labels create_char_features evaluate segment_text segment_text_pipe create_text_features build_crf wrapup_crf create_X_Y get_tagger prepare_corpus sents_2_ids get_cmd_args get_random_splits ids_2_tensors update_args main build_vocab GatedConvBlock custom_loss Encoder define_glu_rnn_single define_cnn_rnn define_glu_rnn define_simple_enc_dec data_to_token_ids id_to_token ngram_tokenize_by_char get_tokenizer ngram_tokenize create_vocab sentence_to_token_ids get_data prepare_nlc_data char_tokenize get_cmd_args ConvRnn prepare_data update_args main word_error_rate char_error_rate CustomDataInput pad_data CustomData LoadData Corpus main load_corpus get_cmd_args Seq2Seq Attention Decoder Encoder ngram_tokenize_by_char get_tokenizer ngram_tokenize get_data char_tokenize clean_book load_ocr_model fix_digits read_book digits_prob split_char_digit translate_page has_digit clean_page pipeline_corpus main is_short read_barcodes count_char pipeline sentenize Train word_error_rate char_error_rate get text loads join format print resolve_content_book isfile punctuation strip sub defineCharacter replace print tokenize append len isdigit filter strip removePunctuation split clean text2Token append set list len Series sort_index tokenLength tokenLengthStatistic print split range ngrams update join replace insert index add MinHashLSHForest MinHash MinHashLSH enumerate ngrams update join replace query MinHash ngrams get_alignments append refer_query target_lsh refine_pair gen_grams strip sub zip append gen_pairs make_pairs_for_book sum print encode strip sub isfile append parseText read isfile parseWikisource getWikiPureText format print len getNewsDTA17 defaultdict range len random join lower trainNGramCharLM FreqDist word_tokenize getWikiPureText lower float keys len str time list zip print sub isfile sample get_all_training_corpus append append extend print str time str time set_params print Tagger Trainer zip append train open list print tag zip float evaluate build_crf create_X_Y split prepare_corpus KFold Tagger open create_text_features replace tag enumerate segment_text Tagger open parse_args add_argument ArgumentParser kernels layer_num get_cmd_args range sorted _START_VOCAB dict tokenizer append tokenizer pad_sequences shape array print update_args max open str sents_2_ids name sum range build_vocab format create_model get_random_splits directory close batch join time ConvRnn print makedirs write tqdm ids_2_tensors generate_in_batch len cast equal enc_embedding Encoder dot Model summary append Input compile enumerate dot Model summary 
Input compile states enc_embedding Encoder dot Model inp summary Conv2D Input compile states enc_embedding Encoder dot Model inp summary Conv2D Input compile items sorted format str print tokenizer dict len tokenizer append sentence_to_token_ids dict data_to_token_ids create_vocab get_data max len join directory name get_tokenizer tokenizer dataset vocab_path print pad_sequences tokenizer data_path shape prepare_nlc_data prepare_data translate str split zeros get_pairs_by_group directory dataset random_group Corpus out_dim valid embedding_dim enc_units dec_units model_dir translate_in_batch inp_dim Train clip sparse_max basicConfig tokenizer LoadData n_features max_length vocab get epoch dropout start_train test load_corpus test_in_batch info prepare_other_corpus prepare_corpus get_cmd_args max_len model_name train tf vocab LoadData build_vocab_on_the_fly Train device len print len split print join getContentForManifest isfile isalpha lstrip sub replace int lstrip split append len append clean_page isdigit split len isdigit len append sub digits_prob split_char_digit join fix_digits zip copy translate has_digit translate_in_batch is_short append enumerate split join time format clean_book print read_book translate_page mkdir sentenize enumerate split join format load_ocr_model print read_barcodes pipeline exists enumerate pipeline_corpus
# OCR_POST_DE OCR post-correction for an old German corpus. More details can be found in our paper (https://arxiv.org/abs/2102.00583). Libraries: python 3.7, keras 2.4.3, tensorflow 2.3.1, pytorch 1.4.0. Other packages: NLTK, numpy, gensim, datasketch, Bio.pairwise2, entmax. create_data: 1. Download OCRed books from ÖNB (https://iiif.onb.ac.at/gui/manifest.html) by their unique barcode, see dataScrapy.py.
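The file list above includes `char_error_rate`/`word_error_rate`; a common definition of the character error rate is the edit distance between reference and hypothesis divided by the reference length, sketched here under that assumption (not necessarily the repository's exact implementation):

```python
def edit_distance(ref, hyp):
    """Levenshtein distance between two sequences, single-row dynamic programming."""
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, start=1):
            # deletion, insertion, substitution (cost 0 if characters match)
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]

def char_error_rate(reference, hypothesis):
    """CER = edit distance / reference length."""
    return edit_distance(reference, hypothesis) / max(len(reference), 1)
```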
369
GeorgeLuImmortal/Adaptive-Tuning-Active-Learning
['word embeddings', 'active learning']
['Investigating the Effectiveness of Representations Based on Word-Embeddings in Active Learning for Labelling Text Datasets']
encoding_text_transformer.py ACC_PRE_YIELD_BURDEN_active_learning.py encoding_text.py utilities.py inference.py utils.py fine_tuned.py main read_data clean_corpus generate_representation main stemmed_words main clean_corpus generate_representation read_data evaluate get_eval_report compute_metrics get_mismatched train load_and_cache_examples fine_tuned clean_corpus generate_representation read_data inference compute_p_nd BaseSelectionFunction QBC compute_candidate_set compute_density RandomSelection experiment UncertaintySelection CertiantyInformationGainSelection EnsembleModel compute_vote_entropy EGAL TrainModel Normalize BaseModel get_k_random_samples CertaintySelection DensityWeighted TheAlgorithm SvmModel InputFeatures InputExample BinaryProcessor _truncate_seq_pair convert_example_to_feature convert_examples_to_features DataProcessor required_max genfromtxt LabelEncoder DataFrame list add_option experiment gridsearch_step len input parse_args OptionParser dir_neg concatenate n_estimators set eval unique enumerate print error to_csv text_rep dir_out transform samples_init dir_pos fit tolist append listdir read_csv len punctuation translate lower sub maketrans append print mean append array split FreqDist load_facebook_vectors read_data clean_corpus generate_representation savetxt append normalize fit_transform range dir_input CountVectorizer keys load join toarray tqdm split from_pretrained cuda load join get_labels TensorDataset convert_examples_to_features save info tensor model get_linear_schedule_with_warmup tuple clip_grad_norm_ zero_grad DataLoader str initialize list master_params SummaryWriter format save_pretrained info trange enumerate int items join evaluate backward AdamW print add_scalar makedirs RandomSampler parameters tqdm_notebook step len get_dev_examples matthews_corrcoef ravel argmax update join format tuple squeeze len DataLoader eval numpy tqdm_notebook compute_metrics info append SequentialSampler load_and_cache_examples makedirs from_pretrained list evaluate print train get_labels info setLevel WARN to load_and_cache_examples len to print generate_representation savetxt read_data norm transpose PCA matmul components_ enumerate fit sum copy int sort cosine_similarity max len nan_to_num log2 sum min seed print reshape astype apply unique bincount DataFrame print raw_result TheAlgorithm run text_b convert_tokens_to_ids _truncate_seq_pair tokenize label float text_a len cpu_count pop len
# Adaptive Tuning Active Learning Investigating the Effectiveness of Representations Based on Pretrained Transformer-based Language Models in Active Learning for Labelling Text Datasets. This repository is temporarily associated with the paper [Lu, J., Henchion, M. and Mac Namee, B., 2019. Investigating the Effectiveness of Word-Embedding Based Active Learning for Labelling Text Datasets. arXiv preprint arXiv:1910.03505.](https://arxiv.org/abs/1910.03505) ### Dependencies Tested with Python 3.6; requires the following packages, which are available via pip: * Required: [numpy >= 1.16.4](http://www.numpy.org/) * Required: [scikit-learn >= 0.21.1](http://scikit-learn.org/stable/) * Required: [pandas >= 0.25.1](https://pandas.pydata.org/) * Required: [gensim >= 3.7.3](https://radimrehurek.com/gensim/) * Required: [matplotlib >= 2.2.2](https://matplotlib.org/)
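The file list above mentions selection strategies such as `UncertaintySelection` and `EGAL`; as a generic sketch of pool-based uncertainty sampling (not this repository's code), the least-confident unlabelled examples could be selected like this, assuming a scikit-learn-style classifier:

```python
import numpy as np

def uncertainty_sample(model, pool_X, batch_size=10):
    """Pick the pool examples the classifier is least confident about.

    model: any fitted classifier exposing predict_proba (scikit-learn style).
    pool_X: feature matrix of the unlabelled pool.
    """
    proba = model.predict_proba(pool_X)
    confidence = proba.max(axis=1)               # probability of the predicted class
    return np.argsort(confidence)[:batch_size]   # indices of least-confident rows
```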
370
GeorgeLuImmortal/Effectiveness-of-Pretrained-Transformer-based-Language-Models-in-Active-Learning-for-Labelling-Data
['word embeddings', 'active learning']
['Investigating the Effectiveness of Representations Based on Word-Embeddings in Active Learning for Labelling Text Datasets']
encoding_text_transformer.py encoding_text.py ACC_PRE_YIELD_BURDEN_active_learning.py utilities.py main read_data clean_corpus generate_representation main stemmed_words main clean_corpus generate_representation read_data compute_p_nd BaseSelectionFunction QBC compute_candidate_set compute_density RandomSelection experiment UncertaintySelection CertiantyInformationGainSelection EnsembleModel compute_vote_entropy EGAL TrainModel Normalize BaseModel get_k_random_samples CertaintySelection DensityWeighted TheAlgorithm SvmModel required_max genfromtxt LabelEncoder DataFrame list add_option experiment gridsearch_step len input parse_args OptionParser dir_neg concatenate n_estimators set eval unique enumerate print error to_csv text_rep dir_out transform samples_init dir_pos fit tolist append listdir read_csv len punctuation translate lower sub maketrans append print mean append array split FreqDist load_facebook_vectors read_data clean_corpus generate_representation savetxt append normalize fit_transform range dir_input CountVectorizer keys load join toarray tqdm split from_pretrained cuda norm transpose PCA matmul components_ enumerate fit sum copy int sort cosine_similarity max len nan_to_num log2 sum min seed print reshape astype apply unique bincount DataFrame print raw_result TheAlgorithm run
# Effectiveness-of-Pretrained-Transformer-based-Language-Models-in-Active-Learning-for-Labelling-Data Investigating the Effectiveness of Representations Based on Pretrained Transformer-based Language Models in Active Learning for Labelling Text Datasets. This repository is temporarily associated with the paper [Lu, J., Henchion, M. and Mac Namee, B., 2019. Investigating the Effectiveness of Word-Embedding Based Active Learning for Labelling Text Datasets. arXiv preprint arXiv:1910.03505.](https://arxiv.org/abs/1910.03505) ### Dependencies Tested with Python 3.6; requires the following packages, which are available via pip: * Required: [numpy >= 1.16.4](http://www.numpy.org/) * Required: [scikit-learn >= 0.21.1](http://scikit-learn.org/stable/) * Required: [pandas >= 0.25.1](https://pandas.pydata.org/) * Required: [gensim >= 3.7.3](https://radimrehurek.com/gensim/) * Required: [matplotlib >= 2.2.2](https://matplotlib.org/)
371
Ghadjeres/schoenberg-rao
['density estimation']
['Schoenberg-Rao distances: Entropy-based and geometry-aware statistical Hilbert distances']
SR.py SRAE.py SR_mixtures_optimized discret_continu rao_quad_entropy rao_sim_entropy SR_discrete_optimized_batched SR_gaussian_optimized SR_gaussian gaussian SR_mixture_discrete_optimized SR_discrete_optimized SR_gaussian_discrete_optimized SR_discrete_same_atoms sample_and_resize generate_mixture batch_pdist mixtures_Jgaussiansl2 SR_mixtures loss_function_SSR loss_function_SR VAE test train einsum append rand softmax randn batch_pdist unsqueeze cat exp sqrt pi erf exp pi erf sqrt unsqueeze exp pi erf sqrt unsqueeze SR_gaussian_discrete_optimized SR_gaussian_optimized abs unsqueeze SR_gaussian_optimized f sum f sum int sum abs choice resize append numpy range len SR_mixtures_optimized view randn binary_cross_entropy zeros_like size sqrt Tensor to sum cat ones_like exp view binary_cross_entropy size randn_like SR_discrete_optimized to sum format model backward print dataset zero_grad loss_function item to step enumerate len print eval format
# Schoenberg-Rao distances Code accompanying the "Schoenberg-Rao distances: Entropy-based and Geometry-aware Statistical Hilbert Distances" paper by Hadjeres and Nielsen. The barycenter computations of Fig. 5 are done in the `barycenters.ipynb` Jupyter notebook. The Schoenberg-Rao Auto-Encoder is in the `SRAE.py` file. Use `python SRAE.py -h` for the available parameters. All other figures are reproducible with the `main.ipynb` notebook. Results from different SRAE settings are included in the `results_*` folders.
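SR.py lists a `rao_quad_entropy` function; Rao's quadratic entropy of a discrete distribution p over atoms with pairwise distances d is Q(p) = Σᵢⱼ pᵢ pⱼ dᵢⱼ, and a minimal NumPy sketch of that textbook definition (not necessarily the repository's exact form) is:

```python
import numpy as np

def rao_quadratic_entropy(p, dist):
    """Q(p) = sum_ij p_i * p_j * d(x_i, x_j) for a discrete distribution p
    over atoms whose pairwise distances are given by the matrix dist."""
    p = np.asarray(p, dtype=float)
    dist = np.asarray(dist, dtype=float)
    return float(p @ dist @ p)
```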
372
GhiXu/ACMP
['depth estimation']
['Planar Prior Assisted PatchMatch Multi-View Stereo']
colmap2mvsnet_acm.py read_points3D_text Image read_cameras_binary qvec2rotmat processing_single_scene read_cameras_text read_images_text read_points3d_binary read_model calc_score read_next_bytes read_images_binary rotmat2qvec read join read_points3D_text read_cameras_binary read_cameras_text read_points3d_binary read_images_text read_images_binary eigh array flat norm sorted arccos pi dot point3D_ids append xyz dense_folder imwrite save_folder tvec max_d Pool exists list sorted model_ext name map matmul read_model copyfile append imread interval_scale range format partial asscalar keys enumerate join items norm print qvec2rotmat makedirs inv min rmtree qvec point3D_ids zeros array len
# ACMP [News] The code for [ACMH](https://github.com/GhiXu/ACMH) is released!!! [News] The code for [ACMM](https://github.com/GhiXu/ACMM) is released!!! [News] The code for [ACMMP](https://github.com/GhiXu/ACMMP) is released!!! ## About This repository contains the code for the paper [Planar Prior Assisted PatchMatch Multi-View Stereo](https://arxiv.org/abs/1912.11744), Qingshan Xu and Wenbing Tao, AAAI2020. If you find this project useful for your research, please cite: ``` @article{Xu2020ACMP, title={Planar Prior Assisted PatchMatch Multi-View Stereo}, author={Xu, Qingshan and Tao, Wenbing},
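The conversion script lists a `qvec2rotmat` helper; assuming COLMAP's scalar-first quaternion convention (w, x, y, z), the standard unit-quaternion-to-rotation-matrix formula it would implement is:

```python
import numpy as np

def qvec2rotmat(qvec):
    """Rotation matrix from a unit quaternion (w, x, y, z), COLMAP convention assumed."""
    w, x, y, z = qvec
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w),     2 * (x * z + y * w)],
        [2 * (x * y + z * w),     1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w),     2 * (y * z + x * w),     1 - 2 * (x * x + y * y)],
    ])
```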
373
GilesStrong/HiggsML_Lumin
['data augmentation']
['On the impact of selected modern deep-learning techniques to the performance and celerity of classification models in an experimental high-energy physics use case']
modules/basics.py modules/plotting.py modules/data_import.py Result ExpComp bs_ams export_test_to_csv Experiment bootstrap_score_test_data get_bottleneck_output score_test_data convert_to_df score_test_data_per_fold proc_targets import_data calc_pair_transverse_mass parse_cats add_mass_feats run_data_import plot_convergence _format_scatter_plot_axes plot_objective partial_dependence plot_evaluations get_column sum to_binary_class format DataFrame print calc_ams range get_column sum to_binary_class print calc_ams n_folds append DataFrame range uncert_round seed arange choice put calc_ams append sum range len get_column to_binary_class print mean mp_run DataFrame std range uncert_round list print get_feature File sort_values to_csv DataFrame range len print get_feature DataFrame FowardHook predict sqrt DataFrame square calc_pair_transverse_mass calc_pair_mass log proc_event format print len rename add_mass_feats train_test_split DataFrame read_csv drop k_means partial fit_input_pipe gen_sample proc_targets import_data set fold_func transform proc_cats append grid axhline linspace list set_title set_yscale set_xlabel legend gca range get plot mean zip x_iters isinstance viridis set_ylabel len set_yticklabels axis tick_top tick_params set_major_locator tick_right set_xlabel set_scale range MaxNLocator set_xticklabels set_xlim n_dims set_label_position set_ticks_position zip set_xscale set_ylabel set_ylim rvs bounds predict mean linspace transform array append rvs subplots savepath show _format_scatter_plot_axes axvline scatter contourf savefig space range asarray plot LogLocator n_dims x_iters subplots_adjust partial_dependence transform show list asarray x_iters subplots _format_scatter_plot_axes n_dims subplots_adjust scatter log10 hist logspace space savefig range savepath
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3754670.svg)](https://doi.org/10.5281/zenodo.3754670) # On the impact of modern deep-learning techniques to the performance and time-requirements of classification models in experimental high-energy physics This repo is designed to support the paper "On the impact of modern deep-learning techniques to the performance and time-requirements of classification models in experimental high-energy physics", Strong 2020 Mach. Learn.: Sci. Technol. https://doi.org/10.1088/2632-2153/ab983a (Preprint: [arXiv:2002.01427 [physics.data-an]](https://arxiv.org/abs/2002.01427)). It contains code to rerun the experiments performed in the paper to reproduce the results, allow users to better understand how each method is used, and provide a baseline for future comparisons. ## Installation ### Get code & data 1. `git clone https://github.com/GilesStrong/HiggsML_Lumin.git` 1. `cd HiggsML_Lumin` 1. `mkdir data` 1. `wget -O data/atlas-higgs-challenge-2014-v2.csv.gz http://opendata.cern.ch/record/328/files/atlas-higgs-challenge-2014-v2.csv.gz` 1. `gunzip data/atlas-higgs-challenge-2014-v2.csv.gz`
374
GinGinWang/MTQ
['density estimation']
['Masked Autoregressive Flow for Density Estimation', 'Sum-of-Squares Polynomial Flow', 'Multivariate Triangular Quantile Maps for Novelty Detection']
models/loss_functions/sosloss.py models/loss_functions/flow_loss.py models/estimator.py datasets/base.py models/transform_maf.py datasets/fmnist.py result_helpers/utils.py result_helpers/test_one_class.py datasets/kddcup.py models/__init__.py models/flow_maf_models.py result_helpers/__init__.py datasets/transforms.py models/loss_functions/autoregression_loss.py utils.py datasets/thyroid.py models/LSA_mnist.py datasets/mnist.py models/loss_functions/__init__.py models/base.py models/blocks_2d.py models/LSA_kddcup.py datasets/__init__.py models/transform_sos.py main.py models/flow_sos_models.py models/loss_functions/lsasosloss.py models/estimator_1D.py models/loss_functions/reconstruction_loss.py datasets/utils.py models/loss_functions/lsaloss.py models/loss_functions/lsaenloss.py main parse_arguments create_checkpoints_dir create_file_path set_random_seed weights_init _init_fn OneClassDataset DatasetBase FMNIST KDDCUP MNIST THYROID AddNoise OCRemoveMean ToFloatTensor3DMask ToFloatTensor3D OCToFloatTensor2D ToFloatTensor2Dt RemoveMean ToFloat32 ToCrops ToFloatTensor1D OCToFloatTensor3D DropoutNoise SubtractBackground OCToFloatTensor2D_cifar10 RemoveBackgroundAndConcatMaskToY ToFloatTensor2D OCToFloatTensor2Dt ToRandomCrops RemoveBackground ToFloatTensor2D_cifar10 RandomMirror OCToFloatTensor1D normalize novelty_score BaseModule ResidualBlock residual_op BaseBlock DownsampleBlock UpsampleBlock DE MaskedFullyConnection Estimator1D MADE MaskedLinear ActNorm get_mask InvertibleMM CouplingLayer LUInvertibleMM Reverse BatchNormFlow Sigmoid Shuffle FlowSequential Logit ConditionerNet MaskedLinear get_mask SOSFlow Reverse BatchNormFlow FlowSequential Decoder LSA_KDDCUP Encoder Decoder PrintLayer LSA_MNIST Encoder TinvMAF TinvSOS AutoregressionLoss FlowLoss LSAENLoss LSALoss LSASOSLoss ReconstructionLoss SOSLoss OneClassTestHelper _init_fn modify_inf compute_density_metric compute_quantile_metric custom_viz plot_source_dist_by_dimensions create_checkpoints_dir plot_training_loss_auroc set_random_seed pretrained lam device fixed dataset cuda seed testflag mulobj test_one_class_classification_with_trainset ValueError name create_file_path select shape parse_arguments num_blocks trainflag code_length using_train_set estimator FMNIST compute_AUROC checkpoint MNIST test_classification print OneClassTestHelper KDDCUP score_normed train_one_class_classification THYROID hidden_size add_argument ArgumentParser print makedirs seed str manual_seed_all manual_seed seed int data ConvTranspose1d isinstance xavier_normal_ BatchNorm3d Conv3d Linear Conv2d normal_ Conv1d BatchNorm1d BatchNorm2d ConvTranspose2d constant_ ConvTranspose3d bn1 bn2 f2 activation_fn f1 f3 bn3 arange isinf range len print shape savefig scatter_matrix DataFrame range len percentile print where accuracy_score precision_recall_fscore_support float sum len percentile print where precision_recall_fscore_support float sum len show add_subplot tight_layout axis imshow set_size savefig figure range
# Multivariate Triangular Quantiles for Novelty Detection Pytorch implementation to replicate the experiments in our NIPS2019 paper "Multivariate Triangular Quantiles for Novelty Detection" (published soon). # Datasets * Thyroid: http://odds.cs.stonybrook.edu/thyroid-disease-dataset/ * KDDCUP: http://kdd.ics.uci.edu/databases/kddcup99/kddcup.testdata.unlabeled_10_percent.gz * MNIST/Fashion MNIST: automatically download by torchvision # Models This implementation includes the following models for novelty detection. * LSA: Autoencoder part in [2] * LSA_EN[2]: Autoencoder + Density Estimator in [2]
375
GlebBrykin/SANET
['style transfer']
['Arbitrary Style Transfer with Style-Attentional Networks']
Eval.py
# SANET This is an unofficial PyTorch implementation of "Arbitrary Style Transfer with Style-Attentional Networks". Official paper: https://arxiv.org/abs/1812.02342v5 To run, download the weights and place them in the folder with Eval.py. Links to weights on Yandex.Disk: * decoder: https://yadi.sk/d/xsZ7j6FhK1dmfQ * transformer: https://yadi.sk/d/GhQe3g_iRzLKMQ * vgg_normalised: https://yadi.sk/d/7IrysY8q8dtneQ Or, you can download the latest release. It contains all weights, code and examples. # How to evaluate To test the code, make changes to the following lines in the file Eval.py. Here you need to specify the paths to the style and content images. After that, save the changes to the file and run it.
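A purely hypothetical example of such an edit (the variable names below are assumptions, not necessarily the ones used in Eval.py):

```python
# Hypothetical lines inside Eval.py: point these at your own files before running.
content_image_path = 'content/golden_gate.jpg'   # path to the content image (assumption)
style_image_path = 'style/la_muse.jpg'           # path to the style image (assumption)
```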
376
Goda-Research-Group/MLMC_stochastic_gradient
['experimental design', 'stochastic optimization']
['Unbiased MLMC stochastic gradient-based optimization of Bayesian experimental designs']
mlmc_eig_grad/optimize.py mlmc_eig_grad/models.py mlmc_eig_grad/experiments.py mlmc_eig_grad/visualize_eig.py setup.py mlmc_eig_grad/mlmc_eig.py mlmc_eig_grad/visualize_path.py main expr_test expr_pk mlmc_eig_value_and_grad nested_mc mlmc_eig_value mlmc_eig_grad randomized_mlmc variance_check_graph bias_variance_check bias_variance_check_and_graph variance_check_with_path ggrad_pk J_and_H_pk J_t A g_t dd_log_p_t eig_t H_pk ggrad_t dd_log_p_pk J_pk dA H_t show_xi g_pk qY_t progress_show_xi restriction_clip progress_pass amsgrad_initialize stochastic_gradient gradient_randomized_mlmc get_restriction_clip get_progress_show_xi get_progress_show_list progress_show_list restriction_pass amsgrad get_gradient_randomized_mlmc get_gradient_nested condition_num_iters gradient_nested_mc amsgrad_iterate get_condition_num_iters eig_with_path contour_and_paths expr_test expr_pk model_t print mlmc_eig_value get_progress_show_list eig_t mlmc_eig_grad savetxt restriction_pass get_gradient_randomized_mlmc amsgrad get_gradient_nested contour_and_paths bias_variance_check_and_graph variance_check_with_path get_condition_num_iters xi_scale progress_pass variance_check_with_path show_xi str model_pk uniform savetxt eig_with_path append range RandomState get_progress_show_xi get_restriction_clip mlmc_eig_grad mean amsgrad bias_variance_check_and_graph get_condition_num_iters print get_gradient_randomized_mlmc rvs ggrad g list T spawn inv close pdf cov repeat zip Pool sum array log str arange set_title plot text dot log2 round legend sum len print subplots print close savefig variance_check_graph bias_variance_check mlmc_fn subplots print bias_variance_check close savefig variance_check_graph zip append T A inv dot mean cov cov inv where T exp squeeze maximum array minimum ones_like subplots plot ones text print close maximum set_figheight set_visible savefig tick_params xticks yticks sqrt maximum gradient initialize progress restriction condition iterate append array minimum maximum str print mean show_xi len print len arange plot xlabel print close ylabel mean savefig figure legend append range len arange linspace ylabel ylim savefig legend meshgrid range plot close eig_func xlim contour xlabel print figure zeros array len
# mlmc-eig-grad ## Overview The code used for the numerical experiments in the paper (https://doi.org/10.1137/20M1338848). ## Usage To install: ``` pip install git+https://github.com/Goda-Research-Group/MLMC_stochastic_gradient.git ``` To execute: ```
377
GokulKarthik/EAST.pytorch
['optical character recognition', 'scene text detection', 'curved text detection']
['EAST: An Efficient and Accurate Scene Text Detector']
train.py eval.py test.py dataset.py config.py model.py utils.py format_data.py loss.py Config ImageTestDataSet ImageDataSet load_shapes_coords quads_to_rboxes load_score_and_geometry_map_raw list_images load_image load_score_and_geometry_map_formatted eval_dataset load_shapes_coords quads_to_rboxes LossFunction EAST compute_iou_using_cv2 compute_iou_using_sympy non_maximal_supression draw_bbs send_message check_overlap send_picture reverse_shift join format join list listdir shuffle reshape quads_to_rboxes read_csv values imread moveaxis astype float32 max_pool_2d astype MaxPool2d load_shapes_coords flatten from_numpy int32 zip zeros moveaxis zeros moveaxis reshape values imwrite draw_bbs DataLoader compute_loss forward cuda tolist strftime ceil imread double format non_maximal_supression gmtime mkdir item zip reverse_shift enumerate join time print ImageDataSet extend to_csv tqdm numpy len Polygon area intersection float abs zeros_like fillPoly astype int32 zeros sum encloses vertices Polygon T filter_function tolist repeat zip append len chat_postMessage files_upload line reshape moveaxis array range
# EAST Text Detection Model in PyTorch ### - with single QUAD Representation of Bounding Boxes A PyTorch implementation of EAST: An Efficient and Accurate Scene Text Detector for bounding-box detection. ## References: 1. https://arxiv.org/pdf/1704.03155.pdf 2. https://github.com/liushuchun/EAST.pytorch 3. https://www.pyimagesearch.com/2018/08/20/opencv-text-detection-east-text-detector/ ## Steps: 1. The data folder should be organised as follows: the data dir should have three sub-dirs, namely "train", "dev" and "test" (see the sketch below).
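A minimal sketch that creates the layout described in step 1 (the root folder name `data` is an assumption):

```python
import os

# Create the three expected sub-directories under the data root.
for split in ("train", "dev", "test"):
    os.makedirs(os.path.join("data", split), exist_ok=True)
```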
378
GoodAI/torchsim
['model based reinforcement learning']
['ToyArchitecture: Unsupervised Learning of Interpretable Models of the World']
torchsim/core/nodes/dataset_alphabet_node.py torchsim/core/logging/__init__.py torchsim/core/eval2/scaffolding.py torchsim/core/models/expert_params.py torchsim/research/se_tasks/topologies/se_io/se_io_task0_dataset.py torchsim/core/eval2/document_publisher.py torchsim/core/nodes/salient_region_node.py torchsim/core/logging/ui_log_handler.py torchsim/gui/observers/memory_block_observer.py tests/core/eval/test_cluster_agreement.py tests/core/nodes/test_images_dataset_node.py tests/templates/test_template_helpers.py tests/core/gui/observers/test_flock_process_observable.py tests/gui/test_observer_view.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/topologies/se_dataset_sp_lrf_debug.py tests/research/rt_1_1_1/test_rt1.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/topologies/se_dataset_ta_lrf.py torchsim/core/nodes/space_engineers_connector_node.py torchsim/topologies/expert_hierarchy_topology.py torchsim/core/nodes/dataset_se_objects_node.py torchsim/core/eval/sliding_window.py tests/research/test_research_topics_topologies.py tests/core/utils/test_node_utils.py torchsim/core/eval/metrics/simple_classifier_nn.py tests/core/nodes/test_spatial_pooler_node.py torchsim/significant_nodes/sp_reconstruction_layer.py torchsim/core/eval2/experiment_controller.py torchsim/research/research_topics/rt_2_1_1_relearning/topologies/task0_basic_topology.py tests/core/nodes/test_receptive_field.py torchsim/research/research_topics/rt_4_3_1_gradual_world/node_groups/gate_network_group.py torchsim/topologies/gradual_learning_topology.py torchsim/core/nodes/constant_node.py torchsim/research/research_topics/rt_2_1_2_learning_rate/topologies/task0_ta_se_topology.py torchsim/core/models/spatial_pooler/kernels/__init__.py torchsim/research/research_topics/rt_4_3_1_gradual_world/topologies/gl_nn_world_topology.py torchsim/core/nodes/flock_node_utils.py tests/core/models/flock/test_buffer.py tests/core/models/spatial_pooler/test_buffer.py tests/core/models/spatial_pooler/test_process.py torchsim/core/physics_model/pymunk_physics.py torchsim/core/models/temporal_pooler/learning.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/adapters/se_ta_running_stats_adapter.py torchsim/research/research_topics/rt_4_3_1_gradual_world/topologies/general_gl_topology.py torchsim/research/se_tasks/experiments/task_0_experiment_template.py torchsim/gui/validators.py torchsim/gui/observers/buffer_observer.py tests/core/graph/test_node_ordering.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/topologies/watch_l_3_conv_topology.py torchsim/core/datasets/dataset_se_task_one.py torchsim/core/persistence/persistor.py torchsim/research/research_topics/rt_3_6_1_inductive_bias_attention/node_groups/attention_classification_group.py torchsim/core/eval/run_measurement.py main.py tests/core/nodes/test_simpleBouncingBallNode.py torchsim/core/nodes/motion_detection_node.py tests/core/eval/test_classifier_interface.py torchsim/core/eval/node_accessors/se_node_accessor.py torchsim/core/physics_model/debug_world.py torchsim/core/models/flock/__init__.py tests/core/graph/node_stub.py torchsim/core/nodes/receptive_field_node.py torchsim/topologies/bouncing_ball_topology.py tests/core/graph/test_invertible_node_base.py torchsim/research/experiment_templates/lrf_1sp_flock_template.py torchsim/core/eval/metrics/entropy.py torchsim/core/models/expert_params_props.py torchsim/core/models/spatial_pooler/learning.py torchsim/core/nodes/expert_node.py 
torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/topologies/lrf_topology.py torchsim/topologies/bottom_up_attention_topology.py torchsim/core/nodes/conv_expert_node.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/experiments/bat_test.py torchsim/significant_nodes/reconstruction_interface.py torchsim/core/eval/metrics/simple_classifier_svm.py torchsim/research/research_topics/rt_4_3_1_gradual_world/node_groups/switchable_world_group.py tests/core/datasets/alphabet/test_alphabet_generator.py tests/core/node_accessors/test_sp_flock_node_accessor.py kubench.py torchsim/research/research_topics/rt_3_2_1_symbolic_input_words/experiments/ta_symbolic_input_words.py torchsim/topologies/convolutional_topology.py torchsim/research/se_tasks/topologies/se_task_topology.py tests/core/nodes/test_se_objects_dataset_node.py tests/core/nodes/test_weighted_avg_node.py tests/core/nodes/test_fork_node.py torchsim/core/nodes/flock_networks/network_factory.py tests/core/models/flock/test_flock_integration.py tests/core/nodes/test_scatter_node.py torchsim/research/research_topics/rt_4_3_1_gradual_world/nodes/custom_nodes.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/topologies/benchmark_lrf_flock_topology.py tests/test_testing_utils.py torchsim/core/utils/tensor_utils.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/experiments/se_dataset_sp_experiment.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/templates/gradual_learning_basic_template.py torchsim/research/nn_gl/gl_nn_experiment.py torchsim/core/models/spatial_pooler/process.py torchsim/core/nodes/__init__.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/adapters/sp_mnist_learning_convergence_adapter.py torchsim/research/se_tasks/topologies/task1_base_topology.py torchsim/research/se_tasks/experiments/task_0_experiment.py torchsim/core/nodes/pass_node.py tests/core/models/spatial_pooler/test_spatial_pooler.py tests/core/nodes/test_periodic_update_node_group.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/rt_2_1_3_experiment_template.py torchsim/topologies/toyarch_groups/ncm_group.py torchsim/core/eval2/experiment_template_base.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/adapters/se_dataset_ta_running_stats_adapter.py torchsim/core/nodes/sequence_node.py torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/experiments/LRF_1SPFlock_MNIST_stability.py torchsim/gui/observers/hierarchical_observer.py torchsim/core/eval/series_plotter.py tests/templates/random_number_topology_adapter.py torchsim/gui/observers/cluster_observer.py main_expert_flock_profiling.py torchsim/research/nn_gl/gl_nn_experiment_template.py torchsim/research/experiment_templates/simulation_running_stats_template.py tests/core/graph/test_memory_block.py torchsim/research/research_topics/rt_3_1_lr_subfields/experiments/watch_CN_C1_R1.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/node_groups/dummy_model_group.py tests/core/datasets/test_mnist.py torchsim/gui/observers/flock_process_observable.py tests/core/nodes/test_expand_node.py torchsim/core/eval/topology_adapter_base.py torchsim/core/eval2/measurement_manager.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/adapters/se_dataset_sp_learning_convergence_adapter.py torchsim/gui/observers/tensor_observable.py tests/core/models/test_expert_params.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/debug/debug_lrf_runner.py torchsim/utils/os_utils.py tests/core/graph/test_graph.py 
torchsim/research/se_tasks/experiments/task_1_experiment.py torchsim/topologies/goal_directed_narrow_hierarchy_topology.py torchsim/topologies/context_test_topology.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/experiments/rt_2_1_3_low_cc.py torchsim/core/physics_model/torchtensor_utils.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/topologies/flock_partial_switch_node_group.py torchsim/topologies/symbolic_input_topology.py tests/core/models/receptive_field/test_grid.py torchsim/core/graph/slot_container_base.py tests/core/eval2/test_experiment_runner.py torchsim/research/nn_gl/main.py torchsim/core/eval/metrics/mean_squared_error.py torchsim/core/nodes/grid_world_node.py tests/core/models/integration_test_utils.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/modular/learning_rate_ta_modular_adapter.py tests/core/nodes/test_flatten_node.py torchsim/core/eval/node_accessors/mnist_node_accessor.py torchsim/research/research_topics/rt_2_1_2_learning_rate2/node_groups/ta_multilayer_node_group.py torchsim/research/research_topics/rt_2_1_2_learning_rate/topologies/task0_nn_topology.py torchsim/research/research_topics/rt_4_2_1_actions/topologies/goal_directed_template_topology.py torchsim/research/baselines/adapters/task_0_baselines_adapter.py torchsim/gui/observer_system.py torchsim/core/models/flock/expert_flock.py torchsim/topologies/looping_topology.py torchsim/research/se_tasks/topologies/se_task0_convolutional_expert_topology.py torchsim/research/research_topics/rt_3_2_1_symbolic_input_words/topologies/symbolic_input_words_topology.py torchsim/research/research_topics/rt_4_3_1_gradual_world/node_groups/flock_network_group.py tests/core/nodes/test_random_number_node.py torchsim/core/nodes/cartographic_map_node.py eval_utils.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/experiments/task_0_conv_two_layer_experiments.py torchsim/research/baselines/experiments/task_0_experiment.py torchsim/core/graph/node_ordering.py torchsim/core/graph/worker_node_base.py torchsim/core/models/temporal_pooler/tp_output_projection.py tests/core/eval2/test_scaffolding_graph.py tests/core/models/temporal_pooler/test_untrained_forward_process.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/adapters/task0_basic_adapter.py torchsim/research/research_topics/rt_2_1_2_learning_rate2/node_groups/classification_model_group.py tests/test_circular_import.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/topologies/task0_conv_wide_topology.py torchsim/research/se_tasks/experiments/task_0_baselines_experiment.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/task0_ta_multilayer_adapter.py torchsim/research/se_tasks/topologies/se_io/se_io_task0_dataset_phased.py torchsim/utils/baselines_utils.py torchsim/core/graph/invertible_node.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/topologies/dataset_switch_node_group.py tests/core/nodes/test_to_one_hot_node.py torchsim/core/nodes/agent_actions_parser_node.py tests/gui/server/test_ui_api.py torchsim/core/memory/tensor_creator.py torchsim/core/models/spatial_pooler/reconstruction.py torchsim/topologies/noise_topology.py tests/core/models/temporal_pooler/test_integration.py tests/core/nodes/test_constant_node.py torchsim/gui/server/ui_server_connector.py torchsim/core/nodes/simple_bouncing_ball_node.py torchsim/research/se_tasks/adapters/task_1_stats_basic_adapter.py torchsim/core/experiment_runner.py 
torchsim/research/research_topics/rt_3_1_lr_subfields/node_groups/CN_C1_R1.py tests/core/models/temporal_pooler/test_tp_output_projection.py torchsim/core/models/spatial_pooler/__init__.py torchsim/core/physics_model/latent_world.py torchsim/utils/dict_utils.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/experiments/rt_2_1_3_experiment.py torchsim/core/datasets/dataset_se_task_zero.py tests/core/models/flock/test_process.py tests/core/models/receptive_field/test_mapping.py torchsim/core/models/temporal_pooler/untrained_forward_and_backward.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/topologies/task0_dummy_model_topology.py tests/core/graph/test_save_load.py torchsim/core/graph/slot_container.py torchsim/significant_nodes/conv_layer.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/topologies/dataset_alphabet_node_group.py tests/core/eval/classifier_tests.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/topologies/mnist_sp_topology.py torchsim/core/eval2/parameter_extractor.py tests/core/eval/test_comparison_matrix.py tests/research/se_tasks2/test_se_train_test_component.py torchsim/core/nodes/flock_networks/multi_layer_perceptron_flock.py tests/core/utils/list_list_utils.py tests/templates/test_measurement_manager.py tests/core/graph/test_connections.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/topologies/l1_topology.py torchsim/topologies/single_expert_expermental_topology.py torchsim/topologies/ta_exploration_grid_world_topology.py tests/gui/test_observer_properties_builder.py torchsim/core/utils/sequence_generator.py torchsim/utils/node_utils.py torchsim/core/models/receptive_field/grid.py torchsim/core/nodes/lambda_node.py torchsim/core/nodes/periodic_update_node_group.py torchsim/core/nodes/dataset_mnist_node.py torchsim/core/graph/inverse_pass_packet.py tests/core/nodes/test_unsqueeze_node.py torchsim/topologies/goal_directed_topology.py torchsim/topologies/network_flock_topology.py tests/core/eval/test_cluster_mutual_information.py tests/core/nodes/test_accuracy_node.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/topologies/task0_conv_wide_topology_more_labels.py torchsim/core/eval/node_accessors/se_io_accessor.py torchsim/topologies/se_toyarch_debug_topology.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/adapters/task0_conv_wide_adapter.py torchsim/significant_nodes/switchable_sequence_nodegroup.py torchsim/research/research_topics/rt_4_2_1_actions/templates/goal_directed_template.py torchsim/core/eval/doc_generator/figure.py tests/utils/test_cache_utils.py torchsim/core/nodes/multi_dataset_alphabet_node.py torchsim/core/nodes/flock_networks/delay_buffer.py torchsim/core/eval/measurement_manager.py torchsim/research/research_topics/rt_2_1_2_learning_rate/node_groups/nn_node_group.py torchsim/core/nodes/disentagled_world_renderer.py torchsim/research/research_topics/rt_4_2_1_actions/node_groups/single_expert_group.py torchsim/significant_nodes/ball_env.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/nodes/rgb_debug_node.py tests/core/datasets/test_sequence.py torchsim/core/nodes/weighted_avg_node.py torchsim/core/physics_model/rendered_world.py torchsim/research/experiment_templates/task0_train_test_template_base.py torchsim/research/research_topics/rt_2_1_1_relearning/experiments/task_0_experiments.py torchsim/core/models/spatial_pooler/forward.py tests/core/memory/test_on_device.py 
tests/core/models/temporal_pooler/test_temporal_pooler.py torchsim/core/eval2/train_test_switchable.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/experiments/mnist_sp_experiment.py tests/research/rt_2_1_3/test_l3_conv_topology.py torchsim/topologies/debug_agent_topology.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/topologies/task0_base_topology.py tests/core/gui/observers/test_tensor_observable.py torchsim/core/nodes/internals/grid_world.py torchsim/core/graph/unit.py torchsim/topologies/toyarch_groups/r1ncm_group.py tests/test_imports.py torchsim/topologies/multi_dataset_alphabet_topology.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/topologies/task0_narrow_topology.py tests/core/nodes/test_salient_region_node.py torchsim/topologies/toyarch_groups/ncmr1_group.py torchsim/core/models/temporal_pooler/forward_and_backward.py tests/gui/server/test_ui_server_connector.py torchsim/core/kernels/__init__.py torchsim/core/eval/node_accessors/random_number_accessor.py torchsim/core/graph/connection.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/node_groups/multilayer_model_group.py torchsim/research/se_tasks/topologies/se_task1_conv_topology.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/experiments/se_ta_running_stats_experiment.py torchsim/research/research_topics/rt_2_1_1_relearning/adapters/task0_relearn_basic_adapter.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/experiments/task0_analysis_experiment.py tests/core/nodes/test_mse_node.py torchsim/topologies/switch_topology.py tests/core/utils/test_signals.py torchsim/core/graph/node_base.py tests/topologies/test_dataclass.py torchsim/core/graph/__init__.py tests/core/graph/test_node_base.py torchsim/research/research_topics/rt_2_1_2_learning_rate/node_groups/ta_multilayer_node_group_params.py tests/core/models/receptive_field/test_reverse_mapping.py tests/core/nodes/test_grayscale_node.py torchsim/core/models/flock/flock.py torchsim/utils/template_utils/template_helpers.py tests/core/gui/observers/test_observer_persistence.py torchsim/research/experiment_templates/dataset_simulation_running_stats_template.py tests/core/nodes/test_random_subfield_node.py torchsim/core/nodes/accuracy_node.py torchsim/core/persistence/persistable.py torchsim/topologies/nnet_topology.py torchsim/research/research_topics/rt_3_1_lr_subfields/watch_conv_subfiled_layer.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/topologies/task0_ta_analysis_topology.py torchsim/research/experiment_templates/task0_train_test_template_relearning.py torchsim/topologies/task0_ta_bottom_up_classification_topology.py tests/core/memory/test_tensor_creator.py torchsim/core/graph/hierarchical_observable_node.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/topologies/gradual_learning_basic_topology.py tests/core/eval2/test_dict_utils.py torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/experiments/LRF_1SPFlock_MNIST.py tests/core/nodes/test_switch_node.py torchsim/research/se_tasks/topologies/se_task0_basic_topology.py tests/core/nodes/test_dataset_sequence_mnist_node.py torchsim/research/se_tasks/se_tasks.py torchsim/significant_nodes/environment_base.py tests/core/test_experiment_runner.py tests/core/nodes/test_lambda_node.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/topologies/se_dataset_sp_representation.py tests/core/eval2/test_parameter_extractor.py torchsim/core/eval/node_accessors/sp_node_accessor.py 
torchsim/research/research_topics/rt_2_1_2_learning_rate/experiments/nnet_classification_accuracy_experiment.py torchsim/core/eval/doc_generator/document.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/experiments/task0_dummy_experiment.py torchsim/research/research_topics/rt_2_1_2_learning_rate/experiments/ta_classification_accuracy_experiment.py torchsim/core/models/neural_network/network_flock_buffer.py torchsim/research/research_topics/rt_4_2_1_actions/node_groups/two_experts_group.py torchsim/core/datasets/alphabet/alphabet.py torchsim/playground/.ipynb_checkpoints/decorator-checkpoint.py torchsim/utils/template_utils/train_test_topology_saver.py tests/core/nodes/test_dataset_simple_point_gravity.py tests/benchmarks.py tests/core/node_accessors/test_se_io_accessor.py torchsim/gui/observer_view.py torchsim/research/se_tasks/experiments/all_tasks_experiment.py torchsim/gui/server/ui_helper.py tests/core/eval2/test_experiment_component.py tests/core/nodes/test_dataset_alphabet.py tests/core/utils/test_space_engineers_connector.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/topologies/se_ta_lrf_t0.py torchsim/research/research_topics/rt_2_1_2_learning_rate2/experiments/ta_classification_accuracy_experiment.py torchsim/core/eval2/single_experiment_run.py torchsim/research/figure_viewer.py torchsim/significant_nodes/space_engineers_env.py tests/test_multiple_inheritance.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/adapters/task0_adapter_base.py torchsim/core/utils/inverse_projection_utils.py torchsim/topologies/grid_world_topology.py torchsim/core/eval2/run_measurement.py tests/core/nodes/test_random_noise_node.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/experiments/gl_basic_experiment.py torchsim/research/experiment_templates/sp_learning_convergence_template.py torchsim/core/graph/node_group.py tests/utils/test_os_utils.py torchsim/core/models/spatial_pooler/spatial_pooler.py torchsim/topologies/lrf_object_detection_topology.py torchsim/core/graph/id_generator.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/modular/classification_accuracy_modular_adapter.py tests/core/nodes/test_dataset_mnist_node.py torchsim/core/nodes/focus_node.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/topologies/l3_conv_topology.py tests/core/eval/test_doc_generator.py torchsim/core/physics_model/physics_model.py torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/experiments/LRF_1SPFlock_se_nav.py tests/core/models/temporal_pooler/test_conv_tp.py torchsim/topologies/toyarch_groups/ncm_group_base.py torchsim/research/research_topics/rt_2_1_2_learning_rate/node_groups/ta_multilayer_node_group.py torchsim/core/eval/metrics/comparison_matrix.py torchsim/core/datasets/mnist.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/modular/model_classification_adapter_base.py torchsim/topologies/SampleCollectionTopology.py torchsim/core/actions.py torchsim/core/eval2/experiment_runner_params.py torchsim/core/memory/on_device.py torchsim/core/nodes/visited_area_node.py tests/core/graph/test_unit.py torchsim/core/nodes/internals/learning_switchable.py tests/core/nodes/test_join_node.py tests/core/eval2/test_run_topology.py torchsim/research/research_topics/rt_3_1_lr_subfields/conv_subfield_layer.py tests/test_circular_import_b.py tests/core/utils/test_inverse_projection_utils.py torchsim/core/nodes/fork_node.py tests/research/rt_3_7_1/test_layer_wise_stats.py 
torchsim/core/eval/metrics/abstract_classifier.py torchsim/research/se_tasks2/se_train_test_component.py torchsim/core/graph/slots.py tests/gui/test_validators.py tests/core/eval/test_entropy.py torchsim/core/nodes/mse_node.py torchsim/topologies/receptive_field_topology.py tests/topologies/test_topologies.py torchsim/topologies/disentangled_world_node_topology.py torchsim/research/experiment_templates/task0_train_test_learning_rate_template.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/topologies/specialist_node_group.py torchsim/core/nodes/bottom_up_attention_group.py torchsim/core/test_optimizations.py torchsim/core/nodes/conv_spatial_pooler_node.py torchsim/research/research_topics/rt_3_1_lr_subfields/node_groups/SFCN_C1_R1.py tests/research/rt_2_1_2/test_experiment_tamplate_base.py torchsim/core/nodes/receptive_field_reverse_node.py tests/core/node_accessors/test_random_number_node_accessor.py tests/core/models/spatial_pooler/test_learning.py torchsim/research/se_tasks/adapters/task_stats_adapter.py torchsim/core/nodes/squeeze_node.py torchsim/core/nodes/flock_networks/neural_network_flock.py torchsim/gui/observer_system_browser.py torchsim/research/research_topics/rt_3_2_1_symbolic_input_words/templates/symbolic_input_template.py torchsim/utils/seed_utils.py torchsim/research/research_topics/rt_4_3_1_gradual_world/experiments/gl_nn_world_experiment.py tests/core/datasets/test_SE_dataset.py tests/core/nodes/test_action_monitor_node.py tests/core/gui/observers/test_cluster_observer.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/wide_ta_learning_rate_adapter.py torchsim/gui/ui_utils.py torchsim/research/research_topics/rt_4_3_1_gradual_world/experiments/gl_nn_world_experiment_template.py tests/research/rt_2_1_2/test_multiple_layer_params.py torchsim/core/eval/testable_measurement_manager.py torchsim/core/eval2/experiment.py tests/core/models/temporal_pooler/test_learn_process.py torchsim/significant_nodes/__init__.py torchsim/research/se_tasks/topologies/se_task0_narrow_hierarchy.py tests/core/nodes/test_expert_flock_node.py torchsim/gui/server/ui_api.py torchsim/research/research_topics/rt_3_1_lr_subfields/random_subfield_node.py tests/core/nodes/test_convSpatialPoolerFlockNode.py tests/core/test_seed_utils.py torchsim/core/nodes/internals/actions_observable.py torchsim/research/research_topics/rt_4_1_1_gradual_learning_basic/topologies/sp_format_context_node_group.py torchsim/core/eval/doc_generator/matrix.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/experiments/se_dataset_sp_running_stats_experiment.py tests/core/nodes/test_nnet_node.py torchsim/core/nodes/actions_monitor_node.py torchsim/core/models/temporal_pooler/__init__.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/topologies/conv_wide_two_layer_topology.py torchsim/core/logging/ui/__init__.py torchsim/research/research_topics/rt_2_1_2_learning_rate/node_groups/se_node_group.py torchsim/core/nodes/dataset_phased_se_objects_task_node.py torchsim/core/nodes/disentangled_world_node.py torchsim/core/models/flock/flock_utils.py torchsim/core/models/receptive_field/mapping.py torchsim/core/persistence/saver.py tests/core/nodes/node_unit_test_base.py torchsim/core/eval/doc_generator/element.py torchsim/core/nodes/join_node.py torchsim/core/__init__.py torchsim/research/research_topics/rt_2_1_1_relearning/topologies/task0_basic_topology_phased.py torchsim/utils/cache_utils.py tests/core/eval/run_measurement_tests.py 
tests/core/nodes/test_inverse_projection.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/classification_accuracy_adapter.py torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/adapters/LRF_1SPFlock_SE_NAV.py torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/adapters/LRF_1SPFlock_MNIST.py torchsim/research/experiment_templates/task0_online_learning_template.py torchsim/core/eval/metrics/cluster_agreement.py torchsim/core/nodes/dataset_simple_point_gravity_node.py torchsim/research/research_topics/rt_4_2_1_actions/experiments/goal_directed_experiment.py tests/core/nodes/test_visited_area_node.py torchsim/research/experiment_templates2/task0_ta_analysis_template.py torchsim/core/graph/topology.py torchsim/core/models/temporal_pooler/buffer.py torchsim/research/experiment_templates/task0_train_test_classification_acc_template.py torchsim/topologies/sequence_topology.py torchsim/gui/observers/plot_observers.py torchsim/topologies/mse_demo_topology.py torchsim/core/nodes/random_noise_node.py torchsim/core/utils/singals.py torchsim/core/models/flock/buffer.py torchsim/gui/observer_system_void.py torchsim/core/nodes/to_one_hot_node.py torchsim/core/nodes/dataset_sequence_mnist_node.py tests/core/nodes/test_multilayer_perceptron_flock.py torchsim/research/research_topics/rt_2_1_2_learning_rate/adapters/ta_classification_accuracy_adapter.py torchsim/core/model.py torchsim/research/research_topics/rt_1_1_2_one_expert_lrf/topologies/se_nav_lrf_topology.py tests/profiling.py torchsim/core/utils/image_processing_utilities.py torchsim/core/nodes/unsqueeze_node.py tests/core/models/temporal_pooler/test_forward_process.py tests/core/models/flock/test_expert_flock.py torchsim/gui/observables.py torchsim/research/se_tasks/topologies/se_io/se_io_task0.py torchsim/core/nodes/images_dataset_node.py torchsim/research/experiment_templates2/task0_train_test_classification_acc_template.py tests/test_circular_import_a.py torchsim/core/nodes/network_flock_node.py torchsim/topologies/SeDatasetObjectsTopology.py tests/core/eval2/test_measurement_manager_new.py tests/core/graph/test_node_group.py torchsim/core/models/spatial_pooler/buffer.py torchsim/topologies/expert_topology.py torchsim/topologies/gl_nn_topology.py torchsim/core/nodes/expand_node.py tests/core/test_global_settings.py tests/testing_utils.py torchsim/research/research_topics/rt_2_1_3_conv_temporal_compression/topologies/watch_l_1_topology.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/experiments/se_dataset_ta_running_stats_experiment.py torchsim/research/se_tasks/topologies/se_io/se_io_general.py torchsim/core/nodes/eval_nodes/four_points.py torchsim/__init__.py tests/core/test_tensor_utils.py torchsim/core/datasets/dataset_se_base.py torchsim/research/research_topics/rt_2_1_2_learning_rate/experiments/learning_rate_experiment.py tests/research/rt_1_1_3/test_rt0_dataset.py torchsim/research/se_tasks/topologies/se_task1_basic_topology.py torchsim/research/se_tasks/experiments/task_1_experiment_template.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/experiments/task_0_conv_experiments.py torchsim/research/se_tasks/topologies/task0_base_topology.py tests/core/nodes/test_motion_detection_node.py torchsim/core/models/temporal_pooler/kernels/__init__.py torchsim/core/nodes/temporal_pooler_node.py torchsim/core/nodes/flatten_node.py torchsim/utils/space_engineers_connector.py tests/core/nodes/test_multi_dataset_alphabet.py torchsim/topologies/ta_actions_grid_world_topology.py 
torchsim/core/eval/node_accessors/flock_node_accessor.py tests/core/nodes/test_focusNode.py torchsim/core/eval2/basic_experiment_template.py torchsim/research/se_tasks/adapters/task_0_stats_basic_adapter.py tests/research/se_tasks/test_se_task_topologies.py tests/core/models/spatial_pooler/test_forward.py torchsim/research/research_topics/rt_2_1_2_learning_rate/topologies/learning_rate_topology.py torchsim/core/persistence/loader.py torchsim/core/models/flock/process.py torchsim/research/research_topics/rt_2_1_2_learning_rate/utils/cluster_agreement_measurement.py torchsim/core/nodes/scatter_node.py torchsim/topologies/random_number_topology.py torchsim/core/logging/log_observable.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/experiments/task_0_conv_more_labels.py torchsim/core/models/temporal_pooler/process.py torchsim/core/models/temporal_pooler/temporal_pooler.py torchsim/core/nodes/dataset_se_navigation_node.py torchsim/core/nodes/spatial_pooler_node.py tests/research/rt_1_1_2/test_rt2.py torchsim/research/se_tasks/topologies/se_io/se_io_base.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/experiments/experiment_template_params.py torchsim/utils/list_utils.py torchsim/research/research_topics/rt_3_6_1_inductive_bias_attention/experiments/ta_attention_classification_experiment.py torchsim/core/global_settings.py tests/core/nodes/test_se_dataset_node.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/adapters/task0_narrow_adapter.py torchsim/core/nodes/grayscale_node.py torchsim/research/research_topics/rt_1_1_3_space_benchmarks/topologies/se_dataset_sp_lrf.py torchsim/research/se_tasks/topologies/se_task0_convolutionalSP_topology.py tests/core/eval/test_series_plotter.py torchsim/core/datasets/convert_dataset.py tests/core/nodes/test_grid_world.py torchsim/core/exceptions.py torchsim/core/datasets/space_divisor.py tests/core/node_accessors/test_mnist_node_accessor.py torchsim/utils/sample_collection_overseer.py torchsim/topologies/sp_topologies.py torchsim/core/nodes/nn_node.py torchsim/topologies/mnist_topology.py torchsim/core/models/flock/kernels/__init__.py torchsim/core/nodes/switch_node.py tests/core/utils/test_param_utils.py tests/core/node_accessors/test_se_dataset_navigation_accessor.py ui_server.py torchsim/core/eval/doc_generator/heading.py torchsim/research/research_topics/rt_2_1_2_learning_rate2/topologies/classification_accuracy_modular_topology.py tests/core/nodes/test_network_flock_node.py torchsim/research/research_topics/rt_1_1_4_task0_experiments/experiments/task_0_experiments.py tests/conftest.py torchsim/gui/server/server.py torchsim/utils/param_utils.py torchsim/core/eval/metrics/sp_convergence_metrics.py torchsim/core/eval/experiment_template_base.py torchsim/research/research_topics/rt_1_1_1_one_expert_sp/topologies/se_dataset_sp_topology.py torchsim/topologies/sequence_mnist_topology.py torchsim/research/research_topics/rt_3_7_1_task0_analysis/node_groups/ta_multilayer_classification_group.py torchsim/core/models/receptive_field/reverse_mapping.py torchsim/core/nodes/random_number_node.py torchsim/research/se_tasks/topologies/se_task0_topology.py tests/core/nodes/test_pass_node.py torchsim/core/eval/metrics/mutual_information_metric.py run_topology_factory create_observer_system run_experiment run_experiment_with_ui run_topology_factory_with_ui create_non_persisting_observer_system add_test_args garbage_collect parse_test_args observer_system_context filter_params run_topology run_topology_with_ui run_just_model 
_create_basic_run_manager _run_in_ui bench_block_size bench main main_benchmarking_1flock cpu_copy_benchmark create_flock bench_2gpu configure_bench_flock test_indexing test_indexing_2 pytest_addoption pytest_collection_modifyitems test_compute_squared_distances test_gather_from_dim test_low_intesity_lots_of_calls discover_child_classes measure_time discover_main_topology_classes is_abstract remove_skipped_classes remove_abstract_classes copy_or_fill_tensor get_subclasses_recursive test_flaky TestCircularImport Circular_A Circular_B test_main_expert_flock_profiling_py test_main_py A B D test_base_class_method_is_called_only_once C E Base Grandson test_remove_abstract_class AbstractClass test_recursive_subclass_search AbstractClassChild Subclass Base run_task TestUiExperimentRunner TopologyStub test_constructor_raises_exception test_instance_read_access test_instance_write_access test_is_singleton test_no_direct_access test_global_seeds test_generate_seed test_move_probabilities_towards_50 test_gather_from_dim test_normalize_probabilities test_clamp_tensor test_negate test_move_probabilities_towards_50_inplace_vs_not test_same test_kl_divergence test_safe_id_to_one_hot test_view_dim_as_dims test_move_probabilities_towards_50_ test_id_to_one_hot test_load_all test_get_filtered_sum_all test_get_filtered_0 test_get_filtered_sum_first_three test_multiple test_seq_node test_range_seq test_seq test_t1_24 test_t1_32 test_t1_64 test_t1_256 test_landmarks get_data test_t0_available_sizes test_t1_128 get_pos test_t0_unavailable_sizes test_one_hot TestAlphabetGenerator test_overflow test_trivial_training_task_hidden_layer test_forward_pass generate_data tensors_equal test_svm_classifier test_metric_works_on_ids test_metric_works_on_ids_different_than_no_classes test_metric_works test_metric_does_not_work_when_should_not test_metric_works_on_ids_with_hidden_layer test_high_dimensional_input test_trivial_training_task_no_hidden_layer test_sampler test_metric_does_not_work_on_ids_when_should_not test_get_item test_get_step test_items_list test_wrong_add_catch test_iterator test_get_count test_names test_dict test_classifier_interface test_phases test_input_type_ids test_cluster_agreement test_cluster_mutual_information test_comparison_matrix test_classification_matrix test_doc_creation test_one_hot_entropy to_array_of_lists test_plot_multiple_runs different_plots create_baseline test_remove_from_dict test_dict_intersection DataclassStub DataClassTestItem test_dict_with_defaults test_dataclasses_to_dict TwoDictsTestItem test_train_test_experiment test_experiment_controller_should_end test_experiment_controller_chain ComponentStub TemplateStub TrainTestTopologyStub TopologyStub ExperimentComponentStub ExperimentTemplateStub ExperimentTemplateMeasuringStub TopologyFactoryStub TopologyStub ExperimentComponentMeasuringStub test_basic_experiment_template_run test_experiment_runner test_measurement_functions get_something get_odd test_measurement_manager test_parameters TestItem TestBasicExperimentTemplate TopologyStub NodeGroupStubBase NodeGroupStubInputs ScaffoldingGraphStub NodeGroupStubOutputs test_scaffolding NodeGroupStub RandomNodeOutputs RandomNodeStub RandomUnitStub StubMemoryBlocks StubInputs NodeStub test_disconnecting test_connecting _create_graph StubOutputs test_is_initialized test_graph_node_group_ordering test_graph_save_load create_graph test_inversion_cycles _create_graph DummyNode InfiniteLoopException test_owner test_tensor_reference test_add_interpretation DummyNode 
test_no_node_base_subclass_overrides_get_observables test_validation import_submodules test_save_load test_skip_execution test_node_group_no_data_on_output test_node_group_inverse_projection_fork_inside test_node_group_pass_through test_node_group_empty test_node_group_pass_through_node test_node_group_inverse_projection_pass_through create_pass_through_node test_node_id_generation test_node_group_inner_source create_source_node test_node_ordering_low_priority_cycle NodeStub test_node_ordering_join_graph test_node_ordering_low_priority_mini test_node_ordering_fork_graph tricky_bad_cycle test_node_ordering_group test_node_ordering_full_cycle _create_graph test_node_ordering_linear_graph TAState ComparableTensor test_save_load TestPcaTransformer TestClusterUtils ProcessStub test_process_observer_init TestObserverPersistence ObservableStub TestObserverPersistencePropsAreStoredOnValueChange g MyTensorObservable r gr pad TestTensorObservable nan TestTensorViewProjection OnDeviceStub create_on_device test_to_ test_pin test_recurse test_failing_copy_ move_to_and_check StubBase test_copy_ test_save_load _full _zeros _ones_single_dim _tensor_from_data _eye_default _ones _eye_non_default _empty _arange_divisible _zeros_single_dim _cat_dim_1 _cat_dim_0 test_creator_methods _arange_non_divisible TestTensorSurrogate calculate_expected_results check_integration_results randomize_subflock test_max_new_seqs_default_autocalculate test_max_new_seqs_modified_autocalculate test_max_new_seqs_overridden test_buffer_batch_storing test_buffer_set_stored test_buffer_get_stored test_batch_sampling test_reorder test_next_n_steps test_bad_batch_size test_next_step test_buffer_force_cpu test_buffer_index_storing test_buffer_storing test_buffer_index_sampling create_buffer test_buffer_mask_creation test_run prepare_flock_for_context test_run_flock_without_context_and_rewards test_context_creation test_whole_flock_default_vs_nondefault_stream create_flock test_whole_flock_flock_sizes test_integrate test_extra_buffers DummyProc init_process DummyBuffer test_extra_tensors TestGrids TestMapping TestReverseMapping check_values_close TestSPFlockBuffer TestSPFlockForward TestSPFlockLearning TestSPProcess SPProcessStub get_subflock_integration_testing_flock get_subflock_creation_testing_flock create_execution_counter TestSPFlock TestConvSPFlock create_tp_buffer create_tp_flock_learn_process test_forward_learn_disambiguating_context test_convolutional_aspect test_update_knowledge_new_seqs test_combine_flocks test_pad_and_combine_tensor create_tp_buffer create_tp_flock_forward_process test_compute_seq_likelihoods_priors_clusters_context test_compute_rewards2 test_exploration test_compute_rewards_closest_reward test_compute_seq_likelihoods_priors_clusters test_compute_rewards test_compute_predicted_clusters test_compute_seq_probs_without_priors test_forward_passive test_compute_provider_informativeness test_compute_seq_likelihoods_for_each_provider test_apply_output_projection test_calculate_predicted_clusters test_forward_passive_vs_active test_disambiguating_provider_informativeness setup_forward_passive_vs_active test_compute_seq_likelihoods test_compute_rewards_with_scaled_likelihoods_and_expert_rewards get_subflock_integration_testing_flock test_forward_subflock_integration test_learning_subflock_integration create_tp_buffer test_learn create_tp_flock_learn_process test_learn_from_batch test_identify_new_seqs test_sort_all_encountered test_update_knowledge_new_seqs test_extract_info_new_seqs test_subbatch test_forget 
test_extract_frequent_seqs test_erase_unseen_seqs test_erase_unseen_seqs_real_data test_update_knowledge_known_seqs test_extract_info_known_seqs test_trained_untrained test_determine_learning TestTPFlock test_forward_learn_enable_learning test_forward_learn_no_context test_forward_learn_reward test_forward_learn_non_default_stream test_forward_learn_context create_tp_flock TestTPOutputProjection create_tp_flock_untrained_forward_process test_forward_passive TestGrayscaleNode AnyResult NodeTestBase TestAccuracyNode test_action_monitor_node TestConstantNode TestConvSpatialPoolerFlockNode TestDatasetAlphabetUnit TestDatasetAlphabetNode test_allowed_labels do_some_ugly_cpu_random_stuff test_sequence_mnist_node_works test_seed_independence test_allowed_labels_more_sequences collect_labels add_tensor_if_not_there get_dataset test_examples_per_class labels_in_filter test_class_filter test_mnist_node_determinism collect_allowed_labels test_sequence_generator_deterministic_nondeterministic put_tensors_in_dictionary test_deterministic_samples_per_class_choice labels_contained_in_params sequences_equal collect_data test_sequence_transition test_sequence_iteration TestDatasetSimplePointGravityUnit test_expansion test_inverse_projection TestInputContextScalarPos SerializationTestBase TestInputPair ConvInputCombinationsBase TestInputContextPair TestInputScalarNeg ConvSerializationTestBase TestConvInputFull TestInputScalarPos TestInputNone TestConvInputContextNone TestInputFull TestInputContextNone TestConvInputContextFull TestConvInputNone TestLearningSerializationSP TestConvLearningSerializationTP TestLearningSerializationBoth TestExpertFlockNode TestLearningSerializationTP TestInputContextFull RewardInputType TestInputContextScalarNeg InputCombinationsBase TestConvLearningSerializationBoth TestConvLearningSerializationSP TestFlattenNode3 TestFlattenNode0 TestFlattenNode1 TestFlattenNode5 TestFlattenNode4 TestFlattenNode2 TestFocusNode3d TestFocusNodeTrim TestFocusNode TestFork _test_fork_inverse test_fork_node_inverse_0 test_fork_inverse_dim_0 test_fork_inverse_dim_1 TestGridWorld TestImagesDatasetNode test_inverse_fork_join TestJoinFlatten test_join_inverse_dim_1 test_join_node_inverse_flatten test_join_inverse_dim_0 test_join_node_inverse_0 _test_join_inverse TestJoin test_lambda_node TestMotionDetectionNodeGrayScale TestMotionDetectionNode TestMotionDetectionNodeThresholded TestMseNode test_mse_multiple_steps prepare_inputs_and_targets test_ta_neural_nets test_coefficients TestMultiDatasetAlphabetUnit TestMultiDatasetAlphabetNode TestNetworkFlockNodeDelaysCorrect TestNetworkFlockNodeDelaysCorrectCpu make_data_loaders DataCheckingDataLoader _push TestNetworkFlockNodeChangingLrHasEffect test_one_step_tensor_delay_buffer test_net_deterministic_inference test_nnet_module_equality run_num_steps are_complete_nodes_identical test_net_serialization _random_label test_params_equality_implementation make_nnet NNetMockTopology are_layers_same test_observation_storage_equality_implementation are_nnet_modules_same _random_image create_network_inputs test_pass_node TestPeriodicUpdateNodeGroup ExamplePeriodicUpdateGroup test_random_noise_unit TestRandomNoise test_node_accessor_and_determinism_seed test_node_accessor_and_determinism generate_and_validate_sequence TestRandomNumberNode TestRandomSubfieldForkNode TestReceptiveFieldNodeDirect TestReceptiveFieldUnit TestReceptiveFieldNode TestSalientRegionNode TestFixedSizeSalientRegionNode test_actions_parser_node TestScatterNode test_dataset_determinism 
test_one_hot_landmark_id sequences_equal collect_data DatasetSE compare_sequences test_test_cycles SeDatasetObjectsTopologyTest run_testing_phase test_train_cycles get_common_params test_topology_train_save_test_load_train SeDatasetObjectsTopologyTestAdapter test_save_gpu make_n_steps test_train_test_position_persistence train_pos test_class_filter_and_data_sizes test_train_test_position_reset collect_data TestSimpleBouncingBallNode test_inverse_projection TestSwitchIndexOnInput TestSwitch TestSwitchIndexOnInputVector TestToOneHotNode TestUnsqueezeNode1 TestUnsqueezeNode0 TestVisitedAreaNode TestWeightedAvgNodeWithInput TestWeightedAvgNode TestWeightedAvgNodeWithInputUnused test_node_accessor test_rnd_node_accessor_return_type test_dataset_accessor_return_type test_correct_dimensions EmptyTopology test_sp_flock_node_accessor_types_and_dimensions test_same_list test_flatten_list TestInverseProjectionUtils TestTensorShapePatternMatcher Group TestRecursiveGetNodes TestDeriveFlockShape test_namedtuple_to_dict_simple NestedParams Params test_namedtuple_to_dict_nested Data TestSignals TestSpaceEngineersConnector Types Data DataIsNotInitializable TestObserverPropertiesBuilder DataIsInitializable test_strip_observer_name_prefix TestValidator TestUIApi TestEventParser discover_local_topology_classes test_task_0_conv_wide_topology_step test_task_0_narrow_topology_step test_task_0_base_topology_step test_topologies_can_be_initialized instantiate_graph walk_topics test_topologies_can_run_step test_rt1_se_sp_determinism test_rt1_mnist_sp_determinism test_rt2_experiment_mnist_stability test_rt2_experiment_mnist test_rt2_experiment_se_nav test_se_dataset_ta test_se_ta test_se_dataset_sp test_extract_constructor_params MockFluffyTopology test_split_to_unique_and_changing_params make_test_params test_unpack_params test_params_to_string test_create_flock_params test_default_top_params test_group_constructor test_validate_params_for_n_layers test_empty_change test_add_common_params test_change_params test_l3_conv_topology check_value_on_diagonal check_series_around_constant check_series_similar check_layer_outputs test_layer_wise_stats Params get_task_0_topologies inheritors test_se_task0_topologies_step test_get_task_0_topologies test_se_task0_topologies_init SeNodeGroupStub SEOrDatasetGraphStub test_se_component RandomNumberTopologyTrainTestAdapter test_train_test_split test_partition_to_phases SimpleTestableExperimentTemplate test_compute_derivations test_partition_to_list_of_ids NameParams TestDataClass DataParams test_save_load_many_steps test_topologies_can_be_initialized change_topology test_save_load test_topologies_can_run_step TestResettableCache TestSimpleResettableCache test_project_root_dir AgentActionsDescriptor SpaceEngineersActionsDescriptor IllegalArgumentException TensorNotSetException FailedValidationException IllegalStateException NetworkProtocolException ShouldNotBeCalledException PrivateConstructorException BasicExperimentRunner GraphObservable SimulationMonitor RunState UiExperimentRunner ExperimentRunner RunResult FpsHelper ExperimentRunnerException CleverTimer GlobalSettings SimulationThreadRunner Model ObservableProvider PropertiesProvider small_dataset_for_tests_allowed get_float DatasetConverter SeDatasetSize DatasetSeBase DatasetResult DatasetSeTask1 DatasetResult DatasetSeTask0 DatasetResult DatasetMNIST LimitedDatasetMNIST SpaceDivisor AlphabetGenerator ExperimentTemplateBase TestableExperimentTemplateBase TaskExperimentStatistics MeasurementManager MeasurementManagerBase 
save_zip RunMeasurement load_zip to_list_list plot_with_confidence_intervals get_experiment_results_folder save_figure plot_multiple_runs_with_baselines plot_multiple_runs get_stamp to_safe_path add_fig_to_doc to_safe_name sliding_window TestableMeasurementManager TestableRunMeasurement TopologyAdapterBase TestableTopologyAdapterBase Document XmlElement Image Cell Caption Figure Row Heading Matrix AbstractClassifier cluster_agreement classification_matrix comparison_matrix one_hot_entropy mse_loss compute_mutual_information_matrix_rfs flatten_rfs reduce_to_mean center_indexes compute_mutual_information_for_phases compute_mutual_information id_to_one_hot reduce_to_center_rf ClassifierHiddenNet LossDifferenceTrainStoppingCriterion NNClassifier TrainStoppingCriterion BatchSampler compute_nn_classifier_accuracy ClassifierTrainer train_nn_classifier ClassifierNoHiddenNet train_svm_classifier SvmClassifier SvmParams compute_svm_classifier_accuracy evaluate_svm_classifier SvmParamSearchParams SvmModel average_sp_delta average_boosting_duration num_boosted_clusters FlockNodeAccessor MnistNodeAccessor RandomNumberNodeAccessor SeIoAccessor SpatialPoolerFlockNodeAccessor BasicExperimentTemplate BasicTopologyFactory DocumentPublisher Experiment garbage_collect ExperimentRunnerException ExperimentController ExperimentComponent TrainTestMeasuringComponent TrainTestComponentParams TrainTestControllingComponent SingleExperimentRunParams ExperimentParams TopologyFactory ExperimentTemplateBase MeasurementManager CacheException MeasurementManagerParams RunMeasurementManager CacheNotFoundException ParameterExtractor save_zip SingleRunMeasurements load_zip TrainTestMeasurementPartitioning TopologyScaffoldingFactory get_defaults SingleExperimentRun SingleExperimentManager TrainTestSwitchable ConnectionNotPresentException InputAlreadyUsedException Connection Connector HierarchicalObservableNode IdGenerator InversePassOutputPacket InversePassInputPacket InvertibleNode NodeBase NodeInitializationException NodeValidationException EmptyOutputs EmptyInputs SimpleGroupInputs GenericGroupInputs NodeGroupBase SimpleGroupOutputs GenericNodeGroup NodeAlreadyPresentException GenericGroupOutputs GroupInputs NodeGroupWithInternalsBase GroupOutputs order_nodes _find_destinations _visit_node _detect_illegal_cycles _clear_ordering IllegalCycleException GroupVirtualOutputSlot SlotBase OutputSlotBase InputSlot GroupInputSlot BufferMemoryBlock NoneSlot MemoryBlock GroupOutputSlot InputsSection MemoryBlocks SlotSection Inputs MemoryBlocksSection MemoryBlockInitializationException SlotContainerBase OutputsBase NonExistentMemoryBlockException GenericInputsBase GenericMemoryBlocks InputsBase MemoryBlockSizesNotConvergingException Topology InvertibleUnit Unit WorkerNodeWithInternalsBase WorkerNodeBase InvertibleWorkerNodeWithInternalsBase InvertibleWorkerNodeBase load_kernels check_cuda_errors LogObservable UILogHandler _setup_logging _get_console_handler flush_logs _get_file_handler setup_logging_no_ui LoggerNameFormatter setup_logging_ui _recurse_f OnDevice recurse TensorSaver generate_torch_delegators get_dims TensorSurrogate TensorCreator MeasuringCreator AllocatingCreator ParamsBase ExpertParams TemporalPoolerParams SpatialPoolerParams SamplingMethod TemporalPoolerParamsProps SpatialPoolerParamsProps ExpertParamsProps VariableItemCountExpander StaticItemCountExpander Buffer CurrentValueNotStoredException Expander BufferStorage ExpertFlock ConvExpertFlock Flock get_tensor_stats memory_report round_memory dummy_printer 
get_subcontainers Process NetworkFlockBuffer Grids Stride Mapping ReverseMapping SPFlockBuffer ConvSPStoreToBuffer SPFlockForward SPFlockLearning SPProcess SPReconstruction SPFlock ConvSPFlock TPFlockBuffer TPFlockForwardAndBackward ConvTPFlockLearning TPFlockLearning TPProcess TrainedForwardProcessFactory UntrainedForwardProcessFactory TPFlock ConvTPFlock ForwardProcessFactory TPOutputProjection TPFlockUntrainedForwardAndBackward DatasetSimplePointGravityNode DatasetSimplePointGravityUnit MoveStrategy DatasetSimplePointGravityParams States DatasetSimplePointGravityOutputs MotionDetectionUnit MotionDetectionParams MotionDetectionNodeInputs MotionDetectionNodeOutputs MotionDetectionNode AccuracyInputs AccuracyNode AccuracyUnit AccuracyInternals AccuracyNodeParams AccuracyOutputs ActionMonitorInputs ActionMonitorOutputs ActionMonitor ActionMonitorNode AgentActionsParserNode BottomUpAttentionGroupOutputs BottomUpAttentionGroupInputs BottomUpAttentionGroup CartographicParams CartographicNodeInternals CartographicNode CartographicNodeUnit CartographicNodeInputs ConstantNodeOutputs ConstantNode ConstantNodeUnit ConvExpertFlockUnit ConvExpertFlockNode ConvExpertFlockInternals ConvSPFlockInternals ConvSpatialPoolerFlockNode ConvSpatialPoolerFlockUnit DatasetAlphabetNode DatasetAlphabetSequenceProbsModeParams DatasetAlphabetInternals DatasetAlphabetUnit DatasetAlphabetParams DatasetAlphabetOutputs DatasetAlphabetMode DatasetMNISTParams DatasetMNISTOutputs DatasetSequenceMNISTNodeParams DatasetMNISTNode DatasetMNISTUnit PhasedSeObjectsTaskNode PhasedSeObjectsTaskUnit PhasedSeObjectsTaskParams SeObjectsTaskPhaseParams DatasetSequenceMNISTOutputs DatasetSequenceMNISTNode DatasetSENavigationParams DatasetSeNavigationOutputs DatasetSeNavigationUnit DatasetSeNavigationNode SamplingMethod DatasetSeObjectsParams DatasetSeObjectsOutputs DatasetSeObjectsNode DatasetSeObjectsUnit DatasetSeObjectsInternals DatasetConfig DisentangledWorldRendererOutputs DisentangledWorldRendererInputs DisentangledWorldRendererUnit DisentangledWorldRendererNode DisentangledWorldNode DisentangledWorldNodeUnit DisentangledWorldNodeParams create_default_temporal_classes DisentangledWorldNodeOutputs Expand ExpandNode ExpandOutputs ExpandInputs ExpertFlockInputs ExpertTPFlockInternalsSection ExpertFlockUnit ExpertFlockOutputs ExpertSPFlockInternalsSection ExpertFlockInternals ExpertFlockNode FlattenUnit FlattenNodeInputs FlattenNode FlattenNodeOutputs create_sp_learn_observables create_observables create_tp_learn_observables create_tp_forward_observables FocusNodeInputs FocusNode FocusNodeOutputs FocusNodeParams FocusNodeUnit ForkOutputs Fork ForkNode ForkInputs InvalidParameterException GrayscaleNodeUnit GrayscaleNode GrayscaleNodeInputs GrayscaleNodeOutputs MultiGridWorld MultiGridWorldOutputs GridWorldNode MultiGridWorldInputs MultiGridWorldNode ImagesDatasetUnit ImagesDatasetNode ImagesDatasetParams ImagesDatasetOutputs DatasetLoader JoinInputs JoinOutputs JoinNode Join IncompatibleInputsException LambdaNodeUnit NOutputs LambdaNode NInputs MseNodeParams MseNode MseInputs MseOutputs MseUnit MseInternals MultiDatasetAlphabetInternals MultiDatasetAlphabetNode MultiDatasetAlphabetOutputs MultiDatasetAlphabetUnit NetworkFlockInternals NetworkFlockUnit NetworkFlockOutputs NetworkFlockNodeParams LearningEvaluator NetworkFlockNode NetworkFlockInputs NNetParams NNetNodeInputs NNetNodeUnit NNetNode NNetNodeOutputs PassNodeInputs PassNodeUnit PassNode PassNodeOutputs PeriodicUpdateNodeGroup Distribution RandomNoiseUnit RandomNoiseInputs 
RandomNoiseOutputs RandomNoiseNode RandomNoiseParams RandomNumberNode RandomNumberUnit RandomNumberNodeParams RandomNumberOutputs ReceptiveFieldInputs ReceptiveFieldUnit ReceptiveFieldNodeParams ReceptiveFieldOutputs ReceptiveFieldNode ReceptiveFieldReverseUnit ReceptiveFieldReverseInputs ReceptiveFieldReverseNode ReceptiveFieldReverseNodeParams ReceptiveFieldReverseOutputs SalientRegionOutputs SalientRegionNode SalientRegionUnit SalientRegionInputs SalientRegionParams ScatterNodeOutputs ScatterNodeUnit ScatterNodeInputs ScatterNode SequenceOutputs Sequence SequenceNode SimpleBouncingBallNodeParams SimpleBouncingBallUnit SimpleBouncingBallNode SimpleBouncingBallOutputs BallRenderer BallShapes SimpleBall SpaceEngineersConnectorInputs SpaceEngineersConnectorNode SpaceEngineersConnectorOutputs AbstractLocationObservable TaskMetadataObservable LocationObservable SpaceEngineersConnectorUnit LocationTargetObservable SPFlockOutputs SPFlockReconstructionSection SPFlockInternals SpatialPoolerFlockNode OutputShapeProvider SPFlockInternalsSection SPFlockForwardClustersSlotSection SPFlockInputsSection SPFlockInputs SPFlockOutputsSection SPMemoryBlocksSection SpatialPoolerFlockUnit SqueezeNodeUnit SqueezeNodeInputs SqueezeNodeOutputs SqueezeNode SwitchInputs SwitchNode SwitchUnit SwitchOutputs SwitchNodeParams TPFlockInternalsSection TPFlockInternals TPFlockOutputs TemporalPoolerFlockNode TPFlockOutputsSection TPFlockInputs TPMemoryBlocksSection TPFlockInputsContextAndRewardSection TPFlockInputsSection TPFlockInputsDataSection TemporalPoolerFlockUnit ToOneHotUnit ToOneHotMemoryBlocks ToOneHotNode ToOneHotOutputs ToOneHotMode ToOneHotInputs UnsqueezeNodeUnit UnsqueezeNodeInputs UnsqueezeNodeOutputs UnsqueezeNode VisitedAreaInputs VisitedAreaNode VisitedAreaParams VisitedAreaOutputs VisitedAreaUnit WeightedAvgNodeOutputs WeightedAvgNode WeightedAvgNodeParams WeightedAvgNodeInputs WeightedAvgNodeUnit FourPoints FourPointsOutputs FourPointsNode create_delay_buffer OneStepTensorDelayBuffer ZeroStepTensorDelayBuffer DelayBuffer MultiLayerPerceptron pairwise_consecutive MultilayerPerceptronFlock create_networks NeuralNetworkFlockTypes NeuralNetworkFlock OutputActivation NeuralNetworkFlockParams ActionsDescriptorProvider ActionsObservable GridWorldActionDescriptor ResetStrategy GridWorldInputs GridWorldParams GridWorldOutputs GridWorld LearningSwitchable TestingSwitcher Loader Persistable Persistor Saver DebugWorld MyDrawOptions LatentWorld Instance PymunkParams TemporalClass InstanceShape PyMunkPhysics Attribute InstanceColor RenderWorld ImageProcessingUtilities replace_cluster_ids_with_projections get_inverse_projections_for_all_clusters SequenceGenerator diagonal_transition_matrix Signal1 Signal2 SignalBase Signal0 signal SignalConnection Signal4 Signal6 Signal5 Signal3 add_small_constant_ scatter_ multi_unsqueeze clamp_tensor kl_divergence write_stats negate check_shape trace_calls gather_from_dim add_small_constant id_to_one_hot safe_id_to_one_hot weighted_sum_ weighted_sum get_name tensor_allocation_monitor move_probs_towards_50_ change_dim WrongTensorShapeException average_abs_values_to_float same detect_qualitative_difference normalize_probs_ trace_inside view_dim_as_dims one_hot_to_id move_probs_towards_50 normalize_probs memory_profiling log_torch_memory ObserverPropertiesItemType ObserverPropertiesItemSourceType enable_on_runtime ObserverPropertiesItem MatplotObservable Initializable disable_on_runtime ObserverCallbackItem ImageObservable TextObservable PropertiesObservable Observable 
EditStrategyResult ObserverPropertiesItemState ObserverPropertiesItemSelectValueItem LambdaPropertiesObservable ObserverPropertiesBuilder check_type ObserverCallbacks MemoryBlockObservable ObserverSystemBase SimpleFileObserverPersistence ObserverSystemSignals ClusterObservable ObserverSystem ObserverPersistence HierarchicalPullObserver ClusterPullObserver ImagePullObserver RegisteringPullObserverBase PropertiesPullObserver Observer ObserverSystemBrowser TextPullObserver MemoryBlockPullObserver MatplotPullObserver TextPushObserver BufferPullObserver ImagePushObserver PullObserver ObserverSystemVoid ObserverView encode_image parse_bool validate_positive_with_zero_int validate_positive_optional_int validate_list_str validate_predicate validate_float_in_range validate_positive_float _validate_predicate_automatic_message validate_positive_int validate_positive_with_zero_float validate_dimension_vs_shape validate_list_list_float_or_int validate_list_list_int validate_list_of_size BufferObserver BufferObserverData BufferObserverParams ClusterObserverData ClusterDatapointsData PcaTransformer TensorProviderSPFlock ClusterObserverProjection PcaData ClusterCentersDataBuilder TensorProvider ClusterObserverExpertFlock ClusterObserverSPFlock SpringLinesData ClusterObserverClusterSimilaritiesAlgorithm DataTransformer SplineArrowsBuilder ClusterUtils SequenceSignificanceSource PcaDataBuilder ClusterProjectionsGroupProperties SequencesBuilder FDsimDataBuilder ClusterObserver SplineArrowsData ClusterDatapointsDataBuilder SequencesData SpringLinesBuilder ClusterCentersData FDsimData TensorProviderExpertFlock ClusterObserverDataBuilderBase FlockProcessObservable HierarchicalObserver HierarchicalGroupProperties HierarchicalObservableGroupsStacking HierarchicalObservableParams HierarchicalObservableData is_valid_tensor MemoryBlockObserver CustomTensorObserver LinePlotObserver BufferedLinePlotObserver StepLinePlotObserver TensorViewProjectionUIParams update_scale_to_respect_minimum_size TensorObservableParams dummy_tensor_observable_data TensorObservableData TensorViewProjection sanitize_tensor TensorObservable UIWebSocket SocketSender run_ui_server TorchSimWebSocket make_app DataStore RootHandler setup_logging UIApi MemoryBlockParams PropertiesHolder UIHelper RequestData EventData EventDataPropertyUpdated EventParser UIServerConnector decorate decorate_field A B decorate_class get_text _load_and_view_figures Task0BaselinesStatsBasicAdapter run_t0_medium_experiment run_t0_fast_experiment run_t0_full_experiment read_max_steps run_measurement DatasetSeSimulationRunningStatsExperimentTemplate DatasetSeTaSimulationRunningStatsAdapter Lrf1SpFlockTemplate Lrf1SpFlockExperimentTemplate SeTaSimulationRunningStatsAdapter SeSimulationRunningStatsExperimentTemplate SpLearningConvergenceExperimentTemplate SpLearningConvergenceTopologyAdapter Task0OnlineLearningTemplate Task0LearningAdapterBase Task0OnlineLearningAdapterBase Task0LayerMeasurementManager Task0TrainTestClassificationAccAdapter Task0TrainTestClassificationAccTemplate TaTask0TrainTestClassificationAccAdapter Task0TrainTestLayerMeasurementManager Task0TrainTestLearningRateTemplate Task0TrainTestTemplateBase Task0TrainTestTemplateAdapterBase AbstractRelearnAdapter Task0TrainTestTemplateRelearning Task0TaAnalysisComponent Task0TaAnalysisLayerComponent Task0TaAnalysisTemplate Task0TaAnalysisParams Task0TrainTestClassificationAccParams Task0TrainTestClassificationAccTemplate Task0TrainTestClassificationAccComponent GlNnTopologyFactory run_measurement 
GradualLearningExperimentTemplateParams GradualLearningExperimentComponent GradualLearningExperimentTemplate main SeDatasetSpLearningConvergenceTopologyAdapter MnistSpLearningConvergenceTopologyAdapter run_cluster_boost_threshold run_debug_ run_learning_rate run_different_seeds run_examples_per_class read_max_steps run_batch_size run_num_cc run_measurement run_debug run_cluster_boost_threshold run_learning_rate run_learning_rate_rand run_different_seeds read_max_steps run_batch_size run_num_cc run_batch_size_longer run_measurement MnistSpTopology SEDatasetSPRepresentationTopology SeDatasetSpTopology Lrf1SpFlockMnistTemplate Lrf1SpFlockSeNavTemplate run_experiment run_experiment run_experiment LrfTopology SeNavLrfTopology SeDatasetTaRunningStatsAdapter SeTaRunningStatsAdapter run_experiment run_num_experts read_max_steps run_batch_size run_num_cc run_measurement run_tp_max_encountered_seq run_debug run_num_experts run_tp_learn_period read_max_steps run_batch_size run_num_cc run_measurement run_skip_frames run_num_experts run_skip_frames_more_experts read_max_steps run_num_cc run_measurement RgbDebugNode RgbDebugUnit RgbDebugOutputs RgbDebugInputs compute_lrf_params compute_flock_sizes setup_flock_params init_se_dataset_world_params BenchmarkLrfFlockTopology SeDatasetSpLrf SeDatasetSpLrfDebug run_debug SeDatasetTaLrf SeTaLrfT0 Task0AdapterBase Task0BasicAdapter Task0ConvWideAdapter Task0NarrowAdapter TrainTestExperimentTemplateParams ExperimentTemplateParamsBase ExperimentTemplateParams run_debug run_learning_rate run_gui run_multiplier read_max_steps run_num_cc run_num_cc_short run_measurement run_seq_len run_learning_rate run_measurement run_learning_rate_slower run_label_scale run_debug run_learning_rate read_max_steps run_batch_size run_num_cc run_num_cc_short run_measurement run_seq_len ConvWideTwoLayerTopology Task0BaseTopology Task0ConvWideTopology Task0ConvWideTopologyMoreLabels Task0NarrowTopology Task0RelearnBasicAdapter run_measurements_for_task0 extend_with_phase_information extend_with_location_filter_params extend_with_class_filter_params _extend_params_with_param extend_with_buffer_size_params read_max_steps extend_with_sampling_method_params run_measurement_task0 extend_with_cluster_center_params SeT0BasicTopologyRT211 SeT0BasicTopologyRT211Phased ClassificationAccuracyAdapterBase Task0TaMultilayerAdapter TaClassificationAccuracyAdapter WideTALearningRateAdapter ClassificationAccuracyModularAdapter LearningRateTaModularAdapter ModelClassificationAdapterBase run_more_classes_tp run_simple run_learning_rate_determine_max_steps run_debug multiple_runs_class_filter_example run_cluster_boost_threshold run_more_classes_three_layers run_learning_rate multiple_runs_lr_example run_two_layer_net_new run_more_classes run_eox_three_layers_full run_debug_II run_two_layer_net run_eox_three_layers run_measurement run_seeds run_debug run_full run_learning_rate run_class_filter run_model_seed run_batch_size run_grayscale run_measurement run_seeds_high_res run_debug run_learning_rate run_cc run_measurement_with_params run_measurement NnNodeGroup SeNodeGroup Nc1r1GroupWithAdapter MultipleLayersParams LearningRateTopology Task0NnTopology Task0TaSeTopology ClusterAgreementMeasurement run_learning_rate run_cc Params run_debug_comparison run_measurement ClassificationModelGroupInputs ClassificationModelGroup Nc1r1ClassificationGroup ClassificationAccuracyModularTopology Rt213ExperimentTemplate Rt213Adapter L1Topology L3ConvTopology L3SpConvTopology SubLrfSpConvLayer SubLrfConvLayer 
RandomSubfieldForkNode RandomSubfieldForkNodeMemoryBlocks RandomSubfieldForkNodeOutputs RandomSubfieldForkNodeUnit RandomSubfieldForkNodeInputs SCN_SC1_R1 CN_C1_R1 SFSCN_SC1_R1 SFCN_C1_R1 run_words run_measurement SpatialPoolerClusterForceSetter SymbolicInputTemplate SymbolicInputWordsTopology Params run_measurement run AttentionClassificationGroup run_good_topology run_opp run_debug_base run_rf_size good_one_layer_config_for_four_objects run_num_cc_on_top Params run_measurement run_debug_comparison Params run_measurement DummyModelGroup MultilayerModelGroup TaMultilayerClassificationGroup Task0TaAnalysisTopology run_measurement run_this_experiment KnowledgeReuseExperimentComponent plot_testing_accuracy StepCountingExperimentComponent compute_moving_average OneShotLearningExperimentComponent GradualLearningBasicTemplate GradualLearningBasicMeasuringComponent SpatialPoolerClusterForceSetter LearnForgetTestExperimentComponentBase NotForgettingExperimentComponent DatasetAlphabetNodeGroupOutputs DatasetAlphabetNodeGroupParams DatasetAlphabetNodeGroup DatasetSwitchOutputs DatasetSwitchNodeGroup DatasetSwitchNodeGroupParams FlockPartialSwitchNodeGroup FlockPartialSwitchNodeGroupParams FlockPartialSwitchInputs FlockPartialSwitchOutputs GradualLearningBasicTopologyParams GradualLearningBasicTopology NotForgettingExperimentParams GLExperimentParams SpecialistInputs SpecialistNodeGroup SpecialistOutputs SpecialistNodeGroupParams SPFormatContextOutputs SPFormatContextInputs SPFormatContextNodeGroup reward_hint run_measurement three_rooms_tiny SingleExpertGroup TwoExpertsGroup partition_runs TimeSeries multi_means_mins_maxes means_mins_maxes GoalDirectedTemplate GoalDirectedTemplateMainComponent GoalDirectedExpertGroupOutputs GoalDirectedExpertGroupBase GoalDirectedExpertGroupInputs GoalDirectedTemplateTopologyParams GoalDirectedTemplateTopology GlNnWorldFactory run_measurement GradualLearningWorldTemplate GradualLearningWorldComponent GradualLearningWorldParams create_arg_min_node create_discount_node create_delay_node create_predictions_gather_node create_dot_product_node FlockNetworkGroup GateNetworkGroup SwitchableWorldTopology SwitchableWorldGroup SwitchableEnvironmentGroup SplittedWorldOutputs GateGroupInputs PredictorGroupOutputs PredictorGroupInputs GateGroupOutputs GradualLearningTopology define_temporal_classes GlNnWorldTopology setup_demo_model define_sequence_generators run_task1_conv run_task0 run_task0_conv run_task0_convSP run_task0_narrow run_task1 Task0StatsBasicAdapter Task1StatsBasicAdapter TaskStatsAdapter read_max_steps run_measurement run_measurements_for_task0 read_max_steps run_measurement_task0 Task0ExperimentTemplate run_measurements_for_task1 read_max_steps run_measurement_task1 Task1ExperimentTemplate SeT0BasicTopology SeT0ConvSPTopology SeT0ConvTopology SeT0NarrowHierarchy SeT0TopologicalGraph Task1BasicExpertGroup Task1BasicExpertGroupOutputs Task1BasicExpertGroupInputs SeT1Bt SeT1ConvTopologicalGraph TestableTopology Task0BaseGroupInputs Task0BaseGroupOutputs Task0BaseGroup Task1BaseGroupWorldOutputs Task1BaseGroupWorld Task1BaseGroupWorldInputs Task1BaseGroupInputs Task1BaseGroupOutputs Task1BaseGroup SeIoBase SeIoGeneral SeIoTask0 SeIoTask0Dataset SeIoTask0DatasetPhased SENodeGroupProvider SETrainTestComponent SETrainTestModel BallEnvironmentParams BallEnvironment ConvLayerParams ConvLayer TaLayer LayerInputs SpConvLayer LayerOutputs create_connected_conv_layers compute_grid_size create_conv_layer CommonEnvironmentOutputs EnvironmentParamsBase EnvironmentBase 
ClassificationInputs ClassificationOutputs SeEnvironmentParams SEEnvironment SpReconstructionLayer SequenceOutputs SwitchableSequencesNodeGroup LoopingTopology BottomUpAttentionTopology BouncingBallTopology ContextTestTopology ConvExpertTopology DebugAgentTopology DisentangledWorldNodeTopology ExpertHierarchyTopology ExpertTopology GlNnTopology GlFakeGateNnTopology FakeGateNodeGroup NNGateNodeGroup to_one_hot NNPredictorNodeGroup GoalDirectedNarrowHierarchyTopology GoalDirectedTopology GateOutputs PredictorOutputs PredictorInputs GateInputs GradualLearningTopology GridWorldTopology LrfObjectDetectionTopology MnistTopology MseDemoTopology MultiDatasetAlphabetTopology NetworkFlockTopology NNet NNetTopology RandomNoiseTopology RandomNoiseOnlyTopology RandomNumberTopology ReceptiveFieldTopology SEDatasetSampleCollectionTopology SeDatasetObjectsTopology SequenceMnistTopology SequenceTopology SeToyArchDebugTopology SingleExpertExperimentalTopology SpatialPoolerTopology SpatialPoolerHierarchy ConvSpatialPoolerTopology SpatialTemporalPoolerTopology SwitchTopology SymbolicInputTopology Task0TaBottomUpClassificationTopology TaActionsGridWorldTopology TaExplorationGridWorldTopology ActionTaskOutputs NCMR1Group NCMGroup ClassificationTaskOutputs NCMGroupBase NCMGroupInputs ClassificationTaskInputs R1NCMGroup parse_vars BaselinesParams ObservationStorage output_size SimpleResettableCache ResettableCache dict_with_defaults to_nested_dict get_dict_intersection NestedDictException remove_from_dict flatten_list dim_prod same_lists recursive_get_nodes TensorShapePatternMatcher TestMemoryBlocks derive_flock_shape create_dir last_exception_as_html create_temp_dir project_root_dir Point2D Size2D SampleCollectionOverseer get_rand_generator_or_set_cuda_seed set_global_seeds _set_cuda_seed _set_cuda_deterministic generate_seed MouseButton SpaceEngineersConnector SpaceEngineersConnectorConfig compute_se_classification_accuracy compute_nn_classifier_accuracy_in_phases is_containing_nans _partition_tensor_to_ids _compute_classification_accuracy compute_mse_from do_compute_nn_classifier_accuracy compute_label_reconstruction_accuracies compute_classification_accuracy argmax_tensor list_int_to_long_tensors compute_derivations _collect_answers_for partition_to_list_of_ids compute_mse_values argmax_list_list_tensors PersistableSaver LogObservable create_observer_system create_non_persisting_observer_system setup_logging_ui stop run_topology add_test_args ArgumentParser parse_args add_argument empty_cache collect MeasurementManager BasicExperimentTemplate ExperimentParams BasicTopologyFactory create_run init_run wait UiExperimentRunner start run_topology_factory run_topology_factory_with_ui BasicExperimentRunner create_run init_run wait start _create_basic_run_manager _create_basic_run_manager _run_in_ui setup_logging_no_ui BasicExperimentRunner run time print synchronize copy_ range bench_block_size print add_argument eval ArgumentParser input random_seed parse_args temporal spatial print ExpertParams seq_length max time print synchronize memory_allocated rand create_flock Stream numel element_size pin copy_to append to_ range max_memory_allocated BALANCED memory_report create_flock rand scatter_ run_just_sp max run memory_allocated numel expand append context_size to range n_providers synchronize nonzero configure_bench_flock max_memory_allocated time print element_size len time max_memory_allocated print synchronize memory_allocated rand clone element_size numel scatter_ nonzero run to configure_bench_flock max range 
append len print synchronize to clock print synchronize to clock addoption skip add_marker get_float microseconds print rand now func seconds range get_float rand full get_float set_global_seeds squeeze rand empty glob __file__ remove_skipped_classes import_module dirname remove_abstract_classes get_subclasses_recursive discover_child_classes random fill_ Tensor copy_ isinstance import_module import_module E add get_subclasses_recursive remove_abstract_classes start Thread instance rand set_global_seeds generate_seed get_float tensor move_probs_towards_50_ get_float tensor move_probs_towards_50 get_float rand move_probs_towards_50_ move_probs_towards_50 normalize_probs zeros ones ones zeros Tensor tensor gather_from_dim get_float tensor id_to_one_hot get_float tensor safe_id_to_one_hot get_float tensor AllocatingCreator clamp_tensor get_float tensor kl_divergence zeros zeros view_dim_as_dims size get_all small_dataset_for_tests_allowed small_dataset_for_tests_allowed get_filtered small_dataset_for_tests_allowed sum size sum from_list list range from_list step _prepare_unit SequenceNode from_list AllocatingCreator matrix from_multiple zip get_all get_all get_all get_all get_all get_all tensor view SpaceDivisor get_data get_landmarks get_pos range SpaceDivisor get_data get_landmarks get_landmark get_pos max range sum ClassifierHiddenNet rand item to forward range unsqueeze rand long scatter_add_ ClassifierHiddenNet generate_data ClassifierTrainer train compute_accuracy generate_data ClassifierTrainer train compute_accuracy ClassifierNoHiddenNet generate_data compute_nn_classifier_accuracy generate_data rand print compute_nn_classifier_accuracy generate_data clone compute_nn_classifier_accuracy generate_data compute_nn_classifier_accuracy long compute_nn_classifier_accuracy generate_data clone compute_nn_classifier_accuracy scatter_add_ print rand unsqueeze compute_nn_classifier_accuracy long print shape range len generate_data tensors_equal sample_to_batch BatchSampler range float sum tensor compute_svm_classifier_accuracy get_item_names RunMeasurement add RunMeasurement add RunMeasurement add RunMeasurement add RunMeasurement get_step_item_dict add RunMeasurement add RunMeasurement get_items add RunMeasurement add evaluate LongTensor classifier_class Tensor train range classifier_class train range evaluate classifier_class train_and_evaluate_in_phases tensor compute_mutual_information_matrix_rfs reduce_to_mean zeros range reduce_to_center_rf range comparison_matrix len classification_matrix range LongTensor len Matrix rand add zeros range list plot_multiple_runs_with_baselines plot_multiple_runs show to_array_of_lists close create_baseline different_plots ExperimentController after_topology_step before_topology_step calculate_run_results ComponentStub register ComponentStub register ExperimentController MeasurementManager BasicExperimentRunner create_run SingleExperimentManager SingleExperimentRunParams partition_to_testing_phases TemplateStub TrainTestComponentParams BasicTopologyFactory TrainTestMeasurementPartitioning partition_to_training_phases run ExperimentTemplateStub TopologyFactoryStub BasicExperimentRunner Experiment results_path ExperimentParams run ExperimentTemplateMeasuringStub TopologyFactoryStub BasicExperimentRunner Experiment ExperimentParams run add_measurement_f measurements add_measurement_f_once RunMeasurementManager step range MeasurementManager add_results add MeasurementManagerParams create_new_run extract parameters ParameterExtractor default_parameters 
TopologyScaffoldingFactory create_topology NodeStub output1 input1 connect _create_graph input1 _create_graph disconnect_input RandomNodeStub Topology add_node step create_graph range random_ order_nodes Topology connect output NodeStub GenericNodeGroup input add_node NodeStub prepare Topology add_node zip InversePassOutputPacket output1 recursive_inverse_projection_from_output _create_graph zeros MemoryBlock DummyNode MemoryBlock DummyNode random_ zeros reshape_tensor MemoryBlock DummyNode update isinstance walk_packages import_module __path__ __name__ RandomNodeStub Topology clone tensor step add_node allocate_memory_blocks AllocatingCreator RandomNodeStub prepare ValidationNodeStub Topology add_node import_submodules nodes __subclasses__ IdGenerator NodeStub GenericNodeGroup _assign_ids_to_nodes add_node create_generic_node_group Topology create_source_node connect step add_node create_generic_node_group Topology connect output create_pass_through_node input step add_node create_source_node create_generic_node_group Topology create_source_node connect output create_pass_through_node input step add_node create_generic_node_group Topology create_source_node connect output create_pass_through_node add_node create_generic_node_group Topology create_source_node connect create_pass_through_node input step add_node get_float create_generic_node_group Topology rand connect output ConstantNode JoinNode InversePassOutputPacket recursive_inverse_projection_from_output input ForkNode step add_node get_float create_generic_node_group Topology rand connect output ConstantNode JoinNode InversePassOutputPacket recursive_inverse_projection_from_output input ForkNode step add_node order_nodes _create_graph order_nodes _create_graph order_nodes _create_graph order_nodes _create_graph order_nodes _create_graph _create_graph _create_graph order_nodes output connect NodeStub GenericNodeGroup input RandomUnitStub get_tensor tensor FlockProcessObservable recurse create_on_device remove to_ create_on_device move_to_and_check create_on_device pin create_on_device copy_to to_ tensor rand_like tensor create_on_device rand_like to_ create_on_device random_ MeasuringCreator AllocatingCreator create_func chain random_ append range enumerate zip ExpertParams seq_length batch_size ExpertParams seq_length batch_size ExpertParams _create_storage AllocatingCreator Buffer get_stored_data uniform_ tensor create_buffer set_flock_indices rand set_flock_indices tensor create_buffer set_stored_data tensor full create_buffer store tensor _detect_any_difference create_buffer compare_with_last_data zeros _sample_batch create_buffer randn size stack _sample_batch zeros tensor full create_buffer cat rand clone tensor store create_buffer set_flock_indices zeros rand clone stack _sample_batch tensor create_buffer set_flock_indices tensor create_buffer tensor create_buffer get_float tensor test_reorder_with_buffer_indices tensor full create_buffer store_batch sample_contiguous_batch Buffer ones to numel type _detect_any_difference compare_with_last_data _create_storage tensor store AllocatingCreator full zeros set_flock_indices ExpertFlock ExpertParams AllocatingCreator get_float flock_size view n_cluster_centers prepare_flock_for_context _assemble_output_context uniform_ float full get_float input_rewards input_context rand prepare_flock_for_context zeros run get_float rand clone copy_ prepare_flock_for_context full range run get_float synchronize rand create_flock copy_to range run get_float rand create_flock range run DummyProc 
init_process rand arange _read_write arange _get_buffer init_process AllocatingCreator DummyBuffer integrate fill_ init_process _read tensor _read_write full get_float get_float uint8 rand choice ExpertParams type int64 SPFlock tensor AllocatingCreator get_float uint8 flock_size sorted buffer_size rand dims size choice type unsqueeze SPFlock randint AllocatingCreator create_tp_buffer get_float unsqueeze zeros full get_float create_tp_flock_learn_process _update_knowledge_new_seqs _all_encountered_seqs clone expand _all_encountered_seq_occurrences tensor full _exploration_attempts_prior get_float temporal forward_learn ExpertParams ConvTPFlock _forward tensor range get_float create_tp_flock_learn_process view pad_and_combine_tensor cat get_float temporal forward_learn ExpertParams ConvTPFlock _forward zip tensor range _combine_flocks get_float create_tp_flock_learn_process view rand create_tp_buffer get_float unsqueeze zeros empty create_tp_buffer get_float normalize_probs_ create_tp_flock_forward_process fill_ run_and_integrate move_probs_towards_50_ add_small_constant_ tensor full stored_data create_tp_buffer get_float normalize_probs_ create_tp_flock_forward_process fill_ move_probs_towards_50_ add_small_constant_ tensor full stored_data get_float normalize_probs_ run_and_integrate move_probs_towards_50_ unsqueeze tensor setup_forward_passive_vs_active create_tp_buffer get_float normalize_probs_ create_tp_flock_forward_process fill_ check_cuda_errors move_probs_towards_50_ cluster_history add_small_constant_ unsqueeze _compute_seq_likelihoods context_history tensor full zeros stored_data create_tp_buffer get_float create_tp_flock_forward_process _compute_seq_likelihoods_priors_clusters tensor zeros create_tp_buffer get_float create_tp_flock_forward_process tensor _compute_seq_likelihoods_for_each_provider zeros create_tp_buffer get_float create_tp_flock_forward_process _compute_provider_informativeness tensor _compute_seq_likelihoods_for_each_provider zeros create_tp_buffer get_float create_tp_flock_forward_process _compute_provider_informativeness zeros tensor create_tp_buffer get_float create_tp_flock_forward_process _compute_seq_likelihoods_priors_clusters_context zeros tensor get_float tensor _compute_seq_probs_without_priors full create_tp_buffer get_float create_tp_flock_forward_process _seq_likelihoods tensor full _apply_output_projection get_float create_tp_flock_forward_process _compute_predicted_clusters tensor zeros create_tp_buffer get_float create_tp_flock_forward_process _compute_rewards tensor zeros create_tp_buffer get_float create_tp_flock_forward_process _compute_rewards tensor zeros create_tp_buffer get_float create_tp_flock_forward_process _compute_rewards tensor zeros create_tp_buffer get_float create_tp_flock_forward_process _compute_rewards tensor zeros get_float create_tp_flock_forward_process _compute_predicted_clusters tensor zeros create_tp_flock_forward_process _action_rewards fill_ _exploration_cluster clone _action_outputs exploration_random_numbers sum max TPFlock random_ get_float TrainedForwardProcessFactory create integrate get_subflock_integration_testing_flock rand calculate_expected_results randomize_subflock check_integration_results ExpertParams integrate _create_learning_process get_subflock_integration_testing_flock calculate_expected_results randomize_subflock check_integration_results ExpertParams get_float create_tp_flock_learn_process run_and_integrate TPFlockBuffer tensor AllocatingCreator full get_float create_tp_flock_learn_process 
unsqueeze _learn_from_batch tensor get_float encountered_batch_seq_occurrences create_tp_flock_learn_process encountered_subbatch_rewards_punishments check_cuda_errors encountered_subbatch_context_occurrences encountered_batch_exploration_attempts newly_encountered_seqs_indicator encountered_batch_exploration_results encountered_subbatch_exploration_results encountered_batch_context_occurrences unsqueeze encountered_subbatch_exploration_attempts encountered_subbatch_seq_occurrences tensor encountered_batch_rewards_punishments _extract_info_known_seqs get_float tensor _update_knowledge_known_seqs _identify_new_seqs get_float create_tp_flock_learn_process fill_ most_probable_batch_seqs check_cuda_errors most_probable_batch_seq_probs tensor get_float create_tp_flock_learn_process fill_ most_probable_batch_seqs check_cuda_errors newly_encountered_seqs_counts most_probable_batch_seq_probs _extract_info_new_seqs tensor most_probable_batch_seqs newly_encountered_seqs_counts get_float create_tp_flock_learn_process _sort_all_encountered TPFlockBuffer tensor AllocatingCreator get_float create_tp_flock_learn_process fill_ total_encountered_occurrences tensor _forget get_float create_tp_flock_learn_process _extract_frequent_seqs tensor full get_float create_tp_flock_learn_process _subbatch_size _combined_batch_size _subbatch rand cluster_subbatch _subbatch_overlap zeros range ExpertParams temporal get_float forward_learn tensor range create_tp_flock get_float forward_learn tensor range create_tp_flock get_float forward_learn expand tensor range create_tp_flock get_float tensor create_tp_flock get_float clone forward_learn tensor range create_tp_flock TPFlock temporal data_since_last_sample _determine_learning ExpertParams copy_or_fill_tensor total_data_written tensor get_float TrainedForwardProcessFactory UntrainedForwardProcessFactory create_tp_buffer zeros unsqueeze create_tp_flock_untrained_forward_process get_float allocate_memory_blocks step connect tensor to AllocatingCreator ActionMonitorNode MemoryBlock action_in ExpertParams append step get_label_id range get_label_id get_data append step range range len sequences_equal allocate_memory_blocks DatasetMNISTParams collect_labels DatasetMNISTNode do_some_ugly_cpu_random_stuff AllocatingCreator list union collect_allowed_labels collect_allowed_labels DatasetSequenceMNISTNodeParams collect_allowed_labels DatasetSequenceMNISTNodeParams seed randint RandomState sample sequences_equal allocate_memory_blocks DatasetMNISTParams collect_labels DatasetSequenceMNISTNodeParams do_some_ugly_cpu_random_stuff AllocatingCreator DatasetSequenceMNISTNode allocate_memory_blocks DatasetMNISTParams collect_labels DatasetMNISTNode AllocatingCreator same append dict add_tensor_if_not_there range len items list allocate_memory_blocks DatasetMNISTParams class_filter put_tensors_in_dictionary DatasetSequenceMNISTNodeParams AllocatingCreator collect_data DatasetSequenceMNISTNode allocate_memory_blocks DatasetMNISTParams DatasetSequenceMNISTNodeParams zip AllocatingCreator collect_data DatasetSequenceMNISTNode allocate_memory_blocks get_label_id DatasetMNISTParams step get_data DatasetSequenceMNISTNodeParams AllocatingCreator DatasetSequenceMNISTNode seed randint RandomState allocate_memory_blocks AllocatingCreator clone cat append step range DatasetSequenceMNISTNode len get_float sum allocate_memory_blocks step clone normalize_probs type DatasetSequenceMNISTNodeParams cat append AllocatingCreator range DatasetSequenceMNISTNode Expand zeros AllocatingCreator step Expand 
zeros inverse_projection AllocatingCreator tensor inverse_projection Fork zip _test_fork_inverse AllocatingCreator float32 device _test_fork_inverse AllocatingCreator float32 device allocate_memory_blocks zeros float32 connect InversePassOutputPacket recursive_inverse_projection_from_output zip input tensor AllocatingCreator ForkNode MemoryBlock agent_pos GridWorldParams Topology rand float32 connect ConstantNode output JoinNode InversePassOutputPacket recursive_inverse_projection_from_output input ForkNode step add_node tensor Join inverse_projection zip _test_join_inverse AllocatingCreator float32 device _test_join_inverse AllocatingCreator float32 device allocate_memory_blocks float32 connect output JoinNode InversePassOutputPacket recursive_inverse_projection_from_output zip zeros tensor AllocatingCreator allocate_memory_blocks float32 connect output JoinNode InversePassOutputPacket recursive_inverse_projection_from_output zip zeros tensor AllocatingCreator allocate_memory_blocks LambdaNode float32 connect _step tensor AllocatingCreator step float32 MseUnit tensor AllocatingCreator tensor expand flock_size mini_batch_size NeuralNetworkFlockParams expand test prepare_inputs_and_targets make_data_loaders coefficients_minimum_max item tensor train MultilayerPerceptronFlock same flock_size mini_batch_size NeuralNetworkFlockParams prepare_inputs_and_targets unsqueeze make_data_loaders coefficients_minimum_max append tensor enumerate MultilayerPerceptronFlock len push create_delay_buffer ones _push AllocatingCreator full append DataCheckingDataLoader orig_make_data_loaders enumerate NNetParams default_params run_num_steps make_nnet create_network_inputs run_num_steps make_nnet _network create_network_inputs set_global_seeds allocate_memory_blocks ObservationStorage Adam parameters NNetNode buffer_size NNetParams to AllocatingCreator default_params zeros label input_shape output_size connect testing_phase _random_label input tensor _random_image MemoryBlock input_shape set_global_seeds output_size _random_label copy_ _random_image step range run_num_steps make_nnet create_network_inputs load_data_into run_num_steps save_data_of prepare add my_net make_nnet NNetMockTopology PersistableSaver create_network_inputs allocate_memory_blocks PassNode float32 connect _step input tensor AllocatingCreator MemoryBlock AllocatingCreator ones RandomNoiseUnit step fix_random_seed append step get_output_id range AllocatingCreator RandomNumberNode allocate_memory_blocks set_global_seeds AllocatingCreator RandomNumberNode allocate_memory_blocks get_float allocate_memory_blocks step connect SpaceEngineersActionsDescriptor AgentActionsParserNode input tensor AllocatingCreator MemoryBlock clone _step tensor zip DatasetSENavigationParams sequences_equal allocate_memory_blocks SIZE_24 SpaceDivisor get_landmarks DatasetSeNavigationNode stack AllocatingCreator collect_data DatasetSENavigationParams RANDOM_SAMPLING allocate_memory_blocks SpaceDivisor vertical_segments DatasetSeNavigationNode horizontal_segments AllocatingCreator collect_data zip allocate_memory_blocks DatasetSeObjectsParams DatasetSeObjectsNode SIZE_24 item TRAIN_ONLY AllocatingCreator max collect_data TRAIN_ONLY SIZE_24 DatasetSeObjectsParams allocate_memory_blocks switch_training DatasetSeObjectsNode get_common_params AllocatingCreator collect_data allocate_memory_blocks switch_training DatasetSeObjectsNode get_common_params AllocatingCreator collect_data DatasetSeObjectsParams allocate_memory_blocks DatasetSeObjectsNode SIZE_24 copy TRAIN_ONLY 
AllocatingCreator collect_data _train_images allocate_memory_blocks DatasetSeObjectsParams DatasetSeObjectsNode SIZE_24 step TRAIN_ONLY tensor AllocatingCreator len allocate_memory_blocks DatasetSeObjectsParams DatasetSeObjectsNode SIZE_24 step _test_images TEST_ONLY tensor AllocatingCreator len step range __name__ load_data_into SeDatasetObjectsTopologyTest run_testing_phase save_data_of prepare set_topology switch_to_training make_n_steps _node_se_dataset train_pos SeDatasetObjectsTopologyTestAdapter PersistableSaver switch_to_testing _pos clone_ground_truth_label_tensor append step range get_float data_input SpatialPoolerFlockNode Topology rand connect prepare forward_clusters ExpertParams InversePassOutputPacket recursive_inverse_projection_from_output tensor MemoryBlock add_node allocate_memory_blocks get_label_id DatasetMNISTParams step get_data DatasetSequenceMNISTNodeParams AllocatingCreator DatasetSequenceMNISTNode RandomNumberNode get_output_id allocate_memory_blocks _step AllocatingCreator DatasetSENavigationParams allocate_memory_blocks SIZE_24 outputs DatasetSeNavigationNode _step AllocatingCreator get_landmark_id_int EmptyTopology order_nodes _se_io DatasetSeObjectsParams get_label_id print TRAIN_TEST SIZE_24 outputs TEST_ONLY _update_memory_blocks step get_landmark_id_int allocate_memory_blocks AllocatingCreator SpatialPoolerFlockNode get_output_id connect UnsqueezeNode get_reconstruction one_hot_output input get_sp_boosting_durations data_input average_sp_delta RandomNumberNode get_sp_deltas num_boosted_clusters get_sp_params clone output average_boosting_duration step flatten_list tensor comparison_method NestedParams to_nested_dict NestedParams Params to_nested_dict list ObserverView set_observables join isdir __file__ import_module dirname walk discover_child_classes union walk_topics set get instantiate_graph step instantiate_graph collect step instantiate_graph collect step instantiate_graph collect step SpLearningConvergenceExperimentTemplate _collect_measurements MnistSpLearningConvergenceTopologyAdapter SpLearningConvergenceExperimentTemplate _collect_measurements SeDatasetSpLearningConvergenceTopologyAdapter Lrf1SpFlockExperimentTemplate Lrf1SpFlockMnistTemplate _collect_measurements _compute_experiment_statistics Lrf1SpFlockExperimentTemplate Lrf1SpFlockMnistTemplate _collect_measurements _compute_experiment_statistics Lrf1SpFlockExperimentTemplate Lrf1SpFlockSeNavTemplate _collect_measurements _compute_experiment_statistics SeDatasetTaRunningStatsAdapter DatasetSeSimulationRunningStatsExperimentTemplate _collect_measurements _compute_experiment_statistics SeDatasetTaRunningStatsAdapter DatasetSeSimulationRunningStatsExperimentTemplate _collect_measurements _compute_experiment_statistics _collect_measurements SeTaRunningStatsAdapter _compute_experiment_statistics SeSimulationRunningStatsExperimentTemplate add_common_params MultipleLayersParams make_test_params items list MockFluffyTopology make_test_params add_common_params zip _get_default_topology_params _find_constant_parameters print MockFluffyTopology _unpack_params _remove_params make_test_params add_common_params zip _get_default_topology_params constructor_param _param_value_to_string make_test_params change parameters_to_string validate_params_for_n_layers MultipleLayersParams read_list_of_params convert_to_expert_params read_param MultipleLayersParams convert_to_expert_params MultipleLayersParams MultipleLayersParams NCMR1Group change MultipleLayersParams add_common_params MultipleLayersParams 
MultipleLayersParams L3ConvTopology step BasicExperimentRunner get_values_from_all_runs check_value_on_diagonal check_series_around_constant measurement_manager experiment_params TopologyScaffoldingFactory run str Task0TaAnalysisTemplate range Experiment SIZE_24 ExperimentParams get_custom_data_from_all_runs info print check_series_similar check_layer_outputs train_test_params len enumerate zip value_computation pop __subclasses__ add set append get_task_0_topologies topology_class order_nodes DatasetSeObjectsParams get_label_id se_io SIZE_24 topology_class outputs _update_memory_blocks step get_landmark_id_int SETrainTestComponent after_topology_step name SEOrDatasetGraphStub RunMeasurementManager list _get_measurement_manager RandomNumberTopologyTrainTestAdapter len SimpleTestableExperimentTemplate extend range run TestableRunMeasurement partition_to_list_of_phases partition_to_list_of_testing_phases partition_to_training_phases partition_to_phases partition_to_list_of_training_phases partition_to_list_of_ids _partition_tensor_to_ids compute_derivations topology_class TAState topology_class step TAState topology_class range step memory_blocks add_ get_ta_nodes project_root_dir Path perf_counter auto join join join to_safe_path replace replace plot_multiple_runs add_fig_to_doc to_list_list len plot xlabel ylabel title figure legend fill_between add_fig_to_doc grid add_fig_to_doc to_list_list str ones ylabel ylim scatter title legend append gca range plot xlim enumerate int convolve xlabel figure fill_between len tolist localtime strftime savefig basename Image add savefig dirname to_safe_path makedirs tuple islice iter numel item comparison full range len evaluate train_and_evaluate classifier_class full range len sum append compute_mutual_information array range len normalized_mutual_info_score id_to_one_hot mutual_info_score zeros view scatter_ normalized_mutual_info_score shape zeros numpy range shape center_indexes shape train_nn_classifier id_to_one_hot str ClassifierHiddenNet device ClassifierTrainer train ClassifierNoHiddenNet train_svm_classifier one_hot_to_id cpu train array id_to_one_hot SvmModel size float items list asdict isinstance __init__ signature is_dataclass _find_destinations _visit_node _detect_illegal_cycles _clear_ordering enumerate owner inputs is_backward connection append list remove owner inputs connection list owner connections outputs append load join dirname abspath info append check_error _get_cuda_error_code _setup_logging get setFormatter list getLogger handlers addHandler INFO removeHandler removeFilter DEBUG filters setLevel LoggerNameFormatter handlers flush getLogger _setup_logging _get_console_handler _get_file_handler register_observer _recurse_f f member_getter append len int16 uint8 float64 float32 int8 float16 int64 int32 __abstractmethods__ setattr getattr staticmethod get_tensor_stats items round_memory printer ljust get_subcontainers append sum Tensor __dict__ isinstance append type __dict__ range reversed len auto PymunkParams join next tee auto len projection_input zeros_like synchronize len projected_values copy_ warning append range InversePassInputPacket recursive_inverse_projection_from_input index_select clone view len print collect set list unsqueeze sum add_small_constant sum add negate any isnan warn copy_ weighted_sum isnan masked_fill_ copy_ sum log size index_copy_ view item size device id_to_one_hot min max settrace func write_stats list add_row print keys array get_string PrettyTable sum max f_back get_name memory_allocated 
append max_memory_cached f_back get_name memory_allocated add set max_memory_cached f_code co_name check_type check_type is_initialized signal fromarray numpy save BytesIO _validate_predicate_automatic_message replace strip eval sub find split auto min stdout setFormatter addHandler StreamHandler Formatter setLevel INFO DataStore listen start make_app info setup_logging show join list isdir iglob print enumerate print run_gui Task0ExperimentTemplate Task0BaselinesStatsBasicAdapter run_just_model NNetTopology run run_measurement run_measurement run_measurement GlNnTopologyFactory run_experiment Experiment GradualLearningExperimentTemplate ExperimentParams SpLearningConvergenceExperimentTemplate MnistSpLearningConvergenceTopologyAdapter MnistSpTopology run_measurement run_measurement run_measurement run_measurement run_measurement run_measurement run_measurement SeDatasetSpTopology SeDatasetSpLearningConvergenceTopologyAdapter run_measurement run_measurement run_measurement list print sort Lrf1SpFlockMnistTemplate Lrf1SpFlockExperimentTemplate LrfTopology append run_just_model range enumerate Lrf1SpFlockSeNavTemplate SeDatasetSpLrfDebug SeDatasetTaLrf SeDatasetTaRunningStatsAdapter DatasetSeSimulationRunningStatsExperimentTemplate run_measurement run_measurement run_measurement SeTaRunningStatsAdapter SeTaLrfT0 SeSimulationRunningStatsExperimentTemplate run_measurement run_measurement ExpertParams seq_lookbehind DatasetSENavigationParams RANDOM_ORDER Task0OnlineLearningTemplate Task0NarrowTopology Task0ConvWideAdapter run_measurement run_measurement run_measurement ConvWideTwoLayerTopology run_measurement Task0NarrowAdapter run_measurement Task0TrainTestTemplateRelearning print topology_class run_gui Task0RelearnBasicAdapter run_just_model run append range SeObjectsTaskPhaseParams _extend_params_with_param _extend_params_with_param _extend_params_with_param _extend_params_with_param _extend_params_with_param append copy SeObjectsTaskPhaseParams extend_with_phase_information extend_with_location_filter_params extend_with_class_filter_params extend_with_buffer_size_params parse_test_args round extend_with_sampling_method_params append range run_measurement_task0 extend_with_cluster_center_params Task0TaSeTopology LearningRateTaModularAdapter Task0TrainTestLearningRateTemplate run_measurement run_measurement run_measurement items list run_measurement items list run_measurement items list run_measurement items list run_measurement items list run_measurement items list run_measurement add_common_params run_measurement MultipleLayersParams add_common_params run_measurement MultipleLayersParams run_measurement MultipleLayersParams run_measurement_with_params run_measurement run_measurement run_measurement run_measurement run_measurement run_measurement print topology_class run_gui Task0TrainTestClassificationAccTemplate ClassificationAccuracyModularAdapter run_just_model run run_measurement run_experiment_with_ui Task0TrainTestClassificationAccTemplate experiment_params TopologyScaffoldingFactory train_test_params run_measurement get_float SymbolicInputTemplate run_measurement show show_plots info int prod run_measurement Task0TaAnalysisTemplate SIZE_64 run_measurement MultipleLayersParams SIZE_64 run_measurement MultipleLayersParams good_one_layer_config_for_four_objects run_measurement SIZE_64 MultipleLayersParams SIZE_64 run_measurement MultipleLayersParams str SIZE_64 run_measurement MultipleLayersParams SIZE_24 GradualLearningBasicTemplate phase_3_steps phase_2_steps phase_1_steps 
run_measurement join compute_moving_average plot_multiple_runs stack expand_dims add_fig_to_doc GoalDirectedTemplate parse_test_args run_measurement GridWorldParams parse_test_args run_measurement GridWorldParams iter mean min max std GlNnWorldFactory LambdaNode LambdaNode LambdaNode LambdaNode LambdaNode SequenceGenerator diagonal_transition_matrix Instance PymunkParams SwitchableWorldTopology GlNnWorldTopology define_temporal_classes create_non_persisting_observer_system run_topology_with_ui SeT0BasicTopology create_non_persisting_observer_system run_topology_with_ui SeT0NarrowHierarchy run_topology_with_ui SeT0ConvSPTopology run_topology_with_ui SeT0ConvTopology run_topology_with_ui SeT1Bt run_topology_with_ui SeT1ConvTopologicalGraph Task0StatsBasicAdapter BaselinesReinforcementLearningTopology Task0StatsBasicAdapter SeT0BasicTopology Task0ExperimentTemplate __name__ Task1ExperimentTemplate print Task1StatsBasicAdapter run_gui run_just_model SeT1Bt run parse_test_args __name__ run_measurement_task1 Size Size2D ExpertParams ExpertParams conv_layer_class compute_grid_size input_rf_sizes input_rf_stride data connect zip append create_conv_layer enumerate copy_ zeros CrossEntropyLoss floor partial getattr ObserverPropertiesItem append vars items list dict items list get items list add set hasattr isinstance is_dataclass warn tensor len isinstance mul matches TensorShapePatternMatcher reduce raise_exception Sum isdir makedirs create_dir join gettempdir format_exc is_available synchronize manual_seed_all manual_seed is_available warning seed _set_cuda_seed _set_cuda_deterministic generate_seed manual_seed seed RandomState _set_cuda_seed generate_seed error zip zip _collect_answers_for zeros zip append argmax_tensor max view append do_compute_nn_classifier_accuracy zip error tensor is_containing_nans compute_nn_classifier_accuracy any _compute_classification_accuracy append accuracy_method zip argmax_list_list_tensors append zip item append tensor tolist index append max range append _partition_tensor_to_ids enumerate
# TorchSim TorchSim is a simulation platform developed for AGI research, but it is generic enough to be used in other scenarios. The models the simulator executes are graph-based: nodes execute in the order defined by the connections between outputs and inputs, and data is passed along these connections. PyTorch is used as the backend for tensor storage and processing. The simulation can optionally be controlled and observed via an included web-based UI. Internal and output tensors of nodes can be observed with basic tensor/matrix observers, and custom observers can be written for other use cases. Part of this repository is an implementation of ToyArchitecture, a simple interpretable AGI model described in this paper: [ToyArchitecture: Unsupervised Learning of Interpretable Models of the World](https://arxiv.org/abs/1903.08772). The original code was developed internally at GoodAI by (in alphabetical order): Simon Andersson, Joe Davidson, Petr Dluhos, Jan Feyereisl, Petr Hlubucek, Martin Hyben, Matej Nikl, Premysl Paska, Martin Poliak, Jan Sinkora, Martin Stransky, Josef Strunc, Jaroslav Vitku ## How to install TorchSim ### Windows #### Installation steps 1. If you don't have Visual Studio 2017, install [Visual Studio 2017 Community](https://visualstudio.microsoft.com/downloads/); its tools are used for building CUDA kernels.
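The node-graph execution idea described above can be illustrated with a minimal sketch. The `Node`/`connect`/`step` names below are simplified stand-ins chosen for the example, not TorchSim's actual API.

```python
# Minimal sketch of a node-graph simulation step with PyTorch tensors.
# Class and method names are illustrative, not TorchSim's real interface.
import torch

class Node:
    def __init__(self, name):
        self.name = name
        self.inputs = []      # upstream (node, output_key) pairs
        self.outputs = {}     # output_key -> torch.Tensor

    def connect(self, upstream, key="out"):
        self.inputs.append((upstream, key))

    def step(self):
        raise NotImplementedError

class ConstantNode(Node):
    def step(self):
        self.outputs["out"] = torch.ones(4)

class SumNode(Node):
    def step(self):
        self.outputs["out"] = sum(node.outputs[key] for node, key in self.inputs)

# Nodes execute in an order consistent with their connections,
# and tensors are passed along the edges.
a, b, s = ConstantNode("a"), ConstantNode("b"), SumNode("sum")
s.connect(a); s.connect(b)
for node in (a, b, s):        # a valid topological order
    node.step()
print(s.outputs["out"])       # tensor([2., 2., 2., 2.])
```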
379
Gorov/LowRankFCM
['relation extraction']
['Embedding Lexical Features via Low-Rank Tensors']
data/getVocab.py getBest.py
# LowRankFCM code and data
380
Gorov/three_player_for_emnlp
['sentiment analysis']
['Rationalizing Neural Predictions']
three_player_games/rationale_3players_for_emnlp.py three_player_games/rationale_3players_relation_classification_models.py three_player_games/rationale_3players_text_matching_models.py datasets/beer_dataset_single_aspect.py models/base_classification_models.py three_player_games/run_beer_single_aspect_introspection_3players.py three_player_games/util_functions.py utils/utils.py datasets/dataset.py three_player_games/run_beer_single_aspect_rationale_3players.py three_player_games/rationale_3players_sentence_classification_models.py models/rnn_model.py models/generator.py single_soft_regularization_loss BernoulliSoftGenerator Generator DepGenerator _get_entropy DepRnnModel SoftGenerator RnnModel CnnModel count_regularization_baos_for_both HardRationale3PlayerClassificationModelForEmnlp IntrospectionGeneratorModuleForRelation create_relative_pos_embed_layer HardIntrospection3PlayerRelationClassificationModel ClassifierModuleForRelation MatchingClassifierModule Rationale3PlayerMatchingModel HardRationale3PlayerMatchingModel IntrospectionGeneratorModule HardIntrospection3PlayerMatchingModel bao_regularization_hinge_loss_batch_with_none_loss Transpose show_1d_rationale count_regularization_loss_batch count_regularization_hinge_loss_batch bao_regularization_loss_batch bao_regularization_hinge_loss_batch single_regularization_loss_batch single_soft_regularization_loss BernoulliSoftGenerator Generator DepGenerator _get_entropy DepRnnModel SoftGenerator RnnModel CnnModel count_regularization_baos_for_both HardRationale3PlayerClassificationModelForEmnlp IntrospectionGeneratorModuleForRelation create_relative_pos_embed_layer HardIntrospection3PlayerRelationClassificationModel ClassifierModuleForRelation MatchingClassifierModule Rationale3PlayerMatchingModel HardRationale3PlayerMatchingModel IntrospectionGeneratorModule HardIntrospection3PlayerMatchingModel bao_regularization_hinge_loss_batch_with_none_loss Transpose show_1d_rationale count_regularization_loss_batch count_regularization_hinge_loss_batch bao_regularization_loss_batch bao_regularization_hinge_loss_batch single_regularization_loss_batch log mean abs _get_entropy cat abs sum cat Embedding normal_ abs sum cat abs sum cat abs sum threshold cat abs sum cat abs sum threshold cat abs sum threshold cat show bar title figure xticks range len
# A Three-Player Game for Rationalization This repo contains the PyTorch implementation of the EMNLP 2019 paper [Rethinking Cooperative Rationalization: Introspective Extraction and Complement Control](https://arxiv.org/pdf/1910.13294.pdf). To keep this repo neat and lightweight, we release the core code and data for the newly proposed single-aspect beer review dataset (i.e., the evaluation on the left of Table 4 in the paper) for demo purposes. If you are interested in reproducing the exact results for other datasets, please contact us; we are happy to provide the code and help. You can start with the following entry script: ```bash run_beer_single_aspect_rationale_3players.py ``` **Data requirement:** Please download the beer review data following the paper [Rationalizing Neural Predictions](https://arxiv.org/pdf/1606.04155.pdf), then put ```data/sec_name_dict.json``` into your data directory. Also download the ```glove.6B.100d``` word embeddings into your data directory. **Tested environment:** Python 2.7.13, PyTorch 0.3.0.post4
381
Gradiant/bob.paper.icb2019.gradgpad
['face anti spoofing']
['Generalized Presentation Attack Detection: a face anti-spoofing evaluation proposal']
clean.py bob/paper/icb2019/gradgpad/classes/utils/crop_image.py bob/paper/icb2019/gradgpad/classes/utils/preprocess.py bob/paper/icb2019/gradgpad/classes/mtcnn/mtcnn_utils.py bob/paper/icb2019/gradgpad/classes/utils/input_preprocessor.py experiments/helpers/aggregate_database_provider.py bob/paper/icb2019/gradgpad/test/test_utils.py bob/paper/icb2019/gradgpad/test/mtcnn/test_mtcnn_face_detector.py bob/paper/icb2019/gradgpad/__init__.py bob/paper/icb2019/gradgpad/test/mtcnn/test_mtcnn_utils.py bob/paper/icb2019/gradgpad/classes/mtcnn/__init__.py experiments/helpers/tensorflow_debug.py bob/paper/icb2019/gradgpad/test/utils/test_preprocess.py bob/paper/icb2019/gradgpad/test/color_based/test_boulkenafet_features_extractor.py doc/conf.py experiments/helpers/pipeline_provider.py bob/paper/__init__.py experiments/color_based/configuration_boulkenafet_face_cropped.py experiments/quality_based/configuration_msu_iqm_face_cropped.py bob/paper/icb2019/gradgpad/scripts/create_summary_table.py bob/paper/icb2019/gradgpad/classes/color_based/__init__.py bob/paper/icb2019/gradgpad/classes/color_based/lpq.py bob/paper/icb2019/gradgpad/classes/color_based/boulkenafet_features_extractor.py bob/paper/icb2019/gradgpad/classes/__init__.py bob/paper/icb2019/gradgpad/test/utils/test_crop_images.py bob/paper/icb2019/gradgpad/test/mtcnn/disable_tensorflow_debugging_info.py bob/paper/icb2019/gradgpad/classes/color_based/coalbp.py bob/paper/icb2019/gradgpad/classes/utils/annotations_utils.py bob/paper/icb2019/gradgpad/classes/utils/parser.py bob/paper/icb2019/gradgpad/classes/mtcnn/mtcnn_face_detector.py version.py bob/paper/icb2019/gradgpad/classes/quality_based/msu_iqm_features_extractor.py experiments/helpers/defaults.py bob/paper/icb2019/gradgpad/scripts/__init__.py bob/paper/icb2019/gradgpad/classes/utils/__init__.py bootstrap-buildout.py bob/paper/icb2019/gradgpad/scripts/download_resources.py bob/__init__.py bob/paper/icb2019/gradgpad/scripts/generate_grad_gpad_protocols_text_lists.py bob/paper/icb2019/__init__.py setup.py bob/paper/icb2019/gradgpad/test/quality_based/test_msu_iqm_features_extractor.py bob/paper/icb2019/gradgpad/classes/quality_based/__init__.py bob/paper/icb2019/gradgpad/test/utils/test_annotations_utils.py _final_version main remove main get_version update_version BoulkenafetFeaturesExtractor calculate_coalbp_features euc_dist lpq MtcnnFaceDetector bulk_detect_face _get_rnet_features _run_Onet nms pad Network imresample PNet _generate_scales run_rnet_features bbreg detect_face generateBoundingBox create_mtcnn _run_Pnet run_pnet_features ONet _run_Pnet_one_scale rerec RNet layer _run_Rnet MsuIqmFeaturesExtractor get_averaged_annotation annotations_meet_the_restrictions rescale_annotation get_bounding_box_from_annotations crop_square_face crop_image meet_the_restrictions crop_from_bbox input_resize_preprocessor rescale_bounding_boxes rescale_landmarks parser normalize preprocess_image resize_image_with_side_target_size check_bounding_box_integrity main recursive_glob has_args is_already_downloaded main get_logger main has_args write_dict_to_file TestUtils UnitTestBoulkafanetExtractor disable_tensorflow_debugging_info UnitTestMtcnnFaceDetector UnitTestMtcnnUtils UnitTestMsuIqmFeaturesExtractor UnitAnnotationUtils UnitCropImages UnitPreprocess setup member_function_test aggregate_database_provider get_available_protocols get_pipeline_average_features_scaled_rbfsvc tf_set_quiet_mode format isdir print rmtree isfile print remove glob sorted Repo tags str join int format print get_version Repo 
create_tag push split show_version update_version add_argument get_version new_version ArgumentParser parse_args update_minor_version concatenate reshape astype ravel mean shape dot array bincount zeros empty std range T astype maximum dot float arange pi svd exp len shape real meshgrid power matrix T euc_dist reshape dot convolve2d histogram array imag conj realpath split amin int nms rerec transpose _run_Pnet_one_scale astype copy range pad vstack int32 ceil zeros empty imresample append len pnet transpose copy generateBoundingBox expand_dims _get_rnet_features _run_Pnet transpose rnet pnet expand_dims transpose nms transpose hstack astype rnet where bbreg copy pad int32 rerec zeros range imresample nms transpose hstack onet where bbreg copy tile _generate_scales _run_Onet _run_Pnet _run_Rnet where vstack pnet nms transpose pad ceil append imresample range hstack astype copy bbreg tile generateBoundingBox power empty zeros enumerate minimum int rnet onet int32 rerec amin len reshape transpose vstack transpose hstack where fix flipud vstack empty argsort maximum zeros_like minimum ones astype where int32 expand_dims transpose maximum tile resize items meet_the_restrictions zeros max items min max astype min max array get_bounding_box_from_annotations fromarray height ANTIALIAS resize astype width float array append append parse_args set_defaults add_argument ArgumentParser crop_square_face resize min max float resize endswith join walk append vars join result_path to_latex base_path concat recursive_glob from_csv to_html to_csv print_help append sort_values makedirs isdir install getLogger items sorted format debug check_output parser verbose isfile get_logger get_database_from_key list get_ground_truth_list json_file lower export_database_paths_from_file keys write_dict_to_file dirname get_image_and_bbox ERROR set_verbosity get_image_and_bbox get_image_and_bbox get_image_and_bbox connect get_database_from_key export_database_paths_from_file list keys remove Pipeline ERROR set_verbosity
# bob.paper.icb2019.gradgpad [![Build Status](https://travis-ci.org/Gradiant/bob.paper.icb2019.gradgpad.svg?branch=master)](https://travis-ci.org/Gradiant/bob.paper.icb2019.gradgpad) [![Doc](http://img.shields.io/badge/docs-latest-green.svg)](https://gradiant.github.io/bob.paper.icb2019.gradgpad/) 👉 👉 New version of `gradgpad` (v2) is already available in [https://github.com/acostapazo/gradgpad](https://github.com/acostapazo/gradgpad) 🗿 👈 👈 [Bob](https://www.idiap.ch/software/bob/) package to reproduce the work carried out in the paper [Generalized Presentation Attack Detection: a face anti-spoofing evaluation proposal](https://arxiv.org/abs/1904.06213) accepted in the [12th IAPR International Conference On Biometrics](http://www.icb2019.org/). ## Abstract _Over the past few years, Presentation Attack Detection (PAD) has become a fundamental part of facial recognition systems. Although much effort has been devoted to anti-spoofing research, generalization in real scenarios remains a challenge. In this paper we present a new opensource evaluation framework to study the generalization capacity of face PAD methods, coined here as face-GPAD. This framework facilitates the creation of new protocols focused on the generalization problem establishing fair procedures of evaluation and comparison between PAD solutions. We also introduce a large aggregated and categorized dataset to address the problem of incompatibility between publicly available datasets. Finally, we propose a benchmark adding two novel evaluation protocols: one for measuring the effect introduced by the variations in face resolution, and the second for evaluating the influence of adversarial operating conditions._ ## Acknowledgements If you use this framework, please cite the following publications:
382
Group-TanakaIshii/word_taylor
['time series']
["Taylor's law for Human Linguistic Sequences"]
word_taylor.py load_pickle residual_func log_leastsq_regression TFS basic_process plot_TFS return_wordloc input_cmdln main load_textdata nodraw add_argument ArgumentParser width parse_args pickle append range len sub join basic_process set return_wordloc load_textdata len int log10 residual_func list mean sqrt leastsq power array yscale show log_leastsq_regression plot print xlabel text xscale min ylabel ylim tick_params xlim max load_pickle time items print input_cmdln append DataFrame len
# word_taylor Application of Taylor's law to natural language and related data, based on word counts. The accompanying paper is published at https://arxiv.org/abs/1804.07893. When using this source code, please cite the paper.
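Taylor's law states that the standard deviation of a word's count across text segments scales as a power of its mean, sigma ∝ mu^alpha. A minimal sketch of estimating the exponent follows; the window size, the `corpus.txt` input file, and the exact filtering are illustrative assumptions, not this repository's precise procedure.

```python
# Illustrative Taylor's-law fit over word counts in fixed-size windows.
import numpy as np
from collections import Counter

def taylor_exponent(words, window=1000):   # window size is an assumed value
    # count each word inside consecutive non-overlapping windows
    wins = [Counter(words[i:i + window])
            for i in range(0, len(words) - window + 1, window)]
    mu, sigma = [], []
    for w in set(words):
        counts = np.array([win[w] for win in wins], dtype=float)
        if counts.mean() > 0 and counts.std() > 0:
            mu.append(counts.mean())
            sigma.append(counts.std())
    # least-squares fit in log-log space: log sigma = alpha * log mu + c
    alpha, _ = np.polyfit(np.log10(mu), np.log10(sigma), 1)
    return alpha

words = open("corpus.txt").read().split()  # hypothetical input file
# natural-language texts give exponents between 0.5 (i.i.d. limit) and 1
print(taylor_exponent(words))
```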
383
Gstaerman/FIF
['outlier detection', 'anomaly detection']
['Functional Isolation Forest', 'Outlier detection in multivariate functional data through a contaminated mixture model']
old_fif.py setup.py version.py __init__.py MFIF_python/Section_5/MFIF.py PathFactor c_factor derivate_piecewise iTree derivate Node FIForest read PathFactor c_factor MFIForest iTree Node derivateM zeros astype range diff zeros range diff zeros astype range diff
Gstaerman/FIF
384
GuHongyang/Person-ReID-Pytorch
['person retrieval', 'person re identification']
['Beyond Part Models: Person Retrieval with Refined Part Pooling (and a Strong Convolutional Baseline)']
PCB.py data.py metrics.py trainer.py dataset.py main.py model.py Data Dataset list_pictures cmc_map mean_ap _unique_sample cmc Model weights_init_classifier weights_init_kaiming PCB Trainer zeros items choice asarray arange defaultdict astype average_precision_score argsort shape _unique_sample int32 zip append zeros range enumerate len asarray arange defaultdict astype argsort shape _unique_sample int32 zip append zeros range enumerate len asarray arange astype average_precision_score argsort shape int32 append range data normal_ kaiming_normal_ __name__ constant_ data normal_ __name__ constant_
# Person_ReID Beyond Part Models: Person Retrieval with Refined Part Pooling (and A Strong Convolutional Baseline) https://arxiv.org/pdf/1711.09349.pdf ![image](model.png) | | mAP | R-1 | R-5 | R-10 | | -------- | -----: | :----: | :----: | :----: | | Paper | 77.3 | 92.4 | 97 | 97.9 | | Reproduce | 79.6 | 92.7 | 97.0 | 98.1 | | SF+SC | 66.6 | 86.9 | 94.4 | 96.3 |
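The PCB idea behind this repo is to split the backbone feature map into horizontal part stripes, pool each stripe, and attach an independent classifier per part. The sketch below illustrates that structure; the part count, embedding size, and backbone wiring are generic assumptions, not this repository's exact configuration.

```python
# Illustrative PCB-style head: p horizontal stripes, one classifier per stripe.
import torch
import torch.nn as nn
from torchvision.models import resnet50   # torchvision >= 0.13 API assumed

class PCBHead(nn.Module):
    def __init__(self, num_classes, parts=6, feat_dim=2048, reduced_dim=256):
        super().__init__()
        self.parts = parts
        self.pool = nn.AdaptiveAvgPool2d((parts, 1))          # one vector per stripe
        self.embeds = nn.ModuleList(
            [nn.Conv2d(feat_dim, reduced_dim, 1) for _ in range(parts)])
        self.classifiers = nn.ModuleList(
            [nn.Linear(reduced_dim, num_classes) for _ in range(parts)])

    def forward(self, feat_map):                              # (B, 2048, H, W)
        stripes = self.pool(feat_map)                         # (B, 2048, parts, 1)
        logits = []
        for i in range(self.parts):
            part = self.embeds[i](stripes[:, :, i:i + 1, :])  # i-th stripe
            logits.append(self.classifiers[i](part.flatten(1)))
        return logits                                         # one CE loss per part at train time

backbone = nn.Sequential(*list(resnet50(weights=None).children())[:-2])
head = PCBHead(num_classes=751)                               # 751 identities in Market-1501
x = torch.randn(2, 3, 384, 128)
print([l.shape for l in head(backbone(x))])
```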
385
Guillem96/deep-erase
['optical character recognition']
['DeepErase: Weakly Supervised Ink Artifact Removal in Document Text Images']
deeperase/callbacks.py train.py deeperase/model.py deeperase/backbones.py deeperase/data.py deeperase/__init__.py setup.py deeperase/discriminator.py deeperase/generator.py read_rel get_version _build_model train _show_batch list_supported_models build_unet_backbone TensorBoard PlotPredictions _plot_to_image _compose_prediction_image random_crop build_dataset random_flip read_images build_discriminator _conv_bn _unet_forward _DecoderBlock _Conv3x3BnReLU build_unet DeepErase dirname abspath splitlines startswith show subplot axis close imshow title zip Adam build build_discriminator DeepErase build_unet compile strftime _build_model load_weights _show_batch Path summary build_dataset batch fit join list_supported_models model_cls subplots set_title suptitle astype axis imshow enumerate BytesIO seek close getvalue savefig expand_dims decode_png float32 convert_image_dtype read_file resize decode_jpeg cond stack int partial list_files shuffle map skip zip sum _conv_bn Input range concatenate activation _unet_forward Input layers build_unet_backbone range
# DeepErase - Ink Artifact Removal This repository contains the implementation of the denoising model described in [DeepErase: Weakly Supervised Ink Artifact Removal in Document Text Images](https://arxiv.org/abs/1910.07070). Our implementation can also be trained within a Generative Adversarial Network (GAN) framework to improve the noise removal. ## Training ```python import deeperase input_pattern = "data/noised/*.jpg"
386
Guitaricet/unet_transformer_translation
['machine translation']
['Injecting Hierarchy with U-Net Transformers']
unet_transformer/unet_transformer_2col.py unet_transformer/unet_transformer_layer.py profiile.py unet_transformer/__init__.py unet_transformer/unet_transformer.py timeitnow call_layer call_encoder call_model Embedding UNetTransformerEncoder base_architecture UnetTransformerModel Linear base_architecture UNetTransformer2ColEncoder UnetTransformer2ColModel UNetTransformerEncoderLayer Linear append fn time range to model to model to eq layer bias xavier_uniform_ weight constant_ normal_ weight constant_ encoder_embed_dim encoder_ffn_embed_dim getattr decoder_embed_dim
# Injecting Hierarchy with U-Net Transformers David Donahue, Vladislav Lialin, Anna Rumshisky, 2020 Paper: [arxiv.org/abs/1910.10488](https://arxiv.org/abs/1910.10488) This repository contains the code for the translation experiments. It is a [fairseq](https://github.com/pytorch/fairseq) [plug-in](https://fairseq.readthedocs.io/en/latest/overview.html) and can be easily applied to a new task. To use it, specify `--user-dir ./unet_transformer` to extend fairseq with the UNet Transformer architecture.
387
GuoLiuFang/seglink-lfs
['scene text detection', 'curved text detection']
['Detecting Oriented Text in Natural Images by Linking Segments']
tool/convert_caffe_model/convert_caffemodel_to_ckpt.py seglink/unit_tests.py seglink/visualizations.py manage.py tool/convert_caffe_model/tests.py seglink/model_cnn.py seglink/evaluate.py seglink/config.py seglink/solver.py seglink/data.py tool/convert_caffe_model/dump_caffemodel_weights.py tool/create_datasets.py seglink/ops.py seglink/model.py seglink/utils.py clear build_op run_tf_program_with_json_config upload_logs start_tb test train clean_op test_preprocess input_stream train_preprocess postprocess_and_write_results_ic13 postprocess_and_write_results_ic15 evaluate SegLinkDetector SsdVgg16 load_oplib atrous_conv2d score_loss smooth_l1_loss max_pool conv2d conv_relu avg_pool _nn_variable Solver test_encode_decode_synth_data test_clip_rboxes test_encode_decode_real_data test_data_loading_and_preprocess test_max_pool_on_odd_sized_maps test_decode_combine_rboxes summarize_losses print_tensor_summary rboxes_to_polygons setup_logger log_git_version summarize_activations log_flags mkdir_if_not_exist rboxes_to_bboxes visualize_nodes visualize_rboxes visualize_segments_and_links convert_image_for_visualization visualize_combined_rboxes visualize_detection_each_layer visualize_links visualize_bboxes create_icdar2015_incidental_dataset _int64_feature _bytes_list_feature read_jpeg_check DatasetCreator_Icdar2013 DatasetCreator_Scut DatasetCreator_Td500 _int64_list_feature create_merge_multiple _bytes_feature create_synthtext_dataset _float_feature DatasetCreator DatasetCreator_Icdar2015Incidental _float_list_feature convert_caffemodel_to_ckpt dump_caffemodel_weights test_classify_image join chdir print system mkdir print join system join remove format print glob input len pop join items format isinstance print chdir system abspath split len run_tf_program_with_json_config run_tf_program_with_json_config system greater_equal info build_model multiply reshape OFFSET_VARIANCE shape node_threshold link_threshold softmax cast int32 decode_segments_links SegLinkDetector combine_segments ConfigProto append enumerate minimum decode join format test_batch_size info rboxes_to_polygons astype maximum system int32 result_suffix float range minimum str join astype maximum save_image_and_lexicon bbox_scale int32 float range rboxes_to_bboxes join format load_op_library copyfile realpath dirname isinstance xavier_initializer_conv2d sqrt constant_initializer add_to_collection get_variable xavier_initializer truncated_normal_initializer get get softmax_cross_entropy_with_logits int64 cast one_hot encode_groundtruth print decode_prediction _generate_random_gt randint range len pack constant encode_groundtruth build_model train_preprocess train_record_path FctdDetector input_stream mkdir_if_not_exist batch enumerate constant clip_rboxes float32 _generate_random_rboxes shuffle_batch train_preprocess add_subplot figure input_stream mkdir_if_not_exist print astype float32 set_trace decode_combine_rboxes stdout setFormatter getLogger addHandler StreamHandler Formatter info DEBUG setLevel FileHandler items join format info append str format check_output strip info name zero_fraction sub histogram info scalar ExponentialMovingAverage apply reduce_max zero_fraction reduce_mean shape reduce_min Print makedirs hstack hstack min max _rboxes_to_polygons uint8 asarray astype float32 IMAGE_BGR_MEAN join isinstance transpose copy add_patch imshow clf savefig Rectangle info range enumerate set_transform print add_patch rotate_around Rectangle transData expand_dims range Circle add_artist shape xrange float plot min shape xrange 
float range clear visualize_rboxes print add_subplot convert_image_for_visualization imshow savefig Rectangle legend append range enumerate clear iteritems str join format visualize_rboxes print add_subplot shuffle convert_image_for_visualization det_layers imshow savefig visualize_links xrange append range len clear join format visualize_rboxes print add_subplot convert_image_for_visualization imshow savefig range join str permutation arange print TFRecordWriter transpose min write extend SerializeToString tqdm Example expand_dims loadmat range split join format basename print glob TFRecordWriter write shuffle SerializeToString Example exists enumerate format _create_next_sample n_samples concatenate print TFRecordWriter write shuffle SerializeToString _read_list append full range enumerate load build_model Vgg16Model float32 placeholder caffe_weights_path model_scope dump layers prototxt_path Net caffe_weights_path caffemodel_path range TEST len Vgg16Model float32 placeholder load_image_and_preprocess vgg16
# SegLink ## How to run the Docker image The image is available at https://hub.docker.com/r/xxxxxxxxxxxxxxxxxxxx/cosseglink/. Start a container with `sudo docker run --runtime=nvidia -it -v /home/jerry/workspace/seglink:/workspace -v /home/jerry/workspace/datasets/scene_text/icdar_2015_incidental:/mnt/datasets/scene_text/icdar_2015_incidental/ xxxxxxxxxxxxxxxxxxxx/cosseglink:0.1 bash`, then inside the container run `source activate py36`, `cd workspace/`, and `python manage.py train exp/sgd finetune_ic15`. This is an implementation of Detecting Oriented Text in Natural Images by Linking Segments (https://arxiv.org/abs/1703.06520). ## Prerequisites The project is written in Python 3 and C++ and relies on TensorFlow v1.0 or newer. We have only tested it on Ubuntu 14.04; if you are using other Linux versions, we suggest using Docker. CMake (version >= 2.8) is required to compile the C++ code. Install TensorFlow (GPU-enabled) by following the instructions on https://www.tensorflow.org/install/. The project requires no other Python packages.
388
GuoShi28/GCP-Net
['demosaicking', 'denoising']
['Joint Denoising and Demosaicking with Green Channel Prior for Real-world Burst Images']
codes/test_real.py codes/models/archs/arch_util.py codes/models/module_util.py codes/models/Video_base_model.py codes/utils/util.py codes/models/archs/dcn/setup.py codes/data/util.py codes/utils/process.py codes/utils/Demosaicing_malvar2004.py codes/data/Vimeo90K_JDDB.py codes/train.py codes/data/test_JDDB.py codes/models/archs/arch_gcpnet.py codes/options/options.py codes/models/base_model.py codes/models/archs/dcn/__init__.py codes/models/networks.py codes/data/__init__.py codes/models/loss.py codes/models/__init__.py codes/models/lr_scheduler.py codes/test.py codes/models/archs/dcn/deform_conv_debug.py codes/utils/masks.py codes/data/data_sampler.py codes/utils/unprocess.py codes/models/archs/dcn/deform_conv.py codes/metrics/calculate_PSNR_SSIM.py codes/data_scripts/create_lmdb.py codes/utils/misc.py main read_rggb_img_seq_opts_joint main read_raw_img_seq main init_dist DistIterSampler read_rggb_img_seq_opts_joint VideoTestDataset_RGGB is_image_file BGR2RGB RGB2RGGB ycbcr2rgb modcrop imresize_np calculate_weights_indices bgr2ycbcr rgb2ycbcr index_generation get_image_paths _get_paths_from_lmdb imresize _read_img_lmdb _get_paths_from_images augment read_img channel_convert RGB2Gray augment_flow func_degamma glob_file_list RGB2RGGB2 cubic Vimeo90KDataset_RGGB create_dataloader create_dataset main read_image_worker vimeo90k test_lmdb bgr2ycbcr calculate_ssim calculate_psnr main ssim BaseModel smooth_loss CharbonnierLoss GANLoss explainability_loss photometric_reconstruction_loss GradientPenaltyLoss TVLoss MultiStepLR_Restart CosineAnnealingLR_Restart initialize_weights flow_warp make_layer ResidualBlock_noBN define_G VideoBaseModel create_model SimpleLSTM SimpleBlock PCD_Align DualABlock GCPNet GCABlock initialize_weights flow_warp make_layer ResidualBlock_noBN DeformConvFunction ModulatedDeformConv DeformConvPack ModulatedDeformConvPack DeformConv ModulatedDeformConvFunction DeformConvFunction ModulatedDeformConv DeformConvPack ModulatedDeformConvPack DeformConv ModulatedDeformConvFunction make_cuda_ext parse dict_to_nonedict NoneDict dict2str check_resume demosaicing_CFA_Bayer_Malvar2004 masks_CFA_Bayer start_exp format_metric_dict_to_line representsInt AvgMeter save_hdf5 torch_accuracy save_args load_checkpoint make_symlink read_hdf5 add_path mkdir cur_time log_important demosaic apply_gains_jdd process_train gamma_compression apply_gains process_test process apply_ccms smoothstep add_noise random_ccm random_noise_levels_kpn safe_invert_gains mosaic random_gains random_noise_levels apply_ccm unprocess unprocess_gt gamma_expansion add_noise_test inverse_smoothstep unprocess_meta_gt OrderedYaml calculate_ssim set_random_seed print_model_parm_flops single_forward ssim yuv2rgb crop_border DUF_downsample mkdirs rggb_tensor2img bayer2bgr setup_logger ProgressBar rggb2bgr calculate_psnr mkdir get_network_description get_timestamp rgb2yuv save_img mkdir_and_rename flipx4_forward tensor2img join sorted BGR2RGB mosaic glob print read_img clamp ascontiguousarray from_numpy stack add_noise_test permute append unprocess_meta_gt imwrite getLogger calculate_ssim unsqueeze inverse device single_forward round clip tensor2img sorted basename FloatTensor squeeze transpose GCPNet crop_border mkdirs load_state_dict index_generation append to sum format glob read_rggb_img_seq_opts_joint astype setup_logger copy eval calculate_psnr info process_test zip enumerate load join uint8 print mm len T print File float32 stack append zeros float enumerate numpy floor cuda str view from_numpy pad range bayer2bgr float 
reshape read_raw_img_seq zeros array int init_process_group set_device set_start_method device_count get_current_learning_rate reduce set_random_seed dict2str ArgumentParser save opt get_current_log split get_rank ceil parse_args check_resume update_learning_rate current_device feed_data get_current_visuals get update SummaryWriter parse create_model init_dist DistIterSampler dict_to_nonedict save_training_state get_world_size create_dataloader ProgressBar test close mkdir optimize_parameters_rgb item resume_training int items unsqueeze_ add_scalar add_argument save_img barrier mkdir_and_rename set_epoch create_dataset randint add_noise is_image_file join sorted append walk load join len open sorted _get_paths_from_images _get_paths_from_lmdb reshape frombuffer astype float32 expand_dims _read_img_lmdb IMREAD_UNCHANGED imread reshape zeros zeros append range dtype astype float32 matmul dot round dtype astype float32 matmul dot round dtype astype float32 matmul round shape copy abs view contiguous expand floor linspace ceil sum max narrow cubic int FloatTensor size copy_ index_select mv calculate_weights_indices long range int FloatTensor size from_numpy copy_ index_select mv calculate_weights_indices long range get_world_size len format getLogger D info __name__ vimeo90k IMREAD_UNCHANGED imread commit put Pool exists open begin sorted list apply_async exit add shape append encode range update dump format nbytes glob close ProgressBar set zip enumerate join print extend split len load join format imwrite print reshape open frombuffer len bgr2ycbcr imread mean float64 astype float64 transpose astype outer getGaussianKernel append ssim range len expand_as float range enumerate ones_like gradient data isinstance Conv2d kaiming_normal_ modules zero_ BatchNorm2d weight constant_ Linear append block range arange grid_sample print size type_as stack meshgrid float max GCPNet format __name__ info M join items replace print endswith abspath expanduser pardir items isinstance dict items isinstance join format getLogger warning info asarray convolve ones transpose masks_CFA_Bayer logical_and where shape logical_or upper dict zip print cur_time int print format close len print format close len format print memory_allocated add_argument device_count ArgumentParser parse_args try_arg topk size t eq mul_ expand_as append sum max join resume print format load format print load_state_dict isfile print symlink remove exists print format append items stack squeeze ones_like permute ones_like permute cat size Upsample permute flip cat mul sum permute clamp permute demosaic clamp gamma_compression apply_gains apply_ccms apply_gains_jdd gamma_compression apply_ccms clamp apply_gains_jdd clamp gamma_compression apply_ccms smoothstep FloatTensor mm uniform_ sum len sample Normal uniform_ clamp asin permute sin clamp permute reshape size permute tensordot clamp squeeze mean stack permute max stack size permute reshape random_ccm safe_invert_gains mosaic random_gains clamp apply_ccm inverse gamma_expansion inverse_smoothstep random_ccm safe_invert_gains random_gains clamp apply_ccm inverse gamma_expansion inverse_smoothstep safe_invert_gains clamp apply_ccm gamma_expansion inverse_smoothstep exp line sample Normal uniform_ log FloatTensor from_numpy uniform power type sample Normal permute normal Generator permute manual_seed DEFAULT_MAPPING_TAG add_constructor add_representer makedirs mkdir isinstance format info getLogger print rename get_timestamp exists makedirs seed manual_seed_all manual_seed join setFormatter 
format getLogger addHandler StreamHandler Formatter get_timestamp setLevel FileHandler transpose clamp_ round numpy dim len clamp_ numpy transpose round zeros demosaicing_CFA_Bayer_Malvar2004 clip demosaicing_CFA_Bayer_Malvar2004 clip shape matrix I reshape imwrite view size conv2d pad unsqueeze cpu Variable print foo unsqueeze append float sum cuda range single_forward flip
# Joint Denoising and Demosaicking with Green Channel Prior for Real-world Burst Images Implementation of our GCP-Net. arXiv: https://arxiv.org/abs/2101.09870 IEEE final version: https://ieeexplore.ieee.org/document/9503334 ## Testing #### Pretrained model: * stored in [gcpnet_model/600000_G.pth](https://github.com/GuoShi28/GCP-Net/blob/main/experiments/gcpnet_model/600000_G.pth) #### Testing Vid4 and REDS4: * set data_mode in test.py to 'REDS4' or 'Vid4'; the default noise level is set to the 'high noise level' mentioned in the paper. ```
389
GuoxiaWang/DOOBNet
['boundary detection']
['DOOBNet: Deep Object Occlusion Boundary Detection from an Image']
examples/doobnet/doobnet_demo.py python/caffe/io.py python/caffe/test/test_nccl.py examples/doobnet/PIOD/deploy_doobnet_piod.py python/train.py python/caffe/test/test_python_layer.py scripts/download_model_binary.py python/caffe/net_spec.py python/caffe/coord_map.py python/caffe/test/test_net.py tools/extra/resize_and_crop_images.py python/draw_net.py python/caffe/test/test_draw.py python/caffe/test/test_net_spec.py src/caffe/test/test_data/generate_sample_data.py python/caffe/draw.py python/caffe/pycaffe.py tools/extra/extract_seconds.py scripts/cpp_lint.py python/classify.py python/caffe/test/test_solver.py examples/doobnet/BSDSownership/deploy_doobnet_bsdsownership.py python/caffe/classifier.py python/caffe/test/test_io.py python/caffe/test/test_python_layer_with_param_str.py tools/extra/parse_log.py scripts/split_caffe_proto.py doobscripts/doobnet_mat2hdf5_edge_ori.py python/caffe/__init__.py python/caffe/test/test_layer_type_list.py scripts/copy_notebook.py python/caffe/detector.py python/detect.py python/caffe/test/test_coord_map.py tools/extra/summarize.py gt_flip cal_height_width img_rescale gt_rescale gt_rotate BSDS__augmentation PIOD_augmentation main gt_scale parse_args main main main parse_args train solve time Classifier coord_map UndefinedMapException conv_params coord_map_from_to AxisMismatchException inverse crop_params compose crop Detector get_edge_label draw_net get_layer_lr_mult get_layer_label get_pooling_types_dict choose_color_by_layertype get_pydot_graph draw_net_to_file Transformer blobproto_to_array datum_to_array array_to_blobproto array_to_datum resize_image arraylist_to_blobprotovector_str blobprotovector_str_to_arraylist load_image oversample Layers Function Parameters Top NetSpec assign_proto param_name_dict to_proto _Net_blobs _Net_forward_all _Net_set_input_arrays _Net_backward _Net_params _Net_forward _Net_outputs _Net_forward_backward_all _Net_blob_loss_weights _Net_batch _Net_get_id_name _Net_inputs _Net_layer_dict TestCoordMap coord_net_spec getFilenames TestDraw TestBlobProtoToArray TestArrayToDatum TestLayerTypeList TestNCCL TestLevels TestStages simple_net_file TestNet TestAllInOne lenet TestNetSpec silent_net anon_lenet exception_net_file parameter_net_file SimpleLayer phase_net_file TestPythonLayer ParameterLayer PhaseLayer python_net_file ExceptionLayer SimpleParamLayer TestLayerWithParam python_param_net_file TestSolver ParseNolintSuppressions CheckVlogArguments CheckSectionSpacing FindNextMultiLineCommentEnd ReplaceAll CheckForFunctionLengths _SetOutputFormat _IsTestFilename _VerboseLevel CheckBraces RemoveMultiLineComments ResetNolintSuppressions CheckForNonStandardConstructs _SetVerboseLevel PrintUsage _NestingState CheckIncludeLine CheckAccess _CppLintState Search CheckInvalidIncrement RemoveMultiLineCommentsFromRange CleansedLines CheckForBadCharacters UpdateIncludeState FindPreviousMatchingAngleBracket CheckEmptyBlockBody FindNextMultiLineCommentStart Match _NamespaceInfo CheckMakePairUsesDeduction CheckCheck IsBlankLine _SetFilters ProcessLine _FunctionState CheckPosixThreading GetLineWidth GetHeaderGuardCPPVariable IsCppString _IncludeState CheckSpacing _ClassInfo CheckForCopyright IsErrorSuppressedByNolint ProcessFileData CheckForMultilineCommentsAndStrings CloseExpression _PreprocessorInfo _OutputFormat CheckForIncludeWhatYouUse CheckSpacingForFunctionCall FindEndOfExpressionInLine FindNextMatchingAngleBracket _SetCountingStyle ProcessFile _IncludeError CleanseRawStrings CheckAltTokens CheckForNewlineAtEOF ParseArguments 
CheckForNonConstReference PrintCategories _Filters main FilesBelongToSameModule CheckCStyleCast FileInfo _BlockInfo CheckForHeaderGuard CheckCaffeDataLayerSetUp ReverseCloseExpression CleanseComments _DropCommonSuffixes _ClassifyInclude CheckStyle CheckCaffeAlternatives FindStartOfExpressionInLine _ShouldPrintError CheckComment Error _GetTextInside CheckLanguage CheckCaffeRandom GetPreviousNonBlankLine reporthook parse_readme_frontmatter model_checks_out valid_dirname get_start_time extract_seconds extract_datetime_from_line get_log_created_year write_csv parse_log fix_initial_nan_learning_rate save_csv_files main parse_args parse_line_for_net_output ResizeCropImagesMapper PILResizeCrop OpenCVResizeCrop print_table printed_len summarize_net main read_net format_param save open FLIP_LEFT_RIGHT transpose shape range format gt_rescale astype copy join gt_flip img_rescale print float32 unravel_index zeros loadmat len fliplr radians pi shape zeros rot90 shape zeros zoom int float size cal_height_width BILINEAR resize shape cal_height_width zeros resize ROTATE_90 join FLIP_LEFT_RIGHT format gt_flip transpose gt_rotate copy shape save zeros loadmat range open add_argument exit ArgumentParser print_help join format print glob float exit BSDS__augmentation img_dir mkdir output_dir label_dir PIOD_augmentation parse_args dataset abspath append bsdsownership_testfg model_def endswith ArgumentParser save mean_file channel_swap output_file dirname expanduser input_file predict Classifier set_mode_cpu load time isdir add_argument set_mode_gpu pretrained_model gpu len DataFrame Detector to_hdf detect_selective_search mean set_index to_csv detect_windows read_csv display_lrm read NetParameter output_image_file rankdir Merge TRAIN draw_net_to_file TEST Process str join init_log start append new_uid range log len before_backward layers display add_callback after_backward after_forward Timer append before_forward range len max_iter restore time set_solver_count set_solver_rank add_callback set_device set_multiprocess SGDSolver after_backward set_mode_gpu layer_wise_reduce step bcast NCCL len get params array get params array crop_params conv_params pop collect_bottoms add fn coord_map compose coord_map_from_to items list DESCRIPTOR batch_size str num_output getattr join get_layer_lr_mult name kernel_size stride get_pooling_types_dict pad any append type add_edge get_edge_label list Dot exclude get_layer_label add_node values choose_color_by_layertype Edge Node bottom append type layer include top data array diff shape BlobProto extend flat extend BlobProtoVector ParseFromString BlobProtoVector extend tostring shape Datum flat data len astype float32 tile zoom tuple resize fill empty array concatenate shape tile empty array LayerParameter list NetParameter _to_proto extend Counter OrderedDict values iteritems hasattr isinstance extend add getattr setattr list OrderedDict _blobs _blob_names zip list _blob_loss_weights OrderedDict _blob_names zip _layer_names list layers OrderedDict zip OrderedDict list keys list keys iteritems layers index set outputs _forward len iteritems _backward layers inputs index set len iteritems asarray extend copy next _batch itervalues forward len iteritems asarray backward extend copy next _batch itervalues zip_longest zip forward len ascontiguousarray concatenate itervalues zeros next range len data Pooling pool Convolution NetSpec Deconvolution conv Input join walk dirname abspath NamedTemporaryFile str close write data Pooling pool1 conv2 pool2 ip1 relu1 SoftmaxWithLoss 
Convolution NetSpec DummyData ip2 ReLU InnerProduct label conv1 Pooling SoftmaxWithLoss Convolution DummyData ReLU InnerProduct data NetSpec DummyData Silence data2 error search add group clear compile compile compile SetOutputFormat SetCountingStyle SetFilters _Filters startswith IsErrorSuppressedByNolint _ShouldPrintError write IncrementErrorCount replace append Match group find startswith endswith range error FindNextMultiLineCommentEnd RemoveMultiLineCommentsFromRange FindNextMultiLineCommentStart rstrip find range len FindEndOfExpressionInLine range len FindStartOfExpressionInLine error min search I range len FileInfo RepositoryName sep sub ParseNolintSuppressions error startswith split GetHeaderGuardCPPVariable enumerate error enumerate error len error replace count error find error find error find error find error Search error match InnermostClass replace error escape Match Search error group Search Check error lines Count End group Begin NumLines Match raw_lines range Search error match group error Match group pop group append Search pop group append Search elided replace CheckSpacingForFunctionCall rfind error len group min CloseExpression NumLines sub find CheckComment Match range Search lines_without_raw_strings error group starting_linenum Match range Search error rfind len group ReverseCloseExpression Search Match CloseExpression find error Match CloseExpression find elided error strip group FindEndOfExpressionInLine find Match range CloseExpression len error Match finditer normalize isinstance PY2 GetLineWidth int InnermostClass CheckCheck error CheckAltTokens CheckBraces CheckSpacing CheckSectionSpacing CheckEmptyBlockBody CheckAccess GetHeaderGuardCPPVariable lines_without_raw_strings _DropCommonSuffixes RepositoryName match split CheckNextIncludeOrder CanonicalizeAlphabeticalOrder FileInfo error search group SetLastHeader match _ClassifyInclude Match pop end search set itervalues append M rstrip replace CheckCStyleCast error _GetTextInside CheckIncludeLine search group lstrip startswith Match ResetSection Search split rfind error group ReverseCloseExpression lstrip findall Match range Search ReplaceAll error Match Search endswith replace setdefault group search CleanseComments open list FilesBelongToSameModule error search copy sub NumLines FullName keys range error search CheckPosixThreading ParseNolintSuppressions CheckVlogArguments CheckMakePairUsesDeduction CheckCaffeDataLayerSetUp CheckLanguage CheckInvalidIncrement CheckCaffeRandom CheckForNonConstReference check_fn Update CheckForNonStandardConstructs CheckStyle raw_lines CheckForMultilineCommentsAndStrings CheckCaffeAlternatives CheckForFunctionLengths CleansedLines _NestingState CheckForBadCharacters CheckForNewlineAtEOF _IncludeState RemoveMultiLineComments CheckForCopyright ResetNolintSuppressions CheckForHeaderGuard NumLines CheckCompletedBlocks CheckForIncludeWhatYouUse range ProcessLine _FunctionState Error rstrip endswith len write ProcessFileData _SetVerboseLevel range split write exit join write exit _VerboseLevel int getopt _SetOutputFormat set _SetVerboseLevel PrintCategories _SetFilters _OutputFormat PrintUsage _SetCountingStyle split getreader ParseArguments ResetErrorCounts stderr verbose_level PrintErrorCounts StreamReaderWriter ProcessFile getwriter PY2 int time write flush load join index int rfind datetime split getctime year strip extract_datetime_from_line get_start_time total_seconds strip write get_log_created_year close extract_datetime_from_line open float get_log_created_year compile 
fix_initial_nan_learning_rate search group OrderedDict append float join basename write_csv print excel parse_log save_csv_files logfile_path NetParameter decay_mult format name lr_mult append print zip len get join str format convolution_param list setdefault param kernel_size map set top bottom append type module layer enumerate print_table filename summarize_net read_net
# DOOBNet: Deep Object Occlusion Boundary Detection from an Image ([arXiv](https://arxiv.org/abs/1806.03772)), accepted by ACCV 2018 [Oral]. Created by Guoxia Wang. ### Introduction Object occlusion boundary detection is a fundamental and crucial research problem in computer vision. It is challenging to solve because of the extreme boundary/non-boundary class imbalance encountered when training an object occlusion boundary detector. In this paper, we propose to address this class imbalance by up-weighting the loss contribution of false negative and false positive examples with our novel Attention Loss function. We also propose a unified end-to-end multi-task deep object occlusion boundary detection network (DOOBNet) that shares convolutional features to simultaneously predict object boundaries and occlusion orientation. DOOBNet adopts an encoder-decoder structure with skip connections in order to automatically learn multi-scale and multi-level features. We significantly surpass the state of the art on the PIOD dataset (ODS F-score of .702) and the BSDS ownership dataset (ODS F-score of .555), while improving detection speed to 0.037 s per image on the PIOD dataset. ### Citation If you find DOOBNet useful in your research, please consider citing: ``` @article{wang2018doobnet, Title = {DOOBNet: Deep Object Occlusion Boundary Detection from an Image}, Author = {Guoxia Wang and XiaoChuan Wang and Frederick W. B. Li and Xiaohui Liang},
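The key idea of up-weighting hard false negatives and false positives in a heavily imbalanced boundary map can be sketched with a class-balanced, focal-style loss. This is only an illustration of that weighting idea, not DOOBNet's exact Attention Loss formula, whose precise form is given in the paper.

```python
# Illustrative class-balanced, focal-style boundary loss (NOT the paper's
# exact Attention Loss): rare boundary pixels and hard mistakes are up-weighted.
import torch
import torch.nn.functional as F

def boundary_loss(logits, targets, gamma=2.0):
    # targets: 1.0 for boundary pixels, 0.0 for non-boundary pixels
    prob = torch.sigmoid(logits)
    pt = torch.where(targets > 0.5, prob, 1.0 - prob)       # prob of the true class
    # class balancing: boundaries are rare, so weight them by the
    # fraction of non-boundary pixels (and vice versa)
    beta = (targets < 0.5).float().mean()
    alpha = torch.where(targets > 0.5, beta, 1.0 - beta)
    focal = (1.0 - pt) ** gamma                              # emphasize hard examples
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    return (alpha * focal * bce).mean()

logits = torch.randn(1, 1, 64, 64, requires_grad=True)
targets = (torch.rand(1, 1, 64, 64) > 0.97).float()          # sparse boundary map
boundary_loss(logits, targets).backward()
```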
390
GuyTevet/SeqGAN-eval
['text generation']
['SeqGAN: Sequence Generative Adversarial Nets with Policy Gradient', 'Evaluating Text GANs as Language Models']
rollout.py discriminator.py sequence_gan.py dataloader.py generator.py target_lstm.py language_model_evaluation.py Dis_dataloader_text Gen_Data_loader_text Gen_Data_loader Dis_dataloader linear highway Discriminator Generator language_model_evaluation_direct convergence_experiment restore_param_from_config main language_model_evaluation_by_approximation ROLLOUT generate_samples generate_real_data_samples split_text8 target_loss main pre_train_epoch create_real_data_dict TARGET_LSTM as_list language_model_eval_step norm zeros_like average array zeros next_batch reset_pointer range language_model_eval_step reshape average num_batch append next_batch range reset_pointer clip language_model_eval_step clip reshape tqdm average num_batch append zeros next_batch reset_pointer range NewCheckpointReader Dis_dataloader Gen_Data_loader Saver create_batches save reset_default_graph Session seed sorted list restore global_variables Generator split_text8 create_real_data_dict generate_real_data_samples test upper restore_param_from_config zip ConfigProto get_variable_to_shape_map enumerate join int language_model_evaluation_direct print sort Gen_Data_loader_text dict dump_samples epoch_exp zeros language_model_evaluation_by_approximation len int list print extend generate range int list print extend generate range next_batch num_batch append pretrain_loss reset_pointer range run pretrain_step num_batch append next_batch reset_pointer range print int str list replace Counter split append most_common keys len trainable_variables batch_size dataset_path num_batch gen_num_recurrent_layers Dis_dataloader_text TARGET_LSTM target_loss ROLLOUT open run str basename dis_emb_dim dis_pretrain_epoch_num gen_pretrain_epoch_num gen_hidden_dim Discriminator generate experiment_name pre_train_epoch range format gen_dropout_keep_prob load_train_data gen_learning_rate close get_reward update_params base_token gen_emb_dim num_epochs load generate_samples train_op g_updates write seq_len global_variables_initializer next_batch reset_pointer
# SeqGAN ## Requirements: * **Tensorflow r1.0.1** * Python 2.7 * CUDA 7.5+ (For GPU) ## Introduction Apply Generative Adversarial Nets to generating sequences of discrete tokens. ![](https://github.com/LantaoYu/SeqGAN/blob/master/figures/seqgan.png) The illustration of SeqGAN. Left: D is trained over the real data and the generated data by G. Right: G is trained by policy gradient where the final reward signal is provided by D and is passed back to the intermediate action value via Monte Carlo search. The research paper [SeqGAN: Sequence Generative Adversarial Nets with Policy Gradient](http://arxiv.org/abs/1609.05473) has been accepted at the Thirty-First AAAI Conference on Artificial Intelligence (AAAI-17).
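The policy-gradient step described above can be summarized in a few lines. The snippet below is a minimal REINFORCE-style sketch: per-step rewards are assumed to come from the discriminator via Monte Carlo rollouts. The function and argument names are illustrative assumptions, not this repository's TensorFlow API.

```python
# Minimal REINFORCE-style sketch of the SeqGAN generator objective:
# the generator loss is the negative expected reward, where the reward for
# each partial sequence is the discriminator score averaged over Monte Carlo
# rollouts completed from that time step.
# Names (policy_log_probs, rollout_rewards) are illustrative, not the repo's API.
import numpy as np

def generator_pg_loss(policy_log_probs, rollout_rewards):
    """
    policy_log_probs: (batch, seq_len) log-probabilities of the sampled tokens.
    rollout_rewards:  (batch, seq_len) discriminator scores averaged over
                      Monte Carlo rollouts from each time step.
    Returns the scalar policy-gradient loss to minimize.
    """
    # REINFORCE: maximize sum_t reward_t * log pi(token_t | prefix_t),
    # i.e. minimize its negative, averaged over the batch.
    return -np.mean(np.sum(rollout_rewards * policy_log_probs, axis=1))
```

Minimizing this loss pushes the generator toward token choices that the discriminator rewards, which is the intuition behind the adversarial training loop illustrated above.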
391
GuyTevet/diversity-eval
['text generation']
['Evaluating the Evaluation of Diversity in Natural Language Generation']
metric.py similarity_metrics.py metrics_test.py run_metrics.py diversity_metrics.py run_experiments.py con_test.py utils.py dec_test.py ConTest DecTest print_metric CosineSimilarity2Diversity BertScore BertSts AveragedCosineSimilarity AveragedDistinctNgrams SentBert DistinctNgrams DiversityMetric Similarity2DiversityMetric Metric SimilarityMetric Similarity2DiversityFromFileMetric AveragedNgramDiversityMetric MetricsTest run_experiment calc_metrics CosineSimilarity optimal_classification_accuracy stringify_keys dict_print download_and_place_data lines_to_ngrams represents_int CamleCase2snake_case parse_path_list print format metric __name__ update items format print makedirs update items getmembers DictReader deepcopy print writerow dict_print makedirs close fieldnames DictWriter writeheader ignore_cache dirname open enumerate split str items isinstance print append join walk isdir int append keys isinstance get remove format print append argmax sum len
# Evaluating the Evaluation of Diversity in Natural Language Generation [arxiv.org/abs/2004.02990](http://arxiv.org/abs/2004.02990) Our code and data are a platform for evaluating NLG diversity *metrics*. ## What's released? #### Data - Data used for our experiments - McDiv dataset #### Code - Running the metrics used in the paper (and easily add your own) - Running all of our experiments (and easily add your own)
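Among the metrics listed above, DistinctNgrams / AveragedDistinctNgrams are based on distinct n-gram counts. The sketch below shows the generic distinct-n idea under an assumed whitespace tokenization; it is an illustration only, not this repository's implementation.

```python
# Generic distinct-n sketch: fraction of unique n-grams among all n-grams in a
# response set. Whitespace tokenization is an assumption for illustration only.
def distinct_n(responses, n=2):
    ngrams = []
    for text in responses:
        tokens = text.split()
        ngrams.extend(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))
    return len(set(ngrams)) / len(ngrams) if ngrams else 0.0

# Example: higher values indicate a more lexically diverse response set.
print(distinct_n(["the cat sat", "the cat ran", "a dog barked loudly"], n=2))
```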
392
GuyTevet/rnn-gan-eval
['text generation']
['Language Generation with Recurrent Generative Adversarial Networks without Pre-training']
discover_N_eval.py curriculum_training.py model_and_data_serialization.py plot_N_graph.py our_evaluate.py lm_evaluation.py generate.py single_length_train.py config.py objective.py summaries.py language_helpers.py evaluate.py model.py create_logs_dir RestoreConfig get_internal_checkpoint_dir evaluate replace_trash generate_samples inf_train_gen decode_indices_to_string generate_argmax_samples_and_gt_samples tokenize_string split_text8 load_dataset_text8 load_dataset NgramLanguageModel get_internal_checkpoint_dir evaluate get_last_seq restore_param_from_config get_models_list get_discriminator get_generator create_initial_states Discriminator_GRU get_noise params_with_name get_train_op Generator_GRU_CL_VL_TH make_noise get_inference_op rnn_step_prediction get_internal_checkpoint_dir load_picklized save_picklized load_dataset load_dataset_from_pkl optimistic_restore loss_d_g get_optimization_ops get_substrings_from_gt define_objective get_internal_checkpoint_dir evaluate run log_run_settings percentage_startswith get_grams_cached percentage_real log_samples get_grams define_summaries makedirs get_internal_checkpoint_dir define_objective set_restore_dir Session run ylabel placeholder title load_dataset append initialize_all_variables optimistic_restore range plot latest_checkpoint mean norm Variable print xlabel int32 figure zeros array join get_restore_dir makedirs printable set format print tuple shuffle Counter zfill append most_common range len join format print tuple shuffle Counter split_text8 append most_common len print join int generate_samples range extend argmax squeeze decode_indices_to_string run append tuple range len range shuffle len save reset_default_graph DISC_STATE_SIZE short_run get_models_list GEN_STATE_SIZE log_samples restore_param_from_config zip time reshape num_samples seq_len generate_argmax_samples_and_gt_samples len append GEN_GRU_LAYERS range reshape concat matmul gather random_uniform append LIMIT_BATCH range rnn_step_prediction reshape softmax matmul one_hot concat matmul range append expand_dims argmax rnn_step_prediction load_picklized basename mkdir load_dataset_text8 startswith save_picklized load_dataset_from_pkl sorted NewCheckpointReader list restore global_variables print dict Saver zip get_variable_to_shape_map print params_with_name minimize reshape concat gather random_uniform append LIMIT_BATCH range get_discriminator get_generator DISCRIMINATOR_MODEL one_hot Generator get_substrings_from_gt loss_d_g Discriminator GENERATOR_MODEL len square reduce_sum sqrt reduce_mean random_uniform trainable_variables define_objective Variable ConfigProto placeholder Saver int32 load_dataset get_optimization_ops define_summaries merge_all FileWriter scalar sorted list close zip close get_grams exists save_picklized PICKLE_PATH get dict split range len startswith
# Language Generation with Recurrent Generative Adversarial Networks without Pre-training Code for training and evaluation of the model from ["Language Generation with Recurrent Generative Adversarial Networks without Pre-training"](https://arxiv.org/abs/1706.01399). A short summary of the paper is available [here](http://www.shortscience.org/paper?bibtexKey=journals/corr/PressBBBW17#ofirpress). ### Sample outputs (32 chars) ``` " There has been to be a place w On Friday , the stories in Kapac From should be taken to make it He is conference for the first t
393
Guzpenha/slice_based_learning
['autonomous driving']
['Slice-based Learning: A Programming Model for Residual Learning in Critical Data Slices']
snorkel/snorkel/test/classification/test_classifier_convergence.py transformers/transformers/tokenization_xlnet.py snorkel/snorkel/snorkel/augmentation/apply/core.py transformers/docs/source/conf.py snorkel/snorkel/test/labeling/test_analysis.py snorkel/snorkel/snorkel/contrib/__init__.py transformers/transformers/modeling_gpt2.py transformers/transformers/tests/modeling_bert_test.py transformers/examples/run_squad.py snorkel/snorkel/test/analysis/test_metrics.py snorkel/snorkel/snorkel/classification/multitask_classifier.py transformers/transformers/tests/modeling_xlnet_test.py snorkel/snorkel/snorkel/labeling/lf/nlp.py snorkel/snorkel/snorkel/augmentation/__init__.py transformers/transformers/data/processors/glue.py snorkel/snorkel/snorkel/slicing/sf/__init__.py snorkel/snorkel/test/labeling/test_utils.py snorkel/snorkel/snorkel/augmentation/tf.py snorkel/snorkel/test/classification/training/loggers/test_checkpointer.py transformers/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py transformers/transformers/tests/modeling_roberta_test.py snorkel/snorkel/test/map/test_spark.py snorkel/snorkel/snorkel/classification/task.py transformers/examples/distillation/scripts/token_counts.py transformers/transformers/modeling_tf_utils.py snorkel/snorkel/snorkel/version.py transformers/transformers/data/processors/__init__.py snorkel/snorkel/snorkel/augmentation/apply/pandas.py transformers/transformers/modeling_bert.py snorkel/snorkel/test/slicing/test_sliceaware_classifier.py snorkel/snorkel/snorkel/labeling/model/logger.py transformers/transformers/tests/modeling_tf_roberta_test.py snorkel/snorkel/test/classification/training/schedulers/test_schedulers.py snorkel/snorkel/snorkel/classification/training/loggers/log_writer.py transformers/transformers/tests/tokenization_gpt2_test.py snorkel/snorkel/test/utils/test_config_utils.py snorkel/snorkel/snorkel/classification/utils.py snorkel/snorkel/snorkel/slicing/apply/dask.py snorkel/snorkel/test/classification/test_multitask_classifier.py snorkel/snorkel/test/slicing/sf/test_nlp.py transformers/transformers/data/processors/utils.py ir_slices/setup.py snorkel/snorkel/snorkel/map/__init__.py transformers/transformers/convert_bert_pytorch_checkpoint_to_original_tf.py transformers/transformers/tests/optimization_test.py transformers/transformers/configuration_distilbert.py transformers/transformers/tests/modeling_distilbert_test.py ir_slices/ir_slices/get_query_topic.py snorkel/snorkel/snorkel/__init__.py transformers/transformers/tests/tokenization_tests_commons.py snorkel/snorkel/test/labeling/apply/test_spark.py transformers/transformers/modeling_tf_xlnet.py snorkel/snorkel/test/augmentation/policy/test_sampling.py transformers/transformers/configuration_openai.py transformers/transformers/modeling_ctrl.py snorkel/snorkel/snorkel/utils/config_utils.py snorkel/snorkel/docs/conf.py transformers/examples/run_generation.py transformers/transformers/modeling_tf_pytorch_utils.py transformers/transformers/configuration_ctrl.py transformers/setup.py snorkel/snorkel/snorkel/analysis/metrics.py snorkel/snorkel/snorkel/types/classifier.py snorkel/snorkel/snorkel/preprocess/spark.py transformers/transformers/optimization.py transformers/transformers/tests/configuration_common_test.py snorkel/snorkel/snorkel/labeling/apply/dask.py snorkel/snorkel/snorkel/labeling/model/baselines.py transformers/transformers/configuration_transfo_xl.py snorkel/snorkel/snorkel/map/spark.py snorkel/snorkel/snorkel/classification/__init__.py 
snorkel/snorkel/test/labeling/model/test_logger.py transformers/examples/run_lm_finetuning.py transformers/transformers/modeling_utils.py snorkel/snorkel/snorkel/preprocess/nlp.py transformers/transformers/modeling_xlm.py transformers/transformers/configuration_gpt2.py snorkel/snorkel/test/labeling/test_convergence.py transformers/transformers/modeling_tf_ctrl.py transformers/examples/distillation/grouped_batch_sampler.py snorkel/snorkel/test/classification/test_utils.py snorkel/snorkel/snorkel/slicing/sf/core.py snorkel/snorkel/snorkel/classification/training/loggers/checkpointer.py snorkel/snorkel/snorkel/slicing/apply/spark.py transformers/examples/distillation/distiller.py transformers/transformers/modeling_tf_distilbert.py transformers/transformers/modeling_tf_transfo_xl.py snorkel/snorkel/snorkel/classification/training/loggers/__init__.py snorkel/snorkel/snorkel/slicing/modules/slice_combiner.py transformers/transformers/tests/tokenization_distilbert_test.py transformers/transformers/tests/tokenization_transfo_xl_test.py transformers/transformers/tests/modeling_gpt2_test.py transformers/transformers/tests/modeling_tf_common_test.py snorkel/snorkel/snorkel/labeling/__init__.py transformers/transformers/tokenization_auto.py snorkel/snorkel/test/classification/training/test_trainer.py transformers/transformers/modeling_tf_transfo_xl_utilities.py snorkel/snorkel/test/slicing/test_convergence.py transformers/transformers/tests/modeling_transfo_xl_test.py snorkel/snorkel/snorkel/labeling/lf/nlp_spark.py transformers/transformers/tokenization_xlm.py transformers/transformers/tokenization_transfo_xl.py snorkel/snorkel/test/labeling/apply/lf_applier_spark_test_script.py ir_slices/ir_slices/__init__.py transformers/examples/distillation/lm_seqs_dataset.py snorkel/snorkel/test/labeling/lf/test_core.py snorkel/snorkel/test/labeling/apply/test_lf_applier.py snorkel/snorkel/snorkel/labeling/apply/core.py transformers/examples/test_examples.py transformers/transformers/tests/tokenization_xlnet_test.py transformers/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py transformers/transformers/tokenization_bert.py snorkel/snorkel/test/classification/training/loggers/test_log_manager.py snorkel/snorkel/test/labeling/model/test_label_model.py snorkel/snorkel/snorkel/labeling/utils.py transformers/transformers/modeling_distilbert.py snorkel/snorkel/test/labeling/preprocess/test_nlp.py transformers/transformers/tests/modeling_tf_xlm_test.py transformers/examples/utils_squad_evaluate.py snorkel/snorkel/snorkel/labeling/apply/spark.py snorkel/snorkel/snorkel/labeling/lf/core.py snorkel/snorkel/snorkel/classification/data.py snorkel/snorkel/snorkel/utils/core.py snorkel/snorkel/snorkel/utils/optimizers.py transformers/transformers/configuration_auto.py transformers/transformers/tests/tokenization_openai_test.py transformers/transformers/__init__.py transformers/transformers/tests/modeling_tf_bert_test.py transformers/examples/contrib/run_openai_gpt.py transformers/transformers/file_utils.py transformers/transformers/modeling_xlnet.py snorkel/snorkel/snorkel/labeling/analysis.py snorkel/snorkel/test/slicing/test_monitor.py transformers/transformers/tests/modeling_tf_openai_gpt_test.py transformers/transformers/tests/modeling_auto_test.py ir_slices/ir_slices/data_processors.py snorkel/snorkel/snorkel/analysis/error_analysis.py snorkel/snorkel/snorkel/slicing/monitor.py snorkel/snorkel/scripts/check_requirements.py transformers/transformers/configuration_utils.py 
snorkel/snorkel/test/analysis/test_scorer.py snorkel/snorkel/test/synthetic/test_synthetic_data.py transformers/transformers/configuration_roberta.py transformers/transformers/configuration_xlnet.py snorkel/snorkel/snorkel/labeling/model/label_model.py transformers/examples/utils_squad.py transformers/examples/contrib/run_swag.py transformers/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py transformers/examples/distillation/scripts/binarized_data.py transformers/transformers/tests/tokenization_ctrl_test.py transformers/transformers/tests/tokenization_utils_test.py transformers/transformers/tokenization_utils.py snorkel/snorkel/test/utils/test_data_operators.py snorkel/snorkel/snorkel/types/data.py snorkel/snorkel/test/labeling/lf/test_nlp.py transformers/transformers/modeling_transfo_xl.py transformers/transformers/tests/modeling_tf_xlnet_test.py transformers/transformers/tests/modeling_xlm_test.py snorkel/snorkel/test/augmentation/apply/test_tf_applier.py transformers/examples/run_glue.py transformers/transformers/tests/tokenization_xlm_test.py transformers/transformers/tests/modeling_ctrl_test.py transformers/examples/distillation/run_squad_w_distillation.py snorkel/snorkel/test/slicing/sf/test_core.py snorkel/snorkel/snorkel/analysis/scorer.py snorkel/snorkel/test/slicing/test_utils.py snorkel/snorkel/snorkel/augmentation/policy/core.py snorkel/snorkel/snorkel/classification/training/loggers/tensorboard_writer.py ir_slices/ir_slices/get_sensitivity_rsfs_results.py transformers/transformers/tokenization_roberta.py snorkel/snorkel/snorkel/preprocess/__init__.py transformers/transformers/data/__init__.py snorkel/snorkel/snorkel/augmentation/policy/sampling.py snorkel/snorkel/test/labeling/model/test_baseline.py transformers/transformers/tests/modeling_tf_auto_test.py transformers/examples/contrib/run_transfo_xl.py snorkel/snorkel/snorkel/labeling/apply/pandas.py transformers/hubconf.py snorkel/snorkel/snorkel/types/__init__.py transformers/transformers/tests/conftest.py snorkel/snorkel/test/labeling/lf/test_nlp_spark.py snorkel/snorkel/snorkel/utils/data_operators.py snorkel/snorkel/test/classification/test_data.py transformers/transformers/tests/modeling_tf_distilbert_test.py transformers/transformers/modeling_auto.py transformers/transformers/modeling_tf_bert.py transformers/transformers/modeling_openai.py transformers/examples/distillation/utils.py transformers/transformers/tests/modeling_common_test.py transformers/transformers/__main__.py snorkel/snorkel/snorkel/analysis/__init__.py ir_slices/ir_slices/test_slices.py snorkel/snorkel/snorkel/classification/training/schedulers/__init__.py snorkel/snorkel/test/slicing/apply/test_sf_applier.py snorkel/snorkel/snorkel/slicing/sf/nlp.py transformers/examples/run_bertology.py transformers/examples/utils_multiple_choice.py snorkel/snorkel/snorkel/preprocess/core.py transformers/transformers/tests/modeling_openai_test.py transformers/transformers/tests/tokenization_roberta_test.py transformers/transformers/tokenization_openai.py transformers/transformers/tests/tokenization_auto_test.py transformers/transformers/modeling_tf_gpt2.py transformers/transformers/configuration_xlm.py transformers/examples/distillation/scripts/extract_distilbert.py snorkel/snorkel/test/slicing/test_slice_combiner.py snorkel/snorkel/test/classification/test_loss.py transformers/examples/distillation/scripts/extract.py snorkel/snorkel/scripts/sync_api_docs.py snorkel/snorkel/snorkel/classification/training/loggers/log_manager.py 
snorkel/snorkel/snorkel/classification/training/schedulers/shuffled_scheduler.py snorkel/snorkel/test/analysis/test_error_analysis.py transformers/transformers/modeling_roberta.py snorkel/snorkel/snorkel/classification/training/schedulers/sequential_scheduler.py ir_slices/ir_slices/slice_functions.py snorkel/snorkel/test/augmentation/policy/test_core.py snorkel/snorkel/test/classification/training/loggers/test_log_writer.py snorkel/snorkel/test/classification/training/loggers/test_tensorboard_writer.py snorkel/snorkel/snorkel/classification/loss.py snorkel/snorkel/snorkel/utils/__init__.py transformers/transformers/tests/modeling_tf_transfo_xl_test.py transformers/transformers/tests/modeling_tf_ctrl_test.py transformers/examples/run_tf_glue.py ir_slices/ir_slices/get_runs_avg_std.py snorkel/snorkel/snorkel/classification/training/trainer.py snorkel/snorkel/snorkel/utils/lr_schedulers.py ir_slices/ir_slices/slices_eval.py transformers/transformers/tokenization_distilbert.py transformers/transformers/modeling_tf_auto.py transformers/transformers/modeling_tf_xlm.py transformers/examples/distillation/train.py transformers/transformers/modeling_transfo_xl_utilities.py transformers/transformers/tests/tokenization_bert_test.py transformers/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py snorkel/snorkel/snorkel/slicing/utils.py transformers/transformers/data/metrics/__init__.py snorkel/snorkel/snorkel/slicing/sliceaware_classifier.py snorkel/snorkel/snorkel/slicing/__init__.py snorkel/snorkel/snorkel/labeling/model/graph_utils.py snorkel/snorkel/snorkel/synthetic/synthetic_data.py snorkel/snorkel/snorkel/map/core.py transformers/transformers/convert_pytorch_checkpoint_to_tf2.py transformers/transformers/tests/modeling_tf_gpt2_test.py snorkel/snorkel/snorkel/labeling/lf/__init__.py snorkel/snorkel/setup.py snorkel/snorkel/test/classification/test_task.py transformers/examples/run_multiple_choice.py transformers/transformers/modeling_tf_roberta.py transformers/transformers/tokenization_gpt2.py snorkel/snorkel/test/map/test_core.py transformers/transformers/modeling_tf_openai.py transformers/transformers/tokenization_ctrl.py snorkel/snorkel/test/utils/test_core.py transformers/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py transformers/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py snorkel/snorkel/snorkel/classification/training/schedulers/scheduler.py snorkel/snorkel/snorkel/slicing/apply/core.py transformers/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py transformers/transformers/configuration_bert.py QuoraProcessor transform_to_q_docs_format CRRProcessor InputExample QAProcessor DataProcessor get_dialogue_domains_and_intent load_dataset_crr main main unpack_qid_per_doc unpack_rel_per_doc confidence_interval main unpack_x_per_doc word_in_query make_word_in_query_sf query_category make_words_match_count_less_than_sf fine_tuned_bert_pred_diff_smaller_than num_turns_bigger_than make_docs_sim_to_rel_bigger_than_sf all_instances docs_sim_to_rel_bigger_than query_wc_bigger_than random_slice_percentage make_query_wc_bigger_than_sf make_num_turns_bigger_than_sf make_fine_tuned_bert_pred_diff_smaller_than_sf make_random_slice_percentage_sf make_query_cat_in_sf words_match_count_less_than main confidence_interval linkcode_resolve setup skip_torch_module_member parse_setup parse_section_name parse_requirements parse_package main main get_package_members get_title_and_underscore get_label_buckets _roc_auc_score _f1_macro_score 
Metric _f1_micro_score metric_score _coverage_score _f1_score Scorer TransformationFunction LambdaTransformationFunction transformation_function TFApplier BaseTFApplier PandasTFApplier ApplyEachPolicy ApplyAllPolicy Policy ApplyOnePolicy MeanFieldPolicy RandomPolicy DictDataLoader DictDataset collate_dicts cross_entropy_with_probs MultitaskClassifier ClassifierConfig Task Operation move_to_device pad_batch list_to_tensor metrics_dict_to_dataframe collect_flow_outputs_by_suffix TrainerConfig Trainer CheckpointerConfig Checkpointer LogManagerConfig LogManager LogWriterConfig LogWriter TensorBoardWriter Scheduler SequentialScheduler ShuffledScheduler LFAnalysis filter_unlabeled_dataframe LFApplier apply_lfs_to_data_point ApplierMetadata _FunctionCaller BaseLFApplier DaskLFApplier PandasParallelLFApplier PandasLFApplier rows_to_triplets apply_lfs_to_data_point SparkLFApplier LabelingFunction labeling_function base_nlp_labeling_function BaseNLPLabelingFunction SpacyPreprocessorParameters NLPLabelingFunction nlp_labeling_function SpacyPreprocessorConfig spark_nlp_labeling_function SparkNLPLabelingFunction BaselineVoter MajorityLabelVoter RandomVoter MajorityClassVoter get_clique_tree TrainConfig LabelModelConfig LabelModel _CliqueData Logger Mapper BaseMapper get_parameters lambda_mapper is_hashable get_hashable LambdaMapper _update_fields make_spark_mapper Preprocessor LambdaPreprocessor preprocessor SpacyPreprocessor slice_dataframe SliceAwareClassifier add_slice_labels convert_to_slice_tasks SFApplier PandasSFApplier DaskSFApplier PandasParallelSFApplier SliceCombinerModule SlicingFunction slicing_function nlp_slicing_function NLPSlicingFunction generate_simple_label_matrix merge_config filter_labels _get_mask probs_to_preds to_int_label_array preds_to_probs _hash check_unique_names ExponentialLRSchedulerConfig LRSchedulerConfig StepLRSchedulerConfig AdamaxOptimizerConfig SGDOptimizerConfig AdamOptimizerConfig OptimizerConfig ErrorAnalysisTest MetricsTest ScorerTest TestTFApplier modify_in_place make_df get_data_dict square square_returns_none TestPandasTFApplier TestPolicy TestSamplingPolicy ClassifierConvergenceTest create_data create_dataloader create_task DatasetTest SoftCrossEntropyTest ClassifierTest create_task create_dataloader TaskTest UtilsTest TrainerTest create_task create_dataloader TestLogManager TestLogManager TestLogWriter TempConfig TestTensorBoardWriter TempConfig SequentialTest TestAnalysis f create_data f LabelingConvergenceTest copy_features get_positive_labeling_function get_negative_labeling_function TestAnalysis g f build_lf_matrix g f_bad TestDaskApplier TestLFApplier f fp square SquareHitTracker h f_np g_np TestPandasApplier g f_bad f fp square TestSparkApplier g returns_none TestLabelingFunction f square TestNLPLabelingFunction has_person_mention combine_text TestNLPLabelingFunction has_person_mention BaselineModelTest LabelModelTest TestLabelModelAdvanced LoggerTest TestSpacyPreprocessor MapperWithPre MapperReturnsNone modify_in_place MapperWithArgs TestMapperCore MapperWithKwargs SquareHitTracker square TestGetHashable SplitWordsMapperDefaultArgs MapperWithPre2 SplitWordsMapper MapperReturnsNone modify_in_place TestMapperCore square SquareHitTracker SplitWordsMapperDefaultArgs SplitWordsMapper g create_data create_task f h create_dataloader SlicingConvergenceTest PandasSlicerTest sf SliceCombinerTest g f create_dataset SliceCombinerTest create_dummy_task UtilsTest f TestSFApplier g f fp square SquareHitTracker TestSlicingFunction has_person_mention 
TestNLPSlicingFunction combine_text TestGenerateSimpleLabelMatrix UtilsTest FooConfig BarConfig UtilsTest DataOperatorsTest modelForSequenceClassification config model tokenizer modelForQuestionAnswering modelWithLMHead setup entropy print_2d_tensor compute_heads_importance main mask_heads prune_heads main set_seed top_k_top_p_filtering sample_sequence run_experiment set_seed evaluate main train load_and_cache_examples TextDataset set_seed evaluate train mask_tokens main _rotate_checkpoints load_and_cache_examples simple_accuracy set_seed evaluate select_field main train load_and_cache_examples set_seed evaluate main to_list train load_and_cache_examples ExamplesTests get_setup_file ArcProcessor InputFeatures RaceProcessor InputExample convert_examples_to_features SwagProcessor DataProcessor _check_is_max_context _compute_softmax write_predictions_extended InputFeatures get_final_text _improve_answer_span _get_best_indexes read_squad_examples convert_examples_to_features SquadExample write_predictions find_best_thresh find_all_best_thresh_v2 apply_no_ans_threshold make_qid_to_has_ans make_eval_dict plot_pr_curve parse_args make_precision_recall_eval find_all_best_thresh compute_exact main merge_eval EVAL_OPTS compute_f1 find_best_thresh_v2 normalize_answer run_precision_recall_analysis histogram_na_prob get_raw_scores get_tokens load_rocstories_dataset main pre_process_datasets accuracy read_swag_examples set_seed evaluate InputFeatures accuracy _truncate_seq_pair SwagExample convert_examples_to_features select_field main train load_and_cache_examples main Distiller GroupedBatchSampler _quantize create_lengths_groups LmSeqsDataset set_seed evaluate main to_list train load_and_cache_examples main freeze_token_type_embeddings sanity_checks freeze_pos_embeddings set_seed init_gpu_params git_log main AutoConfig BertConfig CTRLConfig DistilBertConfig GPT2Config OpenAIGPTConfig RobertaConfig TransfoXLConfig PretrainedConfig XLMConfig XLNetConfig convert_tf_checkpoint_to_pytorch convert_gpt2_checkpoint_to_pytorch convert_openai_checkpoint_to_pytorch convert_all_pt_checkpoints_to_tf convert_pt_checkpoint_to_tf convert_roberta_checkpoint_to_pytorch convert_transfo_xl_checkpoint_to_pytorch convert_xlm_checkpoint_to_pytorch convert_xlnet_checkpoint_to_pytorch is_torch_available cached_path s3_etag http_get s3_request s3_get get_from_cache add_start_docstrings filename_to_url url_to_filename split_s3_path is_tf_available add_end_docstrings AutoModel AutoModelForQuestionAnswering AutoModelWithLMHead AutoModelForSequenceClassification BertPreTrainingHeads BertMTLForSequenceClassification BertForQuestionAnswering gelu_new BertEncoder BertSelfAttention BertForMaskedLM BertOnlyMLMHead BertOnlyNSPHead BertEmbeddings BertOutput BertPredictionHeadTransform BertAttention BertPooler gelu BertPreTrainedModel BertForMultipleChoice BertLayer BertForTokenClassification BertModel BertForNextSentencePrediction BertIntermediate BertForSequenceClassification BertForPreTraining BertForSnorkelSequenceClassification swish BertLMPredictionHead load_tf_weights_in_bert BertSelfOutput point_wise_feed_forward_network scaled_dot_product_attention CTRLLMHeadModel MultiHeadAttention CTRLModel angle_defn positional_encoding CTRLPreTrainedModel EncoderLayer Transformer DistilBertForSequenceClassification create_sinusoidal_embeddings DistilBertForMaskedLM gelu Embeddings FFN DistilBertPreTrainedModel TransformerBlock DistilBertForQuestionAnswering DistilBertModel MultiHeadSelfAttention GPT2LMHeadModel Block GPT2DoubleHeadsModel 
load_tf_weights_in_gpt2 MLP gelu GPT2PreTrainedModel GPT2Model Attention Block OpenAIGPTPreTrainedModel MLP gelu swish OpenAIGPTDoubleHeadsModel OpenAIGPTLMHeadModel Attention OpenAIGPTModel load_tf_weights_in_openai_gpt TFAutoModelForSequenceClassification TFAutoModelForQuestionAnswering TFAutoModel TFAutoModelWithLMHead TFBertModel TFBertAttention TFBertPreTrainedModel gelu_new TFBertForPreTraining TFBertIntermediate TFBertNSPHead TFBertForMaskedLM TFBertSelfAttention TFBertLayer TFBertPredictionHeadTransform TFBertEmbeddings TFBertEncoder TFBertPooler TFBertOutput TFBertForSequenceClassification TFBertForTokenClassification gelu TFBertMainLayer TFBertSelfOutput TFBertForNextSentencePrediction TFBertMLMHead load_bert_pt_weights_in_tf2 swish TFBertForMultipleChoice TFBertForQuestionAnswering TFBertLMPredictionHead point_wise_feed_forward_network scaled_dot_product_attention TFCTRLLMHeadModel TFEncoderLayer TFMultiHeadAttention TFCTRLPreTrainedModel angle_defn TFCTRLModel TFCTRLMainLayer positional_encoding TFCTRLLMHead load_ctrl_pt_weights_in_tf2 TFDistilBertForMaskedLM TFDistilBertPreTrainedModel TFFFN TFDistilBertMainLayer TFDistilBertForQuestionAnswering gelu TFMultiHeadSelfAttention load_distilbert_pt_weights_in_tf2 TFDistilBertModel TFTransformerBlock TFTransformer TFDistilBertForSequenceClassification TFEmbeddings gelu_new TFDistilBertLMHead TFAttention TFGPT2MainLayer TFMLP TFGPT2LMHeadModel TFGPT2Model load_gpt2_pt_weights_in_tf2 gelu TFGPT2PreTrainedModel TFGPT2DoubleHeadsModel TFBlock TFAttention TFMLP gelu swish TFOpenAIGPTModel TFOpenAIGPTMainLayer TFOpenAIGPTPreTrainedModel TFOpenAIGPTLMHeadModel TFOpenAIGPTDoubleHeadsModel load_openai_gpt_pt_weights_in_tf2 TFBlock load_pytorch_model_in_tf2_model load_pytorch_weights_in_tf2_model convert_tf_weight_name_to_pt_weight_name load_pytorch_checkpoint_in_tf2_model load_tf2_checkpoint_in_pytorch_model load_tf2_weights_in_pytorch_model load_tf2_model_in_pytorch_model TFRobertaForMaskedLM TFRobertaLMHead TFRobertaForSequenceClassification TFRobertaClassificationHead TFRobertaPreTrainedModel TFRobertaMainLayer TFRobertaEmbeddings TFRobertaModel load_roberta_pt_weights_in_tf2 TFPositionalEmbedding TFTransfoXLLMHeadModel load_transfo_xl_pt_weights_in_tf2 TFTransfoXLPreTrainedModel TFRelPartialLearnableDecoderLayer TFPositionwiseFF TFAdaptiveEmbedding TFTransfoXLMainLayer TFRelPartialLearnableMultiHeadAttn TFTransfoXLModel TFAdaptiveSoftmaxMask TFSequenceSummary TFPreTrainedModel TFSharedEmbeddings TFConv1D get_initializer shape_list TFXLMModel get_masks create_sinusoidal_embeddings TFMultiHeadAttention gelu load_xlm_pt_weights_in_tf2 TFTransformerFFN TFXLMPreTrainedModel TFXLMForSequenceClassification TFXLMWithLMHeadModel TFXLMForQuestionAnsweringSimple TFXLMPredLayer TFXLMMainLayer TFXLNetLayer TFXLNetRelativeAttention TFXLNetPreTrainedModel gelu swish TFXLNetModel TFXLNetForSequenceClassification TFXLNetForQuestionAnsweringSimple TFXLNetLMHead TFXLNetLMHeadModel load_xlnet_pt_weights_in_tf2 TFXLNetFeedForward TFXLNetMainLayer TransfoXLModel PositionalEmbedding load_tf_weights_in_transfo_xl AdaptiveEmbedding TransfoXLPreTrainedModel RelPartialLearnableDecoderLayer TransfoXLLMHeadModel PositionwiseFF build_tf_to_pytorch_map RelPartialLearnableMultiHeadAttn ProjectedAdaptiveLogSoftmax LogUniformSampler sample_logits prune_layer SQuADHead PoolerAnswerClass SequenceSummary prune_linear_layer prune_conv1d_layer PoolerEndLogits Conv1D PreTrainedModel PoolerStartLogits XLNetRelativeAttention XLNetPreTrainedModel XLNetForQuestionAnswering 
build_tf_xlnet_to_pytorch_map XLNetLMHeadModel XLNetForMultipleChoice gelu swish XLNetForQuestionAnsweringSimple load_tf_weights_in_xlnet XLNetFeedForward XLNetForSequenceClassification XLNetModel XLNetLayer AdamW WarmupCosineSchedule WarmupCosineWithHardRestartsSchedule WarmupLinearSchedule WarmupConstantSchedule ConstantLRSchedule AutoTokenizer BasicTokenizer WordpieceTokenizer load_vocab whitespace_tokenize _is_whitespace _is_control BertTokenizer _is_punctuation get_pairs CTRLTokenizer DistilBertTokenizer bytes_to_unicode get_pairs GPT2Tokenizer get_pairs text_standardize OpenAIGPTTokenizer RobertaTokenizer LMOrderedIterator TransfoXLCorpus TransfoXLTokenizer LMMultiFileIterator get_lm_corpus LMShuffledIterator PreTrainedTokenizer get_pairs replace_unicode_punct remove_non_printing_char romanian_preprocessing XLMTokenizer lowercase_and_remove_accent XLNetTokenizer main main convert_pytorch_checkpoint_to_tf RobertaForSequenceClassification RobertaLMHead RobertaClassificationHead RobertaEmbeddings RobertaForMultipleChoice RobertaForMaskedLM RobertaModel XLMModel XLMWithLMHeadModel XLMPreTrainedModel XLMPredLayer XLMForSequenceClassification get_masks create_sinusoidal_embeddings MultiHeadAttention gelu XLMForQuestionAnswering TransformerFFN XLMForQuestionAnsweringSimple simple_accuracy pearson_and_spearman glue_compute_metrics is_sklearn_available acc_and_f1 _to_list group_by_queries mean_average_precision compute_aps ap QuoraProcessor CRRProcessor MrpcProcessor ColaProcessor MnliMismatchedProcessor QqpProcessor MnliProcessor StsbProcessor QnliProcessor RteProcessor glue_convert_examples_to_features QAProcessorInverted WnliProcessor QAProcessor Sst2Processor InputFeatures DataProcessor InputExample TFCTRLModelTest ConfigTester pytest_addoption pytest_collection_modifyitems AutoModelTest BertModelTest ConfigTester ModelUtilsTest _config_zero_init CommonTestCases ids_tensor CTRLModelTest DistilBertModelTest GPT2ModelTest OpenAIGPTModelTest RobertaModelTest RobertaModelIntegrationTest TFAutoModelTest TFBertModelTest _config_zero_init ids_tensor TFCommonTestCases TFModelUtilsTest TFDistilBertModelTest TFGPT2ModelTest TFOpenAIGPTModelTest TFRobertaModelTest TFRobertaModelIntegrationTest TFTransfoXLModelTest TFXLMModelTest TFXLNetModelTest TransfoXLModelTest XLMModelTest XLNetModelTest ScheduleInitTest unwrap_and_save_reload_schedule OptimizationTest unwrap_schedule AutoTokenizerTest BertTokenizationTest CTRLTokenizationTest DistilBertTokenizationTest GPT2TokenizationTest OpenAIGPTTokenizationTest RobertaTokenizationTest TemporaryDirectory CommonTestCases TransfoXLTokenizationTest TokenizerUtilsTest XLMTokenizationTest XLNetTokenizationTest append sum enumerate join iterrows read_json apply append read_csv drop output_folder print add_argument to_csv merge apply round ArgumentParser parse_args task_name DataFrame drop_duplicates split ppf array len append documents zip append labels query join documents append kendalltau concat reduce pearsonr columns data_dir append fit_transform reset_index replace nan zip get_dev_examples isfile get_test_examples numpy read_csv len flatten TfidfVectorizer sorted fit_transform to tuple labels InputExample device DataLoader eval TensorDataset convert_examples_to_features softmax zip append tensor SequentialSampler documents max replace replace slice_function name tqdm sum replace connect replace split strip search add parse_package DOTALL split items parse_setup dict parse_requirements len append getattr dir startswith get join sorted get_package_members 
format extend import_module get_title_and_underscore __doc__ makedirs list map zip append enumerate difference filter_labels keys set append items isinstance list_to_tensor new_full new_zeros shape range cross_entropy pad_batch stack all new_full int min type_as max enumerate len Tensor isinstance append tuple items split any append f_caller enumerate add_edge add_edges_from chordal_graph_cliques Graph len nodes add_nodes_from intersection add_node enumerate getfullargspec hash vars ndarray is_hashable isinstance update asDict apply append_fields LongTensor clone names list SliceCombinerModule isinstance name in_features extend Operation Task DataParallel ModuleDict out_features append module_pool module Linear rand range choice empty sum diag getattr items isinstance encode reshape choice shape empty abs range any dtype mod not_equal squeeze astype any append items _get_mask squeeze astype num num DataFrame random astype DictDataLoader DictDataset Task ModuleDict Operation long FloatTensor copy_ eye zeros Linear main x2 toarray apply assert_equal addPyFile SparkLFApplier info parallelize SparkContext d asDict Task ModuleDict add_stylesheet add_js_file log join len range info arange model tuple unsqueeze save device output_dir max numel append to sum detach requires_grad_ info view_as enumerate join entropy backward min print_2d_tensor tqdm pow zeros numpy numpy save output_dir max str view tolist numel masking_threshold sum ones_like info float view_as int join clone print_2d_tensor compute_heads_importance masking_amount now dict compute_heads_importance info sum enable_attach from_pretrained DataParallel DistributedDataParallel DataLoader device save output_dir data_subset prune_heads basicConfig list set_seed set_device get_labels device_count to range init_process_group lower info wait_for_attach mask_heads n_gpu model_name_or_path min barrier Subset compute_heads_importance bool load_and_cache_examples local_rank seed manual_seed_all manual_seed cumsum sort size min clone softmax tensor repeat decode prompt str tolist input encode mask_token_id eval max_position_embeddings keys xlm_lang sample_sequence gradient_accumulation_steps sfs get_train_examples model tuple clip_grad_norm_ zero_grad Trainer DataLoader DataParallel DistributedDataParallel max_grad_norm output_dir save max str initialize set_seed SFApplier data_dir apply logging_steps append master_params range SummaryWriter format DictDataset make_slice_dataloader close mean save_pretrained num_train_epochs info fp16 SliceAwareClassifier per_gpu_train_batch_size max_steps enumerate documents int items n_gpu join evaluate backward AdamW add_scalar log_scalar makedirs fit parameters WarmupLinearSchedule isfile step array train_batch_size len score tuple reduce DataLoader save argmax max eval_batch_size SFApplier debug_mode data_dir register_forward_hook per_gpu_eval_batch_size squeeze apply compute_metrics append run_id range predict update format DictDataset make_slice_dataloader eval softmax info zip load_and_cache_examples documents enumerate join n_gpu evaluate_on makedirs get_dev_examples compute_aps isfile get_test_examples numpy array len pop join str format load get_train_examples max_seq_length data_dir barrier get_labels get_dev_examples TensorDataset convert_examples_to_features save info get_test_examples tensor from_pretrained do_train device output_dir len get_labels model_type to run_id lower save_pretrained info task_name _id join evaluate model_name_or_path barrier train load_and_cache_examples makedirs warning 
fp16 FileStorageObserver add_config TextDataset join sorted format save_total_limit glob rmtree match output_dir info append max len mask_token bool convert_tokens_to_ids clone randint shape masked_fill_ tensor mlm_probability full len device to _rotate_checkpoints trange tqdm exp tqdm device output_dir tensor to block_size do_train eval_all_checkpoints setLevel max_len_single_sentence WARN update save_pretrained train evaluate save_vocabulary do_test simple_accuracy lower select_field len update RawResult end_n_top max_answer_length write_predictions do_lower_case n_best_size write_predictions_extended evaluate_on_squad verbose_logging RawResultExtended EVAL_OPTS start_n_top int predict_file version_2_with_negative null_score_diff_threshold unique_id arange size read_squad_examples dirname parse_args add_argument ArgumentParser endings join format replace zip example_id InputFeatures map tqdm question info append contexts encode_plus enumerate len join is_whitespace whitespace_tokenize warning SquadExample append len _DocSpan _improve_answer_span is_impossible orig_answer_text length convert_tokens_to_ids range doc_tokens question_text start tokenize _check_is_max_context namedtuple join tokenize range length start min enumerate strip end_logit _get_best_indexes sorted defaultdict get_final_text _NbestPrediction end_logits OrderedDict append start_logit replace _compute_softmax insert start_logits info enumerate join namedtuple text _PrelimPrediction split end_log_prob cls_logits strip find_all_best_thresh_v2 make_qid_to_has_ans sorted defaultdict get_final_text _NbestPrediction do_lower_case OrderedDict append range _compute_softmax info convert_tokens_to_string enumerate join namedtuple text min _PrelimPrediction get_raw_scores start_log_prob split join _strip_spaces items BasicTokenizer len info tokenize find sorted append range enumerate len append exp add_argument exit ArgumentParser print_help bool Counter get_tokens sum values len print max items float len xlabel ylabel ylim title savefig clf fill_between xlim step sorted plot_pr_curve append float enumerate sum make_precision_recall_eval merge_eval makedirs join ones_like xlabel ylabel title hist savefig clf float len sorted sum enumerate sorted sum enumerate find_best_thresh find_best_thresh_v2 make_eval_dict na_prob_file find_all_best_thresh na_prob_thresh run_precision_recall_analysis histogram_na_prob dumps apply_no_ans_threshold get_raw_scores out_image_dir out_file make_qid_to_has_ans merge_eval argmax tuple len append zeros full enumerate gradient_accumulation_steps do_eval resize_token_embeddings model tuple n_positions zero_grad add_tokens tokenize_and_encode pre_process_datasets max seed save_vocabulary convert_tokens_to_ids TensorDataset to_json_file eval_dataset SequentialSampler state_dict manual_seed_all cached_path lm_coef num_train_epochs manual_seed trange max_steps enumerate int backward AdamW accuracy train_dataset RandomSampler named_parameters model_name load_rocstories_dataset WarmupLinearSchedule step label start_ending swag_id _truncate_seq_pair context_sentence pop len read_swag_examples accuracy batch_size reset_length tgt_len clamp_len mem_len vocab get_iterator same_length ext_len list deepcopy sorted map _quantize format info KLDivLoss alpha_squad temperature log_softmax eval softmax loss_fct alpha_ce teacher_name_or_path mlm freeze_token_type_embds freeze_pos_embs LmSeqsDataset is_master exists values freeze_pos_embeddings Distiller mlm from_numpy freeze_token_type_embds freeze_token_type_embeddings 
student_config init_gpu_params sanity_checks dump_path teacher_name student_pretrained_weights maximum index rmtree empty_cache student_model_class git_log Repo int str multi_gpu n_gpu_per_node init_process_group gethostname set_device global_rank n_nodes is_master multi_node info world_size node_id local_rank time tokenizer_name shuffle str format print BertForPreTraining save load_tf_weights_in_bert from_json_file state_dict format load_tf_weights_in_gpt2 print GPT2Model save from_json_file GPT2Config state_dict format OpenAIGPTConfig print OpenAIGPTModel save load_tf_weights_in_openai_gpt from_json_file state_dict cached_path str from_pretrained format model_class constant print abs save_weights tf_model tensor numpy from_json_file loading_fct amax cached_path list format remove zip print convert_pt_checkpoint_to_tf isfile keys enumerate len from_pretrained num_classes zeros_like allclose print num_hidden_layers bias extract_features eval BertConfig shape save_pretrained item sentence_encoder weight range pop str join format __dict__ load_tf_weights_in_transfo_xl print TransfoXLLMHeadModel save abspath TransfoXLConfig from_json_file state_dict load items format print dict save str join format XLNetForQuestionAnswering print XLNetLMHeadModel XLNetForSequenceClassification load_tf_weights_in_xlnet abspath save from_json_file state_dict encode endswith hexdigest sha256 str join str urlparse exists path netloc urlparse startswith resource split_s3_path Object resource split_s3_path download_fileobj get update write close tqdm iter_content len get str s3_etag decode join list url_to_filename filter startswith listdir head makedirs load_variable join int format info zip transpose fullmatch from_numpy any getattr list_variables abspath append split pow cos angle_defn unsqueeze sin cat sqrt softmax permute matmul detach_ FloatTensor cos sin array load_variable int format info zip squeeze fullmatch from_numpy getattr list_variables abspath append split load pop int format zip cumsum fullmatch from_numpy split getattr dirname info open tf_model constant sqrt erf pow tanh sqrt pi tf_model constant power float32 concatenate cast float32 cast tf_model constant tf_model constant pow tanh pi tf_model constant join replace sub bool split load format abspath info state_dict base_model_prefix trainable_weights tf_model list discard name transpose squeeze non_trainable_weights append expand_dims format replace convert_tf_weight_name_to_pt_weight_name set info zip keys batch_set_value pop constant numpy config format constant load_weights getattr abspath info tf_model tf_model_class __name__ weights items list format discard convert_tf_weight_name_to_pt_weight_name name transpose squeeze base_model_prefix named_parameters dict set from_numpy load_state_dict info expand_dims keys __name__ tf_model constant tf_model constant as_list shape tf_model constant constant less_equal cast tile less range count tf_model constant update r_r_bias hasattr tie_weight layers out_layers tie_projs emb_layers r_w_bias transformer emb_projs untie_r zip append out_projs enumerate load_variable pop items format join transpose from_numpy list_variables info keys build_tf_to_pytorch_map enumerate embedding view size einsum masked_fill_ sample cat detach list size len contiguous copy_ device to detach list size len contiguous copy_ device to detach isinstance Linear update r_r_bias r_s_bias hasattr r_w_bias bias transformer untie_r seg_embed append weight layer enumerate load_variable pop items format join isinstance 
build_tf_xlnet_to_pytorch_map transpose from_numpy list_variables info keys enumerate OrderedDict rstrip enumerate strip split category category startswith startswith category ord add set append list range ord sub replace load join format TransfoXLCorpus save info exists join category lower append normalize sub replace append category startswith replace pop convert_openai_checkpoint_to_pytorch convert_transfo_xl_checkpoint_to_pytorch convert_tf_checkpoint_to_pytorch convert_xlm_checkpoint_to_pytorch convert_gpt2_checkpoint_to_pytorch convert_xlnet_checkpoint_to_pytorch reset_default_graph state_dict makedirs convert_pytorch_checkpoint_to_tf size arange count simple_accuracy f1_score isinstance sorted tolist shuffle _to_list enumerate append group_by_queries ap append compute_aps join text_b InputFeatures len get_labels guid info label get_example_from_tensor_dict float text_a enumerate append encode_plus addoption getoption skip add_marker deepcopy setattr keys append randint range constant Random append step get_lr range append step get_lr range
## Slice-Aware Neural Ranking This repo contains the source code for the SCAI'20 paper _'Slice-Aware Neural Ranking'_. It includes forks of two base libraries: Huggingface transformers (for fine-tuning BERT) and snorkel (for the SRAMs). The _ir_slices_ folder contains the source code for the slicing functions specific to the conversational tasks and the retrieval domain. The model is a simple adaptation of Slice-based Learning (https://arxiv.org/pdf/1909.06349.pdf) to ranking models, using BERT as the backbone. <p align="center"> <img src="slice_aware_neural_ranking.PNG" align="center" width=600px> </p> In the paper we focus on finding slices of data for which neural ranking models might be ineffective. To do so we use slicing functions (SFs), which return a boolean indicating whether an instance belongs to a given slice. See two examples below: <p align="center"> <img src="slicing_functions.PNG" align="center" width=500px> </p> The SFs are implemented in ir_slices/ir_slices/slice_functions.py (an illustrative sketch is shown after this README). In order to run the experiments, first do the following to install dependencies:
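As an illustration of the SF pattern described in the README above, here is a minimal snorkel-style slicing function. The field name `query_text` and the word-count threshold are assumptions for illustration; this does not reproduce the actual SFs in ir_slices/ir_slices/slice_functions.py.

```python
# Minimal sketch of a slicing function in the snorkel style: it returns True
# when a data point belongs to the slice. The field name "query_text" and the
# 10-word threshold are illustrative assumptions, not the repo's actual SFs.
from snorkel.slicing import slicing_function

@slicing_function()
def long_query(x):
    # x is a data point (e.g., a pandas Series row) with a "query_text" field.
    return len(x.query_text.split()) > 10
```

Applied with snorkel's PandasSFApplier, such functions produce the slice-membership matrix that the slice-aware classifier consumes.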
394
Guzpenha/transformers_cl
['information retrieval']
['Curriculum Learning Strategies for IR: An Empirical Study on Conversation Response Ranking']
pytorch_transformers_cl/hubconfs/gpt2_hubconf.py pytorch_transformers_cl/pytorch_transformers/optimization.py pytorch_transformers_cl/pytorch_transformers/modeling_utils.py pytorch_transformers_cl/extract_results.py pytorch_transformers_cl/hubconfs/xlm_hubconf.py pytorch_transformers_cl/examples/lm_finetuning/finetune_on_pregenerated.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_tests_commons.py pytorch_transformers_cl/examples/lm_finetuning/simple_lm_finetuning.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_gpt2_test.py scoring_functions/calculate_scoring_function.py pytorch_transformers_cl/pytorch_transformers/file_utils.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_gpt2_test.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_transfo_xl_test.py pytorch_transformers_cl/hubconfs/bert_hubconf.py pytorch_transformers_cl/calculate_p_values.py pytorch_transformers_cl/pytorch_transformers/tests/conftest.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_common_test.py pytorch_transformers_cl/pytorch_transformers/convert_xlnet_checkpoint_to_pytorch.py pytorch_transformers_cl/examples/lm_finetuning/pregenerate_training_data.py pytorch_transformers_cl/examples/test_examples.py pytorch_transformers_cl/hubconfs/gpt_hubconf.py pytorch_transformers_cl/pytorch_transformers/convert_gpt2_checkpoint_to_pytorch.py pytorch_transformers_cl/examples/run_squad.py pytorch_transformers_cl/pytorch_transformers/convert_openai_checkpoint_to_pytorch.py pytorch_transformers_cl/pytorch_transformers/convert_xlm_checkpoint_to_pytorch.py pytorch_transformers_cl/pytorch_transformers/__main__.py pytorch_transformers_cl/hubconf.py scoring_functions/conv_curriculum_helper.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_xlnet_test.py pytorch_transformers_cl/pytorch_transformers/tokenization_xlm.py pytorch_transformers_cl/examples/single_model_scripts/run_swag.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_utils_test.py pytorch_transformers_cl/calculate_p_values_pacing.py pytorch_transformers_cl/pytorch_transformers/modeling_xlm.py scoring_functions/from_preds_to_dif.py pytorch_transformers_cl/pytorch_transformers/modeling_transfo_xl.py pytorch_transformers_cl/examples/run_glue.py pytorch_transformers_cl/pytorch_transformers/modeling_transfo_xl_utilities.py pytorch_transformers_cl/extract_pacing_functions_results.py pytorch_transformers_cl/pytorch_transformers/__init__.py pytorch_transformers_cl/pytorch_transformers/modeling_gpt2.py pytorch_transformers_cl/pytorch_transformers/tokenization_bert.py pytorch_transformers_cl/pytorch_transformers/tokenization_openai.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_xlnet_test.py pytorch_transformers_cl/docs/source/conf.py pytorch_transformers_cl/hubconfs/xlnet_hubconf.1.py pytorch_transformers_cl/examples/single_model_scripts/run_transfo_xl.py pytorch_transformers_cl/pytorch_transformers/convert_tf_checkpoint_to_pytorch.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_transfo_xl_test.py pytorch_transformers_cl/pytorch_transformers/modeling_xlnet.py pytorch_transformers_cl/examples/utils_squad_evaluate.py pytorch_transformers_cl/examples/pacing_functions.py pytorch_transformers_cl/setup.py pytorch_transformers_cl/pytorch_transformers/convert_pytorch_checkpoint_to_tf.py pytorch_transformers_cl/pytorch_transformers/tokenization_transfo_xl.py pytorch_transformers_cl/examples/single_model_scripts/run_openai_gpt.py 
pytorch_transformers_cl/pytorch_transformers/tests/tokenization_openai_test.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_bert_test.py pytorch_transformers_cl/pytorch_transformers/modeling_openai.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_xlm_test.py pytorch_transformers_cl/hubconfs/transformer_xl_hubconf.py pytorch_transformers_cl/examples/run_bertology.py pytorch_transformers_cl/examples/utils_squad.py pytorch_transformers_cl/pytorch_transformers/tokenization_gpt2.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_openai_test.py pytorch_transformers_cl/pytorch_transformers/tests/modeling_xlm_test.py pytorch_transformers_cl/pytorch_transformers/modeling_bert.py pytorch_transformers_cl/pytorch_transformers/tests/tokenization_bert_test.py pytorch_transformers_cl/pytorch_transformers/tests/optimization_test.py pytorch_transformers_cl/pytorch_transformers/tokenization_utils.py pytorch_transformers_cl/examples/run_generation.py pytorch_transformers_cl/pytorch_transformers/tokenization_xlnet.py pytorch_transformers_cl/examples/utils_glue.py pytorch_transformers_cl/pytorch_transformers/convert_transfo_xl_checkpoint_to_pytorch.py setup root_5 linear root_50 root_2 quadratic root_10 standard_training root_20 geom_progression step cubic entropy print_2d_tensor compute_heads_importance main mask_heads prune_heads main set_seed top_k_top_p_filtering sample_sequence set_seed evaluate main train load_and_cache_examples set_seed evaluate main to_list train load_and_cache_examples ExamplesTests get_setup_file ndcg ColaProcessor MnliProcessor InputExample acc_and_f1 _to_list MantisProcessor RteProcessor simple_accuracy MSDialogProcessor MnliMismatchedProcessor QqpProcessor StsbProcessor QnliProcessor mean_average_precision compute_metrics convert_examples_to_features DataProcessor read_scores_file ndcg_at_10 InputFeatures _truncate_seq_pair WnliProcessor Sst2Processor ap pearson_and_spearman MrpcProcessor FAQProcessor cycle UDCProcessor compute_aps read_curriculum_file _check_is_max_context _compute_softmax write_predictions_extended InputFeatures get_final_text _improve_answer_span _get_best_indexes read_squad_examples convert_examples_to_features SquadExample write_predictions find_best_thresh find_all_best_thresh_v2 apply_no_ans_threshold make_qid_to_has_ans make_eval_dict plot_pr_curve parse_args make_precision_recall_eval find_all_best_thresh compute_exact main merge_eval EVAL_OPTS compute_f1 find_best_thresh_v2 normalize_answer run_precision_recall_analysis histogram_na_prob get_raw_scores get_tokens main convert_example_to_features PregeneratedDataset create_training_file create_instances_from_document main truncate_seq_pair create_masked_lm_predictions DocumentDatabase convert_example_to_features BERTDataset InputFeatures accuracy InputExample _truncate_seq_pair random_word main load_rocstories_dataset main pre_process_datasets accuracy read_swag_examples InputFeatures accuracy _truncate_seq_pair SwagExample convert_examples_to_features select_field main main bertForPreTraining bertForSequenceClassification bertForQuestionAnswering bertForMaskedLM _append_from_pretrained_docstring bertForTokenClassification bertForNextSentencePrediction bertTokenizer bertModel bertForMultipleChoice gpt2DoubleHeadsModel _append_from_pretrained_docstring gpt2Model gpt2Tokenizer gpt2LMHeadModel openAIGPTTokenizer openAIGPTLMHeadModel openAIGPTModel openAIGPTDoubleHeadsModel _append_from_pretrained_docstring transformerXLLMHeadModel transformerXLModel 
transformerXLTokenizer _append_from_pretrained_docstring _begin_with_docstring xlmTokenizer xlmModel xlmLMHeadModel _end_with_docstring xlnetTokenizer xlnetLMHeadModel _append_from_pretrained_docstring xlnetModel convert_gpt2_checkpoint_to_pytorch convert_openai_checkpoint_to_pytorch main convert_pytorch_checkpoint_to_tf convert_tf_checkpoint_to_pytorch convert_transfo_xl_checkpoint_to_pytorch convert_xlm_checkpoint_to_pytorch convert_xlnet_checkpoint_to_pytorch cached_path s3_etag http_get s3_request s3_get get_from_cache filename_to_url url_to_filename split_s3_path BertPreTrainingHeads BertForQuestionAnswering BertEncoder BertSelfAttention BertForMaskedLM BertOnlyMLMHead BertOnlyNSPHead BertEmbeddings BertOutput BertPredictionHeadTransform BertAttention BertPooler gelu BertPreTrainedModel BertForMultipleChoice BertConfig BertLayer BertForTokenClassification BertModel BertForNextSentencePrediction BertIntermediate BertForSequenceClassification BertForPreTraining swish BertLMPredictionHead load_tf_weights_in_bert BertSelfOutput GPT2LMHeadModel Block GPT2DoubleHeadsModel load_tf_weights_in_gpt2 MLP gelu GPT2PreTrainedModel GPT2Model Attention GPT2Config Block OpenAIGPTPreTrainedModel OpenAIGPTConfig MLP gelu swish OpenAIGPTDoubleHeadsModel OpenAIGPTLMHeadModel Attention OpenAIGPTModel load_tf_weights_in_openai_gpt DecoderLayer TransfoXLModel PositionalEmbedding load_tf_weights_in_transfo_xl RelLearnableDecoderLayer AdaptiveEmbedding RelLearnableMultiHeadAttn TransfoXLPreTrainedModel MultiHeadAttn RelPartialLearnableDecoderLayer TransfoXLLMHeadModel PositionwiseFF TransfoXLConfig RelMultiHeadAttn build_tf_to_pytorch_map RelPartialLearnableMultiHeadAttn ProjectedAdaptiveLogSoftmax LogUniformSampler sample_logits prune_layer SQuADHead PoolerAnswerClass SequenceSummary prune_linear_layer prune_conv1d_layer PoolerEndLogits add_start_docstrings Conv1D PreTrainedModel PretrainedConfig PoolerStartLogits XLMConfig XLMModel XLMPreTrainedModel XLMWithLMHeadModel XLMPredLayer XLMForSequenceClassification MultiHeadAttention create_sinusoidal_embeddings get_masks gelu XLMForQuestionAnswering TransformerFFN XLNetRelativeAttention XLNetPreTrainedModel XLNetForQuestionAnswering build_tf_xlnet_to_pytorch_map XLNetLMHeadModel gelu swish XLNetConfig load_tf_weights_in_xlnet XLNetFeedForward XLNetForSequenceClassification XLNetModel XLNetLayer AdamW WarmupCosineSchedule WarmupCosineWithHardRestartsSchedule WarmupLinearSchedule WarmupConstantSchedule ConstantLRSchedule BasicTokenizer WordpieceTokenizer load_vocab whitespace_tokenize _is_whitespace _is_control BertTokenizer _is_punctuation bytes_to_unicode get_pairs GPT2Tokenizer get_pairs text_standardize OpenAIGPTTokenizer LMOrderedIterator TransfoXLCorpus TransfoXLTokenizer LMMultiFileIterator get_lm_corpus LMShuffledIterator PreTrainedTokenizer clean_up_tokenization get_pairs text_standardize XLMTokenizer XLNetTokenizer main pytest_addoption pytest_collection_modifyitems BertModelTest ConfigTester ModelUtilsTest _config_zero_init CommonTestCases ids_tensor GPT2ModelTest OpenAIModelTest TransfoXLModelTest XLMModelTest XLNetModelTest ScheduleInitTest unwrap_and_save_reload_schedule OptimizationTest unwrap_schedule TokenizationTest GPT2TokenizationTest OpenAIGPTTokenizationTest create_and_check_pretrained_model_lists create_and_check_required_methods_tokenizer create_and_check_tokenizer_commons create_and_check_save_and_load_tokenizer create_and_check_add_tokens_tokenizer create_and_check_pickle_tokenizer TemporaryDirectory TransfoXLTokenizationTest 
TokenizerUtilsTest XLMTokenizationTest XLNetTokenizationTest semantic_match_curriculum random_curriculum q_num_turns_curriculum main exact_match_curriculum d_avg_num_words_curriculum q_avg_num_words_curriculum split_into_buckets save_curriculum ap load_dataset add_stylesheet add_js_file log join len range info arange model tuple unsqueeze save device output_dir max numel append to sum detach requires_grad_ info view_as enumerate join entropy backward min print_2d_tensor tqdm pow zeros numpy numpy save output_dir max str view tolist numel masking_threshold sum ones_like info float view_as int join clone print_2d_tensor compute_heads_importance masking_amount now dict compute_heads_importance info sum enable_attach from_pretrained DataParallel DistributedDataParallel DataLoader ArgumentParser device save output_dir data_subset prune_heads basicConfig list set_seed set_device get_labels device_count parse_args to range format init_process_group lower info wait_for_attach task_name mask_heads join n_gpu print add_argument min barrier Subset compute_heads_importance model_name bool load_and_cache_examples local_rank len seed manual_seed_all manual_seed cumsum sort size min clone softmax tensor repeat decode model_name_or_path sample_sequence tolist prompt eval max_position_embeddings encode gradient_accumulation_steps percentage_data_by_epoch model use_additive_cl tuple clip_grad_norm_ zero_grad DataLoader DataParallel DistributedDataParallel max_grad_norm output_dir save max str sorted initialize set_seed debug_mode logging_steps normal_ iter run_name append master_params sum next range read_scores_file SummaryWriter format close mean reset_clf_weights save_pretrained num_train_epochs info sample fp16 keys per_gpu_train_batch_size max_steps int items n_gpu join evaluate backward AdamW add_scalar makedirs min curriculum_file parameters WarmupLinearSchedule step train_batch_size read_curriculum_file len tuple DataLoader argmax max eval_batch_size debug_mode squeeze per_gpu_eval_batch_size compute_metrics run_name append SequentialSampler sum range update format eval softmax info zip load_and_cache_examples join n_gpu makedirs compute_aps numpy len get_train_examples save tensor exists str max_seq_length data_dir get_labels TensorDataset convert_examples_to_features format eval_difficult info get_test_examples_difficult pop join load get_dev_examples get_test_examples warning do_train pacing_function eval_all_checkpoints setLevel seed str WARN update save_aps fp16 train evaluate dict update trange enumerate tqdm RawResult end_n_top output_dir max_answer_length write_predictions do_lower_case n_best_size write_predictions_extended evaluate_on_squad verbose_logging RawResultExtended EVAL_OPTS enumerate start_n_top int predict_file tqdm version_2_with_negative null_score_diff_threshold unique_id arange size read_squad_examples dirname save_pretrained makedirs parse_args add_argument ArgumentParser join text_b InputFeatures convert_tokens_to_ids _truncate_seq_pair tokenize guid info append label float text_a enumerate len pop len simple_accuracy f1_score isinstance sorted tolist shuffle _to_list enumerate append ap zip append ap zip append ndcg zip join is_whitespace whitespace_tokenize warning SquadExample append len _DocSpan _improve_answer_span is_impossible orig_answer_text length range doc_tokens question_text start _check_is_max_context namedtuple join tokenize range length start min enumerate strip end_logit _get_best_indexes sorted defaultdict get_final_text _NbestPrediction end_logits 
OrderedDict append start_logit replace _compute_softmax insert start_logits info enumerate join namedtuple text _PrelimPrediction split end_log_prob cls_logits strip find_all_best_thresh_v2 make_qid_to_has_ans sorted defaultdict get_final_text _NbestPrediction do_lower_case OrderedDict append range _compute_softmax info convert_tokens_to_string enumerate join namedtuple text min _PrelimPrediction get_raw_scores start_log_prob split join _strip_spaces items BasicTokenizer len info tokenize find sorted append range enumerate len append exp add_argument exit ArgumentParser print_help bool Counter get_tokens sum values len print max items float len xlabel ylabel ylim title savefig clf fill_between xlim step sorted plot_pr_curve append float enumerate sum make_precision_recall_eval merge_eval makedirs join ones_like xlabel ylabel title hist savefig clf float len sorted sum enumerate sorted sum enumerate find_best_thresh find_best_thresh_v2 make_eval_dict na_prob_file find_all_best_thresh na_prob_thresh run_precision_recall_analysis histogram_na_prob dumps apply_no_ans_threshold get_raw_scores out_image_dir out_file make_qid_to_has_ans merge_eval InputFeatures zeros convert_tokens_to_ids full gradient_accumulation_steps loads FP16_Optimizer DDP DistributedSampler exit half pregenerated_data FusedAdam append manual_seed_all get_world_size PregeneratedDataset mkdir manual_seed int bert_model AdamW read_text named_parameters RandomSampler WarmupLinearSchedule epochs train_batch_size pop len int sorted min len shuffle MaskedLmInstance set add choice append round max enumerate sample_doc extend randrange append randint truncate_seq_pair create_masked_lm_predictions range len format output_dir keys append random enumerate join tokens_a tokens_b info _truncate_seq_pair guid random_word append is_next len model tuple zero_grad mean num_train_epochs trange enumerate BERTDataset backward tqdm train_corpus step argmax tuple len append zeros full enumerate do_eval n_positions tokenize_and_encode numpy pre_process_datasets max save_vocabulary TensorDataset to_json_file eval_dataset SequentialSampler state_dict cached_path lm_coef max_steps accuracy train_dataset load_rocstories_dataset endings start_ending format map swag_id context_sentence read_swag_examples BertAdam tensor eval_batch_size data_dir max_seq_length convert_examples_to_features get_lr param_groups warmup_proportion learning_rate loss_scale select_field batch_size reset_length tgt_len clamp_len mem_len vocab get_iterator same_length ext_len from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained from_pretrained format load_tf_weights_in_gpt2 print GPT2Model save GPT2Config state_dict format OpenAIGPTConfig print save load_tf_weights_in_openai_gpt OpenAIGPTModel state_dict join T format replace print makedirs len to_tf_var_name any Saver assign_tf_var save append numpy Session state_dict convert_pytorch_checkpoint_to_tf str format print BertForPreTraining save load_tf_weights_in_bert from_json_file state_dict pop str join format __dict__ load_tf_weights_in_transfo_xl print TransfoXLLMHeadModel save abspath TransfoXLConfig state_dict load format print dict save str join format XLNetForQuestionAnswering print 
XLNetLMHeadModel XLNetForSequenceClassification load_tf_weights_in_xlnet abspath save from_json_file state_dict encode hexdigest sha256 str join str urlparse exists path netloc urlparse startswith resource split_s3_path Object resource split_s3_path download_fileobj get update write close tqdm iter_content len get str s3_etag decode join list url_to_filename filter startswith listdir head makedirs load_variable join int format info zip transpose fullmatch from_numpy any getattr list_variables abspath append split load_variable int format info zip squeeze fullmatch from_numpy getattr list_variables abspath append split load pop int format zip cumsum fullmatch from_numpy split getattr dirname info open update r_r_bias hasattr tie_weight layers out_layers tie_projs emb_layers r_w_bias transformer emb_projs untie_r zip append out_projs enumerate load_variable pop items format join transpose from_numpy list_variables info keys build_tf_to_pytorch_map enumerate embedding view size einsum masked_fill_ sample cat detach list size len contiguous copy_ device to detach list size len contiguous copy_ device to detach isinstance Linear detach_ FloatTensor cos sin array size arange count update r_r_bias r_s_bias hasattr r_w_bias bias transformer untie_r seg_embed append weight layer enumerate load_variable pop items format join isinstance build_tf_xlnet_to_pytorch_map transpose from_numpy list_variables info keys enumerate pow tanh sqrt pi OrderedDict rstrip enumerate strip split category category startswith startswith category ord append list range ord add set sub replace load join format TransfoXLCorpus save info exists replace pop convert_openai_checkpoint_to_pytorch convert_transfo_xl_checkpoint_to_pytorch convert_tf_checkpoint_to_pytorch convert_xlm_checkpoint_to_pytorch convert_gpt2_checkpoint_to_pytorch convert_xlnet_checkpoint_to_pytorch addoption getoption skip add_marker deepcopy setattr keys append randint range Random append step get_lr range append step get_lr range AdamW parameters Linear from_pretrained encode assertListEqual from_pretrained assertIsNotNone tokenize assertListEqual from_pretrained add_special_tokens assertGreater assertEqual len assertGreaterEqual convert_tokens_to_ids add_tokens vocab_size pad_token encode eos_token assertNotEqual from_pretrained decode assertIsInstance convert_ids_to_tokens convert_tokens_to_ids assertEqual len assertListEqual encode tokenize assertNotEqual items list assertListEqual append keys create_and_check_pretrained_model_lists create_and_check_required_methods_tokenizer create_and_check_add_tokens_tokenizer create_and_check_save_and_load_tokenizer create_and_check_pickle_tokenizer int sorted str count print append range len join randint print append len print mean append join print len mean append split join print tqdm mean load_word2vec_format std append get_score join print tqdm keys BM25 append std enumerate split exact_match_curriculum save_curriculum q_avg_num_words_curriculum semantic_match_curriculum dataset_path q_num_turns_curriculum load_dataset d_avg_num_words_curriculum output_path split_into_buckets
# Curriculum Learning Strategies for IR ## An Empirical Study on Conversation Response Ranking This code builds upon the [🤗 Transformers](https://github.com/huggingface/transformers) to evaluate curriculum learning strategies for information retrieval. Specifically, the code can be used to fine-tune BERT on conversation response ranking with the option of employing [curriculum learning](https://ronan.collobert.com/pub/matos/2009_curriculum_icml.pdf). We adapted the script _'run_glue.py'_ to accept two additional parameters: ``` --pacing_function PACING_FUNCTION: str with one of the predefined pacing functions: [linear, root_2, root_5, root_10, quadratic, geom_progression, cubic, step, standard_training]. You can also modify pacing_functions.py to add your own. --scoring_function SCORING_FUNCTION_FILE: str with the path of a file containing the difficulty score for each instance in the train set. Each line must contain a single float score for the training instance at that index in the training file. ``` The framework is based on previous work on curriculum learning and has two main components: pacing functions and scoring functions. The scoring functions measure the difficulty of an instance (in our case the query is a dialogue context, i.e. a set of previous utterances, and the documents are a set of candidate responses), and the pacing function determines how quickly training progresses from easy to difficult instances. <p align="center"> <img src="framework.png" align="center">
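As a hedged illustration of the pacing idea described above (a minimal sketch only; the actual formulas live in the repository's `pacing_functions.py` and may differ), a pacing function can be thought of as mapping the current training step to the fraction of the difficulty-sorted training set shown to the model:

```python
# Hypothetical pacing functions for illustration; the names mirror the
# predefined options above, but the exact formulas are assumptions.
def root_2(step, total_steps, start_fraction=0.33):
    """Fraction of the difficulty-sorted training data visible at `step`,
    growing with the square root of training progress."""
    progress = min(1.0, step / float(total_steps))
    return min(1.0, start_fraction + (1.0 - start_fraction) * progress ** 0.5)


def standard_training(step, total_steps, start_fraction=0.33):
    """Baseline without a curriculum: the full training set is always used."""
    return 1.0
```

For example, with `total_steps=1000` and `start_fraction=0.33`, `root_2(250, 1000)` makes roughly two thirds of the easiest training instances available a quarter of the way through training, and the full set only near the end.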
395
HAHA-DL/Episodic-DG
['domain generalization']
['Episodic Training for Domain Generalization']
common/utils.py PACS/resnet_vanilla.py PACS/resnet_epi_fcr.py PACS/main_agg.py PACS/main_epi_fcr.py PACS/model_resnet.py common/data_reader.py BatchImageGenerator fix_torch_seed unfold_label mseloss shuffle_list sgd fix_all_seed shuffle_data crossentropyloss write_log fix_python_seed num_flat_features compute_accuracy shuffle_list_with_ind main main ModelEpiFCR ModelAggregate ResNetFeatModule conv3x3 DomainAGG DomainSpecificNN BasicBlock ResNetClassifierModule ResNet conv3x3 BasicBlock resnet18 int min astype int8 append range len permutation arange len shuffle permutation arange len CrossEntropyLoss MSELoss SGD str close write open print seed print manual_seed_all manual_seed print seed manual_seed_all manual_seed accuracy_score argmax ModelAggregate add_argument ArgumentParser parse_args train ModelEpiFCR load_url ResNet load_state_dict
# Episodic-DG This is the repo for reproducing the results in the paper Episodic Training for Domain Generalization. ##### Data Please download the data from https://drive.google.com/drive/folders/0B6x7gtvErXgfUU1WcGY5SzdwZVk?resourcekey=0-2fvpQY_QSyJf2uIECzqPuQ&usp=sharing and use the official train/val split. ##### ImageNet pretrained model We use the PyTorch pretrained ResNet-18 model from https://download.pytorch.org/models/resnet18-5c106cde.pth ## Environments verified on > GPU GeForce RTX 2080 Ti \ > pytorch 1.0.0 \
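As a hedged illustration of the pretrained-model step above, the ImageNet weights can be loaded into a standard torchvision ResNet-18 (a minimal sketch assuming the torchvision architecture matches that checkpoint; the repository ships its own ResNet variants in `PACS/resnet_vanilla.py`):

```python
# Sketch: load the ImageNet-pretrained ResNet-18 checkpoint referenced above.
import torch.utils.model_zoo as model_zoo
from torchvision.models import resnet18

model = resnet18()  # randomly initialized ResNet-18
state_dict = model_zoo.load_url(
    "https://download.pytorch.org/models/resnet18-5c106cde.pth"
)
model.load_state_dict(state_dict)  # now carries the ImageNet-pretrained weights
```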
396
HCPLab-SYSU/KERN
['graph generation', 'scene graph generation']
['Knowledge-Embedded Routing Network for Scene Graph Generation']
dataloaders/blob.py lib/surgery.py lib/draw_rectangles/setup.py dataloaders/image_transforms.py lib/object_detector.py lib/ggnn.py models/train_detector.py lib/resnet.py lib/evaluation/sg_eval.py prior_matrices/generate_knowledge.py models/eval_rels.py lib/fpn/roi_align/functions/roi_align.py lib/fpn/proposal_assignments/proposal_assignments_postnms.py lib/get_union_boxes.py lib/fpn/anchor_targets.py lib/fpn/roi_align/_ext/roi_align/__init__.py lib/fpn/proposal_assignments/proposal_assignments_rel.py lib/evaluation/sg_eval_all_rel_cates.py lib/fpn/proposal_assignments/rel_assignments.py lib/fpn/box_intersections_cpu/setup.py lib/fpn/proposal_assignments/proposal_assignments_det.py models/train_rels.py lib/fpn/nms/build.py lib/pytorch_misc.py lib/fpn/roi_align/build.py lib/fpn/roi_align/modules/roi_align.py lib/evaluation/sg_eval_slow.py dataloaders/visual_genome.py config.py lib/evaluation/test_sg_eval.py lib/fpn/proposal_assignments/proposal_assignments_gtbox.py lib/kern_model.py lib/fpn/box_utils.py lib/fpn/nms/functions/nms.py lib/fpn/generate_anchors.py stanford_path path ModelConfig random_crop Grayscale Contrast Brightness SquarePad RandomOrder Sharpness Hue VGDataLoader load_image_filenames VG load_graphs assertion_checks vg_collate load_info union_boxes UnionBoxesAndFeats GGNNRel GGNNObj GGNNRelReason KERN GGNNObjReason VRFC Result gather_res filter_roi_proposals load_resnet filter_det load_vgg RPNHead ObjectDetector transpose_packed_sequence_inds arange Flattener intersect_2d get_ranking enumerate_imsize const_row to_variable pairwise batch_map diagonal_inds right_shift_packed_sequence_inds to_onehot gather_nd argsort_desc optimistic_restore cache np_to_variable random_choose clip_grad_norm update_lr save_net batch_index_iterator de_chunkize load_net print_para accuracy nonintersecting_2d_inds enumerate_by_image unravel_index ResNet resnet_l4 vgg_fc Bottleneck resnet_l123 resnet101 filter_dets _triplet evaluate_from_dict BasicSceneGraphEvaluator evaluate_recall _compute_pred_matches _triplet evaluate_from_dict BasicSceneGraphEvaluator evaluate_recall _compute_pred_matches _triplet iou BasicSceneGraphEvaluator eval_relation_recall _relation_recall _triplet eval_relation_recall _relation_recall iou anchor_target_layer nms_overlaps bbox_loss bbox_intersections center_size point_form bbox_overlaps bbox_preds generate_anchors _scale_enum _whctrs _ratio_enum generate_base_anchors _mkanchors _nms_single_im apply_nms _sel_inds proposal_assignments_det proposal_assignments_gtbox RoIAlignFunction RoIAlign RoIAlignAvg RoIAlignMax _import_symbols val_batch val_epoch val_batch train_epoch train_batch val_epoch train_batch train_epoch get_optim val_batch box_filter get_obj_cooccurrence_mat get_counts stanford_path path ModelConfig random_crop Grayscale Contrast Brightness SquarePad RandomOrder Sharpness Hue VGDataLoader load_image_filenames VG load_graphs assertion_checks vg_collate load_info union_boxes UnionBoxesAndFeats GGNNRel GGNNObj GGNNRelReason KERN GGNNObjReason VRFC Result gather_res filter_roi_proposals load_resnet filter_det load_vgg RPNHead ObjectDetector transpose_packed_sequence_inds arange Flattener intersect_2d get_ranking enumerate_imsize const_row to_variable pairwise batch_map diagonal_inds right_shift_packed_sequence_inds to_onehot gather_nd argsort_desc optimistic_restore cache np_to_variable random_choose clip_grad_norm update_lr save_net batch_index_iterator de_chunkize load_net print_para accuracy nonintersecting_2d_inds enumerate_by_image unravel_index ResNet resnet_l4 
vgg_fc Bottleneck resnet_l123 resnet101 filter_dets _triplet evaluate_from_dict BasicSceneGraphEvaluator evaluate_recall _compute_pred_matches _triplet evaluate_from_dict BasicSceneGraphEvaluator evaluate_recall _compute_pred_matches _triplet iou BasicSceneGraphEvaluator eval_relation_recall _relation_recall _triplet eval_relation_recall _relation_recall iou anchor_target_layer nms_overlaps bbox_loss bbox_intersections center_size point_form bbox_overlaps bbox_preds generate_anchors _scale_enum _whctrs _ratio_enum generate_base_anchors _mkanchors _nms_single_im apply_nms _sel_inds proposal_assignments_det proposal_assignments_gtbox RoIAlignFunction RoIAlign RoIAlignAvg RoIAlignMax _import_symbols val_batch val_epoch val_batch train_epoch train_batch val_epoch train_batch train_epoch get_optim val_batch box_filter get_obj_cooccurrence_mat get_counts int size min crop astype int32 randint max column_stack tuple size join format append exists enumerate File astype len append zeros bbox_overlaps range column_stack load sorted open append reduce Blob cat data Variable sort squeeze clone apply_nms nonzero zero_ cpu max get_device cuda apply_nms cat resnet101 vgg16 items join format print size set copy_ keys state_dict next tee size topk squeeze long size long arange fill_ items list File create_dataset items list asarray format print size File from_numpy copy_ range format print size f append batch_index_iterator Variable cuda LongTensor is_available join sorted format items named_parameters append topk size t eq mul_ expand_as append sum max ones diag where column_stack all Variable type cuda size dim range clone int numpy enumerate size long arange int enumerate append clone size min contiguous choice get_device cuda concatenate cumsum copy append range len append range zip items norm format sorted print size mul_ float print param_groups format load_url ResNet load_state_dict resnet101 resnet101 layer4 classifier vgg16 view sort size numpy max sum evaluate_recall intersect_2d ones len astype reduce union1d prod argsort_desc append float argmax max column_stack column_stack _triplet format print prod _compute_pred_matches column_stack int concatenate intersect_2d reshape any zip append list items min items list _triplet ones min astype copy _relation_recall append prod column_stack float32 range astype int32 iou astype intersect1d zip enumerate minimum maximum argmax max reshape vstack ravel array range enumerate int generate_anchors sum ones reshape where argmax bbox_overlaps column_stack smooth_l1_loss size center_size log cat center_size exp ndarray isinstance ndarray isinstance ndarray isinstance clamp size min expand max bbox_intersections ndarray isinstance expand_as view clamp size min expand max meshgrid stack arange generate_base_anchors vstack _ratio_enum array hstack sqrt _whctrs _mkanchors _whctrs _mkanchors _nms_single_im int size append cat IntTensor sort size contiguous min nms_apply long int max sort squeeze _sel_inds numpy nonzero cuda get_device append round bbox_overlaps range cat size min choice int sort size min clone contiguous random_choose enumerate_by_image nonzero long cat append dir getattr _wrap_function append eval_entry enumerate mode time print_interval format train_batch print mean append train enumerate len data od_box_priors bbox_loss backward od_box_deltas od_box_targets rpn_scores squeeze size Series zero_grad rpn_box_deltas od_obj_labels clip_grad_norm step od_obj_dists cross_entropy val_batch evaluate concatenate print coco COCOeval summarize accumulate 
eval loadRes append enumerate coco numpy adam Adam SGD use_ggnn_obj rm_obj_dists values rel_dists sum rm_obj_labels num_gpus all_modes mode print_stats calculate_mR_from_evaluator_list items list num_classes transpose copy set nonzero eye zeros expand_dims range len copy zip zeros range len ones_like fill_diagonal where column_stack
# Knowledge-Embedded Routing Network for Scene Graph Generation Tianshui Chen*, Weihao Yu*, Riquan Chen, and Liang Lin, “Knowledge-Embedded Routing Network for Scene Graph Generation”, CVPR, 2019. (* co-first authors) [[manuscript](https://arxiv.org/abs/1903.03326)] (camera-ready version will be available soon) This repository contains trained models and PyTorch code for the above paper. If the paper inspires your work, please cite it: ### Bibtex ``` @inproceedings{chen2019knowledge, title={Knowledge-Embedded Routing Network for Scene Graph Generation}, author={Chen, Tianshui and Yu, Weihao and Chen, Riquan and Lin, Liang}, booktitle = "Conference on Computer Vision and Pattern Recognition", year={2019}
397
HDI-Project/ATMSeer
['automl']
['ATMSeer: Increasing Transparency and Controllability in Automated Machine Learning']
server/atm_server/__init__.py server/atm_server/recommender/OneHotEncoder.py server/setup.py server/atm_server/cache.py server/atm_server/atm_helper/worker.py server/atm_server/utils.py server/atm_server/atm_helper/helpers.py server/atm_server/api.py server/atm_server/atm_helper/__init__.py server/atm_server/recommender/logging_.py server/atm_server/db.py server/atm_server/error.py server/atm_server/recommender/encoder.py server/atm_server/cli.py server/atm_server/atmvis.py server/atm_server/recommender/predict_dataset.py server/atm_server/server.py server/atm_server/atm_helper/datarun_config.py server/atm_server/recommender/metafeature.py server/atm_server/recommender/metafeatures.py server/atm_server/atm_helper/btb_wireup.py server/atm_server/config.py post_disable_hyperpartition update_hyperparameters get_hyperpartition fetch_entity_as_json allowed_file post_click_event get_classifier get_dataset_file get_datasets get_dataset get_datarun_summary handle_invalid_usage post_enter_data get_hyperpartitions post_update_datarun_config get_classifier_summary handle_db_request_error getRecommendation dispatch_single_worker stop_single_worker configs_info get_classifiers post_enable_hyperpartition get_datarun get_dataruns post_new_datarun post_new_dataset get_datarun_steps_scores dispatch_simple_worker send_media index send_index send_css send_js get_cache Cache cli Config TestingConfig DevelopmentConfig ProductionConfig fetch_dataset_path table_fetcher teardown_db params_string fetch_entity get_db hyperpartition_string metric_string object_as_dict init_app summarize_datarun fetch_classifiers summarize_classifiers fetch_hyperpartitions check_db_mappers ApiError add_arguments_server start_server create_app nice_json_encoder ucb_bandit_scores _selector_scores2rewards selector_bandit_scores save_datarun_method_config update_datarun_config maybe_create_datarun_configs load_datarun_config load_datarun_method_config datarun_config NewMethod update_datarun_method_config get_datarun_config_path load_datarun_config_dict new_datarun get_datarun_steps_info dispatch_worker should_worker_stop signal_worker_stop stop_worker mark_running_datarun_pending work datarun_id2key clean_worker_cache start_worker monitor_dispatch_worker register_worker_process return_stdout_stderr DataEncoder MetaData _create_logger PickableLoggerAdapter get_logger setup_logger MetaFeature HelperFunction MetaFeatureValue AbstractMetaFeature DatasetMetafeatures ClassProbabilitySTD SkewnessMean KurtosisSTD PercentageOfMissingValues PCASkewnessFirstPC LogDatasetRatio LandmarkRandomNodeLearner calculate_all_metafeatures_encoded_labels NumSymbols PercentageOfInstancesWithMissingValues ClassProbabilityMax Landmark1NN Skewnesses HelperFunctions SymbolsSum LogInverseDatasetRatio SkewnessMin ClassOccurences calculate_all_metafeatures_with_labels LandmarkDecisionTree SymbolsMean NumberOfInstancesWithMissingValues SymbolsSTD RatioNominalToNumerical LogNumberOfInstances InverseDatasetRatio KurtosisMax NumberOfInstances SkewnessMax SymbolsMax LandmarkDecisionNodeLearner PCAKurtosisFirstPC LandmarkNaiveBayes KurtosisMin NumberOfFeaturesWithMissingValues MissingValues NumberOfNumericFeatures PercentageOfFeaturesWithMissingValues ClassProbabilityMean LandmarkLDA NumberOfMissingValues PCAFractionOfComponentsFor95PercentVariance LogNumberOfFeatures SymbolsMin SkewnessSTD NumberOfCategoricalFeatures ClassProbabilityMin PCA RatioNumericalToNominal NumberOfFeatures KurtosisMean ClassEntropy DatasetRatio MetafeatureFunctions calculate_all_metafeatures 
Kurtosisses NumberOfClasses _transform_selected OneHotEncoder Recommender jsonify exception print jsonify exception get fetch_dataset_path get get get get get COMPLETE get get summarize_classifiers get get_datarun_steps_info join deepcopy enter_data secure_filename warning abspath filename splitext save exists makedirs join deepcopy secure_filename get_db create_dataset warning abspath filename splitext save exists makedirs deepcopy items new_datarun get_db maybe_create_datarun_configs loads setattr start Process start_worker get_datarun get_db stop_worker get update loads load_config load_datarun_config_dict get items update_datarun_method_config get_json mark_hyperpartition_errored get_db get_json commit INCOMPLETE get_db get_hyperpartition get_json get_json append remote_addr get_json get fetch_dataset_path Recommender predict_dataset join set Classifier Datarun Hyperpartition Dataset inspect get_session check_db_mappers query username port password host Database dialect database pop close teardown_appcontext one getattr get_db all append categoricals filter get_db get_db get_dataset int defaultdict get_db get_datarun filter zip argmax get_hyperpartitions get_classifiers get_db get_hyperpartitions get_db update register_blueprint from_object CORS init_app Flask load_config add_argument vars add_arguments_datarun add_arguments_logging debug add_arguments_server ArgumentParser create_app add_arguments_sql parse_args add_arguments_aws_s3 run items compute_rewards isinstance min reward_func items sqrt sum max log len _selector_scores2rewards isinstance len join str get_datarun_config_path copytree copy join get_datarun_config_path update get_db get_datarun load_datarun_config_dict RunConfig commit tuner query selector Hyperpartition hasattr get update get_db set in_ setattr deepcopy join items print get_datarun filter notin_ get_datarun_config_path get_datarun_config_path items get_hyperpartitions list save_datarun_method_config all tunables commit get_db warn load_datarun_method_config filter NewMethod get_datarun_config_path get_hyperpartitions items defaultdict inf get_classifiers get_db get_datarun selector selector_bandit_scores append float Worker enumerate get_hyperpartitions list items train_path methods join tuner debug create_hyperpartition create_datarun get_dataset id map selector info dataset_id Method append initialize_logging setFormatter join getLogger addHandler add_arguments_logging parse_args add_argument PROJECT_ROOT load_config Formatter warning ArgumentParser add_arguments_sql DEBUG setLevel add_arguments_aws_s3 FileHandler start Process register dispatch_worker should_worker_stop pid error clean_worker_cache register_worker_process terminate warning sleep is_alive Process get_db setpgrp get_datarun start sleep register time signal_worker_stop has datarun_id2key mark_running_datarun_pending get_db sleep get_datarun commit now PENDING pid datarun_id2key set get_cache warning datarun_id2key get_cache get has datarun_id2key set get_cache warning datarun_id2key get_cache delete dictConfig basicConfig PickableLoggerAdapter update set update deepcopy set arange check_array itemsize todense set_value add DatasetMetafeatures get_logger fit_transform appendleft issparse RandomState shuffle OneHotEncoder set info deque Imputer clear pop get_dependency print extend dict any StandardScaler arange check_array logical_not transform zeros sum
<p align="left"> <img width=15% src="https://dai.lids.mit.edu/wp-content/uploads/2018/06/Logo_DAI_highres.png" alt=“DAI-Lab” /> <i>An open source project from Data to AI Lab at MIT.</i> </p> # ATMSeer ATMSeer is an interactive visualization tool for automated machine learning (AutoML). It enables users to monitor an ongoing AutoML process, analyze the searched models, and refine the search space in real time through a multi-granularity visualization. In this instantiation, we build on top of the [ATM AutoML system](https://github.com/HDI-Project/ATM). Our paper, "ATMSeer: Increasing Transparency and Controllability in Automated Machine Learning", was presented at CHI 2019 ([pdf](https://arxiv.org/abs/1902.05009), [site](https://dai.lids.mit.edu/projects/atmseer/)). [![ATMSEER VIDEO](https://img.youtube.com/vi/7QwN3mmiCzY/0.jpg)](http://www.youtube.com/watch?v=7QwN3mmiCzY "Video Title") ## Installation ### Prerequisites
398
HDI-Project/AutoBazaar
['automl']
['The Machine Learning Bazaar: Harnessing the ML Ecosystem for Effective System Development']
docs/conf.py autobazaar/pipeline.py autobazaar/utils.py autobazaar/__init__.py autobazaar/search.py setup.py autobazaar/__main__.py ABPipeline PipelineSearcher UnsupportedProblem StopSearch log_times restore_dots make_keras_picklable _walk make_dumpable ensure_dir remove_dots encode_score _run get_version _get_commit _insert_test_result _box_print _get_metric _list _get_dataset_paths ArgumentParser _format_exception _test_pipeline _search_pipeline _get_datasets _score_datasets _insert_test _load_targets _search _prepare_search _update_test main _score_predictions _score_dataset _get_parser _path_type transform fit_transform LabelEncoder makedirs int items defaultdict ndarray isinstance bool_ float tolist integer bool isoformat floating items isinstance dict transform Model _run _get_commit join join join PipelineSearcher D3MDS _get_dataset_paths join format print D3MDS _get_dataset_paths format set_index describe print _get_metric any _get_dataset_paths encode_score str format __name__ print format len utcnow insert_one update_one insert_one utcnow copy test_id _insert_test_result _box_print utcnow db _test_pipeline _search_pipeline checkpoints splits template _insert_test input append budget update format copy tuner_type _update_test test_id collect _score_predictions output rmtree problem checkpoints timeout print _get_datasets exit get_db strftime make_keras_picklable db iterrows to_csv _score_dataset copy report append reindex DataFrame read_csv merge to_string format print _score_datasets _prepare_search reindex values test_id get_stats all print exit datasets getattr input reindex empty to_string format set_index print _get_datasets to_csv report reindex add_argument add_parser ArgumentParser set_defaults add_subparsers enable exit print_help verbose _get_parser logging_setup parse_args logfile
<p align="left"> <img width=15% src="https://dai.lids.mit.edu/wp-content/uploads/2018/06/Logo_DAI_highres.png" alt=“AutoBazaar” /> <i>An open source project from Data to AI Lab at MIT.</i> </p> [![Development Status](https://img.shields.io/badge/Development%20Status-2%20--%20Pre--Alpha-yellow)](https://pypi.org/search/?c=Development+Status+%3A%3A+2+-+Pre-Alpha) [![PyPi](https://img.shields.io/pypi/v/autobazaar.svg)](https://pypi.python.org/pypi/autobazaar) [![Tests](https://github.com/MLBazaar/AutoBazaar/workflows/Run%20Tests/badge.svg)](https://github.com/MLBazaar/AutoBazaar/actions?query=workflow%3A%22Run+Tests%22+branch%3Amaster) [![Downloads](https://pepy.tech/badge/autobazaar)](https://pepy.tech/project/autobazaar) # AutoBazaar * License: [MIT](https://github.com/MLBazaar/AutoBazaar/blob/master/LICENSE)
399