| column | type | min | max |
|---|---|---|---|
| repo | string length | 8 | 116 |
| tasks | string length | 8 | 117 |
| titles | string length | 17 | 302 |
| dependencies | string length | 5 | 372k |
| readme | string length | 5 | 4.26k |
| __index_level_0__ | int64 | 0 | 4.36k |
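The schema above describes one record per repository: a repo identifier, its associated tasks and paper titles, a flattened dependencies field (file paths plus extracted function and class names), a truncated README, and an integer index. Below is a minimal sketch of how such a table could be inspected with pandas; the file name `repo_paper_links.parquet` and the parquet format are illustrative assumptions, not part of this dump.

```python
import pandas as pd

# Hypothetical file name/format: adjust to wherever this dump actually lives.
df = pd.read_parquet("repo_paper_links.parquet")

# Expected columns per the schema above.
expected = ["repo", "tasks", "titles", "dependencies", "readme", "__index_level_0__"]
assert list(df.columns) == expected

# Basic sanity checks mirroring the string-length statistics in the schema.
print(df["repo"].str.len().agg(["min", "max"]))    # roughly 8 to 116
print(df["readme"].str.len().agg(["min", "max"]))  # roughly 5 to 4.26k

# Look up the tasks and papers linked to one repository.
row = df[df["repo"] == "apple/ml-tree-dst"].iloc[0]
print(row["tasks"], row["titles"])
```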
anonymous47823493/EagleEye
['network pruning']
['EagleEye: Fast Sub-net Evaluation for Efficient Neural Network Pruning']
distiller/thinning.py distiller/learning_rate.py finetune.py distiller/pruning/pruner.py choose_strategy.py distiller/models/imagenet/alexnet_batchnorm.py distiller/pruning/ranked_structures_pruner.py distiller/pruning/greedy_filter_pruning.py distiller/quantization/range_linear.py distiller/regularization/regularizer.py distiller/pruning/__init__.py distiller/apputils/data_loaders.py distiller/pruning/baidu_rnn_pruner.py distiller/directives.py distiller/data_loggers/__init__.py distiller/regularization/group_regularizer.py distiller/data_loggers/collector.py distiller/pruning/automated_gradual_pruner.py distiller/quantization/quantizer.py data/__init__.py distiller/__init__.py distiller/data_loggers/logger.py distiller/modules/rnn.py distiller/apputils/__init__.py data/imagenet.py distiller/thresholding.py inference.py data/imagenet_train_val_split.py models/resnet.py distiller/data_loggers/tbbackend.py distiller/pruning/sensitivity_pruner.py distiller/scheduler.py report/__init__.py distiller/quantization/clipped_linear.py thinning/__init__.py models/mobilenet.py distiller/models/imagenet/mobilenet.py distiller/apputils/checkpoint.py distiller/models/imagenet/resnet.py distiller/models/imagenet/resnet_earlyexit.py search.py distiller/models/__init__.py options/base_options.py distiller/policy.py distiller/modules/__init__.py distiller/quantization/q_utils.py distiller/apputils/dataset_summaries.py models/wrapper.py distiller/utils.py distiller/regularization/drop_filter.py distiller/modules/eltwise.py distiller/modules/grouping.py distiller/pruning/magnitude_pruner.py distiller/config.py distiller/model_summaries.py distiller/pruning/structure_pruner.py distiller/models/imagenet/__init__.py distiller/summary_graph.py distiller/pruning/level_pruner.py distiller/quantization/__init__.py distiller/apputils/execution_env.py distiller/pruning/splicing_pruner.py distiller/regularization/__init__.py distiller/models/imagenet/preresnet_imagenet.py distiller/sensitivity.py distiller/knowledge_distillation.py distiller/regularization/l1_regularizer.py get_channel_config train_epoch random_compression_scheduler save_checkpoints main main main random_compression_scheduler get_pruning_strategy get_dataloaders get_dataloaders custom_get_dataloaders dict_config file_config __factory __policy_params build_component add_policy_to_scheduler config_component_from_file_by_class FreezeTraining freeze_all freeze_training adjust_dropout KnowledgeDistillationPolicy add_distillation_args PolynomialLR MultiStepMultiGammaLR connectivity_summary connectivity_summary_verbose weights_sparsity_tbl_summary export_img_classifier_to_onnx model_performance_tbl_summary attributes_summary_tbl model_performance_summary masks_sparsity_tbl_summary create_png module_visitor masks_sparsity_summary data_node_has_parent attributes_summary create_pydot_graph model_summary connectivity_tbl_summary draw_model_to_file weights_sparsity_summary conv_visitor draw_img_classifier_to_file fc_visitor ScheduledTrainingPolicy QuantizationPolicy PruningPolicy RegularizationPolicy LRPolicy CompressionScheduler create_model_masks_dict ParameterMasker perform_sensitivity_analysis sensitivities_to_csv sensitivities_to_png onnx_name_2_pytorch_name increment_instance SummaryGraph remove_channels resnet_cifar_remove_layers find_nonzero_channels StructureRemover get_normalized_recipe apply_and_save_recipe directives_equal remove_filters create_thinning_recipe_channels create_graph execute_thinning_recipes_list append_param_directive 
find_nonzero_channels_list execute_thinning_recipe append_bn_thinning_directive append_module_directive optimizer_thinning create_thinning_recipe_filters GroupThresholdMixin threshold_policy threshold_mask group_threshold_binary_map group_threshold_mask set_deterministic density_ch sparsity_rows volume sparsity_3D convert_tensors_recursively_to model_device density_3D sparsity_2D normalize_module_name density_cols activation_channels_means float_range_argparse_checker model_params_size filter_kwargs MutableNamedTuple sparsity_ch model_params_stats activation_channels_apoz size_to_str log_weights_sparsity density log_activation_statsitics to_np activation_channels_l1 log_model_buffers density_rows log_training_progress denormalize_module_name size2str model_numel has_children make_non_parallel_copy yaml_ordered_load pretty_int density_2D find_module_by_fq_name sparsity sparsity_cols optimizer_device_name assign_layer_fq_names get_dummy_input norm_filters model_sparsity sparsity_blocks sparsity_matrix model_find_module_name model_find_param model_find_param_name model_find_module load_checkpoint save_checkpoint load_lean_checkpoint get_contents_table dataset_summary __image_size SwitchingSubsetRandomSampler _get_subset_length cifar10_get_datasets __split_list _get_sampler get_data_loaders load_data imagenet_get_datasets __deterministic_worker_init_fn config_pylogger log_execution_env_state TrainingProgressCollector ActivationStatsCollector ActivationHistogramsCollector collectors_context collector_context collect_quant_stats collect_histograms _verify_no_dataparallel QuantCalibrationStatsCollector RecordsActivationStatsCollector SummaryActivationStatsCollector _QuantStatsRecord DataLogger PythonLogger CsvLogger TensorBoardLogger TBBackend create_model alexnet_bn AlexNetBN mobilenet mobilenet_050 MobileNet mobilenet_075 mobilenet_025 preact_resnet101 preact_resnet34 preact_resnet50 PreactBasicBlock preact_resnet152 preact_resnet18 PreactResNet conv3x3 PreactBottleneck ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 conv3x3 ResNetEarlyExit resnet50_earlyexit EltwiseAdd EltwiseMult Stack Split Chunk Concat _unpack_bidirectional_input_h _repackage_hidden_bidirectional DistillerLSTMCell process_sequence_wise DistillerLSTM convert_model_to_distiller_lstm _repackage_hidden_unidirectional RandomRankedFilterPruner_AGP AutomatedGradualPruner AutomatedGradualPrunerBase L1RankedStructureParameterPruner_AGP GradientRankedFilterPruner_AGP ActivationMeanRankedFilterPruner_AGP BernoulliFilterPruner_AGP StructuredAGP ActivationAPoZRankedFilterPruner_AGP L2RankedStructureParameterPruner_AGP BaiduRNNPruner find_most_robust_layer_mp create_network_record_file find_most_robust_layer prune_tensor create_scheduler record_network_details get_param_densities add_greedy_pruner_args prune_finetune_test greedy_pruner get_model_compute_budget SparsityLevelParameterPruner MagnitudeParameterPruner threshold_model _ParameterPruner ActivationRankedFilterPruner mask_from_filter_order BernoulliFilterPruner RandomRankedFilterPruner RankedStructureParameterPruner RandomLevelStructureParameterPruner LpRankedStructureParameterPruner L1RankedStructureParameterPruner ActivationAPoZRankedFilterPruner GradientRankedFilterPruner L2RankedStructureParameterPruner ActivationMeanRankedFilterPruner SensitivityPruner SplicingPruner StructureParameterPruner dorefa_quantize_param PACTQuantizer LearnedClippedLinearQuantizeSTE ClippedLinearQuantization DorefaQuantizer LearnedClippedLinearQuantization 
WRPNQuantizer DorefaParamsBinarizationSTE has_bias Quantizer hack_float_backup_parameter _ParamToQuant get_tensor_avg_min_max get_quantized_range get_tensor_min_max get_tensor_max_abs approx_scale_as_mult_and_shift _prep_saturation_val_tensor get_tensor_mean_n_stds_max_abs linear_dequantize get_tensor_mean_n_stds_min_max asymmetric_linear_quantization_params get_tensor_avg_max_abs LinearQuantizeSTE symmetric_linear_quantization_params get_scale_approximation_mult get_scale_approximation_params linear_quantize_clamp clamp linear_quantize get_scale_approximation_shift_bits _verify_enum_value NoStatsError RangeLinearQuantParamLayerWrapper _get_saturation_fn RangeLinearQuantConcatWrapper update_ema ClipMode LinearQuantMode RangeLinearQuantWrapper RangeLinearEmbeddingWrapper verify_quant_mode _get_quant_params_from_tensor add_post_train_quant_args QuantAwareTrainRangeLinearQuantizer FP16Wrapper _get_quant_params_from_stats_dict verify_clip_mode FakeLinearQuantization FakeQuantizationWrapper RangeLinearQuantEltwiseMultWrapper inputs_quantize_wrapped_forward RangeLinearQuantEltwiseAddWrapper PostTrainLinearQuantizer _enum_to_str DropFilterRegularizer Conv2dWithMask replace_conv2d GroupLassoRegularizer GroupVarianceRegularizer L1Regularizer _Regularizer Block MobileNet test resnet50 accuracy ModelWrapper BaseOptions Reporter model_summary performance_summary _check_mk_path weights_sparsity_summary thinning enumerate get_loss backward zero_grad tqdm train step enumerate file_config get_eval_scores _net rand search_result ModelWrapper strategy_id unsqueeze thinning __getitem__ exp_name str hasattr Reporter compress_schedule_path save_checkpoints to parallel range get_channel_config epoch format model_summary random_compression_scheduler is_available CosineAnnealingLR checkpoint optimizer gpu_ids log_metric print load_checkpoint custom_get_dataloaders train_epoch get_compress_part train step save_checkpoint format state_dict dataset_name parse model_name rand max_rate min_rate get_pruning_strategy open output_file close write len join Compose ImageFolder DataLoader Normalize load list format int dump print len SubsetRandomSampler sample range open list items dataset_name import_module __factory debug dumps QuantizationPolicy __policy_params PruningPolicy CompressionScheduler append add_policy_to_scheduler RegularizationPolicy LRPolicy add_policy build_component list items pop update filter_kwargs __init__ class_ get data parameters model_find_param_name info parameters format info children info add_argument_group add_argument named_modules sum format CsvLogger print log_weights_sparsity model_device model_performance_summary PythonLogger startswith get_dummy_input append draw_img_classifier_to_file tabulate items list set_option density DataFrame weights_sparsity_summary tabulate items list set_option mask density DataFrame masks_sparsity_summary join volume out_channels groups in_channels module_visitor module_visitor in_features out_features model_find_module_name size make_non_parallel_copy remove model model_device apply to DataFrame tabulate model_performance_summary DataFrame set_option values enumerate attributes_summary DataFrame set_option values enumerate str format_list set_option append DataFrame values enumerate connectivity_summary connectivity_summary_verbose add_edge Dot Edge Node add_node items list data_node_has_parent create_pydot_graph edges append create_png make_non_parallel_copy SummaryGraph print model_device get_dummy_input draw_model_to_file make_non_parallel_copy Softmax 
info model_device realpath get_dummy_input export forward edges named_parameters ParameterMasker deepcopy L1RankedStructureParameterPruner OrderedDict SparsityLevelParameterPruner CompressionScheduler PruningPolicy add_policy mask_all_weights info float test_func on_epoch_begin items list use plot xlabel ylabel title savefig legend info findall debug format str int namedtuple get_dummy_input model_device format info get format debug directives_equal append get debug format get debug format apply_and_save_recipe create_thinning_recipe_channels create_graph view size t nonzero nelement info sum numel view find_nonzero_channels hasattr error info append execute_thinning_recipe apply_and_save_recipe create_graph create_thinning_recipe_filters find_nonzero_channels debug size ThinningRecipe handle_layer nelement info named_params_layers format view debug size ThinningRecipe handle_layer shape nonzero nelement info named_params_layers sum debug info execute_thinning_recipe enumerate len get format param_groups debug resize_ index_select data resize_ id model_device device list view mask shape getattr nelement to optimizer_thinning format debug grad setattr items index_select model_find_param int info split view size threshold_policy mean t device to type size contiguous expand t unsqueeze group_threshold_binary_map max is_cuda Size Variable isinstance named_modules modules sum view size nonzero sum len view size nonzero sum len view size t nonzero sum len view size volume nonzero sum len sum nonzero len model_params_stats model_params_stats items list density items list size norm view mean size view size float view log_weights_distribution stat_name value log_activation_statsitic children next to randn deepcopy isinstance DataParallel replace_data_parallel module seed debug manual_seed DEFAULT_MAPPING_TAG add_constructor signature Tensor isinstance named_parameters named_modules named_parameters named_modules join quantizer_metadata hasattr replace copyfile save info type thinning_recipes state_dict sorted load get zeros_mask_dict format str get_contents_table _load_optimizer prepare_model dict CompressionScheduler execute_thinning_recipes_list warning load_state_dict info to thinning_recipes get items list format print sampler enumerate len Compose CIFAR10 join ImageFolder Compose Normalize seed manual_seed int floor len _get_subset_length permutation len __image_size list set_deterministic __split_list _get_sampler shuffle DataLoader datasets_fn range len log_git_state _pip_freeze format join argv basename debug len makedirs copy device_count version cuda exists release sched_getaffinity join info getLogger strftime unlink realpath symlink isfile fileConfig makedirs join QuantCalibrationStatsCollector save info join ActivationHistogramsCollector collect_quant_stats save info start stop start list values stop format lower DataParallel info features AlexNetBN PreactResNet PreactResNet PreactResNet PreactResNet PreactResNet load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict ResNetEarlyExit append cell named_children isinstance LSTMCell LSTM setattr add_argument_group add_argument apply_mask L1RankedStructureParameterPruner set_param_mask train_fn test_fn SGD parameters PythonLogger arch dataset remove_filters create_model_masks_dict prune_finetune_test empty_cache deepcopy info get deepcopy Process join start Queue share_memory prune_finetune_test append SummaryGraph named_modules find_op 
isinstance Conv2d get_dummy_input normalize_module_name append numel load_state_dict CompressionScheduler format add_scalar test_fn find_most_robust_layer record_network_details create_scheduler effective_train_size get_param_densities save_checkpoint info arch dataset get_model_compute_budget data threshold_mask mul_ named_parameters contiguous cuda detach tanh num_bits apply max asymmetric_linear_quantization_params data Parameter register_buffer delattr zeros_like extra_repr setattr register_parameter to unsqueeze float32 any zeros_like _prep_saturation_val_tensor _prep_saturation_val_tensor zeros_like min any device to round max clamp_ round_ linear_quantize div_ view get_tensor_min_max get_tensor_min_max get_tensor_avg_min_max std min get_tensor_min_max mean max get_tensor_mean_n_stds_min_max min floor get_scale_approximation_mult get_scale_approximation_shift_bits get_scale_approximation_params __name__ isinstance view _get_saturation_fn sat_fn approx_scale_as_mult_and_shift symmetric_linear_quantization_params dim asymmetric_linear_quantization_params symmetric_linear_quantization_params min abs_ approx_scale_as_mult_and_shift tensor float max asymmetric_linear_quantization_params add_argument_group add_argument partial add_mutually_exclusive_group inputs_quant named_children isinstance print Conv2dWithMask Conv2d setattr MobileNet randn print size net topk size t eq mul_ expand_as append sum max module sum module model_performance_summary makedirs zeros_mask_dict list _net create_graph parameters mask_all_weights apply_and_save_recipe optimizer on_epoch_begin create_thinning_recipe_filters
# EagleEye: Fast Sub-net Evaluation for Efficient Neural Network Pruning ![Python version support](https://img.shields.io/badge/python-3.6-blue.svg) ![PyTorch version support](https://img.shields.io/badge/pytorch-1.1.0-red.svg) PyTorch implementation for *[EagleEye: Fast Sub-net Evaluation for Efficient Neural Network Pruning](https://arxiv.org/abs/2007.02491)* [Bailin Li,](https://github.com/bezorro) [Bowen Wu](https://github.com/Bowenwu1), Jiang Su, [Guangrun Wang](https://wanggrun.github.io/projects/zw), [Liang Lin](http://www.linliang.net/) Presented at [ECCV 2020 (Oral)](https://eccv2020.eu/accepted-papers/) Check [slides](https://dmmo.dm-ai.cn/eagle_eye/dmai_eagleeye_jiqizhixin202008.pdf) about EagleEye: “High-performance AI on the Edge: from perspectives of model compression and hardware architecture design“, DMAI HiPerAIR, Aug. 2020. ![pipeline](fig/eye.png) ## Citation If you use EagleEye in your research, please consider citing:
1,400
anshul3899/Structured-Graph-Learning
['graph learning']
['Structured Graph Learning Via Laplacian Spectral Constraints', 'A Unified Framework for Structured Graph Learning via Spectral Constraints']
objectives.py optimizer.py sgl.py main2.py metrics.py animals.py cancer.py main3.py utils.py main.py empirical_estimate generate_kcomponent_data empirical_estimate generate_bipartite_data Metrics ModelSelection Objectives Optimizer LearnGraphTopolgy Operators int set_cmap str multivariate_normal ones block_diag L colorbar pinv title imshow savefig figure Operators zeros diag T print close colorbar dot pinv title imshow savefig figure Metrics diag set_cmap multivariate_normal ones rand hstack close colorbar pinv title imshow vstack figure savefig zeros sum diag
# [Structured Graph Learning Python implementation](https://github.com/anshul3899/Structured-Graph-Learning) A naive Python implementation of the Structured Graph Learning (SGL) algorithm from the NeurIPS paper by Kumar et al. (2019, https://papers.nips.cc/paper/9339-structured-graph-learning-via-laplacian-spectral-constraints) ## Work in Progress! ### learn k-component graph Currently only the learn_k_component_graph API is available from the sgl library. Load the dataset in main.py. Currently the two-moons dataset is included; others are coming soon! ```bash python main.py ``` Plots are generated in the [plots](https://github.com/anshul3899/Structured-Graph-Learning/blob/master/plots) directory
1,401
antoine-dedieu/subset_selection_with_shrinkage
['sparse learning']
['Subset Selection with Shrinkage: Sparse Linear Modeling when the SNR is low']
python/algorithms/DFO.py python/algorithms/neighborhood_continuation.py python/example/simulate_data.py python/example/example.py python/algorithms/aux_DFO.py python/algorithms/MIO.py soft_thresholding_l2_2 solve_restricted_problem soft_thresholding_l2 power_method soft_thresholding_l1 DFO shuffle_half_support neighborhood_continuation Lasso_Ridge_path simulate_data T norm ones rand dot norm norm T coef_ len DFO dot shape array LinearRegression fit T solve_restricted_problem copy dot shape power_method str time T norm print Lasso_Ridge_path argmin DFO copy dot shape round shuffle_half_support append power_method array range svd T coef_ DFO copy dot shape Lasso append zeros diag fit zeros int shuffle copy seed normal list T var norm str ones zeros print dot sqrt cholesky append randint float range
# Subset Selection with Shrinkage: Sparse Linear Modeling when the SNR is low <!--- ## Getting Started ## Algorithms --> ### Antoine Dedieu, Rahul Mazumder, and Peter Radchenko ## Introduction We consider a regularized version of the canonical best subset estimator, which is given by the following optimization problem ``` minimize 0.5*\| y - X \beta \|_2^2 + \lambda \|\beta\|_{q}^q
1,402
antoine77340/Mixture-of-Embedding-Experts
['video retrieval']
['Learning a Text-Video Embedding from Incomplete and Heterogeneous Data']
LSMDC.py train.py loupe.py loss.py qcm_sampler.py MSR_sampler.py MSRVTT.py model.py MaxMarginRankingLoss NetVLAD NetRVLAD LSMDC LSMDC_qcm Gated_Embedding_Unit Net MEE Net2 Context_Gating MSRVTT MSRSampler QCMSampler verbose compute_metric make_tensor print sort where median float sum diag len zeros range len
# Mixture-of-Embeddings-Experts This GitHub repo provides a PyTorch implementation of the Mixture-of-Embeddings-Experts model (MEE) [1]. ## Dependencies Python 2 and PyTorch 0.3 ## Usage example Creating an MEE block: ```python from model import MEE ''' Initializing an MEE module
1,403
antoine77340/RareAct
['action recognition']
['RareAct: A video dataset of unusual interactions']
compute_score.py normalize_nid vid_sampling add set zeros enumerate len ones defaultdict range len
# RareAct This repository contains annotations for the [RareAct](https://arxiv.org/abs/2008.01018) dataset as well as an evaluation script for computing the wAP and sAP metrics described in the paper. ![RareAct](rareact.png) ## Requirements (only for the evaluation script) - Python 3 - Pandas - Scikit-learn ## Data You can download the videos zipped into one file [here](https://www.rocq.inria.fr/cluster-willow/amiech/rareact.zip).
1,404
anuradha1992/EmpatheticIntents
['response generation']
['A Taxonomy of Empathetic Response Intents in Human Social Conversations']
create_datasets.py train.py annotate.py utilities.py optimize.py model.py predict_emotion create_datasets point_wise_feed_forward_network MultiHeadAttention gelu loss_function EmoBERT EncoderLayer CustomSchedule validate train_step create_masks create_padding_mask build_model create_masks constant emobert ones zeros numpy len join format concatenate print vocab_size create_dataset batch SparseCategoricalCrossentropy scce train_loss experimental_run_v2 SUM reduce create_masks mean numpy append argmax enumerate emobert float32 cast equal create_padding_mask create_masks constant model ones randint
# EmpatheticIntents ### Introduction In empathetic human social conversations, the speaker often carries a certain emotion; however, the listener, being empathetic, does not necessarily carry a specific emotion. Instead, by means of a question or an expression of acknowledgement or agreement, a listener can show his empathy towards the other person. By manually analyzing a subset of listener utterances in the EmpatheticDialogues dataset (Rashkin et al., 2019) containing 25K empathetic human conversations, we discovered specific means or intents that a listener uses to express his empathy towards the speaker. The following are the most frequent intents that were discovered: 1. **Questioning** (to know further details or clarify) e.g. *What are you looking forward to?* 2. **Acknowledging** (Admitting as being fact) e.g. *That sounds like double good news. It was probably fun having your hard work rewarded* 3. **Consoling** e.g. *I hope he gets the help he needs.* 4. **Agreeing** (Thinking/Saying the same) e.g. *That’s a great feeling, I agree!* 5. **Encouraging** e.g. *Hopefully you will catch those great deals!* 6. **Sympathizing** (Expressing pity or sorrow for the person in trouble) e.g. *So sorry to hear that.* 7. **Suggesting** e.g. *Maybe you two should go to the pet store to try and find a new dog for him!*
1,405
anuragbaurai/Portable-camera-based-assistive-text-reader-for-blind-persons
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
data_utils.py cnn_basenet.py base_data_provider.py test_shadownet.py data_provider.py __init__.py log_utils.py config.py crnn_model.py demo_shadownet.py train_shadownet.py establish_char_dict.py write_text_features.py Dataset CNNBaseModel ShadowNet TextDataset TextDataProvider TextFeatureWriter TextFeatureIO TextFeatureReader FeatureIO recognize init_args CharDictBuilder init_logger init_args test_shadownet train_shadownet init_args init_args write_features add_argument ArgumentParser ShadowNet ctc_beam_search_decoder TextFeatureIO astype float32 placeholder IMREAD_COLOR ConfigProto GPU_MEMORY_FRACTION Saver resize TF_ALLOW_GROWTH imread Session join setFormatter getLogger addHandler getcwd WARNING StreamHandler Formatter dirname TimedRotatingFileHandler setLevel makedirs join int ShadowNet ctc_beam_search_decoder read_features reader ones close ceil GPU_MEMORY_FRACTION cast Saver tf_record_iterator TF_ALLOW_GROWTH ConfigProto shuffle_batch Session batch ctc_beam_search_decoder read_features localtime Saver LR_DECAY_STEPS exponential_decay Session str ShadowNet LR_DECAY_RATE ones ctc_loss get_collection merge_all strftime EPOCHS cast LEARNING_RATE TF_ALLOW_GROWTH format close FileWriter ConfigProto join time reader Variable graph add_graph edit_distance GPU_MEMORY_FRACTION reduce_mean UPDATE_OPS int32 shuffle_batch scalar makedirs join print TextFeatureIO labels images TextDataProvider imagenames makedirs
# CRNN_Tensorflow A TensorFlow implementation of a deep neural network for scene text recognition, mainly based on the paper "An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition". You can refer to the paper for details: http://arxiv.org/abs/1507.05717. Thanks to the author [Baoguang Shi](https://github.com/bgshih). The model consists of a CNN stage, an RNN stage and a CTC loss for the scene text recognition task. ## Installation This software has only been tested on Ubuntu 16.04 (x64), Python 3.5, CUDA 8.0 and cuDNN 6.0 with a GTX 1070 GPU. It requires TensorFlow 1.3.0; other versions of TensorFlow have not been tested, but any version above 1.0 should work properly. The other required packages can be installed with ``` pip3 install -r requirements.txt ```
1,406
anuragcp/iocl-deepocr
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
Model.py Image_Generator.py realtime.py parameter.py Prediction.py Image_Generator_fe.py training.py preprocess/data_preprocess.py text_to_labels labels_to_text TextImageGenerator text_to_labels labels_to_text TextImageGenerator get_Model ctc_lambda_func real concatenate Input add str T list print ones astype float32 write expand_dims argmax predict
# CRNN (CNN+RNN) **Initial codebase is taken from** https://github.com/qjadud1994/CRNN-Keras. **Part of the code documentation is also taken from the above repo unless otherwise stated.** **CRNN** is a network that combines a CNN and an RNN to process images containing sequence information such as letters. https://arxiv.org/pdf/1507.05717.pdf It is mainly used for OCR and has the following advantages: 1. End-to-end learning is possible. 2. Sequence data of arbitrary length can be processed, because the LSTM is not constrained by the size of the input and output sequences. 3. There is no need for a detector or cropping technique to find each character one by one. This is a slightly modified version of the original CRNN model.
1,407
anushaa18/bookig2
['genre classification']
['Judging a Book By its Cover']
learn_embeddings.py preprocess_text prepare_data clean_text plot_embeddings sampling generate_batches window_words build_vocab join sub split append preprocess_text read_csv enumerate append split sorted Counter Counter len randint set window_words range extend len scatter savefig figure annotate enumerate
anushaa18/bookig2
1,408
anwu21/future-image-similarity
['imitation learning']
['Model-based Behavioral Cloning with Future Image Similarity Learning']
models/model_value.py data/lab_value.py action_example.py data/gaz_value.py train_predictor.py utils.py data/lab_pose.py train_value.py models/model_predictor.py data/gaz_pose.py make_dataset ImageFolder pil_loader load_odom kl_criterion get_training_batch plot get_testing_batch train train get_testing_batch get_training_batch test image_tensor save_image save_gif_with_text mse_metric normalize_data load_dataset gaussian2 prod fspecial_gauss make_image draw_text_tensor clear_progressbar is_sequence init_weights save_np_img sequence_input batch_flatten save_gif eval_seq save_tensors_image Gazebo Gazebo Lab Lab encoder_conv conv_network lstm gaussian_lstm pose_network decoder_conv cnn_layer make_layers SimpleMaxUnpool2d ModelValue FloatTensor cos sin append range append join range load_odom exp log normalize_data normalize_data g_dim int n_past save_tensors_image view batch_size z_dim min posterior append encoder range n_future init_hidden detach zero_grad pose_network cuda prior g_dim n_past view z_dim posterior encoder range cat int decoder conv_network backward Variable beta step n_future init_hidden data batch_size value_network append detach stack data batch_size pose_network cuda prior g_dim value_network view z_dim append encoder range cat detach stack int decoder conv_network Variable init_hidden Lab Gazebo transpose_ ones size len copy_ enumerate save tile toimage clamp size expand fromarray uint8 asarray Draw text numpy image_tensor clamp mimsave cpu numpy append image_tensor mimsave zip cpu numpy append make_image save image_tensor print sum zeros mse_metric range len exp pi exp normal_ __name__ fill_ ConvTranspose2d Conv2d
# Model-based Behavioral Cloning with Future Image Similarity Learning This repository is for our [CoRL 2019 paper](http://arxiv.org/abs/1910.03157): Alan Wu, AJ Piergiovanni, and Michael S. Ryoo "Model-based Behavioral Cloning with Future Image Similarity Learning" in CoRL 2019 If you find this repository useful for your research, please cite our paper: @inproceedings{wu2019fisl, title={Model-based Behavioral Cloning with Future Image Similarity Learning}, booktitle={Conference on Robot Learning (CoRL)}, author={Alan Wu, AJ Piergiovanni, and Michael S. Ryoo},
1,409
aod321/Face-parsing-via-tanh-warping
['face parsing']
['Face Parsing with RoI Tanh-Warping']
resfpn.py train_stage2_my.py model_test.py train_stage1_my.py train_stage1_lightning.py test.py dataset.py model_lightning.py template.py preprocess.py test_out.py train_test.py augmentation.py warpper.py train_stage2_lightning.py model.py Stage2DataAugmentation DataAugmentation new_HelenDataset Warped_HelenDataset napply_mat_tensor inner_Module BackBone Stage1 outer_Seg inner_Seg ComponentRegress Hybird BackBone Stage1 outer_Seg inner_Seg ComponentRegress Hybird outer_Seg BackBone Hybird PrepareLabels Warping ToTensor FastTanhWarping labels2boxes apply_mat_tensor RandomAffine atanh GaussianNoise resnet_fpn_backbone NewBackBone_FPN F1Accuracy TemplateModel TrainModel DataLoaderX start_train TrainModel DataLoaderX start_train TrainModel DataLoaderX start_train eps T view stack permute append range T eps isinstance transpose type_as from_numpy stack to type array one_hot array float getbbox long inplanes requires_grad_ named_parameters print epochs eval TrainModel train step range
# Face-parsing-via-tanh-warping PyTorch implementations of "Face Parsing via tanh-warping". Paper: Lin et al., Face Parsing with RoI Tanh-Warping, http://arxiv.org/abs/1906.01342 ## Still in progress TODO - [x] Backbone - [x] SubModules - [x] Implement Tanh Warping (CUDA supported) - [x] Data augmentation - [ ] Hyperparameter adjustment and result optimization
1,410
aod321/STN-iCNN
['face parsing']
['End-to-End Face Parsing via Interlinked Convolutional Neural Networks']
1_tuning_cropper_model2.py f1_score.py exp_endtoend.py train_stage1_skinhair.py train_stage2.py data_augmentation.py new_data_augmentation.py test_AB.py train_croper.py dataset.py test_highest.py visualize_data_augmentation.py model1.py exp_challenge.py train_stage1.py preprocess.py test_ABC.py testC.py simple_train.py model.py test_f1_score.py test_hair.py 2_tuning_cropper_model1_new_aug.py icnnmodel.py template.py 2_tuning_cropper_model1.py helper_funcs.py testA.py 3_end2end_tunning_all.py TrainModel start_train TrainModel DataLoaderX start_train TrainModel DataLoaderX start_train TrainModel DataLoaderX start_train TrainModel_F1val PartsDataset HelenDataset Stage1Augmentation Stage2Augmentation SkinHairAugmentation fast_histogram _merge _read_names F1Score affine_crop affine_mapback calc_centroid stage2_pred_softmax F1Accuracy crop_labels stage2_pred_onehot Interpolate iCNN_Node FaceModel iCNN_Cell Stage2FaceModel SelectNet SelectNet_dw Stage1Model SelectNet_resnet SelectNet_dw_resblock BasicBlock Stage2Model Stage1Model SelectNet Stage2Model Stage1Augmentation Stage2Augmentation OldStage2_ToPILImage ToPILImage Stage2ToPILImage Stage2_GaussianNoise ToTensor OrigPad Stage2_nose_mouth_RandomAffine Resize Stage2_ToTensor Stage2_RandomAffine RandomAffine OldStage2Resize Stage1Aug Stage2Aug GaussianNoise OldStage2ToTensor TrainModel start_train F1Accuracy TemplateModel TrainModel start_train TrainModel_accu TrainModel DataLoaderX start_train TrainModel start_train TrainModel DataLoaderX start_train worker_init_fn show_stage1 show_stage2 print epochs eval TrainModel train step range test TrainModel_F1val reshape append strip open view float shape device to sum cat crop_labels grid_sample type_as shape stack floor interpolate append zeros to range cat grid_sample type_as append tensor sum range cat grid_sample inverse append to range cat TrainModel_accu seed make_grid print sleep to argmax add_image make_grid print to range add_image
# new_train
1,411
apekshapriya/Text-Localization-in-Image
['scene text detection']
['Accurate Text Localization in Natural Image with Cascaded Convolutional Text Network']
code/model.py train_model text_detection define_cascaded_architecture metrics predict create_train_test_set define_optimizer preprocess_image save_model_fig ANTIALIAS convert divide resize array join print append preprocess_image array exists input Model VGG16 output plot_model adam load_model print SGD compile fit evaluate train_model create_train_test_set define_cascaded_architecture save_model_fig
# Project Text localization in images, i.e. detection of "blocks of text" in an image. # Approach Implemented the paper "Accurate Text Localization in Natural Image with Cascaded Convolutional Text Network". ## Dataset Creation The dataset is created by following these steps: ### Raw Data The dataset consists of images and their corresponding XML files. <br> Input images are RGB images containing text. <br> Each XML file contains four coordinates representing the bounding boxes of the text areas in the corresponding image. <br>
1,412
apertium/apertium-weighting-tools
['morphological analysis']
['An Unsupervised Method for Weighting Finite-state Morphological Analyzers']
eval/constraintgrammar_fit.py utils/utils.py eval/metrics_report.py eval/w2v_fit.py eval/corpus_split.py utils/w2v_generate_weights.py utils/w2v_utils.py eval/eval_utils.py utils/w2v_get_similar_words.py utils/w2v_train_model.py eval/equalweight_fit.py utils/bpe_generate_weights.py eval/unigram_fit.py tests/annotated-corpus-to-weightlist-test.py eval/random_fit.py tests/lt_weight/__init__.py eval/bpe_fit.py tests/weighttest.py tests/run_tests.py tests/basictest.py eval/analysis_length_fit.py write_to_file stream_parser_split_X_y get_apertium_analyses stream_parser_extract_analyses split_X_y compute_weighted_recall get_sorted_files_in_directory compute_weighted_precision TestAnnotatedCorpusToWeightlist BasicTest Alarm WeightTest MultipleWeightlists WeightAnalysisUsingTheFirstMatchingWeightlist WeigthedTransducerMatchSameInputWithCorrectWeight WeigthedTransducerMatchSameInput get_first_seg get_no_of_tags bpe_disambiguate bpe_disambiguate_1 get_lemmas get_lemma get_no_of_affixes extract_tag_from_analysis extract_analysis generate_regex extract_surface get_weight get_similar_tokens get_naive_similar WikiDataset clean_line Dataset join str Path append run join parse wordform append reading_to_string repeat min get_first_seg get_lemmas min get_first_seg get_lemmas findall extract_analysis join format sub sum Counter predict_output_word punctuation replace
# apertium-weighting-tools The project aims at implementing a set of algorithms for weighting transducers. - [apertium-weighting-tools](#apertium-weighting-tools) * [Dependencies](#dependencies) * [Models Description](#models-description) - [lt-weight](#lt-weight) * [Methodology](#methodology) * [Usage](#usage) * [Example](#example)
1,413
apexrl/CoDAIL
['imitation learning']
['Multi-Agent Interactions Modeling with Correlated Policies']
multi-agent-irl/rl/acktr/filters.py multi-agent-particle-envs/multiagent/policy.py multi-agent-particle-envs/make_env.py multi-agent-irl/rl/common/vec_env/mpi_vec_env1.py multi-agent-irl/sandbox/mppo/run_simple_walker.py multi-agent-irl/rl/envs/multi_ant.py multi-agent-irl/rl/common/segment_tree.py multi-agent-irl/rl/acktr/utils.py multi-agent-irl/rl/envs/mujoco_env/walker2d.py multi-agent-irl/irl/mack/run_mack_codail.py multi-agent-irl/sandbox/mack/policies.py multi-agent-irl/irl/render.py multi-agent-irl/sandbox/imitation/render.py multi-agent-irl/rl/common/mpi_moments.py multi-agent-irl/irl/mack/gail.py multi-agent-particle-envs/multiagent/core.py multi-agent-irl/sandbox/mack/run_walker.py multi-agent-irl/rl/common/console_util.py multi-agent-irl/rl/common/math_util.py multi-agent-irl/sandbox/imitation/run_cmappo.py multi-agent-irl/sandbox/mack/render.py multi-agent-particle-envs/multiagent/scenarios/simple_world_comm.py multi-agent-irl/rl/bench/__init__.py multi-agent-irl/rl/acktr/running_stat.py multi-agent-irl/irl/mack/kfac_discriminator_ncdail.py multi-agent-irl/sandbox/mack/run_simple.py multi-agent-irl/irl/mack/run_mack_airl.py multi-agent-particle-envs/multiagent/scenario.py multi-agent-irl/rl/common/mpi_fork.py multi-agent-irl/irl/mack/run_mack_gail.py multi-agent-irl/rl/common/__init__.py multi-agent-particle-envs/multiagent/scenarios/simple.py multi-agent-particle-envs/multiagent/scenarios/simple_tag.py multi-agent-irl/irl/mack/tf_util.py multi-agent-irl/rl/bench/monitor.py multi-agent-particle-envs/multiagent/scenarios/simple_reference.py multi-agent-irl/sandbox/imitation/crender.py multi-agent-irl/irl/mack/kfac_discriminator.py multi-agent-particle-envs/multiagent/environment.py multi-agent-particle-envs/multiagent/scenarios/simple_spread.py multi-agent-irl/sandbox/mack/policies_om.py multi-agent-irl/irl/mack/kfac_discriminator_airl.py multi-agent-irl/rl/common/vec_env/dummy_vec_env.py multi-agent-irl/rl/common/vec_env/vec_frame_stack.py multi-agent-particle-envs/multiagent/__init__.py multi-agent-irl/rl/common/vec_env/subproc_vec_env.py multi-agent-irl/rl/common/vec_env/subproc_vec_env_walker.py multi-agent-irl/rl/__init__.py multi-agent-irl/irl/mack/kfac_discriminator_codail.py multi-agent-particle-envs/multiagent/scenarios/simple_speaker_listener.py multi-agent-particle-envs/multiagent/rendering.py multi-agent-irl/sandbox/mack/run_simple_om.py multi-agent-particle-envs/multiagent/scenarios/simple_adversary.py multi-agent-irl/sandbox/mack/opponent_policies.py multi-agent-irl/rl/common/ma_wrappers.py multi-agent-irl/rl/common/atari_wrappers.py multi-agent-irl/rl/common/vec_env/__init__.py multi-agent-irl/rl/acktr/kfac.py multi-agent-irl/sandbox/mack/acktr_disc_om.py multi-agent-irl/sandbox/mack/run_clone.py multi-agent-irl/sandbox/mack/acktr_multi_disc.py multi-agent-irl/irl/mack/run_mack_ncdail.py multi-agent-irl/irl/dataset.py multi-agent-irl/irl/mack/airl.py multi-agent-irl/rl/envs/multi_walker.py multi-agent-irl/rl/common/vec_env/vec_normalize.py multi-agent-irl/sandbox/mack/acktr_cont.py multi-agent-irl/irl/mack/ncdail.py multi-agent-irl/sandbox/mppo/run_walker.py multi-agent-particle-envs/multiagent/scenarios/__init__.py multi-agent-irl/sandbox/imitation/run_mujoco.py multi-agent-irl/rl/common/tf_util.py multi-agent-irl/sandbox/mppo/run_sumo.py multi-agent-irl/rl/common/distributions.py multi-agent-irl/sandbox/mack/run_walker_multi_disc.py multi-agent-particle-envs/multiagent/scenarios/simple_push.py multi-agent-irl/sandbox/mppo/ppo2.py 
multi-agent-irl/rl/common/vec_env/speedtest.py multi-agent-irl/rl/common/dataset.py multi-agent-irl/sandbox/mppo/policies.py multi-agent-particle-envs/multiagent/scenarios/simple_crypto.py multi-agent-irl/rl/bench/benchmarks.py multi-agent-irl/rl/acktr/kfac_utils.py multi-agent-irl/rl/common/mpi_running_mean_std.py multi-agent-particle-envs/bin/interactive.py multi-agent-irl/rl/logger.py multi-agent-irl/rl/common/misc_util.py multi-agent-irl/irl/mack/codail.py multi-agent-particle-envs/multiagent/multi_discrete.py multi-agent-irl/rl/common/mpi_adam.py multi-agent-particle-envs/setup.py multi-agent-irl/rl/common/schedules.py multi-agent-irl/sandbox/mack/acktr_disc.py test Dset MADataSet kde_prob render kl_divergence Discriminator Discriminator Discriminator Discriminator main train main train main train main train relu_layer softplus_layer tanh_layer linear get_session_config assert_shape load_prior_params discounted_reduce_sum dumpkvs HumanOutputFormat get_dir warn set_level Logger log logkvs make_output_format JSONOutputFormat getkvs configure TensorBoardOutputFormat OutputFormat debug logkv info _demo error reset make_env Filter IdentityFilter StackFilter ZFilter FlattenFilter CompositionFilter AddClock DivFilter Ind2OneHotFilter detectMinVal clipoutNeg gmatmul factorReshape test_running_stat RunningStat multionehot fc discount_with_dones conv kl_div double_linear_con cat_entropy onehot ortho_init find_trainable_variables cat_entropy_softmax double_middle_drop mse sample dense constant conv_to_fc linear make_path EpisodeStats middle_drop Scheduler std env_shortname get_task find_task_for_env_id_in_any_benchmark register_benchmark get_benchmark list_benchmarks JSONLogger get_monitor_files load_results LoadMonitorResultsError Monitor WarpFrame FireResetEnv EpisodicLifeEnv wrap_deepmind NoopResetEnv FrameStack MaxAndSkipEnv ClipRewardEnv fmt_row colorize fmt_item timed Dataset iterbatches MultiCategoricalPd BernoulliPd Pd test_probtypes DiagGaussianPdType BernoulliPdType CategoricalPdType CategoricalPdOneHotType shape_el DiagGaussianPd validate_probtype make_pdtype MultiCategoricalPdType PdType CategoricalPdOneHot CategoricalPd test_discount_with_boundaries explained_variance_2d explained_variance flatten_arrays ncc discount_with_boundaries unflatten_vector discount MAWrapper pickle_load RunningAvg boolean_flag set_global_seeds EzPickle pretty_eta relatively_safe_pickle_dump zipsame unpack SimpleMonitor get_wrapper_by_name MpiAdam test_MpiAdam mpi_fork mpi_moments test_runningmeanstd test_dist RunningMeanStd test_runningmeanstd linear_interpolation Schedule ConstantSchedule PiecewiseSchedule LinearSchedule SegmentTree MinSegmentTree SumSegmentTree function GetFlat BatchInput absolute_scope_name l2loss huber_loss scope_name single_threaded_session save_state fancy_slice_2d argmax max lrelu initialize set_value scope_vars get_placeholder mem_friendly_function numel TfInput conv2d is_placeholder intprod SetFromFlat sum make_session module switch minimize_and_clip densenobias concatenate Uint8Input normc_initializer dropout flattenallbut0 in_session mean get_parents eval topsorted wndense load_state get_placeholder_cached _MemFriendlyFunction PlacholderTfInput dense var _Function get_session Module min lengths_to_mask categorical_sample_logits reset var_shape flatgrad ensure_tf_input std DummyVecEnv MpiVecEnv make_env worker create_env SubprocVecEnv worker create_env SubprocVecEnv VecFrameStack VecNormalize MAVecNormalize VecEnv MultiAnt AntLeg Agent _discount_sum BipedalWalker 
DiagnosticsWrapper AbstractMAEnv StandardizedEnv EzPickle MultiWalkerEnv WrappedAgent ObservationBuffer stack_dict_list ContactDetector Walker2dEnv render render main train main train CategoricalPolicy MultiCategoricalPolicy GaussianPolicy CategoricalPolicy MultiCategoricalPolicy GaussianPolicy CategoricalPolicy MultiCategoricalPolicy GaussianPolicy render train learn main train main train main train main train LnLstmPolicy nature_cnn MlpPolicy CnnPolicy LstmPolicy learn safemean Runner Model constfn lsf01 sf01 main train main train main train get_identical make_env Agent Entity Landmark AgentState EntityState Action World MultiAgentEnv BatchMultiAgentEnv MultiDiscrete Policy InteractivePolicy Transform PolyLine Image _add_attrs Point make_polyline Geom get_display SimpleImageViewer LineStyle Line make_polygon FilledPolygon Viewer Compound Color Attr LineWidth make_capsule make_circle BaseScenario Scenario Scenario CryptoAgent Scenario Scenario Scenario Scenario Scenario Scenario Scenario load print get_next_batch MADataSet p_pos save reset_default_graph open str squeeze shape create_env savefig sleep append sum range dump format make_model concatenate mean load print jointplot action_space observation_space reset step array len kde_prob list exp score_samples fit configure set_global_seeds learn print close SubprocVecEnv MADataSet str train format product print cumprod ones_like ConfigProto reset_default_graph open join makedirs items list logkv log log log log join strftime gettempdir CURRENT getenv Logger log log DEFAULT configure dumpkvs debug rmtree set_level reset logkv info exists get_shape list remove value insert reshape transpose matmul range len cast float32 clipoutNeg reduce_max greater logical_or less reduce_min cond get_shape reshape len RunningStat randn mean append range push shape random_uniform reduce_mean square exp reduce_max reduce_sum reshape prod square append zip zeros zeros range append rfind replace append sorted loads get_monitor_files WarpFrame EpisodicLifeEnv FireResetEnv NoopResetEnv MaxAndSkipEnv ClipRewardEnv join len str ndarray isinstance item append str print colorize time asarray arange array_split tuple shuffle map Box isinstance MultiDiscrete Discrete MultiBinary seed size DiagGaussianPdType BernoulliPdType CategoricalPdType validate_probtype array function entropy randn param_placeholder sample_placeholder logp size mean sqrt eval repeat sample kl calcloglik std var var append reshape prod range zeros_like discount_with_boundaries array len list __next__ iter append range seed helper add_argument replace Wrapper isinstance env rename seed MpiAdam update function lossandgrad minimize Variable print astype square reduce_sum set_random_seed sin global_variables_initializer range run update copy check_call pop list asarray concatenate Allreduce maximum square shape sqrt nan zeros empty prod COMM_WORLD seed concatenate print mpi_moments zipsame update initialize RunningMeanStd eval seed update initialize COMM_WORLD RunningMeanStd concatenate mean get_shape copy set_shape cast cond shape random_uniform is_placeholder isinstance compute_gradients enumerate ConfigProto update variables_initializer global_variables set run dtype assign placeholder run restore Saver get_session get_session Saver dirname save makedirs matmul get_variable square matmul sqrt sum get_variable list _Function isinstance values _MemFriendlyFunction isinstance append get_parents get pop as_list gradients shape int64 cast reshape convert_to_tensor placeholder reset_default_graph recv 
close render reset send step x list isinstance dict array keys single_threaded_session __enter__ restore Box act n policy_fn extend make_env run seed make_env Monitor WARN setLevel __enter__ get init len get_next_batch get_dir save reset_default_graph set_global_seeds num_agents range num_envs make_model float int time join print action_space observation_space request_stop Coordinator record_tabular dump_tabular int format activ conv_to_fc relu float32 conv cast shape append shape reshape arange dumpkvs safemean constfn run append format close shuffle mean cliprange lr logkv zip deque isinstance clone extend makedirs Runner loss_names train len ConfigProto MAVecNormalize seed configure mujoco_arg_parser add_argument lr parse_args batch env make_world observation Scenario benchmark_data reward MultiAgentEnv reset_world string_types isinstance set_linewidth set_color append range pi Transform add_attr Compound make_circle make_polygon join dirname
# CoDAIL Implementation of CoDAIL in the paper [Multi-Agent Interactions Modeling with Correlated Policies](https://openreview.net/forum?id=B1gZV1HYvS), based on [MA-AIRL](https://github.com/ermongroup/ma-airl). ## Running the Code - For code implementing CoDAIL, please visit `multi-agent-irl` folder. - For the OpenAI particle environment code, please visit `multi-agent-particle-envs` folder. **NOTE**: Early implementation can be seen at [codailiclr2020/CoDAIL](https://github.com/codailiclr2020/CoDAIL)
1,414
apmoore1/Bella
['sentiment analysis']
['Bringing replication and reproduction together with generalisability in NLP: Three reproduction studies for Target Dependent Sentiment Analysis']
bella/scikit_features/syntactic_context.py bella/models/tdparse.py bella/scikit_features/lexicon_filter.py bella/scikit_features/join_context_vectors.py bella/data_types.py tests/test_helper.py tests/test_tokenisers.py bella/contexts.py bella/neural_pooling.py bella/dependency_parsers.py docs/conf.py tests/test_contexts.py tests/test_dependency_tokens.py bella/models/tdlstm.py bella/scikit_features/tokeniser.py tests/test_dependency_parsers.py bella/scikit_features/debug.py bella/parsers.py bella/tokenisers.py bella/helper.py bella/error_analysis.py tests/test_parsers.py bella/scikit_features/__init__.py bella/moses_tools.py bella/stanford_tools.py tests/test_lexicons.py bella/models/__init__.py tests/test_syntactic_contexts.py bella/models/base.py bella/scikit_features/context.py bella/write_data.py tests/test_neural_pooling.py bella/__init__.py bella/word_vectors.py bella/models/target.py tests/test_data_types.py bella/evaluation.py bella/scikit_features/neural_pooling.py bella/lexicons.py tests/test_word_vectors.py setup.py bella/dependency_tokens.py bella/scikit_features/word_vector.py bella/syntactic_contexts.py context TargetCollection Target _convert_conll TweeboParser stanford tweebo _to_dependencies_tokens DependencyToken unknown_targets same_multi_sentiment targets_to_samples different_sentiment similar_sentiment target_sentiments same_one_sentiment plot_acc_f1 score summary_errors evaluate_models distinct_sentiment_metrics plot_probability get_kwargs evaluate_model datasets_df download_model read_config download_file NRC HuLiu Mpqa Lexicon MosesTokenizer matrix_avg matrix_max matrix_std matrix_min matrix_checking matrix_prod inf_nan_check matrix_median semeval_14 _semeval_extract_data election_train hu_liu semeval_15_16 dong election_test election mitchel StanfordNlp dependency_parse constituency_parse tokenise context normalise_target dependency_context target_normalisation normalise_context dependency_relation_context whitespace stanford ark_twokenize _get_spacy_model spacy_tokeniser moses GloveCommonCrawl GensimVectors VoVectors WordVectors GloveTwitterVectors GloveWikiGiga PreTrained SSWE semeval_14 BaseModel KerasModel SKLearnModel ModelMixin TargetInd TargetDepMinus TargetDep TargetDepPlus LSTM TDLSTM TCLSTM TDParse TDParseMinus TDParsePlus Context Debug JoinContextVectors LexiconFilter NeuralPooling Context DependencyChildContext SyntacticContext ContextTokeniser ContextWordVectors setup skip TestContexts TestTarget TestDependencyParsers TestDependencyTokens TestHelper TestLexicons matrix_row_error TestNeuralPooling matrix_inf_nan_check matrix_error matrix_dim_error TestParsers TestTarget TestTokenisers TestWordVectors append items list defaultdict add_edge sorted DependencyToken Graph strip bfs_successors add set dep_search append add_node enumerate _convert_conll TweeboParser parse_conll append _to_dependencies_tokens append int strip split dependency_parse len append _to_dependencies_tokens range enumerate append lower data data lower defaultdict add data targets_to_samples add set lower target_sentiments data targets_to_samples add set lower target_sentiments data targets_to_samples add set lower intersection target_sentiments data targets_to_samples add set lower target_sentiments data targets_to_samples add set lower target_sentiments score items list items list defaultdict T score mean DataFrame deepcopy sentiment_data add_pred_sentiment subset_by_sentiment subplots boxplot min catplot set barplot max set_ylim set_ylabel subplots set_title boxplot items list 
defaultdict dataset_metric_scores name append get_kwargs len from_tuples list subset_by_ids product dataset_metric_scores name mean empty error_func append DataFrame std get_kwargs len parent replace with_suffix name joinpath download_file mkdir join chars text_type splitext TargetCollection basename abspath add_text text add TargetCollection Target extract_aspect_terms enumerate data basename parse _semeval_extract_data extend getroot splitext abspath basename parse getroot splitext abspath join basename get_data get_file_data splitext abspath election election TargetCollection basename abspath TargetCollection annotate StanfordNlp loads annotate StanfordNlp loads annotate StanfordNlp append join format replace check_target_unique strip len sub add_target_to_text append enumerate split join sorted normalise_target split append target_normalisation lower join get_n_relations replace lower parser append normalise_context range enumerate len connected_target_span lower parser append normalise_context range enumerate len isinstance isinstance isinstance MosesTokenizer isinstance blank append text _get_spacy_model spacy_model items list SubElement Element group_by_sentence startswith connect reshape min asarray
# Bella [![Build Status](https://travis-ci.org/apmoore1/Bella.svg?branch=master)](https://travis-ci.org/apmoore1/Bella) ![PyPI - Downloads](https://img.shields.io/pypi/dm/bella-tdsa.svg) Target Dependent Sentiment Analysis (TDSA) framework. The paper associated with this repository is the [following](https://aclanthology.info/papers/C18-1097/c18-1097): ``` @InProceedings{C18-1097, author = "Moore, Andrew and Rayson, Paul", title = "Bringing replication and reproduction together with generalisability in NLP: Three reproduction studies for Target Dependent Sentiment Analysis", booktitle = "Proceedings of the 27th International Conference on Computational Linguistics", year = "2018",
1,415
apple/ml-multiple-futures-prediction
['motion prediction']
['Multiple Futures Prediction', 'Multiple Futures Prediction']
multiple_futures_prediction/dataset_ngsim.py multiple_futures_prediction/train_ngsim.py multiple_futures_prediction/model_ngsim.py multiple_futures_prediction/cmd/train_ngsim_cmd.py multiple_futures_prediction/my_utils.py multiple_futures_prediction/assets/__init__.py multiple_futures_prediction/__init__.py NgsimDataset mfpNet load_json_file Gaussian2d nll_loss_multimodes nll_loss_test_multimodes nll_loss_per_sample nll_loss logsumexp pi nll_loss_test rotate_to write_json_file rotate_to_inv mse_loss mse_loss_test compute_angles setup_logger get_mean eval train Params main parse_args tanh exp pow log pow log pow sum log pow pow sum squeeze max log view sum view shape pow repeat device to range log len exp KLDivLoss nll_loss_per_sample reshape shape device zeros to numpy range len str dtype ndarray std isinstance print min shape device Tensor float max shape range zeros arctan2 shape array shape array subsampling forward_mfp nll_loss_test_multimodes use_cuda print fut_len_orig_hz pow nll_loss_test remove_y_mean device zeros to cuda mse_loss_test enumerate zeros_like concatenate divide numpy append sum enumerate print open strftime makedirs use_gru clip_grad_norm_ zero_grad nbr_search_depth DataLoader hist_len_orig_hz mfpNet unsqueeze y_mean save tensor cuda exists log open seed subsampling max nll_loss_multimodes str data_aug lr_init self_norm encoder_size Adam use_context append range state_dict dump updates_div_by_10 nll_loss setup_logger get_mean eval remove_y_mean manual_seed power float flush enumerate load forward_mfp time NgsimDataset use_cuda backward fut_len_orig_hz print write parameters min_lr step add_argument ArgumentParser parse_config_file parse_args config train
# Multiple Futures Prediction ## Paper This software accompanies the paper [**Multiple Futures Prediction**](https://arxiv.org/abs/1911.00997). ([Poster](multiple_futures_prediction/assets/imgs/neurips_mfp_poster.pdf))<br> [Yichuan Charlie Tang](https://www.cs.toronto.edu/~tang) and Ruslan Salakhutdinov<br> Neural Information Processing Systems, 2019. (NeurIPS 2019) Please cite our paper if you find our work useful for your research: ``` @article{tang2019mfp, title={Multiple Futures Prediction}, author={Tang, Yichuan Charlie and Salakhutdinov, Ruslan},
1,416
apple/ml-tree-dst
['semantic parsing']
['Conversational Semantic Parsing for Dialog State Tracking']
utils/tree.py utils/dotted_tree.py escape_node_name debug_print default_format_node pretty_print_tree format_node_helper unescape_node_name main TreeNode go limit debug_print add_argument ArgumentParser parse_args input_file
# Conversational Semantic Parsing for Dialog State Tracking We introduce TreeDST (**T**ree-based **D**ialog **S**tate **T**racking), a multi-turn, multi-domain task-oriented dialog dataset annotated with tree-based user dialog states and system dialog acts. The goal of this dataset is to provide a novel solution for end-to-end dialog state tracking as a conversational semantic parsing task. Please refer to our paper [Conversational Semantic Parsing for Dialog State Tracking](https://arxiv.org/pdf/2010.12770.pdf) for details. ## Task Description The task in TreeDST is to predict the user dialog state for each turn of a conversation. The dialog state is a representation of the user's goal up to the current turn of the conversation. ## Dataset Description The dataset contains 27,280 conversations covering 10 domains with shared types of person, time and location. The dataset and schema can be accessed in the [dataset](dataset) folder. A tool for visualizing the data in the "dotted" format is provided in the [utils](utils) folder. ## License The code in this repository is licensed according to the [LICENSE](LICENSE) file. The TreeDST dataset is licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported License. To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/3.0/.
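The "dotted" visualization utilities listed above (TreeNode, pretty_print_tree, etc.) are not shown in this excerpt, so the following is only a minimal sketch of the general idea: expanding dot-separated paths into a tree and printing it with indentation. The TreeNode class, the example paths, and the printing convention are illustrative assumptions, not the repository's actual utils/dotted_tree.py implementation.

```python
# Hypothetical sketch: expand dot-separated paths into a tree and pretty-print it.
# The real TreeDST "dotted" format and utils code may differ.
from collections import OrderedDict


class TreeNode:
    """A named node with ordered, name-keyed children."""
    def __init__(self, name):
        self.name = name
        self.children = OrderedDict()

    def add_path(self, path):
        """Insert a dot-separated path such as 'hotel.book.checkin.date'."""
        node = self
        for part in path.split("."):
            node = node.children.setdefault(part, TreeNode(part))

    def pretty_print(self, indent=0):
        print("  " * indent + self.name)
        for child in self.children.values():
            child.pretty_print(indent + 1)


if __name__ == "__main__":
    root = TreeNode("root")
    for p in ["hotel.book.checkin.date", "hotel.book.guests", "weather.query.location"]:
        root.add_path(p)
    root.pretty_print()
```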
1,417
apple2373/min-seg-road
['autonomous navigation', 'weakly supervised segmentation', 'semantic segmentation', 'autonomous driving']
['Minimizing Supervision for Free-space Segmentation']
drn.py drn_d_54 drn_c_58 drn_d_38 drn_c_26 Bottleneck drn_d_105 drn_d_22 conv3x3 DRN drn_c_42 BasicBlock load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict
# Minimizing Supervision for Free-space Segmentation This is the **informal** source code for the following paper. The formal one is here: https://github.com/pfnet-research/superpixel-align http://openaccess.thecvf.com/content_cvpr_2018_workshops/papers/w14/Tsutsui_Minimizing_Supervision_for_CVPR_2018_paper.pdf ``` @inproceedings{tsutsui2018minimizing, title={Minimizing Supervision for Free-Space Segmentation}, author={Tsutsui, Satoshi and Kerola, Tommi and Saito, Shunta and Crandall, David J}, comment = {the first three authors contributed equally}, booktitle={Conference on Computer Vision and Pattern Recognition (CVPR) Workshop on Autonomous Driving}, year={2018}
1,418
aravindvenu7/Handwritten-Character-Recognition
['scene text detection']
['Detecting Text in Natural Image with Connectionist Text Proposal Network']
src/makedata/imdb.py src/anchors/anchors.py src/makedata/ds_utils.py src/roi_data_layer/minibatch.py src/fast_rcnn/train.py src/ops/blob.py src/makedata/__init__.py src/roi_data_layer/roidb.py src/fast_rcnn/test.py src/makedata/pascal_voc.py src/fast_rcnn/__init__.py src/fast_rcnn/bbox_transform.py src/makedata/builder.py src/roi_data_layer/__init__.py src/roi_data_layer/layer.py src/ops/timer.py dataprep/split_label.py src/ops/boxes_grid.py src/fast_rcnn/config.py dataprep/ToVoc.py generate_xml _is_hard build_voc_dirs generate_anchors generate_basic_anchors scale_anchor clip_boxes bbox_transform bbox_transform_inv cfg_from_list cfg_from_file _merge_a_into_b get_log_dir get_output_dir test_ctpn _get_image_blob _get_blobs train_net get_training_roidb get_data_layer SolverWrapper _selective_search_IJCV_top_k get_imdb list_imdbs unique_boxes xywh_to_xyxy validate_boxes xyxy_to_xywh filter_small_boxes imdb _which im_list_to_blob prep_im_for_blob get_boxes_grid Timer RoIDataLayer _project_im_rois _sample_rois _get_image_blob get_minibatch _get_bbox_regression_labels add_bbox_regression_targets prepare_roidb _compute_targets str int Document lower append float append_xml_node_attr split join mkdir zeros array int32 scale_anchor copy append transpose log dtype exp astype shape zeros minimum maximum join EXP_DIR name abspath ROOT_DIR makedirs join name strftime localtime LOG_DIR abspath ROOT_DIR makedirs items list ndarray isinstance type array _merge_a_into_b literal_eval zip split MAX_SIZE min astype float32 SCALES shape resize append im_list_to_blob float max _get_image_blob HAS_RPN array _get_blobs run prepare_roidb print USE_FLIPPED append_flipped_images HAS_RPN RoIDataLayer IS_MULTISCALE HAS_RPN ConfigProto pascal_voc selective_search_IJCV_roidb print list_imdbs dot array unique join strip is_exe pathsep split zeros max range len min astype float32 rand shape RANDOM_DOWNSAMPLE resize float max arange reshape transpose hstack SPATIAL_SCALE dstack ASPECTS sqrt SCALES_BASE floor KERNEL_SIZE repeat meshgrid zeros max range len vstack round _project_im_rois basename BATCH_SIZE ones len shape _get_image_blob range FG_FRACTION hstack astype HAS_RPN empty zeros _sample_rois float32 randint array BBOX_REG minimum size choice append _get_bbox_regression_labels prep_im_for_blob PIXEL_MEANS imread range len zeros BBOX_INSIDE_WEIGHTS shape image_index toarray roidb argmax max range image_path_at len EPS BBOX_NORMALIZE_STDS print _compute_targets BBOX_NORMALIZE_MEANS BBOX_NORMALIZE_TARGETS_PRECOMPUTED mean sqrt BBOX_NORMALIZE_TARGETS tile zeros array range len bbox_transform ascontiguousarray zeros argmax bbox_overlaps
# Handwritten-Character-Recognition The aim of this project is to develop an end-to-end Neural Network for both detecting and recognizing handwritten characters in natural images. The first approach that was proposed was to build a text detection module. The architecture of this module was proposed to be based on the Connectionist Text Proposal Network: https://arxiv.org/abs/1609.03605 ![download 2](https://user-images.githubusercontent.com/28951885/52520220-2f794b80-2c8d-11e9-82e4-90d47c482924.png) CURRENT WORK The proposed network uses a Faster RCNN inside the CTPN. Currently, the Faster RCNN has been implemented in code. The work in progress involves the implementation of the Text Connector LSTM part of the network. Once this is complete, the CTPN can be trained. FURTHER WORK Further work will be to train the model using reinforcement learning as proposed by the following paper titled "Deep Reinforcement Learning of Region Proposal Networks for Object Detection": http://openaccess.thecvf.com/content_cvpr_2018/papers/Pirinen_Deep_Reinforcement_Learning_CVPR_2018_paper.pdf The paper proposes a drl-RPN, a deep reinforcement learning based visual recognition model consisting of a sequential region proposal network (RPN) and an object detector. This is achieved by replacing the greedy RoI selection process with a sequential attention mechanism which is trained via deep reinforcement learning (RL).
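Since the detection module above follows CTPN, its proposals are typically fixed-width vertical anchors whose heights grow geometrically (see generate_anchors in src/anchors/anchors.py). The sketch below illustrates only that idea; the 16-pixel width, the height schedule, and the function name are assumptions rather than the repository's exact values.

```python
# Illustrative CTPN-style anchor generation (assumed width/height schedule,
# not the repository's exact generate_anchors implementation).
import numpy as np


def generate_vertical_anchors(base_width=16, num_heights=10, min_height=11, ratio=1.4):
    """Return an (N, 4) array of (x1, y1, x2, y2) boxes centred at the origin,
    all sharing one width while their heights increase geometrically."""
    heights = [int(round(min_height * ratio ** i)) for i in range(num_heights)]
    anchors = []
    for h in heights:
        anchors.append([-base_width / 2.0, -h / 2.0, base_width / 2.0, h / 2.0])
    return np.array(anchors, dtype=np.float32)


print(generate_vertical_anchors()[:3])
```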
1,419
arbellea/DeepCellSeg
['cell segmentation']
['Microscopy Cell Segmentation via Adversarial Neural Networks']
SourceCode/ConvLSTM/main_conv_lstm.py SourceCode/AdverserialSeg/main_GAN.py SourceCode/ConvLSTM/main_conv.py SourceCode/RNNSeg/Isbi_Params.py SourceCode/RNNSeg/Params.py SourceCode/DataHandeling.py SourceCode/RNNSeg/train_SegNeBitGRU.py SourceCode/ConvLSTM/BasicConvLSTMCell.py SourceCode/RNNSeg/eval_SegNetBiLSTM.py SourceCode/ConvLSTM/__init__.py SourceCode/RNNSeg/LSTM_Network.py SourceCode/RNNSeg/eval_SegNetBiGRU.py SourceCode/RNNSeg/train_SegNetLSTM.py SourceCode/RNNSeg/main_LSTM.py SourceCode/RNNSeg/eval_SegNetLSTM_ISBI.py SourceCode/ConvLSTM/bouncing_balls.py SourceCode/RNNSeg/eval_SegNetBiGRU_ISBI.py SourceCode/RNNSeg/eval_SegNetLSTM.py SourceCode/RNNSeg/train_SegNeBitLSTM.py SourceCode/utils.py SourceCode/Network.py SourceCode/Layers.py SourceCode/__init__.py SourceCode/ConvLSTM/layer_def.py CSVSegReader CSVSegReader2 tif2png_dir CSVSegReaderEvalLSTM DIRSegReaderEvalLSTM CSVSegReaderSequence CSVSegReaderRandomLSTM CSVSegReaderEvalBiLSTM DIRSegReaderEvalBiLSTM CSVSegReaderRandom2 CSVSegReaderRandom conv2d_transpose leaky_relu batch_norm fc max_pool conv layer_norm layer Network run_session one_hot plot_segmentation put_kernels_on_grid my_clustering_loss summary_tag_replace SegUNetG3 SegUNetG SegNetG2 VGGNet GANTrainer RibSegNet2 RibSegNet SegNetG SegUNetG2 BasicConvLSTMCell BasicConvGRUCell LayerNormConvLSTMCell ConvRNNCell _conv_linear show_single_V norm bounce_n ar bounce_vec bounce_mat sigmoid new_speeds unsigmoid show_A matricize show_V fc_layer transpose_conv_layer _variable_with_weight_decay _activation_summary conv_layer _variable_on_cpu main train generate_bouncing_ball_sample main train generate_bouncing_ball_sample network run_net run_net run_net run_net run_net DataSet ParamsEvalIsbiLSTM ParamsEvalIsbiBiGRU ParamBaseISBI Sequence BiGRUNetwork BiLSTMNetwork LSTMNetworkValid LSTMNetwork NormBiLSTMNetwork LSTMNetwork_Trans SegUNetG3 SegUNetG SegNetG2 VGGNet GANTrainer RibSegNet2 RibSegNet SegNetG SegUNetG2 ParamsBase ParamsEvalLSTM ParamsEvalBiGRU ParamsLSTM ParamsBiGRU ParamsBiLSTM ParamsEvalIsbiBiGRU ParamsEvalBiLSTM train train train join basename imwrite replace copyMakeBorder glob sort shape imread BORDER_REFLECT_101 makedirs as_list reshape isinstance transpose constant reshape transpose reduce_max pad stack reduce_min constant subtract reshape square reduce_sum matmul add div get_variable masked_where squeeze logical_and logical_not imshow figure binary_erosion start_queue_runners group Coordinator run global_variables_initializer ConfigProto local_variables_initializer InteractiveSession reshape concat cast int32 range value replace __setattr__ ParseFromString Summary enumerate friction int norm randn transpose rand dot new_speeds zeros abs array range ar meshgrid zeros array range matricize bounce_n array matricize bounce_n array show int sqrt reshape show int reshape sqrt range len show range len name zero_fraction histogram scalar multiply add_to_collection set_shape _variable_on_cpu l2_loss truncated_normal_initializer bounce_vec zeros range Exists DeleteRecursively MakeDirs train train_dir conv_layer transpose_conv_layer BiGRUNetwork group Coordinator Saver data_provider ConfigProto local_variables_initializer BiLSTMNetwork LSTMNetwork global_variables_initializer DIRSegReaderEvalBiLSTM DIRSegReaderEvalLSTM join CSVSegReaderSequence join join CSVSegReaderRandomLSTM join CSVSegReaderRandomLSTM join CSVSegReaderEvalLSTM join CSVSegReaderEvalBiLSTM join CSVSegReaderEvalBiLSTM join CSVSegReaderEvalBiLSTM SUMMARIES image Saver BiGRUNetwork transpose get_collection 
val_data_provider group FileWriter softmax ConfigProto experiment_log_dir local_variables_initializer merge join learning_rate train_data_provider Coordinator one_seg global_variables_initializer scalar BiLSTMNetwork LSTMNetwork
# DeepCellSeg This code is the implementation of the method proposed in Arbelle and Riklin Raviv, "Microscopy Cell Segmentation via Adversarial Neural Networks" https://arxiv.org/abs/1709.05860 Any use of this work requires a citation of the paper.
1,420
arbellea/LSTM-UNet
['cell segmentation']
['Microscopy Cell Segmentation via Convolutional LSTM Networks']
losses.py Params.py Inference2D.py Networks.py DataHandeling.py create_sequence_metadata.py utils.py train2D.py main get_default_run CTCRAMReaderSequence2D CTCSegReaderSequence3D LSCRAMReader2D CTCInferenceReader inference AddReader AddDatasets AddNets seg_measure seg_measure_unit_test WeightedCELoss UpBlock2D ULSTMnet2D DownBlock2D ParamsBase CTCInferenceParams CTCParams AddReader AWSError AddDatasets AddNets train read_multi_tiff load_model bbox_crop bbox_fill get_model log_print grey_dilation zeros_like max shape imshow title dirname append imread format astype copy stack lower listdir minimum join print pause min maximum int32 figure zeros cla join format FOV arange uint16 imwrite model zeros_like logical_not flatten save_intermediate_path save_intermediate dataset bbox_fill connectedComponentsWithStats squeeze transpose logical_and shape range ones_like format greater_equal setdiff1d distance_transform_edt astype pre_sequence_frames unique save_intermediate_vis_path CV_32S flip log_print enumerate join uint8 save_intermediate_label_path reshape float32 maximum bbox_crop get_model numpy output_path zeros array calc_seg_meas seg_measure print astype float32 range enumerate ULSTMnet2D CTCRAMReaderSequence2D join CTCInferenceReader astype float32 copy close open append enumerate Iterator print strftime getattr get_model any max min copy
# LSTM-UNet The code in this repository is supplementary to our paper "Microscopy Cell Segmentation via Convolutional LSTM Networks" published in ISBI 2019. If this code is used please cite the paper: @article{arbelleIsbi2019, title={Microscopy cell segmentation via convolutional LSTM networks}, author={Arbelle, Assaf and Raviv, Tammy Riklin}, booktitle={2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}, pages={1008--1012}, year={2019}, organization={IEEE}
1,421
arcelien/pba
['data augmentation', 'image augmentation']
['Population Based Augmentation: Efficient Learning of Augmentation Policy Schedules']
autoaugment/wrn.py autoaugment/shake_shake.py autoaugment/shake_drop.py autoaugment/helper_utils.py pba/train.py pba/resnet.py pba/search.py autoaugment/data_utils.py pba/setup.py pba/wrn.py pba/data_utils.py pba/utils.py autoaugment/custom_ops.py pba/augmentation_transforms.py pba/policies.py pba/model.py autoaugment/policies.py pba/helper_utils.py autoaugment/train_cifar.py autoaugment/augmentation_transforms.py pba/augmentation_transforms_hp.py cutout_numpy _translate_y_impl random_flip create_cutout_mask _cutout_pil_impl TransformFunction _rotate_impl _shear_x_impl float_parameter _posterize_impl zero_pad_and_crop apply_policy _enhancer_impl _translate_x_impl _solarize_impl pil_wrap _crop_impl TransformT _shear_y_impl int_parameter pil_unwrap zero_pad global_avg_pool batch_norm stride_arr fc variable conv2d avg_pool unpickle DataSet run_epoch_training setup_loss cosine_lr eval_child_model decay_weights get_lr good_policies bottleneck_layer build_shake_drop_model round_int shortcut calc_prob _shake_shake_layer _shake_shake_block _shake_shake_skip_connection _shake_shake_branch build_shake_shake_model CifarModel setup_arg_scopes build_model CifarModelTrainer main _res_add residual_block build_wrn_model cutout_numpy _translate_y_impl random_flip create_cutout_mask _cutout_pil_impl TransformFunction _rotate_impl _shear_x_impl float_parameter _posterize_impl zero_pad_and_crop apply_policy _enhancer_impl _translate_x_impl _solarize_impl pil_wrap _crop_impl TransformT _shear_y_impl pil_unwrap int_parameter TransformT apply_policy parse_policy shuffle_data DataSet get_lr step_lr run_epoch_training eval_child_model setup_arg_scopes Model ModelTrainer build_model good_policies_svhn _building_block_v2 build_resnet_model _building_block_v1 batch_norm _bottleneck_block_v2 _bottleneck_block_v1 conv2d_fixed_padding fixed_padding block_layer main create_parser create_hparams main RayModel parse_log parse_log_schedule build_wrn_model zeros randint ones randint zeros create_cutout_mask reshape where pil_wrap pil_transformer xform_fn int_parameter int_parameter float_parameter float_parameter int_parameter int_parameter crop resize int_parameter load create_cutout_mask range int_parameter get_variable pad reshape load format close Open info softmax softmax_cross_entropy append trainable_variables l2_loss int format batch_size val_labels val_images eval_op info run test_images range test_labels len float int batch_size train_size cosine_lr lr num_epochs load int global_step format hparams batch_size train_size accuracy info next_batch get_lr range run pad int avg_pool int bottleneck_layer global_avg_pool batch_norm round_int relu fc conv2d calc_prob range int batch_norm stride_arr concat conv2d avg_pool stop_gradient conv2d batch_norm relu list _shake_shake_skip_connection add_n zip enumerate range shake_shake_widen_factor int global_avg_pool batch_norm relu fc conv2d append arg_scope setup_arg_scopes add_hparam HParams run_model CifarModelTrainer avg_pool zero_pad _res_add prod range format print shuffle copy choice append NUM_HP_TRANSFORM HP_TRANSFORM_NAMES enumerate seed arange shuffle len predictions log_first_n WARN step_lr pad fixed_padding conv2d_fixed_padding batch_norm projection_shortcut relu conv2d_fixed_padding projection_shortcut batch_norm relu conv2d_fixed_padding batch_norm projection_shortcut relu conv2d_fixed_padding projection_shortcut batch_norm relu block_fn range dense num_filters max_pooling2d batch_norm relu squeeze identity reduce_mean block_layer enumerate resnet_size 
create_parser restore list PopulationBasedTraining run_experiments init create_hparams values str add_argument ArgumentParser info parse_args flatten hp_policy_epochs use_hp_policy weight_decay_rate append range format set_hparam no_aug lr info add_hparam num_epochs data_path randint HParams epochs hp_policy split isinstance readlines loads append range len int format parse_log debug info append range len
# Population Based Augmentation (PBA) <b><i>New: Visualize PBA and applied augmentations with the notebook `pba.ipynb`!</b></i> <b><i>Now with Python 3 support.</b></i> ### Table of Contents 1. [Introduction](#introduction) 2. [Getting Started](#getting-started) 3. [Reproduce Results](#reproduce-results) 4. [Run PBA Search](#run-pba-search) 5. [Citation](#citation) ### Introduction
1,422
ardaduz/deep-video-mvs
['depth estimation']
['DeepVideoMVS: Multi-View Stereo on Video with Recurrent Spatio-Temporal Fusion']
dvmvs/config.py dvmvs/baselines/gpmvs/run-testing.py dvmvs/simulate_keyframe_buffer.py dvmvs/baselines/gpmvs/decoder.py dvmvs/fusionnet/run-testing.py dataset/tum-rgbd-export/tum-rgbd-export.py dvmvs/pairnet/run-training.py dvmvs/layers.py dvmvs/baselines/dpsnet/dpsnet.py dataset/7scenes-export/7scenes-export-color.py dataset/scannet-export/scannet-export.py dvmvs/baselines/mvdepthnet/encoder.py dvmvs/dataset_loader.py dvmvs/keyframe_buffer.py dvmvs/baselines/gpmvs/encoder.py dvmvs/pairnet/run-testing-online.py dataset/build_point_cloud.py dataset/augmented-iclnuim-export/iclnuim-export.py dvmvs/baselines/deltas/resnet_s2d.py dvmvs/convlstm.py dvmvs/pairnet/run-testing.py dataset/7scenes-export/7scenes-export-depth.py dvmvs/pairnet/model.py dvmvs/baselines/deltas/base_model.py dvmvs/fusionnet/model.py dvmvs/baselines/mvdepthnet/decoder.py dvmvs/fusionnet/run-testing-online.py dvmvs/errors.py dvmvs/baselines/deltas/utils.py dvmvs/baselines/deltas/densedepth.py dvmvs/baselines/dpsnet/run-testing.py dvmvs/baselines/gpmvs/gplayer.py dvmvs/fusionnet/run-training.py sample-data/run-tsdf-reconstruction.py dataset/utils.py dvmvs/baselines/mvdepthnet/run-testing.py dataset/rgbdscenes-export/rgbdscenes-export.py dvmvs/baselines/deltas/superpoint.py dvmvs/baselines/deltas/run-testing.py dvmvs/losses.py dvmvs/utils.py dvmvs/train.py setup.py dvmvs/baselines/deltas/triangulation.py build_point_cloud depth_image_to_point_cloud write_point_cloud create_depth_map_from_disparity read_pfm main process_scene main process_scene main process_scene RGBDFrame sanity_check_train SensorData export_samples sanity_check_test process_color_image find_longest_reliable_subsequence main get_closest_index process_scene Config MVSLayernormConvLSTMCell crawl read_split MVSDataset crawl_subprocess_short crawl_subprocess_long gather_pairs_train is_valid_pair load_depth PreprocessImage main load_image sanity_check_compute_errors compute_errors SimpleBuffer KeyframeBuffer depth_layer_3x3 conv_layer down_conv_layer up_conv_layer LossMeter calculate_loss update_losses main simulate_simple_buffer simulate_keyframe_buffer switch_mode train validate visualize_predictions save_predictions print_number_of_trainable_parameters cost_volume_fusion warp_frame_depth get_non_differentiable_rectangle_depth_estimation save_results save_optimizer freeze_batchnorm is_pose_available pose_distance calculate_cost_volume_by_warping get_warp_grid_for_cost_volume_calculation get_differentiable_square_depth_estimation save_checkpoint InferenceTimer zip_code BaseModel dict_update Unpool dilated_conv3x3 conv3x3 SparsetoDenseNet Gudi_UpProj_Block_Cat ASPP Gudi_UpProj_Block conv1x1 ResNet resnet50 Bottleneck resnet152 conv3x3 _resnet resnet34 resnet18 BasicBlock resnet101 get_model predict predict_for_subsequence top_k_keypoints Superpoint remove_borders simple_nms integrate_tensor_2d homogeneous_to_euclidean patch_sampler sample_descriptors_epi TriangulationNet triangulate_batch_of_points triangulate_point_from_multiple_views_linear_torch_batch match_corr patch_for_depth_guided_range vec_to_skew_symmetric get_fundamental_matrix create_transform_matrix patch_for_kp unproject_ij reproject_points make_symmetric reorder_desc pose_square convtext disparityregression PSNet matchshifted pixel2cam cam2pixel set_id_grid convbn feature_extraction inverse_warp convbn_3d BasicBlock check_sizes predict down_conv_layer depth_layer Decoder get_trainable_number up_conv_layer conv_layer refine_layer down_conv_layer depth_layer Encoder get_trainable_number up_conv_layer 
conv_layer refine_layer GPlayer predict down_conv_layer depth_layer Decoder get_trainable_number up_conv_layer conv_layer refine_layer down_conv_layer depth_layer Encoder get_trainable_number up_conv_layer conv_layer refine_layer predict LSTMFusion EncoderBlock CostVolumeDecoder UpconvolutionLayer DownconvolutionLayer FeatureExtractor StandardLayer FeatureShrinker CostVolumeEncoder DecoderBlock predict predict TrainingHyperparameters main forward_pass EncoderBlock CostVolumeDecoder UpconvolutionLayer DownconvolutionLayer FeatureExtractor StandardLayer FeatureShrinker CostVolumeEncoder DecoderBlock predict predict TrainingHyperparameters main forward_pass run TSDFFusion TSDFVolume clear join sorted str write_point_cloud list reshape astype float32 files extend depth_image_to_point_cloud tqdm Path fromfile imread range len append close write open list tolist astype dot vstack meshgrid ravel range str readline rstrip decode list reshape close map groups match fromfile float open join sorted format imwrite isdir print loadtxt tolist files zfill rmtree savetxt mkdir append imread array range len join partial print close imap_unordered Path Pool open readlines close reshape split uint16 vstack IMREAD_ANYDEPTH hstack astype as_matrix float32 zeros uint8 view grid_sample ones transpose inv astype dot shape stack unsqueeze linspace meshgrid float deepcopy list groupby len export_train join print SensorData export_test mkdir join sorted print loadtxt listdir len join sorted print loadtxt listdir len abs get_closest_index eye int time round pose_distance deepcopy set add is_valid_pair append range len value print reshape loadtxt gather_pairs_train set Path append deepcopy value train_minimum_pose_distance print reshape loadtxt train_crawl_step train_maximum_pose_distance set dict add Path is_valid_pair append range partial Value extend shuffle Manager imap_unordered Pool len loadtxt astype float32 cvtColor COLOR_BGR2RGB astype float32 uint8 MVSDataset uint16 COLOR_BGR2RGB min astype waitKey imshow DataLoader unsqueeze enumerate max range cvtColor len count_nonzero square float32 maximum mean sqrt abs len ones normal print compute_errors update calculate_loss item enumerate sum view smooth_l1_loss size interpolate abs sorted format try_new_keyframe print reshape strip files get_best_measurement_frames savetxt Path append listdir array range KeyframeBuffer len str sorted format try_new_keyframe print reshape strip SimpleBuffer get_measurement_frames files savetxt Path append listdir array range len simulate_simple_buffer simulate_keyframe_buffer apply eval train train_freeze_batch_normalization validate LossMeter zero_grad unsqueeze save_checkpoint interpolate forward_pass_function item_average squeeze train_validate train_image_width append detach update format size avg set_description_str add_image enumerate make_grid backward add_scalar min save_optimizer tqdm switch_mode step len switch_mode LossMeter norm inv min dot sqrt trace ones to stack linspace meshgrid float bmm view grid_sample abs size transpose unsqueeze inverse to sum range cat to size calculate_cost_volume_by_warping zip bmm int index_put_ view relu depth_to_3d transform_points shape argsort stack inverse permute cuda unique gather numpy long range bmm SfMPerspectiveCameras PointsRasterizer view depth_to_3d size Pointclouds PointsRasterizationSettings stack unsqueeze inverse permute eye zeros transform_points permute relu any eval join ZIP_DEFLATED write close files ZipFile format Path save format Path save state_dict param_groups 
requires_grad print format print compute_errors nanmean Path append array savez_compressed enumerate Path savez_compressed uint8 uint16 astype waitKey COLOR_RGB2BGR imshow array cvtColor get items list isinstance copy Mapping ResNet load_state_dict load_state_dict_from_url seed Superpoint load TriangulationNet print pretrained resume load_state_dict SparsetoDenseNet manual_seed is_available parse_args cuda depthnet reorder_desc make_symmetric supernet trinet pose_square t shape stack seq_length sum cat save_results print_statistics Path InferenceTimer device sorted append to format astype files test_dataset_name eval enumerate print loadtxt reshape float32 get_model test_offline_data_path split range zeros_like where topk is_tensor ndarray isinstance svd homogeneous_to_euclidean view ones transpose size expand unsqueeze len append range triangulate_point_from_multiple_views_linear_torch_batch relu reshape shape softmax device to sum cat squeeze cat transpose unsqueeze inverse device to stack unsqueeze repeat device to shape conv2d permute view to cos sin device view ones transpose shape repeat unsqueeze linspace create_transform_matrix device meshgrid tensor to reproject_points transpose set_trace atan2 get_fundamental_matrix sqrt isnan unsqueeze stack device to sum max detach shape normalize view grid_sample stack zeros_like view vec_to_skew_symmetric transpose unsqueeze inverse shape transpose view shape repeat device tensor to is_cuda cat shape stack cat view size type_as stack append isdigit enumerate size cuda view set_id_grid bmm view clamp size stack detach bmm grid_sample size pixel2cam cam2pixel check_sizes cuda PSNet load_state_dict load shape list DataParallel get_warp_grid_for_cost_volume_calculation exp Encoder sqrt item Decoder GPlayer zeros array state_dict update FeatureExtractor test_image_height test_online_scene_path test_n_measurement_frames FeatureShrinker CostVolumeEncoder range CostVolumeDecoder LSTMFusion test_image_width KeyframeBuffer len use_checkpoint dataset cuda sorted finetune_epochs Adam strftime load_state_dict chain SummaryWriter format print_number_of_trainable_parameters train_run_directory files mkdir manual_seed zip_code train_seed load train_epochs parameters scenes train cost_volume_decoder view size feature_shrinker LossMeter train_image_height update_losses cost_volume_encoder calculate_cost_volume_by_warping interpolate train_image_width append lstm_fusion cuda range len FeatureExtractor FeatureShrinker CostVolumeEncoder CostVolumeDecoder train_predict_two_way flip integrate Path resize sorted TSDFVolume logical_and shape append load_image range format astype files calculate_volume_bounds uint8 collect reshape loadtxt get_updated_intrinsics float32 index PreprocessImage zeros len
## *DeepVideoMVS*: Multi-View Stereo on Video with Recurrent Spatio-Temporal Fusion ### Paper (CVPR 2021): [arXiv](https://arxiv.org/abs/2012.02177) - [CVF](https://openaccess.thecvf.com/content/CVPR2021/html/Duzceker_DeepVideoMVS_Multi-View_Stereo_on_Video_With_Recurrent_Spatio-Temporal_Fusion_CVPR_2021_paper.html) ### Presentation (5 min.): [YouTube](https://www.youtube.com/watch?v=ikpotjxwcp4) <br /> ![](miscellaneous/teaser.jpg) ***DeepVideoMVS*** is a learning-based online multi-view depth prediction approach on posed video streams, where the scene geometry information computed in the previous time steps is propagated to the current time step. The backbone of the approach is a real-time capable, lightweight encoder-decoder that relies on cost volumes computed from pairs of images. We extend it with a ConvLSTM cell at the bottleneck layer,
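The fusion described above (a ConvLSTM cell at the encoder-decoder bottleneck) can be sketched with a generic convolutional LSTM cell in PyTorch, as below. This is only an illustration of the mechanism; the repository's actual cell (MVSLayernormConvLSTMCell) additionally uses layer normalization and warps the hidden state between frames, which is not reproduced here.

```python
# Minimal convolutional LSTM cell (sketch only; the repository's cell adds
# layer normalization and pose-guided hidden-state warping).
import torch
import torch.nn as nn


class ConvLSTMCell(nn.Module):
    def __init__(self, in_channels, hidden_channels, kernel_size=3):
        super().__init__()
        # One convolution produces the input, forget, output and candidate gates.
        self.gates = nn.Conv2d(in_channels + hidden_channels, 4 * hidden_channels,
                               kernel_size, padding=kernel_size // 2)
        self.hidden_channels = hidden_channels

    def forward(self, x, state=None):
        b, _, h, w = x.shape
        if state is None:
            zeros = x.new_zeros(b, self.hidden_channels, h, w)
            state = (zeros, zeros)
        h_prev, c_prev = state
        i, f, o, g = torch.chunk(self.gates(torch.cat([x, h_prev], dim=1)), 4, dim=1)
        c = torch.sigmoid(f) * c_prev + torch.sigmoid(i) * torch.tanh(g)
        h_new = torch.sigmoid(o) * torch.tanh(c)
        return h_new, (h_new, c)


cell = ConvLSTMCell(in_channels=512, hidden_channels=512)
bottleneck = torch.randn(1, 512, 8, 12)   # encoder output for one frame
fused, state = cell(bottleneck)           # state is carried to the next frame
```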
1,423
aresPanos/DMGP_regression
['gaussian processes']
['How Good are Low-Rank Approximations in Gaussian Process Regression?']
src/dmgp_model.py src/helper.py src/main_realworld.py src/main_simulated_data.py DMGP_Regressor generate_4d compute_predictive_perform fun_1d standardize_dataset load_dataset plot_1D_data train_test_split generate_1d optimization_step objective_closure objective_closure_gp objective_closure_1d seed randn fun_1d sort rand seed T exp randn rand pi sqrt solve_triangular eye cholesky zeros sum log load join seed permutation arange int realpath dirname abspath concatenate arange mean transform StandardScaler std fit square pi mean sqrt log plot ones xlabel grid ylabel savefig figure legend len trainable_variables list apply_gradients zip
# Deep Mercer Gaussian Process (DMGP) Regression We provide the code used in our paper [Faster Gaussian Processes via Deep Embeddings](https://arxiv.org/abs/2004.01584). ### Prerequisites TensorFlow version 2.1.0 TensorFlow Probability version 0.9.0 GPflow version 2.0.0 or newer ### Source code The following files can be found in the **src** directory: - *dmgp_model.py*: implementation of the DMGP model - *helper.py*: various utility functions
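For orientation, a plain exact-GP regression in GPflow 2 (one of the prerequisites above) looks roughly like the sketch below. This is a vanilla GPR baseline on synthetic data, not the DMGP model implemented in src/dmgp_model.py.

```python
# Plain GPflow 2 GP regression baseline (sketch) -- not the DMGP model itself.
import numpy as np
import gpflow

X = np.random.rand(200, 1)
Y = np.sin(12.0 * X) + 0.1 * np.random.randn(200, 1)

model = gpflow.models.GPR(data=(X, Y), kernel=gpflow.kernels.SquaredExponential())
gpflow.optimizers.Scipy().minimize(model.training_loss, model.trainable_variables)

X_test = np.linspace(0.0, 1.0, 50).reshape(-1, 1)
mean, var = model.predict_y(X_test)   # predictive mean and variance
```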
1,424
aresPanos/dmgp_dfgp_regression
['gaussian processes']
['How Good are Low-Rank Approximations in Gaussian Process Regression?']
src/main_toyData.py src/helper.py src/main_realData.py src/models.py initialize_dmgp fun_1d initialize_dfgp load_dataset_train_test_split compute_rmse_nlpd get_flags generate_1d run_dmgp_model run_dfgp_model objective_closure objective_closure_fgp objective_closure_mgp DMGP_model DFGP_model FGP_model MGP_model seed randn fun_1d sort linspace load join format print size realpath dirname abspath transform train_test_split StandardScaler fit square pi mean sqrt log DEFINE_integer DEFINE_bool FLAGS flags DEFINE_string trainable_variables float64 save_weights Scipy dataset set_weights str Adam dirname get_weights use_dnn_init astype realpath set_floatx compile minimize print MGP_model dnn_dmgp fit trainable_variables float64 save_weights FGP_model Scipy dataset set_weights str Adam dirname get_weights use_dnn_init astype realpath set_floatx compile minimize print dnn_dfgp fit batch_size tuple dataset seed set_seed Adam DMGP_model iter range num_splits format size load_dataset_train_test_split shuffle predict_y compute_rmse_nlpd optimization_step_dnn optimization_step_kernel num_epochs batch initialize_dmgp total_seconds print now zeros batch_size tuple dataset seed set_seed Adam iter range num_splits format initialize_dfgp size load_dataset_train_test_split shuffle predict_y compute_rmse_nlpd optimization_step_dnn optimization_step_kernel num_epochs zeros batch total_seconds print now DFGP_model
# Deep Mercer Gaussian Process (DMGP) and Deep Fourier Gaussian Process (DFGP) Regression # <img src="plots/1d_example_2.png" height="300"> We provide the code used in our paper [Scalable Gaussian Processes, with Guarantees: Kernel Approximations and Deep Feature Extraction](https://arxiv.org/abs/2004.01584) to reproduce results. The code includes implementations of Deep Mercer GP, Deep Fourier GP, and their corresponding shallow counterparts. ## Requirements ## TensorFlow - version 2.1.0 TensorFlow Probability - version 0.9.0 GPflow - version 2.0.0 or newer silence-tensorflow - version 1.1.1 (optional) ## Flags ## * batch_size: Batch size (integer - default=1000)
1,425
argman/EAST
['optical character recognition', 'scene text detection', 'curved text detection']
['EAST: An Efficient and Accurate Scene Text Detector']
lanms/.ycm_extra_conf.py lanms/__init__.py icdar.py eval.py lanms/__main__.py nets/resnet_v1.py locality_aware_nms.py run_demo_server.py multigpu_train.py data_util.py nets/resnet_utils.py model.py GeneratorEnqueuer get_images sort_poly resize_image detect main generator get_images load_annoataion line_verticle shrink_poly crop_area polygon_area point_dist_to_line get_batch fit_line restore_rectangle_rbox line_cross_point generate_rbox check_and_validate_polys sort_rectangle restore_rectangle rectangle_from_parallelogram standard_nms weighted_merge nms_locality intersection model dice_coefficient mean_image_subtraction unpool loss average_gradients main tower_loss Config get_host_info get_predictor save_result index draw_illu main index_post GetCompilationInfoForFile IsHeaderFile MakeRelativePathsInFlagsAbsolute FlagsForFile DirectoryOfThisScript merge_quadrangle_n9 Block conv2d_same subsample resnet_arg_scope stack_blocks_dense resnet_v1_152 resnet_v1_101 bottleneck resnet_v1_200 resnet_v1_50 resnet_v1 join test_data_path format endswith print append walk len int shape resize float max time format zeros_like print reshape fillPoly astype argwhere int32 zeros restore_rectangle merge_quadrangle_n9 enumerate sum argmin output_dir gpu_list makedirs glob extend training_data_path polygon_area print zip append clip min astype choice shape int32 zeros range max clip arctan2 polyfit print norm arccos line_verticle fit_line dot line_cross_point sum arctan print argmin argmax concatenate reshape transpose zeros array norm point_dist_to_line ones fillPoly min argmin fit_line sort_rectangle line_cross_point argwhere zip append zeros sum array range rectangle_from_parallelogram enumerate load_annoataion arange subplots resize abs max show ones shape imshow generate_rbox check_and_validate_polys append training_data_path imread format replace crop_area close shuffle copy choice add_artist tight_layout astype float get_images Polygon print text set_yticks min float32 set_xticks zeros array generator get is_running print start sleep GeneratorEnqueuer reshape area Polygon append array append weighted_merge range split mean_image_subtraction scalar reduce_sum minimum cos dice_coefficient reduce_mean scalar split REGULARIZATION_LOSSES get_collection image add_n loss scalar concat reduce_mean zip append expand_dims trainable_variables checkpoint_path pretrained_model_path MkDir moving_average_decay Saver exponential_decay get_variable global_variables average_gradients merge_all placeholder apply apply_gradients get_default_graph FileWriter get_trainable_variables assign_from_checkpoint_fn enumerate learning_rate float32 DeleteRecursively AdamOptimizer ExponentialMovingAverage split global_variables_initializer scalar len join restore basename format model get_checkpoint_state float32 placeholder model_checkpoint_path Saver ExponentialMovingAverage info variables_to_restore Session get_variable polylines reshape array str join imwrite copy SAVE_DIR draw_illu uuid1 makedirs BytesIO save_result getvalue imdecode save frombuffer add_argument port ArgumentParser parse_args run append join startswith IsHeaderFile compiler_flags_ exists compiler_flags_ GetCompilationInfoForFile compiler_working_dir_ MakeRelativePathsInFlagsAbsolute DirectoryOfThisScript nms_impl array copy pad
argman/EAST
1,426
argonne-lcf/active-learning-md
['active learning']
['Machine Learning Inter-Atomic Potentials Generation Driven by Active Learning: A Case Study for Amorphous and Liquid Hafnium dioxide']
workflow/BayesOpt_SOAP.py workflow/activesample.py activesample
## Active learning workflow for Gaussian Approximation Potential (GAP) Documentation for the active learning workflow developed as a part of the article "Machine Learning Inter-Atomic Potentials Generation Driven by Active Learning: A Case Study for Amorphous and Liquid Hafnium dioxide". __For more details, please refer to the [paper](https://www.nature.com/articles/s41524-020-00367-7).__ If you are using this active learning workflow in your research paper, please cite us as ``` @article{sivaraman2020machine, title={Machine-learned interatomic potentials by active learning: amorphous and liquid hafnium dioxide}, author={Sivaraman, Ganesh and Krishnamoorthy, Anand Narayanan and Baur, Matthias and Holm, Christian and Stan, Marius and Cs{\'a}nyi, G{\'a}bor and Benmore, Chris and V{\'a}zquez-Mayagoitia, {\'A}lvaro}, journal={npj Computational Materials},
1,427
arjun-rao/slam18
['language acquisition']
['Context Based Approach for Second Language Acquisition']
src/prepare_data.py src/eval.py src/utils.py src/train_model.py src/config.py Config compute_f1 evaluate_metrics test_metrics compute_avg_log_loss compute_auroc main compute_acc main load_and_compute LogisticRegressionInstance LogisticRegression InstanceData load_data main load_labels convert_to_bool load_context_json Config params_file test_key evaluate_metrics join print add_argument iterkeys load_labels output_predictions ArgumentParser append parse_args range len range len list sorted zip float sum range len print range len compute_f1 compute_auroc compute_acc compute_avg_log_loss evaluate_metrics print test_file train_file load_and_compute dev_file print predict_test_set LogisticRegression dirname PrettyTable dev_key use_dev load_context_json load_data train add_row makedirs dict print dict
# Context based Approach for Second Language Acquisition This project is the implementation of the system submitted to the SLAM 2018 (Second Language Acquisition Modeling 2018) shared task. This page gives instructions for replicating the results in our system. ## Table of Contents <!-- toc --> - [Table of Contents](#table-of-contents) - [Installation](#installation) - [Downloading Data](#downloading-data) - [Parameters for the Experiment](#parameters-for-the-experiment) - [Prepare Data for training](#prepare-data)
1,428
armelf/Financial-Algorithms
['stock market prediction']
['Global Stock Market Prediction Based on Stock Chart Images Using Deep Q-Network']
Equity/Technical Indicators/volume.py Equity/Fundamental Trading/FundDatasetCreation.py Equity/Technical Indicators/VWMA-SMA-MeanReversion.py Equity/NLPTrading/NLPTrader.py Equity/NLPTrading/NLPDailyScoreCreation.py Forex/KalmanFilterPairsTrading.py Equity/Deep Learning Trading/train.py Equity/Robust Strategies/ThresholdWeeklyStrategies.py Equity/NLPTrading/TwitterCrawler.py Equity/Fundamental Trading/FundTradingAlgo.py Equity/Technical Indicators/technicalindicators_strategies.py Equity/Deep Learning Trading/exReplay.py Equity/Deep Learning Trading/convNN.py Equity/Deep Learning Trading/DataPPRL.py ConstructCNN DataReaderRL exRep trainModel combined_data_calculus combined_data_calculus2 balance_data_transfo price_data_transfo income_data_transfo backtest status_calc sanitise_tweet get_tweet_sentiment plot obtain_parse_wiki_stocks_sp500 calc selectdates2 create_traintest_bundles computeCAGR rets_ta_tickers selectdates convert_daily_weekly naivetraintest bb_rsi_strategy vwsma_strategy rsi_obv_bb_strategy ma_cross_strategy cci_adx_strategy wr_strategy rsi_strategy stoch_macd_strategy adx_strategy sar_stoch_strategy acc_dist_index volume_oscillator negative_volume_index volume_weighted_moving_average force_index on_balance_volume put_call_ratio chaikin_money_flow volume_price_trend ease_of_movement plot_equity_curve take_profit_stop_loss test_factor_acc create_df dateparse2 func pct_change sqrt mean shift shift shift int list format sum RandomForestClassifier print fit predict status_calc transform StandardScaler array values len sanitise_tweet TextBlob tables decode feed Request urlopen append HTMLTableParser agg pct_change int sum arange Series shift rsi where mean diff DataFrame sar range cci list index get_level_values apply append dropna cumsum to_datetime index where copy sqrt mean append float DataFrame std range days len show list cumsum shift figure legend dropna log DatetimeIndex get_level_values DatetimeIndex get_level_values list strptime index strftime timedelta append cumsum computeCAGR accuracy_score days str list std precision_score append selectdates sum format recall_score copy mean sqrt zip float int print to_datetime index dict array len mean list len stoch_signal list Series len sar stoch stoch_signal list Series len macd macd_signal stoch list rsi len list bollinger_lband bollinger_hband rsi len list bollinger_lband bollinger_hband rsi mean on_balance_volume len list adx len list len adx cci mean list wr len list mean volume_weighted_moving_average std len shift fillna cumsum transpose any nan fillna sum fillna fillna diff mean fillna diff shift fillna mean fillna list fillna Series average range len pct_change Series range fillna len pct_change list take_profit_stop_loss std vwsma_strategy where mean interpolate dropna range head read_csv len format cumsum print shift range mean sqrt interpolate dropna DataFrame std log len show plot title figure legend array format plot xlabel float find_cointegrated_pairs print ylabel backtest mean sqrt title round figure append dropna sum std days
# Trading strategies on Equity & Forex: Proposal of several realistic & optimizable strategies [![forthebadge made-with-python](https://ForTheBadge.com/images/badges/made-with-python.svg)](https://www.python.org/) [![GitHub license](https://img.shields.io/badge/License-MIT-brightgreen.svg?style=flat-square)](https://github.com/armelf/Financial-Algorithms/blob/main/LICENSE) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) This repository proposes a collection of profitable trading algorithms and trading ideas, designed to be **extensible** and **optimizable**. We currently focus on the Equity Market, but with time we will add more asset classes. There is only one algorithm for the Forex Market, as we currently have less experience in that market. I hope that the overall project is of interest and that people will participate and share ideas and knowledge, or at least **make improvements** to the strategies already proposed. ## Contents - [Overview](#overview) - [Preliminaries](#preliminaries) - [Technical Indicators](#technical-indicators) - [Technical Indicators Library](#technical-indicators-library) - [Historical price data](#historical-price-data)
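To make the kind of strategy covered by the Technical Indicators section concrete, here is a small pandas sketch of a moving-average crossover backtest on a close-price series. The window lengths, the synthetic prices, and the function name are illustrative; this does not reproduce the repository's ma_cross_strategy.

```python
# Illustrative SMA crossover backtest (not the repository's ma_cross_strategy).
import numpy as np
import pandas as pd


def sma_crossover_returns(close: pd.Series, fast: int = 20, slow: int = 50) -> pd.Series:
    """Long when the fast SMA is above the slow SMA, flat otherwise."""
    fast_sma = close.rolling(fast).mean()
    slow_sma = close.rolling(slow).mean()
    position = (fast_sma > slow_sma).astype(float).shift(1)  # act on the next bar
    return (position * close.pct_change()).fillna(0.0)


# Toy usage on a synthetic random-walk price series.
prices = pd.Series(100.0 * np.exp(np.cumsum(np.random.normal(0.0, 0.01, 1000))))
strategy_returns = sma_crossover_returns(prices)
print("Cumulative return:", (1.0 + strategy_returns).prod() - 1.0)
```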
1,429
arnab39/Semi-supervised-segmentation-cycleGAN
['style transfer', 'semantic segmentation']
['Revisiting CycleGAN for semi-supervised segmentation']
arch/discriminators.py validation.py arch/generators.py data_utils/__init__.py arch/ops.py data_utils/dataloader.py arch/__init__.py utils.py main.py data_utils/augmentations.py testing.py model.py main get_args semisuper_cycleGAN supervised_model test LambdaLR save_checkpoint runningScore perceptual_loss PIL_to_tensor cuda GaussianNoise Vgg16 make_one_hot smoothen_label print_networks create_link Sample_from_Pool recursive_glob get_testdata_link mkdir averageMeter colorize_mask load_checkpoint get_traindata_link validation define_Dis NLayerDiscriminator FCDiscriminator PixelDiscriminator Classifier_Module define_Gen ResnetGenerator ResNet ENet UnetGenerator Bottleneck UnetSkipConnectionBlock LEDNet Downsample_Block_led Channel_Split ResidualBlock UpsamplingBottleneck get_norm_layer conv_norm_relu DownsamplingBottleneck init_network SSnbt init_weights ShuffleBlock InitialBlock RegularBottleneck dconv_norm_relu set_grad APN conv_norm_lrelu CenterCrop RandomRotate RandomSizedCrop Compose Scale RandomCrop RandomSized VOCDataset ACDCDataset CityscapesDataset ToLabel PILaugment Colorize Relabel get_transformation colormap parse_args add_argument ArgumentParser validation int get_args print training test semisuper_cycleGAN append train supervised_model testing split checkpoint_dir DataLoader save dataset VOCDataset cuda load_state_dict results_dir range define_Gen get_transformation eval activation_softmax gpu_ids enumerate join colorize_mask CityscapesDataset ACDCDataset print load_checkpoint Softmax2d Gsi numpy convert putpalette int zeros tensor array range rand cuda manual_seed relu2_2 MSELoss mse_loss vgg cuda range makedirs is_available join list remove symlink mkdir abspath values join join save load print data scatter_ cuda zero_ long print parameters checkpoint_dir DataLoader numpy save VOCDataset dataset cuda save_image load_state_dict validation_dir range detach define_Gen get_transformation Tanh eval activation_tanh interp activation_softmax gpu_ids enumerate join colorize_mask CityscapesDataset cpu load_checkpoint print Softmax2d Upsample Gsi ACDCDataset NLayerDiscriminator FCDiscriminator PixelDiscriminator get_norm_layer get_norm_layer ResnetGenerator ResNet ENet UnetGenerator LEDNet BatchNorm2d partial InstanceNorm2d print apply init_weights cuda parameters uint8 arange astype zeros array int size random rotate uniform flip filter mirror GaussianBlur crop enhance Compose
# Revisiting Cycle-GAN for semi-supervised segmentation This repo contains the official Pytorch implementation of the paper: [Revisiting CycleGAN for semi-supervised segmentation](https://arxiv.org/abs/1908.11569) ## Contents 1. [Summary of the Model](#1-summary-of-the-model) 2. [Setup instructions and dependencies](#2-setup-instructions-and-dependancies) 3. [Repository Overview](#3-repository-overview) 4. [Running the model](#4-running-the-model) 5. [Some results of the paper](#5-some-results-of-the-paper) 6. [Contact](#6-contact) 7. [License](#7-license)
1,430
arnavdodiedo/Neural-Style-Transfer
['style transfer']
['A Neural Algorithm of Artistic Style']
style-transfer-tf.py get_feature_representations load_img deprocess_img get_content_loss get_style_loss compute_grads show_results gram_matrix load_and_process_img imshow compute_loss get_model run_style_transfer img_to_array ANTIALIAS size resize expand_dims max open title squeeze astype preprocess_input load_img squeeze astype copy VGG19 int reshape matmul as_list gram_matrix load_and_process_img model float zip model layers deprocess_img assign clip_by_value xticks yticks fromarray subplot clear_output display_png imshow apply_gradients append expand_dims range format compute_grads load_and_process_img stack get_feature_representations enumerate time Variable print AdamOptimizer figure zeros get_model numpy show subplot load_img imshow title figure
# Neural-Style-Transfer Inspired by Leon A. Gatys’ paper, A Neural Algorithm of Artistic Style (https://arxiv.org/abs/1508.06576) Code adapted from: https://medium.com/tensorflow/neural-style-transfer-creating-art-with-deep-learning-using-tf-keras-and-eager-execution-7d541ac31398
1,431
arne-cl/feng-hirst-rst-parser
['discourse segmentation']
['Two-pass Discourse Segmentation with Pairing and Global Features']
src/test_feng.py src/trees/lexicalized_tree.py src/classifiers/crf_classifier.py tools/crfsuite/crfsuite-0.12/swig/python/setup.py src/utils/serialize.py src/utils/treebank_parser.py tools/crfsuite/crfsuite-0.12/example/chunking.py src/document/dependency.py src/document/constituent.py tools/crfsuite/crfsuite-0.12/example/ner.py src/utils/utils.py tools/crfsuite/crfsuite-0.12/example/pos.py src/parsers/base_parser.py src/paths.py src/logs/log_writer.py src/features/tree_feature_writer.py src/sanity_check.py src/parser_wrapper.py src/document/token.py src/utils/cue_phrases.py tools/crfsuite/crfsuite-0.12/swig/python/crfsuite.py src/features/segmenter_feature_writer.py src/prep/preprocesser.py src/utils/rst_lib.py src/parse.py src/document/base_representation.py src/parsers/multi_sentential_parser.py src/prep/prep_utils.py src/segmenters/crf_segmenter.py src/parsers/intra_sentential_parser.py src/utils/RST_Classes.py src/utils/yappsrt.py tools/crfsuite/crfsuite-0.12/example/crfutils.py tools/crfsuite/crfsuite-0.12/swig/python/sample_train.py src/document/sentence.py tools/crfsuite/crfsuite-0.12/example/template.py src/treebuilder/build_tree_CRF.py src/prep/syntax_parser.py tools/crfsuite/crfsuite-0.12/swig/python/sample_tag.py src/utils/Stanford_Deps.py src/document/doc.py src/trees/parse_tree.py main DiscourseParser parse_args get_parser_stdout main get_output_filepath ParserException check_CRFSuite check_ssplit check_syntax_parser test_feng_fail parse_file test_feng_short test_feng_long CRFClassifier BaseRepresentation Constituent Dependency Document Sentence Token SegmenterFeatureWriter CRFTreeFeatureWriter LogWriter BaseParser IntraSententialParser MultiSententialParser Preprocesser create_lexicalized_tree get_parsed_trees_from_string replace_words SyntaxParser CRFSegmenter CRFTreeBuilder LexicalizedTree ParseTree get_concat_text get_one_ngram load_tree_from_string get_ngrams common_ancestor_list traverse_tree_with_offset concat_lists filter_syntactic_tag get_main_edus filter_lexical_head common_ancestor filter_ngrams get_PoS_list_from_span is_right_nucleus slice_text get_main_spans load_tree traverse_tree concat_2_lists extract_relations get_word_list_from_main_edus convert_tree get_word_list_from_span locate traverse_tree_path is_left_nucleus load_raw_tree loadData saveData parse Treebank TreebankScanner copy_subtree permutation_indices print_SGML_tree argsmax count_how_many split_mrg_by_sentence get_syntactic_subtrees make_new_subtree split_hilda_inputfile_by_sentence get_edu_entity_grid simplify_tree sorted_dict_values_by_key compute_edit_distance is_punctuation load_tree_from_file find_EDU_in_sentence_index replace_words simplified_tag get_sent_dependencies sorted_dict_keys unescape_penn_special_word feature_extractor readiter to_crfsuite apply_templates escape main output_features get_shape get_all_other observation contains_digit get_4d get_capperiod disjunctive contains_symbol get_2d contains_alpha feature_extractor contains_upper degenerate contains_lower b get_dand get_da get_type feature_extractor _swig_repr swig_import_helper _swig_setattr_nondynamic _swig_getattr StringList Attribute Tagger Trainer SwigPyIterator version _swig_setattr Item ItemSequence instances instances Trainer get_librarydir get_rootdir get_includedir join parse logging print unload strip readlines write LOGS_PATH filelist output_dir append DiscourseParser exists enumerate open add_option OptionParser exit print_help read close open basename stdout feng_main remove get_output_filepath exit __repr__ isfile 
parse_args communicate print extend split Popen enumerate unload SyntaxParser parse_sentence enumerate join print unload CRFSUITE_PATH CRFClassifier classify split parse_file parse_file join map compile escape append fromstring strip lexicalize copy walk extend join isinstance leaves startswith append lower range len append print items list max isinstance fn isinstance fn isinstance fn isinstance append read read extend get_main_edus get_word_list_from_span isinstance leaves extend isinstance isinstance leaves get_main_edus append range len append range leaves len join dump close open load join close open Treebank TreebankScanner append sorted keys list keys reverse append range len find append readlines strip split int group match append enumerate split range isinstance replace_words __deepcopy__ isinstance isinstance copy_subtree enumerate endswith read fromstring load_tree range len __deepcopy__ leaves ParseTree len extend leaves append range len append strip split min range len append apply_templates append join range len strip split append range len isinstance write escape range len isinstance escape Attribute append Item ItemSequence stdin model Tagger to_crfsuite add_option separator range readiter OptionParser tag output_features feature_extractor split len islower isdigit isupper discard len set islower range isupper isalpha isdigit isdigit isalnum get_capperiod get_shape get_2d islower get_all_other isupper isdigit contains_digit lower degenerate b get_dand get_da contains_upper get_4d contains_symbol contains_alpha contains_lower get_type append range observation disjunctive range len find_module load_module get get __repr__ delete_SwigPyIterator delete_Item delete_ItemSequence delete_StringList Attribute_value_get _swig_property Attribute_value_set delete_Attribute Attribute_attr_get Attribute_attr_set delete_Trainer delete_Tagger rfind float strip ItemSequence Attribute append Item split StringList
# feng-hirst-rst-parser [![Travis Build Status](https://travis-ci.org/arne-cl/feng-hirst-rst-parser.svg?branch=master)](https://travis-ci.org/arne-cl/feng-hirst-rst-parser) [![Docker Build Status](https://img.shields.io/docker/cloud/build/nlpbox/feng-hirst-rst-parser.svg)](https://hub.docker.com/r/nlpbox/feng-hirst-rst-parser) This repository contains my fork of the RST parser published by Vanessa Wei Feng and Graeme Hirst. I updated some of its dependencies, dockerized the application, added some end-to-end tests and changed its output format to make it simpler to parse (e.g. by [discoursegraphs](https://github.com/arne-cl/discoursegraphs) or the [rst-converter-service](https://github.com/NLPbox/rst-converter-service)). If you want to run the parser as a web service, have a look at [nlpbox/feng-hirst-service](https://github.com/nlpbox/feng-hirst-service).
1,432
ars-ashuha/sparse-vd-pytorch
['sparse learning']
['Variational Dropout Sparsifies Deep Neural Networks']
logger.py Logger
# Sparse Variational Dropout A minimal working example of Sparse Variational Dropout, from "Variational Dropout Sparsifies Deep Neural Networks" (https://arxiv.org/pdf/1701.05369.pdf). <p align="center"> <img height="640" src="neurons.png"/> </p> A sample of neuron weights of shape 10x10 from the first layer. Original repo: https://github.com/ars-ashuha/variational-dropout-sparsifies-dnn. # Citation If you found this code useful, please cite the original paper ```
1,433
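The sparsification in the sparse-vd entry above comes from the approximate KL term between the log-uniform prior and the Gaussian posterior over weights. Below is a small PyTorch sketch of that approximation, using the constants reported by Molchanov et al. (2017); it is a minimal illustration rather than this repository's training code, and the pruning threshold of log alpha > 3 is the commonly used convention, not a value taken from the repo.

```python
# Approximate negative KL(q||p) for sparse variational dropout
# (constants k1, k2, k3 from Molchanov et al., 2017). Illustrative sketch only.
import torch


def sparse_vd_neg_kl(log_alpha: torch.Tensor) -> torch.Tensor:
    k1, k2, k3 = 0.63576, 1.87320, 1.48695
    neg_kl = (k1 * torch.sigmoid(k2 + k3 * log_alpha)
              - 0.5 * torch.log1p(torch.exp(-log_alpha)) - k1)
    return neg_kl.sum()


log_alpha = torch.randn(10, 10)                 # per-weight dropout parameters
kl_term = -sparse_vd_neg_kl(log_alpha)          # added to the data term when training
sparsity = (log_alpha > 3.0).float().mean()     # fraction of weights treated as pruned
```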
art-programmer/PlaneNet
['depth estimation']
['PlaneNet: Piece-wise Planar Reconstruction from a Single RGB Image']
code/RecordReaderRGBD.py code/train_group.py code/polls/models/move_to_origin.py train_planenet.py pytorch/options.py code/RecordWriterRGBD.py kaffe/caffe/__init__.py pool/models/move_to_origin.py layers.py code/polls/polls.py PlaneSetGeneration/layers.py utils.py code/train_finetuning.py code/kaffe/layers.py code/kaffe/transformers.py pytorch/tf/data_converter.py code/html.py code/script.py pytorch/models/drn.py code/planenet_layer.py code/RecordReader3D.py code/RecordWriterRGBD_backup.py kaffe/errors.py kaffe/caffe/resolver.py code/train_hybrid.py kaffe/graph.py nndistance/__init__.py pytorch/datasets/plane_dataset_scannet.py kaffe/transformers.py code/kaffe/__init__.py kaffe/tensorflow/transformer.py code/polls/models/obj2egg.py code/train_planenet_confidence.py PlaneSetGeneration/utils.py code/kaffe/errors.py pool/models/writeDisk.py planenet_inference.py code/kaffe/tensorflow/network.py code/kaffe/tensorflow/transformer.py code/RecordFilter.py RecordReaderAll.py code/compare.py pool/pool.py train_pixelwise.py modules.py code/kaffe/tensorflow/__init__.py pool/parts_scene.py code/train_planenet_layer.py kaffe/shapes.py pytorch/utils.py code/RecordWriterWithoutPlane.py code/planenet_group.py PlaneSetGeneration/modules.py pytorch/train_planenet.py code/cluster.py code/utils.py code/high_dim_filter_grad.py code/predict.py code/kaffe/shapes.py pytorch/datasets/scannet_scene.py PlaneSetGeneration/planenet.py pool/models/obj2egg.py code/layers.py crfasrnn/high_dim_filter_grad.py code/LayeredSceneDecomposition.py code/RecordSampler.py code/utils_backup.py kaffe/caffe/caffepb.py code/train_planenet_backup.py nndistance/tf_nndistance.py code/train_planenet_separate.py pool/obj2egg.py code/train_sample.py PlaneSetGeneration/RecordReader.py code/RecordReaderAll.py crfasrnn/crfasrnn_layer.py PlaneSetGeneration/train_planenet.py kaffe/layers.py code/RecordConverterRGBD.py kaffe/__init__.py crfasrnn/__init__.py predict.py pool/models/add_texture.py code/RecordWriter.py html.py train_finetuning.py planenet.py code/room_layout.py code/predict_custom.py code/tf_nndistance.py code/kaffe/caffe/caffepb.py code/kaffe/caffe/resolver.py kaffe/tensorflow/network.py pytorch/datasets/plane_dataset.py pytorch/tf/RecordReaderAll.py pytorch/models/planenet.py code/RecordWriter3D.py code/polls/plane_scene.py code/planenet.py code/RecordConverter.py code/kaffe/graph.py train_hybrid.py code/test_sampling.py code/crfasrnn_layer.py code/RecordReader.py pytorch/augmentation.py code/polls/main.py code/modules.py code/SegmentationRefinement.py pytorch/models/modules.py pool/plane_scene.py evaluate.py code/PlaneStatisticsGlobal.py code/evaluate_separate.py pytorch/tf/modules.py data_preparation/parse.py code/RecordReaderWithoutPlane.py PlaneSetGeneration/tf_nndistance.py kaffe/tensorflow/__init__.py code/train_pixelwise.py code/RecordChecker.py code/test_bernoulli.py code/RecordConverter3D.py code/train_planenet.py code/kaffe/caffe/__init__.py code/evaluate_depth.py code/CopyTexture.py code/evaluate.py writeHTML evaluateDepthPrediction gridSearch evaluatePlanes getResults plotResults getPrediction plotAll getGroundTruth XML XHTML HTML TestCase PlaneNormalLayer RangesLayer PlaneDepthLayer segmentationRefinementModuleBoundary findBoundaries meanfieldModuleBoundary calcMessages calcImageDiff gaussian segmentationRefinementModule planeMapModule meanfieldModuleLayer divideLayers findBoundaryModuleSmooth findBoundaryModule crfModule fitPlaneMasksModule meanfieldModule planeDepthsModule planeNormalsModule crfrnnModule 
findLocalPlanes depthToNormalModule PlaneNet PlaneNetDetector writeHTML evaluatePlanes getResults getPredictionScanNet getPredictionCustom RecordReaderAll build_graph writeInfo build_loss test main parse_args writeInfo build_loss test main parse_args build_graph writeInfo build_loss test main parse_args build_graph writeInfo build_loss test main parse_args _int64_feature writeRecordFile _bytes_feature _float_feature clusterPlanes writeHTML gridSearch evaluatePlanes getResults plotResults getPrediction plotAll getGroundTruth copyTextureTest copyTexture getResults findCornerPoints findFloorPlane CrfRnnLayer writeHTML evaluateDepthPrediction gridSearch evaluatePlanes getResults plotResults getPrediction plotAll getGroundTruth writeHTML evaluateDepthPrediction evaluatePlanePrediction getResults getPrediction getGroundTruth getPredictionHighRes getGroundTruthHighRes evaluateAll writeHTML evaluateDepthPrediction evaluatePlanePrediction getResults getPrediction getGroundTruth _high_dim_filter_grad XML XHTML HTML TestCase getLayerSwapProposals getProposals decompose getExpansionProposals drawSolution getConcaveHullProposal PlaneNormalLayer RangesLayer PlaneDepthLayer segmentationRefinementModuleBoundary findBoundaries meanfieldModuleBoundary calcMessages calcImageDiff gaussian segmentationRefinementModule planeMapModule meanfieldModuleLayer divideLayers findBoundaryModuleSmooth findBoundaryModule crfModule fitPlaneMasksModule meanfieldModule planeDepthsModule planeNormalsModule crfrnnModule findLocalPlanes depthToNormalModule PlaneNet PlaneNet PlaneNet PlaneStatistics evaluatePlanes writeHTML evaluateDepthPrediction evaluatePlanes getResults plotResults getPrediction plotAll getGroundTruth writeHTML evaluatePlanes getResults getPrediction _int64_feature writeRecordFile _bytes_feature _float_feature _int64_feature writeRecordFile _bytes_feature _float_feature _int64_feature writeRecordFile _bytes_feature _float_feature _int64_feature writeRecordFile _bytes_feature _float_feature RecordReader RecordReader3D RecordReaderAll RecordReaderRGBD loadImagePaths RecordReader _int64_feature writeRecordFile _bytes_feature _float_feature build_graph _int64_feature build_loss _bytes_feature _float_feature writeRecordFile _int64_feature writeExample _bytes_feature _float_feature writeRecordFile _int64_feature writeExample _bytes_feature _float_feature writeRecordFile _int64_feature writeExample _bytes_feature _float_feature loadImagePaths writeRecordFile _int64_feature writeExample readRecordFile _bytes_feature _float_feature loadImagePaths writeRecordFile testRoomLayout getResults getPrediction getGroundTruth testRoomNet failureCases resultsFigure refineSegmentation getSegmentationsTRWS findProposals removeSmallSegments build_split_apply_merge_model minus_1 plus_1 split_apply_merge REINFORCESimpleExample createModel buildLoss writeInfo build_loss test main parse_args writeInfo build_loss test main parse_args writeInfo build_loss test main parse_args build_graph writeInfo build_loss test main parse_args build_loss_rgbd build_graph fitPlanesRGBD build_loss test main predict parse_args build_loss_rgbd build_graph fitPlanesSceneNN fitPlanesRGBD build_loss test fitPlanesScanNet main predict parse_args build_graph fitPlanesRGBD writeInfo build_loss test main predict parse_args build_loss_rgbd build_graph build_loss_3d fitPlanesRGBD writeInfo build_loss test main predict parse_args build_graph writeInfo build_loss test main parse_args print_stderr KaffeError Graph Node GraphBuilder NodeMapper NodeKind 
NodeDispatchError NodeDispatch LayerAdapter shape_data shape_not_implemented get_filter_output_shape shape_concat shape_convolution shape_inner_product shape_scalar shape_pool get_strided_kernel_output_shape shape_mem_data shape_identity SubNodeFuser BatchNormScaleBiasFuser DataReshaper BatchNormPreprocessor ReLUFuser DataInjector ParameterNamer NodeRenamer ReductionParameter HingeLossParameter BlobProto BlobProtoVector NetStateRule LayerParameter PowerParameter FillerParameter ArgMaxParameter V0LayerParameter InnerProductParameter ConvolutionParameter SolverState EltwiseParameter LossParameter SliceParameter BatchNormParameter WindowDataParameter DummyDataParameter HDF5OutputParameter TanHParameter TransformationParameter SoftmaxParameter ConcatParameter DataParameter SPPParameter ParamSpec EmbedParameter SolverParameter InputParameter MVNParameter ContrastiveLossParameter NetState NetParameter BiasParameter CropParameter DropoutParameter PoolingParameter Datum SigmoidParameter BlobShape ExpParameter AccuracyParameter LogParameter ThresholdParameter TileParameter MemoryDataParameter LRNParameter ReLUParameter ImageDataParameter ELUParameter ReshapeParameter InfogainLossParameter ScaleParameter V1LayerParameter HDF5DataParameter PReLUParameter FlattenParameter PythonParameter show_fallback_warning CaffeResolver has_pycaffe get_caffe_resolver layer Network MaybeActivated TensorFlowNode get_padding_type TensorFlowEmitter TensorFlowTransformer TensorFlowMapper BallInMazeDemo calcDistance calcLineDim PlaneScene BallInMazeDemo ints ObjMaterial MtlFile ObjFile pathify floats main CrfRnnLayer _high_dim_filter_grad mergePlanesNew loadClassMap ColorPalette fitPlane writePointCloudFace readMesh print_stderr KaffeError Graph Node GraphBuilder NodeMapper NodeKind NodeDispatchError NodeDispatch LayerAdapter shape_data shape_not_implemented get_filter_output_shape shape_concat shape_convolution shape_inner_product shape_scalar shape_pool get_strided_kernel_output_shape shape_mem_data shape_identity SubNodeFuser BatchNormScaleBiasFuser DataReshaper BatchNormPreprocessor ReLUFuser DataInjector ParameterNamer NodeRenamer ReductionParameter HingeLossParameter BlobProto BlobProtoVector NetStateRule LayerParameter PowerParameter FillerParameter ArgMaxParameter V0LayerParameter InnerProductParameter ConvolutionParameter SolverState EltwiseParameter LossParameter SliceParameter BatchNormParameter WindowDataParameter DummyDataParameter HDF5OutputParameter TanHParameter TransformationParameter SoftmaxParameter ConcatParameter DataParameter SPPParameter ParamSpec EmbedParameter SolverParameter InputParameter MVNParameter ContrastiveLossParameter NetState NetParameter BiasParameter CropParameter DropoutParameter PoolingParameter Datum SigmoidParameter BlobShape ExpParameter AccuracyParameter LogParameter ThresholdParameter TileParameter MemoryDataParameter LRNParameter ReLUParameter ImageDataParameter ELUParameter ReshapeParameter InfogainLossParameter ScaleParameter V1LayerParameter HDF5DataParameter PReLUParameter FlattenParameter PythonParameter show_fallback_warning CaffeResolver has_pycaffe get_caffe_resolver layer Network MaybeActivated TensorFlowNode get_padding_type TensorFlowEmitter TensorFlowTransformer TensorFlowMapper PlaneNormalLayer RangesLayer PlaneDepthLayer meanfieldModuleBoundary planeMapModule planeNormalsModule gaussian segmentationRefinementModule fitPlaneMasksModule findLocalPlanes segmentationRefinementModuleBoundary meanfieldModule findBoundaries planeDepthsModule meanfieldModuleLayer 
planeFittingModule PlaneNet RecordReader build_graph build_loss test findBadImages main predict parse_args ints ObjMaterial MtlFile ObjFile pathify floats main calcDistance PartsScene calcLineDim calcDistance calcLineDim PlaneScene PoolBallGame ints ObjMaterial MtlFile ObjFile pathify floats main cropPatch horizontalFlip parse_args main testOneEpoch visualizeBatchPlanes calcPlaneDepths one_hot metadataToIntrinsics drawDepthImage ColorPalette drawSegmentationImage sigmoid softmax fitPlane PlaneDataset PlaneDatasetScanNet ScanNetScene drn_d_54 drn_c_58 drn_d_38 drn_c_26 Bottleneck drn_d_105 drn_d_22 conv3x3 DRN drn_c_42 BasicBlock oneHotModule warpImages calcPlaneDepthsModule assignmentModule calcAssignment PyramidModule ConvBlock calcDepthModule PlaneNet _int64_feature writeRecordFile _bytes_feature _float_feature segmentationRefinementModuleBoundary findBoundaries meanfieldModuleBoundary calcMessages calcImageDiff gaussian segmentationRefinementModule planeMapModule meanfieldModuleLayer divideLayers findBoundaryModuleSmooth findBoundaryModule crfModule fitPlaneMasksModule meanfieldModule planeDepthsModule planeNormalsModule crfrnnModule findLocalPlanes depthToNormalModule RecordReaderAll br str td table p img test_dir write numImages close open titles tr range HTML enumerate visualizeImages calcPlaneDepths imwrite concat fitPlanesPiecewise fitPlanesNYU save argmax writeHTML str list result_filename calcImageDiff test_dir fitPlanesManhattan transpose squeeze placeholder plotResults shape array append sum range concatenate group getResults drawSegmentationImage numImages plotAll softmax drawMaskImage removeSmallSegments drawNormalImage ConfigProto zeros local_variables_initializer enumerate items norm methods print reshape system drawDepthImage float32 maximum sigmoid meanfieldModule planeDepthsModule refineSegmentation global_variables_initializer numOutputPlanes load items list concatenate glob test_dir print len plotResults shape range enumerate calcPlaneDepths replace evaluatePlanePrediction test_dir plotCurvesSplit tolist titles zeros numOutputPlanes range append enumerate visualizeImages calcPlaneDepths imwrite fitPlanesPiecewise fitPlanesNYU reset_default_graph exists result_filename list str test_dir fitPlanesManhattan squeeze exit placeholder titles append sum range evaluatePlanePrediction concatenate group numImages drawSegmentationImage mean drawMaskImage drawNormalImage ConfigProto local_variables_initializer enumerate load items methods print drawDepthImage float32 global_variables_initializer visualizeImages calcPlaneDepths imwrite concat save argmax numOutputPlanes result_filename list str calcImageDiff test_dir ones transpose squeeze placeholder shape fitPlanesSegmentation titles append range concatenate group getResults drawSegmentationImage numImages evaluateDepths softmax removeSmallSegments drawMaskImage ConfigProto zeros local_variables_initializer enumerate load items norm fitPlanes methods print reshape system drawDepthImage float32 maximum sigmoid meanfieldModule planeDepthsModule refineSegmentation global_variables_initializer array load result_filename deepcopy int checkpoint_dir methods print insert getPrediction getGroundTruth save append exists enumerate RecordReaderAll constant string_input_producer build_graph global_variables global_variables_initializer group ConfigProto reset_default_graph bool local_variables_initializer getBatch RecordReaderAll constant string_input_producer group ConfigProto global_variables_initializer bool local_variables_initializer 
getBatch split set split set set norm print reshape transpose maximum dot repeat norm reshape transpose maximum repeat reciprocal norm reshape multiply matmul sign div stack clip_by_value tile negative abs range negative div norm clip_by_value meshgrid exp arange concat clip_by_value argmax max log exp gaussian depthwise_conv2d reduce_sum cast append range one_hot softmax tile int constant reshape min maximum float32 zeros constant reshape gaussian depthwise_conv2d reduce_sum pow reduce_mean tile int exp constant one_hot reshape concat gaussian depthwise_conv2d reduce_sum softmax clip_by_value tile argmax log pow meanfieldModule range int constant exp reshape gaussian depthwise_conv2d reduce_sum pad softmax clip_by_value tile abs log meanfieldModuleBoundary range reduce_sum concat reduce_max argmax resize_bilinear transpose logical_and reduce_sum cast append less expand_dims range one_hot slice stack tile float minimum int reshape maximum float32 greater pow int norm greater_equal ones reshape transpose sparse_to_dense logical_and shape stack unstack clip_by_value append less expand_dims range concat reduce_max div clip_by_value abs argmax logical_and matmul cast less expand_dims range one_hot tile negative norm reshape greater float32 max_pool int norm constant reshape concat gaussian depthwise_conv2d reduce_sum pad stack unstack tile zeros expand_dims range concat cos deg2rad abs gaussian logical_and depthwise_conv2d less_equal pad cast sqrt tile norm constant reshape greater float32 max_pool logical_or constant reshape concat gaussian depthwise_conv2d greater max_pool float32 pad cast abs int reshape transpose concat meanfieldModule planeDepthsModule range top_k transpose logical_and reduce_sum matmul cast less expand_dims range one_hot tile int reshape greater float32 logical_or reduce_mean planeDepthsModule bool minimum int reshape reduce_sum pow load int load_op_library Variable ones transpose reshape astype float32 matmul concat realpath softmax dirname high_dim_filter range exists append startIndex startIndex tuple copyLogoVideo copyLogo exit glob writeGridImage astype textureImageFilename stack full uint8 min copyWallTexture addRulerComplete len exit getPredictionScanNet getPredictionCustom RecordReaderAll constant string_input_producer build_graph global_variables global_variables_initializer group ConfigProto reset_default_graph bool local_variables_initializer getBatch visualizeImages constant build_graph global_variables_initializer global_variables customImageFolder glob min group float32 placeholder ConfigProto reset_default_graph bool local_variables_initializer len checkpoint_dir build_graph build_loss Saver string_input_producer test_dir merge_all placeholder append range getBatch LR group ConfigProto rootFolder local_variables_initializer RecordReaderAll minimize system AdamOptimizer global_variables_initializer bool scalar RecordReaderAll constant string_input_producer bool build_graph test_dir global_variables build_loss system ConfigProto group global_variables_initializer dataset local_variables_initializer getBatch append tolist plotCurves rstrip replace keyname add_argument ArgumentParser append dataset rootFolder TRAINABLE_VARIABLES get_collection print dataFolder RecordReaderAll string_input_producer TFRecordWriter group global_variables_initializer local_variables_initializer getBatch RecordReaderAll constant string_input_producer build_graph global_variables global_variables_initializer group ConfigProto reset_default_graph bool local_variables_initializer 
getBatch one_hot concatenate plotCurvesSimple array save argmax numOutputPlanes rootFolder build_graph_sample enumerate norm concatenate reshape min stack repeat array expand_dims max load str findHomography calcPlaneDepths imwrite warpPerspective findCornerPoints array resize imread argmax findFloorPlane range imwrite save resize findFloorPlane result_filename str test_dir findCornerPoints imread range glob getResults copy numImages warpPerspective enumerate load print findHomography array constant build_graph global_variables test_dir glob build_graph_sample system float32 placeholder ConfigProto group global_variables_initializer bool local_variables_initializer visualizeImages calcPlaneDepths imwrite evaluatePlaneSegmentation concat save numOutputPlanes str list writeHTML calcImageDiff test_dir transpose squeeze tolist placeholder shape fitPlanesSegmentation width titles append range height replace plotCurves concatenate group getResults drawSegmentationImage numImages softmax removeSmallSegments drawMaskImage drawNormalImage ConfigProto zeros local_variables_initializer enumerate load items norm fitPlanes methods print reshape system drawDepthImage float32 maximum sigmoid meanfieldModule planeDepthsModule refineSegmentation global_variables_initializer array fitPlanesNYU abs writeHTML fitPlanesManhattan exit width height calcPlaneNormals drawNormalImage str replace getPredictionHighRes getGroundTruthHighRes hybrid print RecordReaderAll constant string_input_producer build_graph global_variables global_variables_initializer print RecordReaderMake3D group ConfigProto reset_default_graph bool local_variables_initializer getBatch RecordReaderAll constant string_input_producer RecordReaderMake3D group ConfigProto global_variables_initializer bool zeros local_variables_initializer getBatch checkpoint_dir build_graph reset_default_graph str string_input_producer global_variables test_dir getBatch replace global_variables_initializer group ConfigProto rootFolder local_variables_initializer RecordReaderAll constant print RecordReaderMake3D system hybrid bool useCRF camera getCameraFromInfo round imread imageIndex useSemantics astype useCRF camera round imread imageIndex useSemantics astype RecordReaderRGBD RecordReader int32 RecordReader3D RecordReaderRGBD cos deg2rad RecordReader int32 fitPlaneMasksModule RecordReader3D high_dim_filter str imwrite range drawSegmentationImage random logical_not floor argmin len append sum range copy choice stack enumerate int print min pow logical_or zeros array split print fill argmin delete copy choice enumerate stack argwhere unique getConcaveHullProposal randint range append split copy choice stack unique append randint range split full calcPlaneDepths arange imwrite getProposals cos deg2rad logical_not readProposalInfo getCameraFromInfo drawSolution save abs tensordot str exp logical_and append expand_dims sum range concatenate astype copy inference_ogm drawSegmentationImage stack tile drawMaskImage enumerate load norm print reshape min float32 maximum pow logical_or repeat zeros full len max reciprocal ones replace float dot repeat randint writePLYFile resize plotCurves startIndex addCharacter glob float32 placeholder concat findBoundaryModule RecordReader fitPlaneMasksModule argmax reshape less float32 findBoundaryModuleSmooth cast RecordReader3D range equal RecordReaderRGBD zeros ConfigProto str append one_hot sigmoid_cross_entropy_with_logits reshape transpose float32 matmul reduce_mean cast nn_distance enumerate build_graph global_variables slice 
build_loss placeholder bool imwrite resize str list ones transformPlanes transpose tolist exit SerializeToString matmul shape Example append imread sum range concatenate astype drawSegmentationImage copy unique drawMaskImage zip dilate enumerate load uint8 print inv write float32 drawDepthImage maximum tostring int32 zeros array len print close writeExample enumerate bool norm int value print reshape fromstring exit Example ParseFromString shape tf_record_iterator load test_dir glob print squeeze transpose shape save append zeros loadmat cos deg2rad test_dir concatenate numImages zeros min system getGroundTruth len calcEdgeMap imwrite cos deg2rad resize numOutputPlanes str list test_dir ones argmin exit shape append imread sum range astype numImages drawSegmentationImage stack drawMaskImage full float enumerate load combinations uint8 print reshape min drawDepthImage getGroundTruth array len imwrite arange cos deg2rad floor max str test_dir ones logical_and exit shape imread sum range numImages drawSegmentationImage full label float enumerate load int norm print reshape min getGroundTruth array len str system int str system int stack pad append max range exp concatenate print reshape astype float32 maximum inference_ogm readProposalInfo findProposals stack append expand_dims abs clip arange cos readProposalInfo getCameraFromInfo argmax abs tensordot exp append sum range concatenate astype inference_ogm stack norm reshape maximum float32 pow repeat arange logical_not getCameraFromInfo abs max tensordot ones argmin logical_and append range concatenate astype stack fitPlane dilate label enumerate norm reshape float32 maximum pow repeat erode bool array len dynamic_partition range len StochasticTensor constant reshape surrogate_loss square matmul split_apply_merge reduce_sum Categorical get_variable fully_connected StochasticTensor Bernoulli reduce_min surrogate_loss float32 sigmoid reduce_mean cast expand_dims abs RecordReaderMake3D RecordReaderRGBD RecordReader less build_loss_rgbd FileWriter cond log_dir int32 RecordReaderRGBD RecordReader int32 build_loss_rgbd join constant build_graph global_variables test_dir glob system float32 placeholder group int32 global_variables_initializer ConfigProto local_variables_initializer checkpoint_dir string_input_producer RecordReaderRGBD test_dir system exit ConfigProto group global_variables_initializer writeHTMLRGBD local_variables_initializer getBatch writeHTMLPlane str fitPlanes imwrite test_dir ones system float32 astype drawDepthImage drawSegmentationImage evaluateDepths shape resize append imread array range str imwrite test_dir ones system float32 astype drawDepthImage drawSegmentationImage evaluateDepths shape fitPlanesSegmentation resize append imread array range build_loss_3d RecordReader3D build_loss_3d RecordReader3D write pad_h stride_w pad_w kernel_h kernel_w float stride_h height hasattr get_filter_output_shape kernel_parameters output_shape parameters width output_shape parameters list parents axis output_shape output_shape CaffeResolver write ceil float height width replace print lower isfile split getopt argv toEgg GlobPattern recomputeTangentBinormal recomputePolygonNormals TConvex writeEgg Filename TPolygon ObjFile recomputeVertexNormals splitext removeUnusedVertices float triangulatePolygons abs list sorted ones matmul append range concatenate insert set mean fitPlane zip enumerate norm print maximum dot bool len arange delete logical_not save abs open list ones tolist exit matmul shape append sum range getColorMap concatenate 
astype set stack fitPlane zip full writePointCloudFace enumerate load read remove loadClassMap norm mergePlanesNew print reshape min maximum int32 zeros array len l2_normalize cumsum float64 concat reduce_max boolean_mask cos deg2rad top_k clip_by_value abs round argmax tensordot count_nonzero space_to_depth squeeze gaussian transpose depthwise_conv2d reduce_sum matmul logical_and less_equal int64 resize_bilinear cast planeMapModule append less expand_dims range erosion2d greater_equal unique_with_counts one_hot slice stack tile dilation2d float equal minimum int norm constant reshape float32 greater max_pool maximum pow logical_or avg_pool int32 eye zeros findLocalPlanes ones placeholder zeros string_input_producer build_graph global_variables build_loss system ConfigProto placeholder group RecordReader global_variables_initializer bool local_variables_initializer getBatch bool useCRF str float resize model zero_grad DataLoader unsqueeze set_description numpy save cuda max outputHeight str view PlaneDataset transpose exit Adam matmul MSELoss calcPlaneDepthsModule shape numEpochs load_state_dict sum PlaneNet CrossEntropyLoss cat state_dict segmentation_criterion visualizeBatchPlanes mean stack testOneEpoch outputWidth enumerate load norm backward tqdm parameters pow plane_criterion train step calcDepthModule model DataLoader unsqueeze set_description max outputHeight view transpose calcPlaneDepthsModule exit matmul shape append sum cat segmentation_criterion visualizeBatchPlanes mean eval stack float outputWidth enumerate norm print tqdm pow plane_criterion train numpy str uint8 imwrite replace test_dir transpose astype drawDepthImage drawSegmentationImage range len getColorMap concatenate astype int32 argmax max applyColorMap uint8 astype exp max reshape list zeros shape zeros norm transpose maximum dot stack repeat clip load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict load_url DRN load_state_dict update load_url DRN load_state_dict state_dict load_url DRN load_state_dict norm clamp stack repeat unsqueeze float sum calcPlaneDepthsModule cat calcAssignment numpy int list append zeros argmax full range unsqueeze cuda view scatter_ height grid_sample print calcPlaneDepthsModule exit matmul stack unsqueeze numNeighborImages width append numOutputPlanes range cat
# PlaneNet: Piece-wise Planar Reconstruction from a Single RGB Image

By Chen Liu, Jimei Yang, Duygu Ceylan, Ersin Yumer, and Yasutaka Furukawa

## Introduction

This paper presents the first end-to-end neural architecture for piece-wise planar reconstruction from a single RGB image. The proposed network, PlaneNet, learns to directly infer a set of plane parameters and corresponding plane segmentation masks. For more details, please refer to our CVPR 2018 [paper](http://art-programmer.github.io/planenet/paper.pdf) or visit our [project website](http://art-programmer.github.io/planenet.html).

## Updates

We developed a better technique, PlaneRCNN, for piece-wise planar detection, as described in our recent arXiv [paper](https://arxiv.org/abs/1812.04072). Unfortunately, we cannot release the code and data yet.

We added a script for extracting plane information from the original ScanNet dataset and rendering 3D planar segmentation results to 2D views. Please see the README in folder *data_preparation/* for details. Note that we made some modifications to the heuristic-heavy plane fitting algorithms when cleaning up the messy code developed over time, so the plane fitting results will differ slightly from the training data we used (provided in the *.tfrecords* files).

PyTorch training and testing code is available now (still experimental and without the CRF module).

## Dependencies

Python 2.7, TensorFlow (>= 1.3), numpy, opencv 3.
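The README states that PlaneNet predicts per-plane parameters plus segmentation masks, and the repository's `calcPlaneDepths`/`planeDepthsModule` utilities convert those parameters into per-pixel depth. The snippet below is only a minimal NumPy sketch of the standard conversion, assuming each plane is encoded as an offset-scaled normal `d * n` in camera space and a pinhole camera with intrinsics `K`; it illustrates the geometry rather than reproducing the repository's implementation.

```python
import numpy as np

def plane_depths(planes, K, height, width):
    """Depth map induced by each predicted plane (illustrative sketch).

    planes: (P, 3) array, row p = d * n for a plane n . X = d in camera space.
    K:      (3, 3) pinhole intrinsics.
    Returns a (P, height, width) array; 0 where a plane lies behind the camera.
    """
    offsets = np.linalg.norm(planes, axis=1, keepdims=True)          # d, shape (P, 1)
    normals = planes / np.maximum(offsets, 1e-4)                     # unit normals n
    u, v = np.meshgrid(np.arange(width), np.arange(height))
    pixels = np.stack([u, v, np.ones_like(u)]).reshape(3, -1).astype(np.float64)
    rays = np.linalg.inv(K) @ pixels                                 # X = depth * ray
    denom = normals @ rays                                           # n . ray, shape (P, H*W)
    denom = np.where(np.abs(denom) > 1e-4, denom, np.inf)            # avoid division by ~0
    depths = np.clip(offsets / denom, 0.0, None)                     # depth = d / (n . ray)
    return depths.reshape(-1, height, width)
```

Selecting, at every pixel, the depth of the plane chosen by the predicted segmentation mask then yields the piece-wise planar depth map.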
1,434
artest08/LateTemporalModeling3DCNN
['action recognition']
['Late Temporal Modeling in 3D CNN Architectures with BERT for Action Recognition']
models/NLB/__init__.py models/SlowFast/slowfast/models/nonlocal_helper.py scripts/eval/combined_demo.py models/BERT/utils/sublayer.py models/SlowFast/slowfast/utils/ava_evaluation/np_box_list.py models/SlowFast/slowfast/utils/ava_evaluation/np_box_mask_list.py models/SlowFast/slowfast/utils/ava_evaluation/np_box_ops.py models/TSM/temporal_shift.py models/r2plus1d/__init__.py models/BERT/transformer.py models/SlowFast/slowfast/config/defaults.py models/SlowFast/slowfast/utils/meters.py models/SlowFast/slowfast/utils/parser.py models/non_local/utils/gtransforms.py models/SlowFast/slowfast/utils/__init__.py models/SlowFast/slowfast/utils/lr_policy.py models/SlowFast/tools/run_net.py models/SlowFast/slowfast/utils/ava_evaluation/np_box_mask_list_ops.py models/TSM/__init__.py models/SlowFast/__init__.py utils/model_path.py models/SlowFast/slowfast/utils/env.py models/BERT/attention/single.py models/SlowFast/slowfast/datasets/video_container.py models/BERT/utils/feed_forward.py models/SlowFast/slowfast/config/custom_config.py two_stream_bert2.py models/r2plus1d/resnet.py models/SlowFast/slowfast/datasets/cv2_transform.py video_transforms.py models/SlowFast/slowfast_connector.py models/SlowFast/slowfast/utils/misc.py models/SlowFast/slowfast/models/batchnorm_helper.py models/non_local/utils/convert_weights.py models/SlowFast/slowfast/utils/checkpoint.py models/rgb_resneXt3D.py models/SlowFast/slowfast/config/__init__.py opt/__init__.py models/BERT/embedding/token.py models/BERT/language_model.py models/SlowFast/slowfast/datasets/ssv2.py models/BERT/embedding/position.py models/non_local/data/kinetics.py scripts/eval/spatial_demo_bert.py models/BERT/utils/gelu.py models/non_local/utils/layer_by_layer.py models/non_local/models/resnet_helper.py models/SlowFast/slowfast/datasets/charades.py models/__init__.py models/SlowFast/slowfast/datasets/__init__.py models/SlowFast/tools/train_net.py models/SlowFast/tools/benchmark.py datasets/ucf101.py models/SlowFast/setup.py models/BERT/attention/multi_head.py scripts/eval/spatial_demo3D.py models/SlowFast/slowfast/models/losses.py models/SlowFast/slowfast/utils/ava_evaluation/np_mask_ops.py models/non_local/models/model_builder_video.py models/representation_flow/rep_flow_layer.py models/SlowFast/slowfast/utils/ava_evaluation/np_box_list_ops.py models/rgb_slowfast.py models/SlowFast/slowfast/models/optimizer.py models/non_local/models/resnet_video_org.py utils/video_transforms.py models/SlowFast/slowfast/utils/ava_evaluation/standard_fields.py models/SlowFast/slowfast/models/resnet_helper.py models/SlowFast/slowfast/utils/metrics.py models/SlowFast/slowfast/datasets/ava_dataset.py models/SlowFast/slowfast/__init__.py models/SlowFast/slowfast/utils/ava_eval_helper.py models/SlowFast/slowfast/utils/logging.py models/SlowFast/slowfast/models/build.py two_stream2.py models/SlowFast/slowfast/utils/ava_evaluation/metrics.py datasets/hmdb51.py models/SlowFast/slowfast/datasets/utils.py models/SlowFast/slowfast/utils/distributed.py models/representation_flow/kinetics_2p1d_model.py models/BERT/__init__.py models/SlowFast/slowfast/utils/ava_evaluation/label_map_util.py models/BERT/utils/layer_norm.py models/SlowFast/slowfast/utils/c2_model_loading.py models/SlowFast/slowfast/datasets/ava_helper.py models/rgb_I3D.py datasets/__init__.py utils/architecture_transform.py scripts/eval/VideoSpatialPrediction3D_bert.py models/SlowFast/slowfast/utils/multiprocessing.py models/non_local/models/resnet.py models/SlowFast/slowfast/datasets/kinetics.py models/non_local/eval.py 
models/BERT/embedding/__init__.py models/representation_flow/__init__.py models/TSM/non_local.py models/non_local/utils/util.py models/SlowFast/slowfast/utils/ava_evaluation/object_detection_evaluation.py opt/AdamW.py models/SlowFast/slowfast/models/stem_helper.py models/SlowFast/slowfast/models/head_helper.py models/rgb_r2plus1d.py models/BERT/utils/__init__.py models/poseNet/poseNet.py models/SlowFast/slowfast/models/custom_video_model_builder.py models/SlowFast/tools/test_net.py models/NLB/NLBlockND.py models/SlowFast/slowfast/utils/weight_init_helper.py models/SlowFast/slowfast/datasets/loader.py models/SlowFast/slowfast/utils/bn_helper.py models/poseNet/__init__.py models/SlowFast/slowfast/utils/ava_evaluation/per_image_evaluation.py scripts/eval/VideoSpatialPrediction3D.py models/SlowFast/slowfast/utils/benchmark.py models/non_local/utils/train.py models/SlowFast/slowfast/datasets/build.py models/BERT/embedding/segment.py models/SlowFast/slowfast/models/__init__.py models/SlowFast/slowfast_connector_deneme.py models/BERT/attention/__init__.py models/SlowFast/slowfast/datasets/decoder.py models/BERT/embedding/bert.py models/non_local/models/nonlocal_helper.py models/SlowFast/slowfast/datasets/transform.py models/BERT/bert.py models/SlowFast/slowfast/models/video_model_builder.py validate build_model build_model_validate AverageMeter accuracy save_checkpoint build_model_continue main train validate build_model build_model_validate adjust_learning_rate2 adjust_learning_rate3 AverageMeter adjust_learning_rate4 accuracy save_checkpoint adjust_learning_rate build_model_continue main train CenterCrop ToTensor ToTensorPose pose_one_hot_decoding2 MultiScaleFixedCrop Reset ToTensor3 ToTensor2 RandomHorizontalFlip pose_one_hot_decoding Lambda Compose Normalize3 RandomVerticalFlip Normalize MultiScaleCrop Normalize2 DeNormalize RandomSizedCrop Scale rawPoseAugmentation find_classes ReadSegmentFlow make_dataset ReadSegmentRGB hmdb51 find_classes ReadSegmentFlow make_dataset ucf101 ReadSegmentRGB flow_I3D64f_bert2_FRMB InceptionI3d MaxPool3dSamePadding rgb_I3D64f_bert2 rgb_I3D64f_bert2_FRMB _inception_flow flow_I3D64f_bert2_FRAB flow_I3D64f_bert2 flow_I3D64f rgb_resnet50I3D64f rgb_I3D64f_bert2_FRAB InceptionModule _inception Unit3D rgb_I3D64f rgb_r2plus1d_64f_34_bert10 rgb_r2plus1d_32f_34_bert10 rgb_r2plus1d_32f_34 _trained_resnext101 get_fine_tuning_parameters rgb_resneXt3D64f101_concatenation rgb_resneXt3D64f101 flow_resneXt3D64f101_bert10_FRAB flow_resneXt3D64f101 ResNeXtBottleneck rgb_resneXt3D64f101_adamw resnext3D101 downsample_basic_block rgb_resneXt3D64f101_FRMB_lstm rgb_resneXt3D64f101_bert10_FRAB resnext3D50 flow_mars_resnext3D64f101 rgb_resneXt3D64f101_FRMB_NLB_concatenation ResNeXt conv3x3x3 _trained_resnext101_flow rgb_resneXt3D64f101_bert10_FRMB flow_resneXt3D64f101_bert10_FRMB rgb_resneXt3D64f101_FRMB_adamw resnext3D152 rgb_mars_resnext3D64f101 rgb_slowfast64f_50_bert10_FRAB_early rgb_slowfast64f_50 rgb_slowfast64f_50_bert10_FRMB_early Bottleneck rgb_slowfast64f_50_bert10_FRMB_late rgb_slowfast64f_50_bert10_FRAB_late BERT BERT3 BERT6 BERT7 BERT4 BERT5_BOTH BERT5 BERT2 MaskedLanguageModel BERTLM NextSentencePrediction TransformerBlock TransformerBlock2 MultiHeadedAttention MultiHeadedAttention2 Attention Attention2 BERTEmbedding4 BERTEmbedding BERTEmbedding3 BERTEmbedding2 LearnedPositionalEmbedding3 LearnedPositionalEmbedding2 PositionalEmbedding LearnedPositionalEmbedding SegmentEmbedding TokenEmbedding PositionwiseFeedForward GELU LayerNorm SublayerConnection2 SublayerConnection 
NLBlockND test KineticsMultiCrop parse_annotations Kinetics ModelBuilder add_nonlocal spacetime_nonlocal FrozenBN I3Res50 Bottleneck I2Res50 i3_res50_nl freeze_bn i3_res50 I3Res50_8x8 NonLocalBlock res_stage_nonlocal _add_shortcut_3d _generic_residual_block_3d bottleneck_transformation_3d create_model GroupResize GroupCenterCrop GroupNormalize ToTensor LoopPad GroupRandomHorizontalFlip GroupRandomCrop hook train save clip_transform accuracy batch_cuda kinetics_mean_std L1Part OpenPose openPose concatLayer openPoseL2Part VGG L2Part stage r3d_18 Conv3DNoTemporal Bottleneck mc3_18 BasicBlock flow_r2plus1d_34_32_ig65m R2Plus1dStem r2plus1d_34_8_kinetics r2plus1d_34_8_ig65m r2plus1d_34 _video_resnet Conv2Plus1D r2plus1d_34_32_kinetics flow_r2plus1d_34 Conv3DSimple r2plus1d_18 r2plus1d_34_32_ig65m BasicStem VideoResNet ResNet3D resnet_3d_v1 Bottleneck3D Block3D SamePadding resnet_50_rep_flow FlowLayer slowfast_50 create_args add_custom_config get_cfg _assert_and_infer_cfg Ava get_keyframe_data load_boxes_and_labels load_image_lists get_num_boxes_used build_dataset Charades horizontal_flip_list random_scale_jitter saturation color_jitter flip_boxes lighting_list horizontal_flip random_sized_crop_list CHW2HWC spatial_shift_crop_list center_crop random_crop_list contrast color_jitter_list brightness crop_boxes scale lighting pad_image scale_boxes contrast_list grayscale random_sized_crop saturation_list color_normalization random_short_side_scale_jitter_list clip_boxes_to_image blend HWC2CHW brightness_list random_scale_jitter_list decode torchvision_decode get_start_end_idx pyav_decode_stream pyav_decode temporal_sampling Kinetics construct_loader shuffle_dataset detection_collate Ssv2 grayscale random_crop uniform_crop color_normalization horizontal_flip random_short_side_scale_jitter lighting_jitter crop_boxes clip_boxes_to_image blend color_jitter contrast_jitter saturation_jitter brightness_jitter load_image_lists pack_pathway_output get_sequence aggregate_labels spatial_sampling convert_to_video_level_labels tensor_normalize retry_load_images as_binary_vector get_video_container SubBatchNorm3d NaiveSyncBatchNorm3d GroupGather get_norm build_model ResNetBasicHead get_loss_func Nonlocal construct_optimizer set_lr get_epoch_lr BasicTransform ResBlock get_trans_func BottleneckTransform ResStage VideoModelStem ResNetBasicStem ResNet FuseFastToSlow SlowFast write_results evaluate_ava_from_files get_ava_eval_data read_exclusions run_evaluation make_image_key read_labelmap read_csv evaluate_ava benchmark_data_loading compute_and_update_bn_stats get_name_convert_func make_checkpoint_dir get_path_to_checkpoint load_checkpoint is_checkpoint_epoch get_last_checkpoint get_checkpoint_dir save_checkpoint has_checkpoint inflate_weight is_master_proc get_local_size init_process_group synchronize all_gather_unaligned init_distributed_training get_world_size get_local_rank all_reduce _get_global_gloo_group all_gather get_rank _serialize_to_tensor _pad_to_largest_tensor setup_environment log_json_stats _suppress_print get_logger setup_logging get_lr_at_epoch lr_func_steps_with_relative_lrs lr_func_cosine get_lr_func get_step_index ScalarMeter TrainMeter get_ava_mini_groundtruth AVAMeter get_map TestMeter ValMeter topk_accuracies topk_errors topks_correct cpu_mem_usage params_count check_nan_losses frozen_bn_stats plot_input is_eval_epoch aggregate_split_bn_stats gpu_mem_usage log_model_info get_flop_stats run parse_args load_config init_weights create_category_index_from_labelmap create_category_index 
create_class_agnostic_category_index get_max_label_map_index _validate_label_map get_label_map_dict convert_label_map_to_categories load_labelmap compute_average_precision compute_cor_loc compute_precision_recall BoxList multi_class_non_max_suppression sort_by_field iou clip_to_window _update_valid_indices_by_removing_high_iou_boxes concatenate filter_scores_greater_than _copy_extra_fields area SortOrder ioa change_coordinate_frame prune_non_overlapping_boxes prune_outside_window scale intersection gather non_max_suppression BoxMaskList multi_class_non_max_suppression sort_by_field iou concatenate filter_scores_greater_than area ioa box_list_to_box_mask_list intersection gather prune_non_overlapping_masks non_max_suppression iou area ioa intersection iou area ioa intersection OpenImagesDetectionEvaluator WeightedPascalDetectionEvaluator DetectionEvaluator ObjectDetectionEvaluator ObjectDetectionEvaluation WeightedPascalInstanceSegmentationEvaluator PascalInstanceSegmentationEvaluator PascalDetectionEvaluator PerImageEvaluation DetectionResultFields BoxListFields InputDataFields TfExampleFields main main perform_test test calculate_and_update_precise_bn train train_epoch eval_epoch _NonLocalBlockND make_non_local NONLocalBlock2D NONLocalBlock3D NONLocalBlock1D NL3DWrapper TemporalShift TemporalPool make_temporal_shift make_temporal_pool InplaceShift AdamW main buildModel main buildModel main buildModel VideoSpatialPrediction3D VideoSpatialPrediction3D_bert determine_architecture_transform2 determine_architecture_transform rgb_3d_model_path_selection CenterCrop ToTensor ToTensorPose pose_one_hot_decoding2 MultiScaleFixedCrop Reset ToTensor3 ToTensor2 RandomHorizontalFlip pose_one_hot_decoding Lambda Compose Normalize3 RandomVerticalFlip Normalize MultiScaleCrop Normalize2 DeNormalize RandomSizedCrop Scale rawPoseAugmentation validate SGD num_seg DataLoader ReduceLROnPlateau save_checkpoint modules arch export_scalars_to_json dataset cuda max str contine settings len half epochs parse_args range SummaryWriter format build_model param_groups Compose close Normalize build_model_continue float int join evaluate isinstance print build_model_validate AdamW add_scalar min parameters split BatchNorm2d train step makedirs join str print rgb_3d_model_path_selection DataParallel arch dataset cuda split load str join print DataParallel eval load_state_dict arch dataset cuda split load str join print SGD parameters load_state_dict arch dataset cuda split data iter_size model zero_grad cuda view transpose half update format avg enumerate time criterion backward print AverageMeter accuracy step add_scalar eval AverageMeter time copyfile join save topk size t eq mul_ expand_as append sum max AdamW DataParallel print param_groups sum lr print param_groups lr print param_groups lr print param_groups lr sort print exit COLOR_BGR2RGB print concatenate exit IMREAD_COLOR IMREAD_GRAYSCALE resize append INTER_LINEAR imread range cvtColor len concatenate print exit IMREAD_COLOR expand_dims IMREAD_GRAYSCALE resize append INTER_LINEAR imread range len load InceptionI3d load_state_dict load InceptionI3d load_state_dict data isinstance FloatTensor Variable zero_ avg_pool3d cuda cat append format range named_parameters ResNeXt ResNeXt ResNeXt Conv3d ResNeXt load update ResNeXt load_state_dict state_dict load update Conv3d ResNeXt load_state_dict state_dict ResNeXt items list defaultdict join zip print batch_cuda accuracy add eval item sum net values enumerate parse save Softmax Reshape ConstantFill MaxPool SpatialBN 
Scale ConvNd BatchMatMul Sum spacetime_nonlocal data eps FrozenBN num_features set_params setattr named_children dir momentum running_mean getattr running_var load I3Res50 load_state_dict load I3Res50 load_state_dict Conv3dBN Relu_ Sum trans_func _add_shortcut_3d Relu_ int format add_nonlocal astype _generic_residual_block_3d append range len res_stage_nonlocal MaxPool Relu _generic_residual_block_3d SpatialBN ConvNd AveragePool FC setattr print state_dict int list defaultdict items batch_cuda add item sum max net values len update items list cuda Compose kinetics_mean_std update OpenPose File load_state_dict state_dict Sequential openPose load_state_dict_from_url load_state_dict VideoResNet Conv2Plus1D isinstance BatchNorm3d in_features VideoResNet modules load_state_dict load_state_dict_from_url Linear Conv2Plus1D isinstance BatchNorm3d in_features Conv3d VideoResNet repeat modules load_state_dict load_state_dict_from_url Linear load load_state_dict resnet_3d_v1 parse_args add_argument ArgumentParser load join create_args SlowFast load_state_dict load_config USE_PRECISE_STATS join defaultdict info join list DETECTION_SCORE_THRESH format info zip keys values len list info append keys range len capitalize minimum maximum int uniform floor float round int float floor resize int float floor append shape swapaxes ceil int range len permutation arange saturation_list append brightness_list range contrast_list len normal reshape repeat append sum array range range pad copy int randint ceil int int uniform round scale int uniform round int sqrt uniform resize randint float round range normal reshape repeat sum array range int astype float32 sqrt uniform resize append randint float round range copy grayscale uniform dtype uniform astype grayscale uniform fill mean grayscale uniform blend append dtype astype blend uniform append grayscale mean blend uniform fill append permutation arange contrast brightness saturation append range len index_select long linspace uniform max decode max pts seek _read_video_from_memory int audio_sample_rate _probe_video_from_memory denominator video_timebase from_numpy get_start_end_idx has_audio video_fps video_duration has_video audio_duration frombuffer numerator audio_timebase int as_tensor average_rate duration close frames get_start_end_idx stack pyav_decode_stream float video pyav_decode get_start_end_idx temporal_sampling torchvision_decode list view concatenate default_collate zip float keys DATASET int BATCH_SIZE NUM_GPUS DataLoader build_dataset sampler isinstance set_epoch int uniform floor float round copy int randint flip copy ceil int copy tensor saturation_jitter contrast_jitter brightness_jitter blend zeros uniform shape grayscale uniform mean blend grayscale uniform blend normal zeros_like reshape repeat sum array range zeros_like len all warn stack sleep as_tensor range list range len REVERSE_INPUT_CHANNEL index_select long random_crop random_short_side_scale_jitter uniform_crop horizontal_flip zeros set append len range aggregate_labels list keys tensor float open DistributedDataParallel MODEL_NAME current_device append named_parameters param_groups defaultdict set set read_exclusions read_csv run_evaluation read_labelmap write_results time get_ava_eval_data run_evaluation info len evaluate add_single_ground_truth_image_info add_single_detected_image_info pprint info append PascalDetectionEvaluator int defaultdict tolist append round range enumerate time info setup_environment NUM_EPOCHS Timer seed cpu_mem_usage BATCH_SIZE LOG_PERIOD append range 
format mean pformat info manual_seed seconds enumerate setup_logging RNG_SEED tqdm reset NUM_SHARDS construct_loader std len isinstance model islice running_mean running_var cuda range enumerate len join makedirs format get_checkpoint_dir get_checkpoint_dir get_checkpoint_dir get_path_to_checkpoint makedirs items list format clone OrderedDict shape repeat info load name_convert_func list format tuple clone warn OrderedDict shape load_state_dict info keys get_name_convert_func inflate_weight append cat get_world_size get_world_size mul_ set_device is_initialized barrier get_world_size format getLogger from_buffer dumps get_rank warning get_backend device to len get_world_size all_gather tensor max zeros cat _serialize_to_tensor _get_global_gloo_group loads all_gather zip append max _pad_to_largest_tensor list new_group NUM_GPUS get_world_size range is_master_proc basicConfig _suppress_print format get_logger dumps info WARMUP_EPOCHS WARMUP_START_LR get_step_index STEPS enumerate list keys range mean format average_precision_score info topk t eq expand_as max topks_correct topks_correct isnan max_memory_allocated total available virtual_memory flop_count pack_pathway_output TRAIN_CROP_SIZE rand ENABLE NUM_FRAMES TEST_CROP_SIZE tensor sum cuda range values len format params_count system info gpu_mem_usage get_flop_stats subplots vlines text min axis imshow savefig permute hlines max range eval BatchNorm3d modules isinstance aggregate_stats children isinstance set_device func init_process_group add_argument ArgumentParser print_help merge_from_file cfg_file hasattr num_shards make_checkpoint_dir get_cfg merge_from_list rng_seed output_dir opts OUTPUT_DIR shard_id c2_msra_fill fill_ isinstance BatchNorm3d Conv3d normal_ modules zero_ Linear item name id display_name item info append range _validate_label_map item id load_labelmap max convert_label_map_to_categories load_labelmap argsort cumsum astype concatenate maximum sum range len get_coordinates add_field get_extra_fields size BoxList get_field get_field argsort get sort_by_field arange iou filter_scores_greater_than squeeze logical_and num_boxes append expand_dims full range get add_field sort_by_field zeros_like filter_scores_greater_than concatenate reshape BoxList num_boxes get_field range append non_max_suppression get add_field array_split get_extra_fields hstack BoxList get_field get array_split _copy_extra_fields hstack area astype BoxList fmax int32 fmin greater_equal ioa gather array amax get array_split reshape hstack where logical_not max add_field get_extra_fields BoxList shape vstack get_field astype int32 BoxList get _copy_extra_fields scale get_field get_extra_fields add_field max BoxMaskList get_extra_fields get_field append get_masks BoxMaskList greater_equal ioa gather array amax append minimum transpose maximum shape zeros split expand_dims area intersection expand_dims area intersection sum arange spawn benchmark_data_loading load_config run ENABLE test model cuda iter_tic list log_iter_stats iter_toc ENABLE update_stats all_gather finalize_metrics range cat eval all_gather_unaligned enumerate items isinstance reset cpu len is_master_proc ENSEMBLE_METHOD NUM_SPATIAL_CROPS CHECKPOINT_FILE_PATH dataset OUTPUT_DIR TestMeter log_model_info NUM_ENSEMBLE_VIEWS seed ENABLE get_last_checkpoint AVAMeter MULTI_LABEL format build_model info manual_seed has_checkpoint init_distributed_training perform_test setup_logging RNG_SEED load_checkpoint NUM_CLASSES construct_loader len loss_fun model check_nan_losses zero_grad cuda iter_tic 
set_lr list log_iter_stats iter_toc get_epoch_lr ENABLE update_stats MULTI_LABEL range size NUM_GPUS log_epoch_stats item float enumerate items isinstance backward topks_correct all_reduce reset train step len model cuda iter_tic list log_iter_stats iter_toc ENABLE update_stats all_gather MULTI_LABEL range cat size NUM_GPUS log_epoch_stats eval all_gather_unaligned enumerate items isinstance update_predictions topks_correct all_reduce reset cpu len update_bn_stats _gen_loader CHECKPOINT_PERIOD CHECKPOINT_FILE_PATH save_checkpoint OUTPUT_DIR log_model_info seed ENABLE get_last_checkpoint is_eval_epoch shuffle_dataset AVAMeter aggregate_split_bn_stats range build_model construct_optimizer TrainMeter pformat info manual_seed init_distributed_training eval_epoch ValMeter setup_logging NUM_BATCHES_PRECISE MAX_EPOCH RNG_SEED load_checkpoint is_checkpoint_epoch train_epoch calculate_and_update_precise_bn construct_loader ResNet Sequential isinstance NL3DWrapper layer1 make_block_temporal format print layer3 layer4 layer2 isinstance print ResNet TemporalPool layer2 load update DataParallel eval load_state_dict cuda state_dict window save VideoSpatialPrediction3D argmax open VideoSpatialPrediction3D_bert append window_val readlines mean buildModel time norm arch_flow confusion_matrix arch_rgb enumerate floor resize INTER_LINEAR val_transform list COLOR_BGR2RGB append ceil expand_dims imread range format concatenate Compose copy Normalize IMREAD_GRAYSCALE IMREAD_UNCHANGED float listdir int join reshape zeros numpy cvtColor len floor resize INTER_LINEAR val_transform COLOR_BGR2RGB append expand_dims imread range format concatenate Compose copy Normalize IMREAD_GRAYSCALE IMREAD_UNCHANGED listdir int join numpy cvtColor len print Compose ToTensor Scale ToTensor2 Normalize append print Compose ToTensor Scale ToTensor2 Normalize append
# LateTemporalModeling3DCNN

Official PyTorch implementation of [Late Temporal Modeling in 3D CNN Architectures with BERT for Action Recognition](https://arxiv.org/pdf/2008.01232.pdf). This is the repository that implements late temporal modeling on top of 3D CNN architectures, with a main focus on BERT for this purpose.

## Installation

For the installation, you need to install conda. The environment may also contain unnecessary packages, but we want to provide the complete environment that we are using.

Create the environment with the command `conda env create -f LateTemporalModeling3D.yml`, then activate it with `conda activate LateTemporalModeling3D`.

Later, please download the necessary files from the link, and copy them into the main directory.
1,435
artetxem/uncovec
['word embeddings']
['Uncovering divergent linguistic information in word embeddings with lessons for intrinsic and extrinsic evaluation']
sts/sts_utils.py sts/sts_centroid.py sts/sts_preprocess.py post-process.py read_embeddings write_embeddings main main main save_sentences save_labels read_embeddings read_data pearson remove_stopwords centroid_cosine strip_punctuation length_normalize_embeddings cosine recase centroid tokenize remove_oovs int fromstring append empty range split join print shape range len add_argument dot eigh ArgumentParser alpha parse_args read_embeddings data normalize read_data format pearson print centroid_cosine shape length_normalize_embeddings embeddings range zeros open print join close open print close open save_labels encoding save_sentences input_dir output_dir sqrt sum append float split append lower remove_oovs
UncoVec
==============

This is an open source implementation of our word embedding post-processing and evaluation framework, described in the following paper:

Mikel Artetxe, Gorka Labaka, Iñigo Lopez-Gazpio, and Eneko Agirre. 2018. **[Uncovering divergent linguistic information in word embeddings with lessons for intrinsic and extrinsic evaluation](https://arxiv.org/pdf/1809.02094.pdf)**. In *Proceedings of the 22nd Conference on Computational Natural Language Learning (CoNLL 2018)*.

If you use this software for academic research, please cite the paper in question:
```
@inproceedings{artetxe2018conll,
  author = {Artetxe, Mikel and Labaka, Gorka and Lopez-Gazpio, Inigo and Agirre, Eneko},
  title = {Uncovering divergent linguistic information in word embeddings with lessons for intrinsic and extrinsic evaluation},
  booktitle = {Proceedings of the 22nd Conference on Computational Natural Language Learning (CoNLL 2018)},
```
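The dependency listing shows that `post-process.py` builds its transformation from an `eigh` call and an `alpha` argument. The snippet below is only a rough sketch of one plausible reading of that α-parameterized linear post-processing — X ↦ X Q D^{α/2} with X^T X = Q D Q^T, so α = 0 keeps the original first-order similarities while α = 1 moves toward second-order similarities; the exact convention should be checked against the paper and `post-process.py`.

```python
import numpy as np

def postprocess(X, alpha):
    """alpha-parameterized linear post-processing of an embedding matrix X
    (vocab x dim).  Sketch only; check post-process.py for the exact formula."""
    M = X.T @ X                              # dim x dim second-moment matrix
    eigvals, Q = np.linalg.eigh(M)           # M = Q diag(eigvals) Q^T
    eigvals = np.maximum(eigvals, 1e-12)     # guard against round-off for negative alpha
    W = Q * eigvals ** (alpha / 2.0)         # Q @ diag(eigvals ** (alpha / 2))
    return X @ W
```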
1,436
arthua196/Leakage-Neutral-Learning-for-QuoraQP
['selection bias']
['Selection Bias Explorations and Debias Methods for Natural Language Sentence Matching Datasets']
quantify/propensity.py quantify/leaky_predict.py debias/main.py debias/make_data.py debias/utils.py make_model_f make_model_adn make_model_g set_trainability make_model_k text_cleaning get_logger ResultRecorder DataGenerator encode_data read_data extract_unlexicalized train_and_evaluate extract_network_based extract_leakage get_model extract_deepwalk run calculate_weight_fraction leaky_extracting layers lstm_layer concatenate Embedding embedding_layer lstm_cell Model summary info Input range compile info Model summary range compile info Model summary range compile info set_trainability compile model_f Model summary model_k Input model_g join lower sub split stdout getLogger addHandler StreamHandler setLevel FileHandler transform LabelEncoder append fit_transform fit apply coo_matrix sum max values print values len concatenate print system zip append zeros sum max values preferential_attachment add_edge list concatenate print Graph jaccard_coefficient adamic_adar_index add_nodes_from resource_allocation_index zip max range values add_node load encode_data permutation arange extract_unlexicalized extract_deepwalk dump print len extract_network_based apply extract_leakage read_csv fillna open Model Input range compile to_categorical ReduceLROnPlateau accuracy_score argmax max values ones len predict concatenate mean load_weights scale RandomForestClassifier load print EarlyStopping ModelCheckpoint get_model array fit train_and_evaluate read_data to_csv apply coo_matrix sum max values
# Selection Bias Explorations and Debias Methods for Natural Language Sentence Matching Datasets

This is the code for [Selection Bias Explorations and Debias Methods for Natural Language Sentence Matching Datasets](<https://arxiv.org/abs/1905.06221>), which has been accepted by ACL 2019.

## Folders

*<u>quantify</u>* contains the code for generating weights and the code for *Section 2.1 Quantifying the Biasedness in Datasets*, in which we explore the severity of the leakage in six NLSM datasets.

*<u>debias</u>* contains the code for *Section 5 Experimental Results for the Leakage-neutral Method on QuoraQP*, where we apply our leakage-neutral learning to QuoraQP with a classical Siamese-LSTM model.

**Usage and requirements are stated inside the folders.**

## Datasets

We use the following six datasets in our paper:

- [QuoraQP](<https://drive.google.com/file/d/0B0PlTAo--BnaQWlsZl9FZ3l1c28/view>)
- [MSRP](<https://www.microsoft.com/en-us/download/details.aspx?id=52398>)
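For the debiasing step, `quantify/propensity.py` estimates how predictable the label is from leakage features, and the `debias/` code reweights training examples (see `calculate_weight_fraction`). The exact scheme lives in those files; the snippet below is only a generic inverse-propensity sketch of the idea, where `propensity` is an already-estimated P(label = 1 | leakage features) and `target_rate` is the label prior the reweighted data should match — both names are illustrative, not the repository's API.

```python
import numpy as np

def leakage_neutral_weights(labels, propensity, target_rate):
    """Generic inverse-propensity example weights (illustrative sketch;
    see the debias/ folder for the scheme actually used in the paper)."""
    propensity = np.clip(propensity, 1e-6, 1.0 - 1e-6)
    w_pos = target_rate / propensity                    # up-weight positives the leakage features under-predict
    w_neg = (1.0 - target_rate) / (1.0 - propensity)    # and negatives they over-predict
    return np.where(labels == 1, w_pos, w_neg)
```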
1,437
arthurhero/ZbuffDepth
['depth estimation', 'monocular depth estimation']
['Improved Point Transformation Methods For Self-Supervised Depth Prediction']
bts_orig.py run_stereo.py dataloader.py evaluation_utils.py generate_valid.py validate.py evaluate_kitti.py utils.py data_process.py ops.py loss.py bts weights_init_xavier BtsModel reduction_1x1 silog_loss upconv local_planar_guidance _resize_like encoder bn_init_as_tf atrous_conv StereoKitti KITTIDataset MySampler get_diff_means load_timediff load_pos process_kitti_integ visualize_img convert_to_colormap get_ego_matrix integ_sequence padRT visualize_depth download_kitti load_depth get_rot_mat exec_cmd visualize_img_depth convert_to_colormap_batch hom2euc cam2laser test_reverse test_projection get_trans_mat load_proj process_kitti_raw euc2hom load_img load_calib load_odometry load_pc list_folder get_stereo_matrices scatter_pixel depth2color_map laser2cam eval_eigen sub2ind lin_interp convert_disps_to_depths_kitti read_calib_file generate_depth_map compute_errors read_file_data read_text_lines load_gt_disp_kitti load_velodyne_points get_focal_length_baseline parse_args generate convert_image_to_depth_path smooth_loss gradient_y gradient_smooth_loss match_loss gradient_x evaluation_errors ssim recon_loss inf_loss unsmooth_loss img_sampling_bilinear register_pc get_depth ego_transform depth2pc img_sampling parse_args train lr_lamb load_ckpt freeze_params init_weights unfreeze_params save_ckpt evaluate BatchNorm2d eval isinstance isinstance Conv2d xavier_uniform_ zeros_ bias weight shape interpolate communicate Popen split close add set splitlines exec_cmd open exec_cmd split print close list_folder splitlines exec_cmd open opj join load_calib float64 padRT load_proj zeros get_ego_matrix list load_img load_calib float64 len padRT matmul load_pos stack load_proj append zeros exec_cmd range split integ_sequence list str savez print len close list_folder stack splitlines split append exec_cmd range open float64 imread astype reshape close open reshape close matmul splitlines zeros open concatenate close open len open splitlines append range split fromfile reshape astype float64 list asarray reshape close splitlines append zeros open ones shape shape zeros hom2euc zeros_like padRT matmul euc2hom range hom2euc zeros_like inv padRT matmul range euc2hom zeros cos matmul sin eye inv cos padRT matmul get_trans_mat get_rot_mat imread astype namedWindow waitKey imshow convert_to_colormap WINDOW_NORMAL numpy imwrite namedWindow destroyAllWindows clone waitKey WINDOW_NORMAL imshow convert_to_colormap float numpy cat detach namedWindow waitKey imshow WINDOW_NORMAL numpy clamp clone format hom2euc cam2laser load_calib zeros_like inv load_pc matmul load_pos range euc2hom laser2cam get_ego_matrix zeros_like abs max list waitKey padRT matmul shape imshow load_depth append range format mean stack load_proj euc2hom var load_img load_calib print load_pc min len format load_calib ones float64 print load_pc inv padRT matmul load_proj zeros view scatter_ expand zero_ long cmapper print min log10 get_cmap numpy max append unsqueeze cat resize generate_depth_map exit logical_and read_file_data shape read_text_lines append to range format astype mean eval load_img load_ckpt isinstance print compute_errors float32 int32 isfile zeros len maximum mean sqrt abs log astype float32 zfill append imread range shape resize append range len readlines close open format print int32 isfile append split reshape T arange LinearNDInterpolator reshape meshgrid set reshape read_calib_file int T sub2ind lin_interp read_calib_file reshape hstack min dot shape vstack round eye zeros load_velodyne_points add_argument ArgumentParser sep split 
seed list randint dirname abspath parse_args range len unsqueeze sum double exp gradient_y mean zero_ gradient_x sum exp gradient_y mean pow gradient_x double mean pow gradient_y gradient_x mean pool AvgPool2d clamp MaxPool2d float sum mean pow zero_ Tensor max shape view zero_ bmm bmm view clone expand shape inverse zero_ cpu shape view index_select clamp view clone shape index_select unsqueeze float long int time index_copy_ arange view img_sampling_bilinear print clone matmul shape index_select floor nonzero zero_ float sum long cat add_images LambdaLR sky_only_smooth zero_grad kitti_path nd_lambda DataParallel DataLoader floor visualize_img view smooth_lambda register_pc step exit Adam log_directory visualize_depth shape log_rate parse_args to convert_to_colormap_batch visualize_img_depth range cat img_sampling save_ckpt SummaryWriter format img_sampling_bilinear param_groups MySampler experiment_directory egodepth mkdir zero_ StereoKitti float num_epochs enumerate smooth_loss join deepcopy load_ckpt evaluate backward print clone parameters isfile ssim_lambda depth2pc add_scalar xavier_uniform_ weight fill_ parameters parameters dict join save state_dict load join items load_state_dict enumerate load_ckpt isinstance print KITTIDataset exit eval DataLoader isfile to
# ZbuffDepth A self-supervised monocular depth learning method utilizing image reconstruction loss, with the point occlusion issue solved by the novel z-buffer. This repo trains and tests the model on stereo pairs using the Eigen splits. # Author(s) Ziwen Chen (github: arthurhero) Zixuan Guo (github: Olament) ## Usage ### Preparing 1. Create a blank folder.
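A minimal numpy sketch of the z-buffer idea mentioned in the entry above: when several projected 3D points land on the same pixel, keep only the nearest one so occluded points do not corrupt the reconstruction loss. This is not the repo's PyTorch implementation; shapes and names are illustrative assumptions.

```python
import numpy as np

def zbuffer(us, vs, depths, height, width):
    """us, vs: integer pixel coords of projected points; depths: their depths."""
    zbuf = np.full((height, width), np.inf)
    for u, v, d in zip(us, vs, depths):
        if 0 <= v < height and 0 <= u < width and d < zbuf[v, u]:
            zbuf[v, u] = d          # nearer point wins the pixel
    zbuf[np.isinf(zbuf)] = 0.0      # pixels hit by no point
    return zbuf

# toy usage: two points hit the same pixel, only the closer one survives
print(zbuffer(np.array([3, 3]), np.array([2, 2]), np.array([5.0, 2.0]), 4, 6)[2, 3])  # 2.0
```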
1,438
arundhatikurup/3DResnet
['action recognition']
['Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?']
generate_result_video/generate_result_video.py train.py validation.py models/pre_act_resnet.py models/resnext.py temporal_transforms.py spatial_transforms.py test.py dataset.py models/wide_resnet.py opts.py mean.py models/densenet.py classify.py main.py models/resnet.py model.py classify_video Video get_class_labels load_annotation_data video_loader make_dataset accimage_loader get_default_image_loader get_default_video_loader pil_loader get_video_names_and_annotations get_mean generate_model parse_opts CenterCrop ToTensor Compose Scale Normalize LoopPadding TemporalCenterCrop calculate_video_results test get_fps get_fine_tuning_parameters DenseNet densenet201 densenet169 densenet264 _DenseLayer _DenseBlock _Transition densenet121 conv3x3x3 get_fine_tuning_parameters resnet50 downsample_basic_block resnet152 PreActivationBasicBlock resnet34 resnet200 PreActivationBottleneck resnet18 PreActivationResNet resnet101 conv3x3x3 get_fine_tuning_parameters ResNet downsample_basic_block resnet50 Bottleneck resnet152 resnet34 resnet200 resnet18 resnet10 BasicBlock resnet101 ResNeXtBottleneck conv3x3x3 get_fine_tuning_parameters resnet50 downsample_basic_block ResNeXt resnet152 resnet101 conv3x3x3 get_fine_tuning_parameters WideBottleneck resnet50 downsample_basic_block WideResNet data Video model Variable size Compose tolist DataLoader sample_duration append LoopPadding max range cat enumerate join format image_loader append exists get_default_image_loader append items list format deepcopy list IntTensor append listdir range len densenet169 densenet201 resnet50 densenet264 DataParallel resnet101 resnet34 resnet200 resnet18 resnet152 resnet10 cuda densenet121 parse_args set_defaults add_argument ArgumentParser topk size mean stack append range update time format model print Variable cpu AverageMeter size eval calculate_video_results append range enumerate len decode format communicate len round float listdir Popen find DenseNet DenseNet DenseNet DenseNet append format range named_parameters data isinstance FloatTensor Variable zero_ avg_pool3d cuda cat PreActivationResNet PreActivationResNet PreActivationResNet PreActivationResNet PreActivationResNet PreActivationResNet ResNet ResNet ResNet ResNet ResNet ResNet ResNet ResNeXt ResNeXt ResNeXt WideResNet
# Video Classification Using 3D ResNet This is PyTorch code for video (action) classification using a 3D ResNet trained with [this code](https://github.com/kenshohara/3D-ResNets-PyTorch). The 3D ResNet is trained on the Kinetics dataset, which includes 400 action classes. This code takes videos as inputs and, in score mode, outputs class names and predicted class scores for every 16 frames. In feature mode, it outputs 512-dimensional features (after global average pooling) for every 16 frames. **A Torch (Lua) version of this code is available [here](https://github.com/kenshohara/video-classification-3d-cnn).** ## Requirements * [PyTorch](http://pytorch.org/) ``` conda install pytorch torchvision cuda80 -c soumith
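A hedged PyTorch sketch of the "score mode" described in the entry above: slice a video into 16-frame clips and score each clip with a 3D CNN. The tiny network below is a stand-in for the pretrained 3D ResNet, and the 400-class count and input size are taken as illustrative assumptions.

```python
import torch
import torch.nn as nn

clip_len, num_classes = 16, 400
model = nn.Sequential(
    nn.Conv3d(3, 8, kernel_size=3, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool3d(1), nn.Flatten(), nn.Linear(8, num_classes))

video = torch.rand(3, 64, 112, 112)              # (channels, frames, H, W), dummy frames
clips = video.split(clip_len, dim=1)             # one tensor per 16-frame clip
with torch.no_grad():
    for i, clip in enumerate(clips):
        scores = model(clip.unsqueeze(0)).softmax(dim=1)   # class probabilities per clip
        print(f"clip {i}: top class {scores.argmax().item()}")
```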
1,439
arvind1998/Neural-Style-Transfer
['style transfer']
['A Neural Algorithm of Artistic Style']
model.py Evaluator loss_and_gradient gram_matrix get_layers preprocessImage content_loss save_image total_loss style_loss resize expand_dims asarray open dict VGG16 concatenate gram_matrix batch_flatten permute_dimensions style_weight content_weight reshape finalOutput astype fromarray reshape astype imsave
<h1> Neural Style Transfer </h1> <h3> What is Style Transfer? </h3> It is the technique of recomposing a given image (content image) in the style of another image (style image). <h3> So how is it done? </h3> We compute the loss between the feature maps of the content image and the generated image, which gives us the content loss. Then we compute the style loss between the feature maps of the style image and the generated image. Their weighted sum gives us the total loss. Our goal is to minimize the total loss. <h2> Working of the model - </h2> We will be using a VGG-16 architecture pre-trained with "ImageNet" weights. In a Convolutional Neural Network, the deeper we go, the more abstract the extracted features become. This means the image starts becoming definite (the main subject is extracted). So in order to find the content loss, we need feature maps of the deeper layers, i.e. the hidden layers close to the output. According to the original paper, ![](/images/contentloss.png)
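A minimal numpy sketch of the losses described in the entry above (following Gatys et al.), not the repo's Keras/VGG-16 code: the feature maps are random arrays standing in for VGG activations, and the alpha/beta weights are illustrative.

```python
import numpy as np

def gram_matrix(feat):                       # feat: (channels, height, width)
    F = feat.reshape(feat.shape[0], -1)      # (N channels, M spatial locations)
    return F @ F.T

def content_loss(gen, content):
    return 0.5 * np.sum((gen - content) ** 2)

def style_loss(gen, style):
    N, M = gen.shape[0], gen.shape[1] * gen.shape[2]
    return np.sum((gram_matrix(gen) - gram_matrix(style)) ** 2) / (4.0 * N**2 * M**2)

rng = np.random.default_rng(0)
gen, content, style = (rng.standard_normal((64, 32, 32)) for _ in range(3))
alpha, beta = 1.0, 1e3                       # content vs. style weighting (illustrative)
total = alpha * content_loss(gen, content) + beta * style_loss(gen, style)
print(total)
```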
1,440
asadullah797/segan_impr
['speech enhancement']
['SEGAN: Speech Enhancement Generative Adversarial Network']
discriminator.py make_tfrecords.py generator.py ops.py bnorm.py main.py data_loader.py model.py VBN pre_emph de_emph read_and_decode discriminator AEGenerator Generator pre_emph_test main _int64_feature encoder_proc read_and_slice _bytes_feature slice_signal main Model SEAE SEGAN leakyrelu minmax_normalize minmax_denormalize tensor_summary prelu average_gradients batch_to_time atrous_conv1d conv2d conv1d gaussian_noise_layer highway time_to_batch deconv downconv histogram_summary sample_random_walk audio_summary residual_block variables_on_gpu0 scalar_summary reshape concat zeros range read TFRecordReader decode_raw float32 pre_emph cast parse_single_example as_list int expand_dims pre_emph float32 placeholder seed print save_path name append synthesis_path ConfigProto makedirs fft int hamming list zip append abs range len hamming read fft abs len join write SerializeToString tostring Example read_and_slice zip split join unlink out_file splitext random_normal zeros range reshape randn scalar histogram audio xavier_initializer expand_dims format _linear f sigmoid range as_list get_shape xavier_initializer as_list print xavier_initializer get_shape expand_dims get_variable concat reduce_mean zip append expand_dims
## SEGAN: Speech Enhancement Generative Adversarial Network ### Introduction This is the repository of the SEGAN project. Our original paper can be found [here](https://arxiv.org/abs/1703.09452), and test samples are available [here](http://veu.talp.cat/segan/). In this work, a generative adversarial approach is taken to speech enhancement (i.e. removing noise from corrupted speech signals) with a fully convolutional architecture schematized as follows: ![SEGAN_G](assets/segan_g.png) This model deals with raw speech waveforms on many noise conditions at different SNRs (40 at training time and 20 during test). It also models the speech characteristics from many speakers mixed within the same structure (without any supervision of identities), which makes the generative structure generalizable in the noise and speaker dimensions. **The whole project is developed with TensorFlow**. There are two repositories that were good references on how GANs are defined and deployed: * [improved-gan](https://github.com/openai/improved-gan): implementing improvements to train GANs in a more stable way * [DCGAN-tensorflow](https://github.com/carpedm20/DCGAN-tensorflow): implementation of the DCGAN in tensorflow ### Dependencies
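The file list for this entry includes pre_emph/de_emph helpers; below is a minimal numpy sketch of the standard pre-emphasis filter often applied to raw waveforms before enhancement, and its inverse. The 0.95 coefficient is an assumption for illustration, not taken from this repo.

```python
import numpy as np

def pre_emph(x, coef=0.95):
    return np.concatenate(([x[0]], x[1:] - coef * x[:-1]))

def de_emph(y, coef=0.95):
    x = np.zeros_like(y)
    x[0] = y[0]
    for n in range(1, len(y)):
        x[n] = y[n] + coef * x[n - 1]        # undo the filtering sample by sample
    return x

wav = np.sin(np.linspace(0, 20, 1000))       # dummy waveform
assert np.allclose(de_emph(pre_emph(wav)), wav)
```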
1,441
asafamr/SymPatternWSI
['word sense induction']
['Word Sense Induction with Neural biLM and Symmetric Patterns']
spwsi_elmo_batch_run.py spwsi/wsi_clustering.py spwsi/bilm_elmo.py spwsi/semeval_utils.py spwsi/bilm_interface.py spwsi/spwsi.py spwsi_elmo.py create_lemmatized_if_needed get_configs_cluster_size worker_init worker_do get_configs_random_search get_configs_ablations BilmElmo Bilm evaluate_labeling generate_sem_eval_2013 SPWSI cluster_inst_ids_representatives range range flip_coin random_log_uni_int copy choice randint get int seed time join setFormatter getLogger addHandler SPWSI Formatter BilmElmo info setLevel INFO FileHandler update run copy info create_lemmatized_vocabulary_if_needed load join info info list todense fit AgglomerativeClustering Counter dict TfidfTransformer make_pipeline info append keys enumerate len
### Word Sense Induction with Neural biLM and Symmetric Patterns This repository contains code for reproducing the results in the paper: [Word Sense Induction with Neural biLM and Symmetric Patterns](https://arxiv.org/abs/1808.08518) ##### Usage instructions: The code is written in Python 3.6 and runs the SemEval 2013 task 13 evaluation code provided by the SemEval 2013 workshop, which is written in Java and therefore requires an installed JRE. It also requires the AllenNLP Python package as well as a few other packages. Below are detailed instructions for running the code.
1,442
asafamr/bertwsi
['word sense induction']
['Towards better substitution-based word sense induction']
wsi/wsi.py wsi/lm_bert.py wsi/WSISettings.py wsi_bert.py wsi/slm_interface.py wsi/wsi_clustering.py wsi/semeval_utils.py get_batches LMBert evaluate_labeling_2010 evaluate_labeling_2013 get_n_senses_corr generate_sem_eval_2013_no_tokenization generate_sem_eval_2010_no_tokenization generate_sem_eval_2013 SLM WordSenseInductor cluster_inst_ids_representatives append load join info join info load join parse lemma_ replace basic_stem text print tag nlp getroot info append walk exists enumerate spearmanr list defaultdict keys info join info where most_common max values list todense coef_ cdist Counter pdist append sum fit_transform range asarray set mean info linkage keys enumerate items fit LinearSVC dict fcluster zeros array DictVectorizer len
### Towards better substitution-based word sense induction - Word Sense Induction with BERT A follow-up to https://github.com/asafamr/SymPatternWSI , adapted to BERT.<br> Paper: Towards better substitution-based word sense induction - https://arxiv.org/abs/1905.12598 ### prerequisites: Python 3.7<br> Install the requirements with `pip install -r requirements.txt`<br> This will install Python packages including pytorch and huggingface's BERT port.<br> (for CUDA support first install pytorch according to [their instructions](https://pytorch.org/)).<br> Run download_resources.sh to download datasets. ### WSI:
1,443
asagar60/Siamese-Neural-Networks-for-One-shot-Image-Recognition
['one shot learning']
['Siamese neural networks for one-shot image recognition']
siamese_train.py preprocess_data.py dataloader pkl_data anticlockwise_rotation affinetransform clockwise_rotation data_gen euclidean_dist siamese_network transform AffineTransform warp randint randint anticlockwise_rotation affinetransform clockwise_rotation epsilon sum maximum square
# Siamese Neural Networks for One-shot Image Recognition ![omniglot_grid](./output/omniglot_grid_cropped.jpg) Here we implement the paper http://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf For a theoretical explanation, check out my post on Medium: https://medium.com/@asagar60/one-shot-learning-using-keras-8c1cd62ac382 ## Brief Description Our model is given a tiny labelled training set S, which has N examples, each a vector of the same dimension with a distinct label y. ![Support_Set.JPG](./output/Support_Set.JPG) It is also given x_test, the test example it has to classify. Since exactly one example in the support set has the right class, the aim is to correctly predict which y ∈ S is the same as x_test’s label. ![task_25.png](./output/task_25.png) But the problem becomes more challenging as N increases: we now have to compare our test image with N different images and look for the highest probability for the correct class.
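A minimal numpy sketch of the N-way one-shot decision described in the entry above: embed the test image and each support image, then predict the support label whose embedding is closest. The "embedding" here is a random linear map standing in for the trained Siamese network, so all shapes and the toy data are assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)
embed = lambda x, W=rng.standard_normal((64, 784)): W @ x.ravel()

support = rng.random((5, 28, 28))                    # N=5 support images, one per class
labels = np.array([0, 1, 2, 3, 4])
x_test = support[2] + 0.01 * rng.random((28, 28))    # noisy copy of class 2

dists = [np.linalg.norm(embed(x_test) - embed(s)) for s in support]
print("predicted label:", labels[int(np.argmin(dists))])   # -> 2
```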
1,444
asarantsev/BayesianLT
['time series']
['A New Valuation Measure for the Stock Market']
version3.py version1.py code.py version2.py simulation finalSim kernel withdrawal BayesSimMulti ichi2 simulation_withdraw exp_plot regress BayesianRegression ichi2 BayesSimMulti BayesSim ichi2 simulation finalSim kernel withdrawal simulation_withdraw exp_plot yscale show list plot xlabel ylabel title figure range multivariate_normal ichi2 append normal range choice normal kernel mean append range show str simulation print xlabel title hist append round range kernel normal exp show str print xlabel title hist append simulation_withdraw round range multivariate_normal inv dot ichi2 len size array std linregress normal ichi2
# BayesianLT This is code and data for my article: A Note on New Valuation Measures for Standard & Poor Composite Index Returns, co-authored with Taran Grove and Michael Reyes. See arXiv:1905.04603. See code.py for code and Data.xlsx for data. Old code versions include version1, version2, version3. Now you can ignore them.
1,445
aseveryn/deep-qa
['question similarity']
['Injecting Relational Structural Representation in Neural Networks for Question Similarity']
conv1d.py alphabet.py run_nnet.py parse.py nn_layers.py extract_embeddings.py sgd_trainer.py utils.py Alphabet test convolve1d_4D_conv2d test_dynamic_k_max_pooling test_grad_2d test_kmax_pooling_time k_max_pooling Convolve1d test_convolve1d_4D_conv2d convolve1d_4D_conv2d_image dynamic_k_max_pooling convolve1d_4D_einsum test_kmax Convolve1d_4D _max_pooling convolve1d_4D_scan test_convolve1d_4D convolve1d_4D_numpy convolve1d_2D max_pooling test_kmax_pool convolve1d_4D kmax_pool_unroll _k_max_pooling convolve1d_2D_scan convolve1d_2D_numpy main load_senna_vec PairwiseL2SVMWithFeatsRegression DropoutLayer PairwiseOnlySimWithFeatsLayer FoldingLayerSym CRF PairwiseNoFeatsLayer FeedForwardNet LookupTableStatic NonLinearityLayerForConv1d PairwiseMultiOnlySimWithFeatsLayer TranformInputTo4DTensorLayer Conv1dLayer FoldingLayer LogisticRegression PairwiseLogisticWithFeatsRegression LookupTableFastStatic MaxOutFoldingLayer PairwiseLogisticOnlySimRegression __PairwiseLogisticWithFeatsRegression LinearLayer relu_f LookupTable dropout relu PairwiseLogisticRegression NonLinearityLayer PairwiseLayerMulti PadLayer KMaxPoolLayer Conv2dLayer ParallelLookupTable L2SVM FlattenLayer MaxPoolLayer FastDropoutLayer MaxOutLayer trace_back viterbi PairwiseLayer load_nnet PairwiseWithFeatsLayer PairwiseOnlySimScoreLayer LookupTableFast ConvolutionLayer ParallelLayer _PairwiseLogisticWithFeatsRegression build_shared_zeros Layer compute_overlap_idx compute_dfs compute_overlap_features load_data add_to_vocab convert2indices main MiniBatchIteratorConstantBatchSize _get_adadelta_updates get_adagrad_updates Trainer MiniBatchIterator get_adadelta_updates DatasetMiniBatchIterator get_sgd_updates iter_tweets_term_ test_iter_tweets score_semeval2015_term score_semeval2015 load_bin_vec iter_tweets_term load_SST_data test_iter_tweets_term iter_tweets is_ascii load dump fid print add Alphabet open as_strided strides shape flipud vstack zeros einsum flatten shape sum convolve1d_2D as_strided concatenate shape zeros einsum concatenate scan flatten shape zeros sum shape zeros scan concatenate dimshuffle scan convolve1d_4D_conv2d f_conv_scan f_conv list function convolve1d_4D_einsum randn f_conv_einsum f_conv_conv2d print timeit check_grads f_conv_conv2d_image convolve1d_4D allclose tensor4 convolve1d_4D_scan convolve1d_4D_conv2d_image verify_grad f_conv_scan f_conv function RandomState convolve1d_2D randn print randint matrix range convolve1d_2D_scan compute_grad join arange dimshuffle argsort flatten repeat as_tensor arange dimshuffle argsort flatten repeat arange dimshuffle sort maximum argsort flatten repeat cast ceil seed arange print reshape dynamic_k_max_pooling astype shuffle shape shared prod f_max function arange print reshape f_kmax max_pooling k_max_pooling shuffle tensor4 prod function print reshape timeit k_max_pooling max_pooling astype tensor4 _k_max_pooling flatten argsort join arange dimshuffle ones print lt rand randint argsort flatten eval repeat shared as_tensor convolve1d_4D_conv2d f_conv_scan verify_grad function RandomState convolve1d_4D_einsum randn f_conv_einsum f_conv_conv2d print randint tensor4 convolve1d_4D_scan range compute_grad dict loadtxt list zip seed join list load get items format basename print len load_bin_vec shape uniform save zeros keys open load set_value print params zip enumerate open binomial randint RandomStreams scan concatenate argmax scan zeros_like print strip readlines group match append enumerate split zip set intersection append float array len zip ones astype set intersection 
append enumerate items list defaultdict set len add get ones astype append enumerate function arange randn PairwiseNoFeatsLayer params FeedForwardNet zerout_dummy_word DataFrame max roc_auc_score ivector sorted set_value exit strftime LogisticRegression call LookupTableFastStatic savetxt lmatrix append sum LinearLayer y_pred training_cost dump RandomState NonLinearityLayer dmatrix KMaxPoolLayer get_adadelta_updates Conv2dLayer unique map_score predict_prob_batch ParallelLookupTable enumerate tanh FlattenLayer set_input MiniBatchIteratorConstantBatchSize time train_fn makedirs output to_csv tqdm LookupTableFast ParallelLayer norm print grad OrderedDict zip clip norm print grad OrderedDict eval zip append clip build_shared_zeros print sqr grad OrderedDict sqrt eval zip append sum clip build_shared_zeros norm print grad OrderedDict sqrt eval zip append clip build_shared_zeros dtype T print hstack astype shape vstack loadmat format zip print open sub split append enumerate len print open sub zip append enumerate split print open sub startswith append enumerate split print iter_tweets print set call write_predictions format predict call write_predictions format predict print iter_tweets_term
# OVERVIEW This code implements a convolutional neural network architecture for learning to match question and answer sentences described in the paper: Aliaksei Severyn and Alessandro Moschitti. *Learning to Rank Short Text Pairs with Convolutional Deep Neural Networks*. SIGIR, 2015 The network features a state-of-the-art convolutional sentence model and an advanced question-answer matching model, and introduces a novel relational model to encode related words in a question-answer pair. The addressed task is a popular answer sentence selection benchmark, where the goal is to select, for each question, the relevant answer sentences. The dataset was first introduced by (Wang et al., 2007) and further elaborated by (Yao et al., 2013). It is freely [available](http://cs.jhu.edu/~xuchen/packages/jacana-qa-naacl2013-data-results.tar.bz2). Evaluation is performed using the standard 'trec_eval' script. # DEPENDENCIES - python 2.7+ - numpy - [theano](http://deeplearning.net/software/theano/)
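A hedged sketch of the "relational" overlap idea mentioned above (the file list includes compute_overlap_features): tag every token with a binary flag saying whether it also occurs in the paired sentence, so the network can see which words the question and answer share. The stopword list and whitespace tokenization below are simplified assumptions, not the repo's code.

```python
def overlap_flags(sent_a, sent_b, stopwords={"the", "a", "of", "is", "was"}):
    tokens_a = sent_a.lower().split()
    vocab_b = set(sent_b.lower().split()) - stopwords
    return [(tok, int(tok in vocab_b)) for tok in tokens_a]

question = "who wrote the declaration of independence"
answer = "the declaration of independence was drafted by thomas jefferson"
print(overlap_flags(question, answer))
# [('who', 0), ('wrote', 0), ('the', 0), ('declaration', 1), ('of', 0), ('independence', 1)]
```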
1,446
asgsaeid/ComboLoss
['medical image segmentation', 'semantic segmentation']
['Combo Loss: Handling Input and Output Imbalance in Multi-Organ Segmentation']
combo_loss.py Combo_loss log flatten mean sum clip
# Combo loss This is the code corresponding to our CMIG 2019 paper, ["Combo loss: Handling input and output imbalance in multi-organ segmentation"](https://www.sciencedirect.com/science/article/abs/pii/S0895611118305688?via%3Dihub). If you use our code, please cite our paper: **Combo loss: Handling input and output imbalance in multi-organ segmentation** The corresponding bibtex entry is: ``` @article{taghanaki2019combo, title={Combo loss: Handling input and output imbalance in multi-organ segmentation}, author={Taghanaki, Saeid Asgari and Zheng, Yefeng and Zhou, S Kevin and Georgescu, Bogdan and Sharma, Puneet and Xu, Daguang and Comaniciu, Dorin and Hamarneh, Ghassan}, journal={Computerized Medical Imaging and Graphics},
1,447
ash3n/One-shot-Memory-Augmented-NN
['one shot learning']
['One-shot Learning with Memory-Augmented Neural Networks']
MANN.py utilys.py MANN one_hot extend_generation shuffle_xy extend_children load_image concat rand scan tfloat32 constant_initializer zeros xavier_initializer tensordot imread imresize append seed list shuffle range len
# One-shot Learning with Memory-Augmented Neural Networks TF implementation of Deepmind's memory augmented neural network for one-shot learning on the Omniglot dataset. "One-shot Learning with Memory-Augmented Neural Networks" https://arxiv.org/abs/1605.06065
1,448
ashedwards/ILPO
['imitation learning']
['Imitating Latent Policies from Observation']
models/bc.py models/image_policy.py results/plot_cartpole.py models/vector_policy_bco.py models/bco.py models/vector_bc.py train_expert/enjoy_cartpole.py models/vector_bco.py models/utils.py models/coin_utils.py models/image_policy_bc.py results/plot_acrobot.py train_expert/enjoy_acrobot.py models/vector_policy_bc.py models/vector_ilpo.py train_expert/train_acrobot.py models/image_bco.py models/image_bc.py results/plot_results.py train_expert/train_cartpole.py models/vector_policy.py models/image_ilpo.py models/image_policy_bco.py scripts/run_vector_policy_bco.py models/ilpo.py BC BCO lrelu deconv fully_connected preprocess conv check_image deprocess bias_variable weight_variable ILPO main ImageBC main ImageBCO main ImageILPO Policy Policy Policy process_obs lrelu deconv fully_connected preprocess conv check_image deprocess bias_variable weight_variable main VectorBC main VectorBCO main VectorILPO Policy Policy Policy getdata getdata getdata main main main main callback truncated_normal constant flatten get_shape list set_shape assert_equal ImageBC run ImageILPO imread VectorBC VectorILPO str open savgol_filter append float range split load make str print write close sample step open mlp learn save
# Imitating Latent Policies from Observation (ILPO) [[Paper]](https://arxiv.org/abs/1805.07914) Ashley D. Edwards, Himanshu Sahni, Yannick Schroecker, Charles L. Isbell</br> Georgia Institute of Technology <img src="https://github.com/ashedwards/ILPO/blob/master/resources/network.png" width="600"> ## Abstract We describe a novel approach to imitation learning that infers latent policies directly from state observations. We introduce a method that characterizes the causal effects of unknown actions on observations while simultaneously predicting their likelihood. We then outline an action alignment procedure that leverages a small amount of environment interactions to determine a mapping between latent and real-world actions. We show that this corrected labeling can be used for imitating the observed behavior, even though no expert actions are given. We evaluate our approach within classic control and photo-realistic visual environments and demonstrate that it performs well when compared to standard approaches. If you use any of the code here in your own work, you may cite: @article{edwards2018imitating, title={Imitating Latent Policies from Observation}, author={Edwards, Ashley D and Sahni, Himanshu and Schroecker, Yannick and Isbell, Charles L},
1,449
ashesknight/tof-mpi-remove
['denoising']
['Spatial Hierarchy Aware Residual Pyramid Network for Time-of-Flight Depth Denoising']
sim/kinect_spec.py pipe/model.py pipe/module/conv.py pipe/network/sample_pyramid_add_kpn_FiveLevel.py pipe/kinect_init.py pipe/loss.py pipe/network/sample_pyramid_add_kpn.py sim/deeptof_prepare.py pipe/module/bottleneck.py sim/gen_approx_motion.py pipe/module/activation.py pipe/network/dear_kpn_no_rgb.py pipe/module/dfus_block.py sim/tof_class.py sim/utils.py sim/phasor_prepare.py pipe/kinect_pipeline.py pipe/dataset.py pipe/network/sample_pyramid_add_kpn_NoFusion.py pipe/network/sample_pyramid_add_kpn_FourLevel.py pipe/start.py sim/kinect_prepare.py pipe/network/sample_pyramid_add_kpn_NoRefine.py pipe/metric.py pipe/network/sample_pyramid_add_kpn_NoRefineFusion.py pipe/network/dear_kpn_no_rgb_DeepToF.py imgs_input_fn_TB imgs_input_fn bilinear_interpolation preprocessing im2col imgs_input_fn_FT3 preprocessing_FLAT preprocessing_deeptof imgs_input_fn_FLAT colorize_img plane_correction get_input_fn preprocessing_TB preprocessing_tof_FT3 imgs_input_fn_deeptof filterPixelStage2 kinect_pipeline processMeasurementTriple processPixelStage1_mat processPixelStage2 processPixelStage1 filterPixelStage1 kinect_mask_tensor SSIM_l1 sobel_edges sign_and_elementwise huber mean_l1 smoothness SSIM sum_huber mean_huber l1 mean_SSIM_l1 ZNCC sum_l2 mean_SSIM mean_l2 l2 get_supervised_loss cos_similarity sobel_gradient_loss sum_l1 get_metrics_mae mean_SSIM SSIM get_metrics_psnr get_network tof_net_func dataset_training dataset_output dataset_testing sigmoid leaky_relu relu bottleneck conv transpose_conv dfus_block dfus_block_add_output_conv dear_kpn_no_rgb depth_output_subnet unet_subnet dear_kpn_no_rgb_DeepToF depth_output_subnet unet_subnet residual_output_subnet depth_output_subnet unet_subnet depth_residual_regresssion_subnet feature_extractor_subnet dear_kpn sample_pyramid_add_kpn residual_output_subnet sample_pyramid_add_kpn_FiveLevel depth_output_subnet unet_subnet depth_residual_regresssion_subnet feature_extractor_subnet dear_kpn residual_output_subnet depth_output_subnet sample_pyramid_add_kpn_FourLevel unet_subnet depth_residual_regresssion_subnet feature_extractor_subnet dear_kpn residual_output_subnet sample_pyramid_add_kpn_NoFusion depth_output_subnet unet_subnet depth_residual_regresssion_subnet feature_extractor_subnet dear_kpn residual_output_subnet sample_pyramid_add_kpn_NoRefine depth_output_subnet unet_subnet depth_residual_regresssion_subnet feature_extractor_subnet dear_kpn residual_output_subnet depth_output_subnet unet_subnet depth_residual_regresssion_subnet feature_extractor_subnet dear_kpn sample_pyramid_add_kpn_NoRefineFusion gen_raw gen_dataset gen_approx_motion data_augment_full testing gen_gt_dyn data_augment_ideal gen_gt gen_raw_dyn data_augment_reflection metric_valid gen_raw testing_msk data_augment_noise pixel_class cam_sin_func kinect_mask kinect_sin_spec kinect_real_spec compute_cor nonlinear_adjust kinect_real_tf_spec gen_raw gen_dataset phasor kinect_real deeptof cam_real_mult cam_sin cam_real_np kinect_sin cam_real_fast cam_real kinect_real_tf cam_baseline gen_cmd manual_corr tof_str map2mesh find_nonzero vis_prop tile_images convert_to_tensor list tan sqrt cast meshgrid expand_dims range constant to_int32 cast gather get_cmap round resize_images stack dist_to_depth kinect_mask_tensor append stack reduce_mean range dist_to_depth TFRecordDataset shuffle map repeat prefetch batch TFRecordDataset shuffle map repeat prefetch batch TFRecordDataset shuffle map repeat prefetch batch TFRecordDataset shuffle map repeat prefetch batch TFRecordDataset shuffle map repeat prefetch 
batch reshape squeeze shape stack gather_nd cast floor tile add_n meshgrid zeros expand_dims range split int reshape concat pad stack cast gather_nd tile meshgrid zeros expand_dims range append int concat less float32 greater cast processPixelStage1_mat processPixelStage2 zeros expand_dims kinect_mask_tensor convert_to_tensor float32 cast zeros range append stack processMeasurementTriple expand_dims sqrt stack constant sqrt stack cast less expand_dims abs minimum exp abs reduce_max greater reduce_sum atan2 maximum sqrt cast floor less float reduce_min log minimum constant exp ones concat delete reduce_sum flatten sqrt stack reduce_prod cast tile append less expand_dims zeros concat reduce_max reduce_prod abs squeeze conv2d cast append less expand_dims greater_equal sqrt stack tile equal minimum constant greater maximum zeros reduce_min array ones_like ones_like greater abs square where ones_like ones_like sqrt reduce_mean square reduce_sum avg_pool l1 pad SSIM exp gradient_y reduce_mean gradient_x abs ones_like ones_like ones_like ones_like sigmoid sign l2_normalize depthwise_conv2d_native expand_dims constant tile ones_like sobel_edges print list format keys mean_squared_error log ones_like reshape sort reduce_sum mean_absolute_error cast gather abs print list format keys get_global_step concat reduce_max get_network image exponential_decay abs get_collection identity kinect_pipeline colorize_img cast append expand_dims SummarySaverHook get_metrics_mae get_supervised_loss float32 AdamOptimizer UPDATE_OPS reduce_min scalar split train_and_evaluate Estimator EvalSpec MirroredStrategy TrainSpec RunConfig evaluate Estimator MirroredStrategy RunConfig save fromarray str list RunConfig squeeze Estimator shape range predict tofile astype mkdir join print convert float32 MirroredStrategy len relu str relu identity conv2d eval batch_normalization append zeros_initializer range len str replace exec tolist identity conv2d eval get_variable_names append zeros_initializer str zeros_initializer eval conv2d_transpose str replace concat exec tolist identity conv get_variable_names append str replace concat exec tolist identity conv get_variable_names append str conv2d_transpose replace max_pooling2d concat exec tolist identity conv2d eval get_variable_names append zeros_initializer range len str replace exec tolist conv2d eval get_variable_names append zeros_initializer range len im2col identity reduce_sum depth_output_subnet unet_subnet expand_dims abs expand_dims depth_output_subnet identity unet_subnet str replace max_pooling2d exec tolist identity conv2d eval get_variable_names append zeros_initializer range len str replace max_pooling2d exec tolist identity conv2d eval get_variable_names append zeros_initializer range len im2col identity reduce_sum depth_output_subnet unet_subnet abs str replace max_pooling2d exec tolist identity conv2d eval get_variable_names append zeros_initializer range len residual_output_subnet concat resize_bicubic dear_kpn depth_residual_regresssion_subnet feature_extractor_subnet expand_dims range append len residual_output_subnet concat resize_bicubic dear_kpn depth_residual_regresssion_subnet feature_extractor_subnet expand_dims range append len residual_output_subnet concat resize_bicubic dear_kpn depth_residual_regresssion_subnet feature_extractor_subnet expand_dims range append len concat resize_bicubic dear_kpn depth_residual_regresssion_subnet feature_extractor_subnet expand_dims range append len residual_output_subnet concat resize_bicubic 
depth_residual_regresssion_subnet feature_extractor_subnet expand_dims range append len concat resize_bicubic depth_residual_regresssion_subnet feature_extractor_subnet expand_dims range append len deepcopy process_gain_noise imresize print reshape arctan2 abs astype float32 tofile sum makedirs glob write close eval gen_raw mkdir open range len arange add_subplot axis where flatten nanargmin abs max griddata svd show dist_to_depth ones argmin matmul shape uniform imshow title meshgrid append expand_dims sum range process_gt_delay_vig_dist_surf_mapmax normal matrix_power imresize glob concatenate astype choice mean sqrt stack nan T print reshape float32 viz_flow figure zeros array len kinect_mask len cam int32 tile append expand_dims range print tofile astype float32 makedirs deepcopy kinect_mask print makedirs astype tofile stack cam int32 tile append expand_dims range len print tofile astype float32 makedirs concatenate print astype float32 stack cam zeros concatenate print astype float32 stack cam zeros concatenate print astype float32 stack cam zeros concatenate print astype float32 stack cam zeros ones shape abs dist_to_depth pixel_class evaluate imresize reshape tofile astype float32 len eval cam stack compute_cor nonlinear_adjust range append makedirs add_subplot axis max dist_to_depth colorbar imshow title savefig append range imresize close eval stack compute_cor deepcopy evaluate reshape min metric_valid cam figure nonlinear_adjust len array kinect_mask mod arange concatenate matmul stack sin append expand_dims zeros range minimum arange loadtxt maximum abs array range loadtxt array minimum arange maximum abs array range arctan2 reshape transpose sqrt stack floor append range uint8 str imwrite min func mkdir max meshgrid sqrt arange append join FONT_HERSHEY_DUPLEX int str namedWindow putText COLOR_GRAY2BGR astype float32 waitKey imshow WINDOW_NORMAL cvtColor sum range zeros sum arange nan reshape range zeros
# ECCV2020_Spatial Hierarchy Aware Residual Pyramid Network for Time-of-Flight Depth Denoising This repository provides the source code of SHARP-Net for time-of-flight (ToF) noise removal. The paper can be downloaded from https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123690035.pdf ## How to use the code Code running environment: ``` tensorflow-gpu==1.12.0 ``` The Google Drive link includes tfrecords datasets, tfrecords conversion scripts and pretrained models. However, the training set of the tof_TF3 dataset is too large, so we only provide the conversion script. ```
1,450
ashishrana160796/nalu-cell-counting
['data augmentation']
['Systematically designing better instance counting models on cell images with Neural Arithmetic Logic Units']
research-paper-utils/data-aug-img-util.py exploring-cell-counting/model.py exploring-cell-counting/generator.py research-paper-utils/nalu-comparison-util.py research-paper-utils/bbbc005-dataset-util.py research-paper-utils/zoomed-comparison-util.py dataset-prep-utils/generator.py flip_axis img_to_array random_channel_shift Iterator transform_matrix_offset_center NumpyArrayIterator ImageDataGenerator array_to_img apply_transform flip_axis img_to_array random_channel_shift Iterator transform_matrix_offset_center NumpyArrayIterator ImageDataGenerator array_to_img apply_transform buildmodel_u_net_nalu buildmodel_fcrn_nalu fcrn_base NALU _conv_bn_lin _conv_bn_relu_x2 fcrn_nalu _conv_bn_relu u_net_nalu U_net_base show_images clipped_zoom stack rollaxis dot float array stack rollaxis swapaxes transpose image_dim_ordering reshape transpose asarray image_dim_ordering concatenate merge concatenate SGD Model fcrn_nalu Input compile SGD Model Input u_net_nalu compile show set_size_inches text add_subplot gray title imshow savefig figure zip ceil get_size_inches float array enumerate len int round ndim zoom
## Exploring Cell counting with Neural Arithmetic Logic Units The main problem for neural network models trained to count instances is that generalization error increases whenever the test range exceeds the training range, i.e. they do not generalize well outside the training range. So, we trained regular CNN architectures with the small addition of numerically biased layers, which in turn increased accuracy when predicting cell counts. For validation we used a custom dataset with higher counts in the test images, and even then our model's performance was sustained. In this repository we provide the implementation of the models described in the research paper. __Note:__ Within 24 hours the final commit containing all the necessary code will be added to this repository. ## Python module versions used for implementation: pip list * jupyter 1.0.0 * Keras 2.3.1 * numpy 1.18.2 * imageio 2.4.1 * opencv-python 4.1.2.30 * Pillow 7.0.0
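A hedged numpy sketch of a NALU forward pass (Trask et al.), the kind of "numerically biased layer" referred to in the entry above. The weights here are random; in the repo they are learned inside Keras models (see the NALU entry in the file list), so this is only a structural illustration.

```python
import numpy as np

def nalu_forward(x, W_hat, M_hat, G, eps=1e-7):
    W = np.tanh(W_hat) * (1 / (1 + np.exp(-M_hat)))   # constrained weights near {-1, 0, 1}
    a = x @ W                                         # additive (NAC) path
    m = np.exp(np.log(np.abs(x) + eps) @ W)           # multiplicative path in log space
    g = 1 / (1 + np.exp(-(x @ G)))                    # learned gate between the two paths
    return g * a + (1 - g) * m

rng = np.random.default_rng(0)
x = rng.random((4, 3))                                      # batch of 4 inputs, 3 features
params = [rng.standard_normal((3, 2)) for _ in range(3)]    # W_hat, M_hat, G
print(nalu_forward(x, *params).shape)                       # (4, 2)
```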
1,451
ashwin9999/Capstone-Speech-to-SQL
['speech recognition']
['Wav2Letter: an End-to-End ConvNet-based Speech Recognition System']
train.py test.py record.py preprocess.py test_executor.py speech_model.py speech_input.py audio.py AudioRecorder Preprocess DatasetReader calculatePowerSpectrogram letterToId recursiveTraverse sentenceToIds Record BaseInputLoader InputBatchLoader SingleInputLoader Wav2LetterModel create_default_model SpeechModel TestStatistics Test TestExecutor power_to_db print mean shape melspectrogram std filter walk Wav2LetterModel finalize add_training_ops add_decoding_ops
# End-to-end automatic speech recognition (ASR) using a 1D convolutional neural network **Author:** Ashwin Mishra\ **Date:** May 2019 This is an application written in Python that translates human voice to text. It uses the deep learning toolkit [TensorFlow](https://www.tensorflow.org/tutorials/images/cnn) to create and train a convolutional neural network. Unlike hidden Markov models (HMM), this application uses letters (graphemes) to train the network instead of phonetic transcriptions (phonemes). This application was built as part of my senior capstone project at Youngstown State University. ## Digitizing analog waves The first step in speech processing is converting analog audio waves into digital data. To do this, the application first transforms the speech signal into a power spectrogram. A power spectrogram is a 2-D array of size `(x, y)` where `x` = length of audio and `y` = mel size. To transform audio files into power spectrograms, `librosa` was used. [Librosa](https://librosa.org/doc/latest/index.html) is a Python library built for music and audio analysis. ![](assets/hello-spect.png) ![](assets/spect.png) <br> Fig: 2-D array transformation for the spectrogram
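A minimal librosa sketch of the mel power spectrogram described in the entry above, computed on a synthetic tone instead of a recorded utterance. The sample rate and n_mels values are illustrative assumptions; the repo's calculatePowerSpectrogram (see the file list) wraps the same two calls, melspectrogram and power_to_db.

```python
import numpy as np
import librosa

sr = 16000
t = np.linspace(0, 1, sr, endpoint=False)
y = 0.5 * np.sin(2 * np.pi * 440 * t)                       # 1 s, 440 Hz tone

S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128)  # (n_mels, frames)
S_db = librosa.power_to_db(S, ref=np.max)                   # log-scale power
print(S_db.T.shape)   # transpose -> (frames, n_mels), i.e. (audio length, mel size)
```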
1,452
ashwin9999/speech-recognition-CNN
['speech recognition']
['Wav2Letter: an End-to-End ConvNet-based Speech Recognition System']
train.py test.py record.py preprocess.py test_executor.py speech_model.py speech_input.py audio.py AudioRecorder Preprocess DatasetReader calculatePowerSpectrogram letterToId recursiveTraverse sentenceToIds Record BaseInputLoader InputBatchLoader SingleInputLoader Wav2LetterModel create_default_model SpeechModel TestStatistics Test TestExecutor power_to_db print mean shape melspectrogram std filter walk Wav2LetterModel finalize add_training_ops add_decoding_ops
# End-to-end automatic speech recognition (ASR) using a 1D convolutional neural network **Author:** Ashwin Mishra\ **Date:** May 2019 This is an application written in Python that translates human voice to text. It uses the deep learning toolkit [TensorFlow](https://www.tensorflow.org/tutorials/images/cnn) to create and train a convolutional neural network. Unlike hidden Markov models (HMM), this application uses letters (graphemes) to train the network instead of phonetic transcriptions (phonemes). This application was built as part of my senior capstone project at Youngstown State University. ## Digitizing analog waves The first step in speech processing is converting analog audio waves into digital data. To do this, the application first transforms the speech signal into a power spectrogram. A power spectrogram is a 2-D array of size `(x, y)` where `x` = length of audio and `y` = mel size. To transform audio files into power spectrograms, `librosa` was used. [Librosa](https://librosa.org/doc/latest/index.html) is a Python library built for music and audio analysis. ![](assets/hello-spect.png) ![](assets/spect.png) <br> Fig: 2-D array transformation for the spectrogram
1,453
asoroa/ukb
['entity disambiguation']
['Studying the Wikipedia Hyperlink Graph for Relatedness and Disambiguation']
contrib/python-server/example.py contrib/python-server/ukbprotocol.py start_shutdown_server start_ukb_server test UkbSession call join format print call UkbSession str recv print send
# UKB: Graph Based Word Sense Disambiguation and Similarity [![Build Status](https://travis-ci.org/asoroa/ukb.svg?branch=master)](https://travis-ci.org/asoroa/ukb) [![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) UKB is a collection of programs for performing graph-based **Word Sense Disambiguation** and **lexical similarity/relatedness** using a pre-existing knowledge base. Currently, UKB runs on **linux** and **macos**. UKB has been developed by the IXA group at the University of the Basque Country. UKB applies the so-called Personalized PageRank on a Lexical Knowledge Base (LKB) to rank the vertices of the LKB and thus perform disambiguation. The details of the method are described in [1]. It has also been applied to **WSD on specific domains** [2,5], **Named Entity Disambiguation** [6] and to obtain **graph embeddings** [7]. The algorithm can also be used to calculate lexical similarity/relatedness of words/sentences. See [3,4,6] for applications of UKB to similarity. Visit http://ixa2.si.ehu.es/ukb/ for more information about UKB. The latest source code can be found via git here: https://github.com/asoroa/ukb.git ## References - [1] Eneko Agirre and Aitor Soroa. 2009. Personalizing PageRank for Word Sense Disambiguation. Proceedings of the 12th conference of the European chapter of the Association for Computational Linguistics (EACL-2009). Athens, Greece.
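UKB itself is a C++ toolkit; below is only a toy networkx sketch of the core idea stated in the entry above: run Personalized PageRank over a sense graph, seeding the random walk at the senses of the context words, then pick the highest-ranked sense of the target word. The graph and sense names are made up for illustration.

```python
import networkx as nx

G = nx.Graph()
G.add_edges_from([
    ("bank#finance", "money#1"), ("bank#finance", "loan#1"),
    ("bank#river", "water#1"),   ("bank#river", "shore#1"),
    ("money#1", "loan#1"),
])

# context "I deposited money at the bank": put the seed mass on the context word's sense
personalization = {node: 0.0 for node in G}
personalization["money#1"] = 1.0

ranks = nx.pagerank(G, alpha=0.85, personalization=personalization)
best = max(["bank#finance", "bank#river"], key=ranks.get)
print(best)   # -> bank#finance
```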
1,454
aspnetcs/myim2latex-tensorflow-docker
['optical character recognition']
['Image-to-Markup Generation with Coarse-to-Fine Attention']
data_loaders.py im2markup/scripts/evaluation/render_latex.py im2markup/scripts/preprocessing/preprocess_images.py im2markup/scripts/preprocessing/preprocess_filter.py im2markup/scripts/utils/image_utils.py im2markup/scripts/evaluation/evaluate_image.py im2markup/scripts/utils/utils.py im2markup/scripts/preprocessing/generate_latex_vocab.py tflib/network.py predict.py im2markup/scripts/evaluation/render_html.py attention.py tflib/ops.py im2markup/scripts/evaluation/LevSeq.py tflib/__init__.py im2markup/scripts/preprocessing/preprocess_formulas.py im2markup/scripts/evaluation/evaluate_text_edit_distance.py im2markup/scripts/evaluation/evaluate_bleu.py score predict CustomRunner data_iterator show run_demo main process_args main process_args img_edit_distance img_edit_distance_file main process_args StringMatcher main process_args main_parallel main process_args output_err main_parallel main process_args main process_args main is_ascii process_args downsample_image pad_group_image pad_image crop_image run alex_net alex_net_att vgg16 im2latex_cnn norm im2latexAttentionCell initializer BiLSTM Embedding max_pool LSTMCell conv2d FreeRunIm2LatexAttention FreeRunIm2LatexAttentionCell LSTM GRUCell im2latexAttention GRU Linear delete_all_params print_model_settings param params_with_name flatten save resize exists run show list sorted ones transpose tolist shape sleep ceil append range NEAREST astype choice zip keys enumerate int print convert system blend time print data_iterator mean append run list print transpose tolist append zeros keys range enumerate len replace print astype copy run show str glob system pad_group_image choice downsample_image eval loads sleep input crop_image parse_args add_argument ArgumentParser check_output process_args setLevel open basicConfig addHandler label_path dirname setFormatter close StreamHandler realpath info INFO join result_path setdefault write data_path Formatter basename glob img_edit_distance_file float images_dir int uint8 asarray join transpose StringMatcher astype levenshtein get_opcodes append make_strs enumerate convert exists set_defaults map ThreadPool num_threads output_dir makedirs remove system readlines info output_err replace exists end strip finditer crop_image run sorted list append keys image_dir filter output_path decode remove move error call output_file input_file max asarray convert min where save crop print new paste save open range len new save paste open LANCZOS size save resize open communicate start Popen Timer norm dropout relu reshape max_pool conv2d Linear conv2d norm max_pool relu conv2d max_pool relu conv2d max_pool relu sqrt param reshape stack tile zeros param reshape stack tile zeros param reshape LSTMCell stack tile bidirectional_dynamic_rnn zeros expand_dims dynamic_rnn param im2latexAttentionCell reshape transpose astype scan tile expand_dims range dynamic_rnn param reshape transpose concat astype scan tile FreeRunIm2LatexAttentionCell zeros expand_dims range Variable clear print sorted format
# im2latex tensorflow implementation #docker, enjoy, good luck!!! docker pull aspnetcs88/dlp:latest-gpu-py3-im2latex-gpu-allinone-v1.0 nvidia-docker run -it aspnetcs88/dlp:latest-gpu-py3-im2latex-gpu-allinone-v1.0 This is a tensorflow implementation of the HarvardNLP paper - What You Get Is What You See: A Visual Markup Decompiler. This is also a potential solution to OpenAI's Requests For Research Problem - [im2latex](https://openai.com/requests-for-research/#im2latex) The paper (http://arxiv.org/pdf/1609.04938v1.pdf) provides technical details of the model. The original Torch implementation of the paper is at [https://github.com/harvardnlp/im2markup/blob/master/] What You Get Is What You See: A Visual Markup Decompiler Yuntian Deng, Anssi Kanervisto, and Alexander M. Rush
1,455
aspuru-guzik-group/gryffin
['density estimation']
['Gryffin: An algorithm for Bayesian optimization of categorical variables informed by expert knowledge']
src/gryffin/acquisition/__init__.py src/gryffin/database_handler/json_writers/db_writer.py src/gryffin/utilities/config_parser.py src/gryffin/observation_processor/chimera.py src/gryffin/setup.py src/gryffin/utilities/pickle_parser.py src/gryffin/random_sampler/random_sampler.py src/gryffin/database_handler/sqlite_interface/sqlite_interface.py src/gryffin/acquisition/numpy_optimizers/adam_optimizer.py src/gryffin/bayesian_network/edward_interface/__init__.py src/gryffin/database_handler/db_cache.py examples/synthetic_functions/category_writer.py src/gryffin/database_handler/pandas_writers/db_writer.py src/gryffin/acquisition/acquisition.py src/gryffin/utilities/defaults.py src/gryffin/utilities/exceptions.py src/gryffin/database_handler/db_werkzeug.py src/gryffin/descriptor_generator/descriptor_generator.py src/gryffin/database_handler/json_writers/__init__.py versioneer.py src/gryffin/database_handler/pickle_writers/db_writer.py src/gryffin/bayesian_network/tfprob_interface/tfprob_interface.py src/gryffin/database_handler/__init__.py src/gryffin/database_handler/sqlite_interface/__init__.py examples/synthetic_functions/run_example.py src/gryffin/_version.py src/gryffin/acquisition/numpy_optimizers/abstract_optimizer.py src/gryffin/observation_processor/__init__.py src/gryffin/bayesian_network/edward_interface/numpy_graph.py src/gryffin/gryffin.py src/gryffin/acquisition/numpy_optimizers/naive_discrete_optimizer.py src/gryffin/bayesian_network/__init__.py src/gryffin/database_handler/sqlite_interface/sqlite_operations.py src/gryffin/random_sampler/sobol.py src/gryffin/acquisition/numpy_optimizers/__init__.py src/gryffin/sample_selector/sample_selector.py src/gryffin/utilities/decorators.py src/gryffin/utilities/json_parser.py tests/test_gryffin.py src/gryffin/database_handler/database_handler.py src/gryffin/bayesian_network/bayesian_network.py src/gryffin/database_handler/pandas_writers/__init__.py src/gryffin/descriptor_generator/__init__.py src/gryffin/utilities/logger.py src/gryffin/random_sampler/uniform.py src/gryffin/bayesian_network/edward_interface/edward_interface.py src/gryffin/utilities/__init__.py examples/synthetic_functions/benchmark_functions.py src/gryffin/bayesian_network/edward_interface/_edward_network.py src/gryffin/bayesian_network/tfprob_interface/numpy_graph.py src/gryffin/observation_processor/observation_processor.py src/gryffin/bayesian_network/model_details.py src/gryffin/database_handler/sqlite_interface/sqlite_database.py src/gryffin/observation_processor/testing_chimera.py src/gryffin/sample_selector/__init__.py src/gryffin/descriptor_generator/generation_process.py src/gryffin/utilities/category_parser.py setup.py src/gryffin/bayesian_network/category_reshaper.py src/gryffin/database_handler/pickle_writers/__init__.py src/gryffin/__init__.py src/gryffin/acquisition/parameter_optimizer.py src/gryffin/acquisition/numpy_optimizers/naive_categorical_optimizer.py src/gryffin/bayesian_network/tfprob_interface/__init__.py src/gryffin/random_sampler/__init__.py requirements readme versions_from_parentdir do_vcs_install get_cmdclass get_version run_command get_root scan_setup_py render_git_describe_long render_pep440_old do_setup write_to_version_file git_pieces_from_vcs render VersioneerBadRootError NotThisMethod git_versions_from_keywords render_git_describe get_versions versions_from_file plus_or_dot render_pep440_pre get_config_from_root VersioneerConfig render_pep440_post git_get_keywords render_pep440 register_vcs_handler Dejong Ackley RandomCorrelated 
CategoricalEvaluator Michalewicz Camel Slope RandomUncorrelated CategoryWriter Gryffin versions_from_parentdir run_command render_git_describe_long render_pep440_old git_pieces_from_vcs get_keywords render NotThisMethod get_config git_versions_from_keywords render_git_describe get_versions plus_or_dot render_pep440_pre VersioneerConfig render_pep440_post git_get_keywords render_pep440 register_vcs_handler Acquisition ParameterOptimizer AbstractOptimizer AdamOptimizer func NaiveCategoricalOptimizer func NaiveDiscreteOptimizer BayesianNetwork CategoryReshaper EdwardNetwork sigmoid NumpyGraph EdwardNetwork sigmoid NumpyGraph TfprobNetwork DatabaseHandler DB_Cache DB_Werkzeug DB_Writer DB_Writer CsvWriter Writer ExcelWriter DB_Writer SqliteDatabase SQLiteDatabase FetchEntries AddEntry UpdateEntries DescriptorGenerator Generator Chimera ObservationProcessor RandomSampler SobolContinuous SobolCategorical UniformCategorical UniformContinuous UniformDiscrete SampleSelector CategoryParser ConfigParser Configuration thread safe_execute GryffinVersionError GryffinUnknownSettingsError GryffinValueError GryffinParseError AbstractError GryffinNotFoundError GryffinModuleError ParserJSON Logger ParserPickle test_import join print getcwd realpath dirname abspath normcase get join SafeConfigParser VersioneerConfig str decode print strip Popen readlines group search close startswith open get join sorted replace print strip set startswith int replace print endswith strip search group run_command relpath readlines close write run_command startswith append open basename print dirname startswith append range S search M unlink print dumps render_pep440_old render_pep440_pre render_git_describe render_pep440_post render_pep440 render_git_describe_long get join versions_from_parentdir get_config_from_root get_keywords_f from_keywords_f tag_prefix print style versionfile_source render VCS get_root from_vcs_f versions_from_file parentdir_prefix join do_vcs_install get_config_from_root print versionfile_source set get_root dirname exists print set VersioneerConfig get_config git_pieces_from_vcs realpath verbose dirname split sum square
[![build](https://github.com/aspuru-guzik-group/gryffin/actions/workflows/continuous-integration.yml/badge.svg)](https://github.com/aspuru-guzik-group/gryffin/actions/workflows/continuous-integration.yml) [![Documentation Status](https://readthedocs.org/projects/gryffin/badge/?version=latest)](http://gryffin.readthedocs.io/?badge=latest) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) Gryffin: Bayesian Optimization of Continuous and Categorical Variables ====================================================================== Welcome to **Gryffin**! Designing functional molecules and advanced materials requires complex design choices: tuning continuous process parameters such as temperatures or flow rates, while simultaneously selecting catalysts or solvents. To date, the development of data-driven experiment planning strategies for
1,456
aspuru-guzik-group/phoenics
['density estimation', 'gaussian processes']
['PHOENICS: A universal deep Bayesian optimizer']
examples/application_robot_calibration/model_training/data_set/generate_indices.py phoenics/ObservationParser/observation_parser.py examples/optimization_periodic_parameters/periodic.py phoenics/Utils/utils.py examples/benchmark_functions.py examples/optimization_parallel/opt_synchronous_evaluations.py examples/optimization_parallel/opt_asynchronous_evaluations.py phoenics/BayesianNeuralNetwork/bayesian_neural_network.py phoenics/file_logger.py examples/optimization_parallel/branin.py phoenics/Acquisitions/sampler.py examples/application_robot_calibration/model_training/single_model.py phoenics/BayesianNeuralNetwork/distributions.py chimera/chimera.py examples/optimization_periodic_parameters/optimize_periodic.py examples/optimization_sequential/optimize_branin.py phoenics/Acquisitions/optimizer.py phoenics/phoenics.py examples/optimization_sequential/branin.py examples/optimization_multiple_objectives/fonseca.py phoenics/BayesianNeuralNetwork/edward_interface.py phoenics/Utils/optimization_results.py phoenics/BayesianNeuralNetwork/pymc3_interface.py phoenics/ObservationParser/hierarchies.py phoenics/RandomNumberGenerator/random_number_generator.py examples/application_robot_calibration/run_robot_calibration.py phoenics/Acquisitions/optimization_algorithms.py examples/application_robot_calibration/model_training/model.py examples/application_robot_calibration/robot_emulator.py examples/optimization_multiple_objectives/optimize_multi_objective.py phoenics/SampleSelector/sample_selector.py phoenics/BayesianNeuralNetwork/pymc3_interface_backup.py obj_1 Chimera obj_0 obj_2 schwefel_function hyper_ellipsoid rosenbrock_function double_well ackley_path_function dejong discrete_ackley discrete_michalewicz narrow_funnel linear_funnel rastrigin_function discrete_valleys RobotEmulator OptimizationManager Model SingleModel fonseca OptimizationManager branin OptimizationManager OptimizationManager OptimizationManager periodic branin OptimizationManager FileLogger FileHandler Phoenics Optimizer Adam SimpleDiscrete LBFGS loss SimpleCategorical ParameterOptimizer AcquisitionFunctionSampler BayesianNeuralNetwork DiscreteLaplace EdwardNetwork Pymc3Network Pymc3Network obj_1 obj_0 obj_2 HierarchicalLossShaper heavyside ObservationParser RandomNumberGenerator SampleSelector OptimizeResult pickle_load pickle_dump VarDictParser ObsDictParser ParserJSON sum array array arange enumerate len array enumerate array enumerate len array enumerate exp cos pi sqrt float sum array len amin array amin array amin array linspace amin array amin array amin array exp array cos cos pi sin ceil abs dump open
# Phoenics ![](https://img.shields.io/github/release/aspuru-guzik-group/phoenics.svg?style=flat) ![](https://img.shields.io/github/license/aspuru-guzik-group/phoenics.svg?style=flat) ![](https://img.shields.io/github/issues-raw/aspuru-guzik-group/phoenics.svg?style=flat) Phoenics is an open source optimization algorithm combining ideas from Bayesian optimization with Bayesian Kernel Density estimation [1]. It performs global optimization on expensive-to-evaluate objectives, such as physical experiments or demanding computations. Phoenics supports sequential and batch optimizations and allows for the simultaneous optimization of multiple objectives via the Chimera scalarizing function [2]. Check out the `examples` folder for detailed descriptions and code examples for: | Example | Link | |:--------|:-----| | Sequential optimization | [examples/optimization_sequential](https://github.com/aspuru-guzik-group/phoenics/tree/master/examples/optimization_sequential) | | Parallelizable batch optimization | [examples/optimization_parallel](https://github.com/aspuru-guzik-group/phoenics/tree/master/examples/optimization_parallel) |
1,457
asreview/paper-asreview-introduction
['active learning']
['Open Source Software for Efficient and Transparent Reviews']
Visualization/scripts/ace_basic.py Visualization/scripts/virus_basic.py Visualization/scripts/hall_basic.py Visualization/scripts/ptsd_basic.py Visualization/scripts/plot.py _add_RRF _add_WSS _add_recall Analysis _add_random Plot dict text recall_at_limit plot wss text dict plot dict text rrf plot dict text
# Scripts for '*ASReview: Open Source Software for Efficient and Transparent Active Learning for Systematic Reviews*' This repository contains the scripts of the simulation study found in the paper *ASReview: Open Source Software for Efficient and Transparent Active Learning for Systematic Reviews*. This paper introduces the [ASReview project](https://github.com/asreview) and the [ASReview LAB software](https://github.com/asreview/asreview). The results of the simulation study are available via: [10.17605/OSF.IO/2JKD6](https://www.doi.org/10.17605/OSF.IO/2JKD6). :raised_hand: The scripts in this repository make use of ASReview version 0.7.2. Many improvements were made to the ASReview software afterward. We encourage you to use the [latest version](https://pypi.org/project/asreview/) for new studies. See the [extensive ASReview documentation](https://asreview.readthedocs.io/en/latest/) for all features of ASReview for screening and simulating. :arrows_counterclockwise: A persistent version of the scripts can be found on Zenodo `Ferdinands et al. (2020, September 11). Scripts for 'ASReview: Open Source Software for Efficient and Transparent Active Learning for Systematic Reviews' (Version v1.0.1). Zenodo.` http://doi.org/10.5281/zenodo.4024122. ## Installation Running this simulation study requires Python 3.6+. The results in this repository are generated with ASReview v0.7.2. To run the code in parallel, you also need an implementation of the MPI standard. The best-known implementation is [OpenMPI](https://www.open-mpi.org/) (this is not a python package and should be installed separately). Install the Python dependencies with ``` pip install -r requirements.txt ```
1,458
assiaben/hed
['boundary detection', 'edge detection']
['Holistically-Nested Edge Detection']
python/caffe/io.py python/caffe/test/test_python_layer.py scripts/download_model_binary.py python/caffe/net_spec.py examples/hed/solve.py python/caffe/test/test_net.py tools/extra/resize_and_crop_images.py python/draw_net.py python/caffe/test/test_net_spec.py src/caffe/test/test_data/generate_sample_data.py python/caffe/draw.py python/caffe/pycaffe.py tools/extra/extract_seconds.py scripts/cpp_lint.py python/classify.py examples/hed/run.py python/caffe/test/test_solver.py examples/hed/time_perf.py python/caffe/classifier.py python/caffe/test/test_python_layer_with_param_str.py tools/extra/parse_log.py python/caffe/__init__.py python/caffe/test/test_layer_type_list.py scripts/copy_notebook.py examples/hed/add_semantic.py python/caffe/detector.py python/detect.py examples/hed/convert2tf.py examples/hed/run_cmu.py plot_single_scale assemble_multiscale main plot_single_scale assemble_multiscale interp_surgery upsample_filt plot_single_scale assemble_multiscale main main main parse_args Classifier Detector get_edge_label draw_net get_layer_label get_pydot_graph choose_color_by_layertype get_pooling_types_dict draw_net_to_file Transformer blobproto_to_array datum_to_array array_to_blobproto arraylist_to_blobprotovecor_str array_to_datum resize_image blobprotovector_str_to_arraylist load_image oversample Layers Function Parameters Top NetSpec assign_proto param_name_dict to_proto _Net_blobs _Net_forward_all _Net_set_input_arrays _Net_backward _Net_params _Net_forward _Net_outputs _Net_forward_backward_all _Net_blob_loss_weights _Net_batch _Net_inputs TestLayerTypeList simple_net_file TestNet lenet TestNetSpec silent_net anon_lenet exception_net_file parameter_net_file SimpleLayer TestPythonLayer ParameterLayer python_net_file ExceptionLayer SimpleParamLayer TestLayerWithParam python_param_net_file TestSolver ParseNolintSuppressions CheckVlogArguments CheckSectionSpacing FindNextMultiLineCommentEnd ReplaceAll CheckForFunctionLengths _SetOutputFormat _IsTestFilename _VerboseLevel CheckBraces RemoveMultiLineComments ResetNolintSuppressions CheckForNonStandardConstructs _SetVerboseLevel PrintUsage _NestingState CheckIncludeLine CheckAccess _CppLintState Search CheckInvalidIncrement RemoveMultiLineCommentsFromRange CleansedLines CheckForBadCharacters UpdateIncludeState FindPreviousMatchingAngleBracket CheckEmptyBlockBody FindNextMultiLineCommentStart Match _NamespaceInfo CheckMakePairUsesDeduction CheckCheck IsBlankLine _SetFilters ProcessLine _FunctionState CheckPosixThreading GetLineWidth GetHeaderGuardCPPVariable IsCppString _IncludeState CheckSpacing _ClassInfo CheckForCopyright IsErrorSuppressedByNolint ProcessFileData CheckForMultilineCommentsAndStrings CloseExpression _PreprocessorInfo _OutputFormat CheckForIncludeWhatYouUse CheckSpacingForFunctionCall FindEndOfExpressionInLine FindNextMatchingAngleBracket _SetCountingStyle ProcessFile _IncludeError CleanseRawStrings CheckAltTokens CheckForNewlineAtEOF ParseArguments CheckForNonConstReference PrintCategories _Filters main FilesBelongToSameModule CheckCStyleCast FileInfo _BlockInfo CheckForHeaderGuard CheckCaffeDataLayerSetUp ReverseCloseExpression CleanseComments _DropCommonSuffixes _ClassifyInclude CheckStyle CheckCaffeAlternatives FindStartOfExpressionInLine _ShouldPrintError CheckComment Error _GetTextInside CheckLanguage CheckCaffeRandom GetPreviousNonBlankLine reporthook parse_readme_frontmatter model_checks_out valid_dirname get_start_time extract_seconds extract_datetime_from_line get_log_created_year write_csv parse_log 
fix_initial_nan_learning_rate save_csv_files main parse_args parse_line_for_net_output ResizeCropImagesMapper PILResizeCrop OpenCVResizeCrop subplot set_xticklabels set_yticklabels print close imshow set_ticks_position figure savefig range len uint8 print hstack astype vstack max enumerate imwrite res_dir model forward exists pycaffe_folder set_device transpose imread TEST insert astype Net enumerate time uint8 print loadtxt reshape float32 set_mode_gpu prototxt gpu makedirs print shape upsample_filt model_def endswith ArgumentParser save mean_file channel_swap output_file dirname expanduser parse_args input_file predict Classifier set_mode_cpu load isdir add_argument pretrained_model len DataFrame Detector format to_hdf detect_selective_search mean set_index to_csv detect_windows read_csv add_argument ArgumentParser read NetParameter output_image_file rankdir Merge draw_net_to_file items list DESCRIPTOR batch_size str num_output get_pooling_types_dict add_edge get_edge_label list Dot get_layer_label values name choose_color_by_layertype Edge Node bottom append type layer add_node top shape BlobProto extend flat extend BlobProtoVector ParseFromString BlobProtoVector extend tostring shape Datum flat data len astype float32 tile zoom tuple resize fill empty array concatenate shape tile empty array LayerParameter list NetParameter _to_proto extend Counter OrderedDict values iteritems isinstance extend add getattr setattr items list layers index set outputs _forward len items list _backward layers inputs index set len items list asarray extend copy next _batch iter forward values len items list asarray backward extend next _batch zip_longest zip iter forward values len ascontiguousarray list concatenate iter num zeros next range values len NamedTemporaryFile str close write data Pooling pool1 conv2 pool2 ip1 relu1 SoftmaxWithLoss Convolution NetSpec DummyData ip2 ReLU InnerProduct label conv1 Pooling SoftmaxWithLoss Convolution DummyData ReLU InnerProduct data NetSpec DummyData Silence data2 error search add group clear compile compile compile SetOutputFormat SetCountingStyle SetFilters _Filters startswith IsErrorSuppressedByNolint _ShouldPrintError write IncrementErrorCount replace append Match group find startswith endswith range error FindNextMultiLineCommentEnd RemoveMultiLineCommentsFromRange FindNextMultiLineCommentStart rstrip find range len FindEndOfExpressionInLine range len FindStartOfExpressionInLine error min search I range len FileInfo RepositoryName sep sub ParseNolintSuppressions error startswith split GetHeaderGuardCPPVariable enumerate error enumerate error len error replace count error find error find error find error find error Search error match InnermostClass replace error escape Match Search error group Search Check error lines Count End group Begin NumLines Match raw_lines range Search error match group error Match group pop group append Search pop group append Search elided replace CheckSpacingForFunctionCall rfind error len group min CloseExpression NumLines sub find CheckComment Match range Search lines_without_raw_strings error group starting_linenum Match range Search error rfind len group ReverseCloseExpression Search Match CloseExpression find error Match CloseExpression find elided error strip group FindEndOfExpressionInLine find Match range CloseExpression len error Match finditer normalize isinstance GetLineWidth int InnermostClass CheckCheck error CheckAltTokens CheckBraces CheckSpacing CheckSectionSpacing CheckEmptyBlockBody CheckAccess 
GetHeaderGuardCPPVariable lines_without_raw_strings _DropCommonSuffixes RepositoryName match split CheckNextIncludeOrder CanonicalizeAlphabeticalOrder FileInfo error search group SetLastHeader match _ClassifyInclude Match pop end search set append values M rstrip replace CheckCStyleCast error _GetTextInside CheckIncludeLine search group lstrip startswith Match ResetSection Search split rfind error group ReverseCloseExpression lstrip findall Match range Search ReplaceAll error Match Search endswith replace setdefault group search CleanseComments open list FilesBelongToSameModule error search copy sub NumLines FullName keys range error search CheckPosixThreading ParseNolintSuppressions CheckVlogArguments CheckMakePairUsesDeduction CheckCaffeDataLayerSetUp CheckLanguage CheckInvalidIncrement CheckCaffeRandom CheckForNonConstReference check_fn Update CheckForNonStandardConstructs CheckStyle raw_lines CheckForMultilineCommentsAndStrings CheckCaffeAlternatives CheckForFunctionLengths CleansedLines _NestingState CheckForBadCharacters CheckForNewlineAtEOF _IncludeState RemoveMultiLineComments CheckForCopyright ResetNolintSuppressions CheckForHeaderGuard NumLines CheckCompletedBlocks CheckForIncludeWhatYouUse range ProcessLine _FunctionState Error rstrip endswith len write ProcessFileData _SetVerboseLevel range split write exit join write exit _VerboseLevel int getopt _SetOutputFormat set _SetVerboseLevel PrintCategories _SetFilters _OutputFormat PrintUsage _SetCountingStyle split getreader ParseArguments ResetErrorCounts stderr exit verbose_level PrintErrorCounts StreamReaderWriter ProcessFile getwriter int time write flush load join index int rfind datetime split getctime year strip extract_datetime_from_line get_start_time total_seconds strip write get_log_created_year close extract_datetime_from_line open float get_log_created_year compile fix_initial_nan_learning_rate search group OrderedDict append float join basename write_csv print excel parse_log save_csv_files output_dir logfile_path
## Added by Assia ### Install ``` git clone https://github.com/assiaben/hed.git cd hed make -j12 all tools pycaffe ``` ### Run Change the data path in `run.py`: ``` DATA_ROOT_DIR = '/home/gpu_user/assia/ws/datasets/kitti' ``` Then run it: ``` python run.py ```
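For reference, a single forward pass with pycaffe typically looks like the sketch below; the prototxt/caffemodel file names and the output blob name are placeholders, and `examples/hed/run.py` in this repo wraps the same steps with its own preprocessing:

```python
# Minimal pycaffe inference sketch for HED (file and blob names are placeholders --
# see examples/hed/run.py for the exact pipeline used here).
import numpy as np
import caffe

caffe.set_mode_gpu()
net = caffe.Net('deploy.prototxt', 'hed_pretrained.caffemodel', caffe.TEST)

im = caffe.io.load_image('input.jpg')               # HxWx3, RGB, values in [0, 1]
im = im[:, :, ::-1] * 255.0                         # convert to BGR, [0, 255]
im -= np.array((104.00699, 116.66877, 122.67892))   # subtract the BGR channel means
im = im.transpose((2, 0, 1))                        # to CxHxW

net.blobs['data'].reshape(1, *im.shape)
net.blobs['data'].data[...] = im
net.forward()
edge_map = net.blobs['sigmoid-fuse'].data[0, 0]     # fused side-output (blob name may differ)
```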
1,459
astirn/VariationalVariance
['active learning', 'gaussian processes']
['Reliable training and estimation of variance networks']
john-master/experiment_active_learning.py generative_experiments.py john-master/toy_regression.py john-master/locality_sampler.py regression_analysis.py active_learning_analysis.py generative_models.py john-master/toy_regression_orig.py callbacks.py john-master/utils.py john-master/utils_orig.py john-master/experiment_vae_orig.py active_learning_experiments.py john-master/experiment_active_learning_orig.py generative_data.py john-master/experiment_vae.py generative_analysis.py regression_experiments.py john-master/experiment_regression_orig.py utils_analysis.py john-master/experiment_regression.py regression_data.py utils_model.py regression_models.py integrate_active_learning_curves generate_plots active_learning_analysis fix_early_runs update_training_set run_experiments LatentVisualizationCallback2D ReconstructionCallback RegressionCallback LearningCurveCallback generative_plots keep_mnist image_reshape string_table generative_tables keep_fashion keep_svhn keep_celeb generative_analysis pre_process_data load_data_set configure_data_set run_vae_experiments NormalVAE VariationalVarianceVAE precision_prior_params mixture_network VAE decoder_dense encoder_convolution encoder_dense StudentVAE FixedVarianceNormalVAE decoder_convolution toy_regression_plot regression_subplot toy_regression_analysis drop_detlefsen uci_regression_analysis download_all generate_toy_data decimal_comma_to_decimal_point load_data trim_eol_whitespace run_experiments compute_metrics MeanVarianceLogger train_and_eval neural_network StudentRegression LocationScaleRegression NormalRegression fancy_plot PredictiveStudent prior_params VariationalPrecisionNormalRegression build_table string_table make_clean_method_names champions_club_table organize_regression_table student_log_prob expected_log_normal monte_carlo_student_t mixture_proportions VariationalVariance softplus_inverse argparser gp john mcdnn ensnn nn detlefsen_uci_baseline argparser gp john mcdnn ensnn nn gpnn argparser gp john mcdnn bnn sgp ensnn rbfnn nn detlefsen_uci_baseline gpnn argparser gp john mcdnn bnn sgp ensnn rbfnn nn argparser john BatchReshape vae BatchFlatten detlefsen_vae_baseline basemodel argparser john BatchReshape vae BatchFlatten basemodel local_batchify generate_data locality_sampler2 get_pseupoch gen_Qw locality_sampler local_batchify2 dropout plot generate_data gp neuralnet john detlefsen_toy_baseline plot2 bnn ens_john ensemble dropout plot generate_data gp neuralnet john plot2 bnn ens_john ensemble normalize_y Reciprocal PosLinear logmeanexp dist timer get_mnist normal_log_prob gmm get_svhn positivelinear to_float GaussianDropout get_fashionmnist _unpickle OneMinusX normal_log_prob_w_prior translatedSigmoid _loader get_image_dataset Norm2 Gaus_Dropout batchify get_cifar10 RBF plotpairwise RBF2 t_likelihood normalize_y Reciprocal PosLinear logmeanexp dist timer get_mnist normal_log_prob gmm get_svhn positivelinear to_float GaussianDropout get_fashionmnist _unpickle OneMinusX normal_log_prob_w_prior translatedSigmoid _loader get_image_dataset Norm2 Gaus_Dropout batchify get_cifar10 RBF plotpairwise RBF2 t_likelihood to_pickle read_pickle unique int remove subplots set_title reshape set_xlabel subplots_adjust lineplot rename set_ylabel unique legend ceil enumerate len update join glob dict lower savefig read_pickle append DataFrame keys exists argsort concatenate monte_carlo_student_t stddev update_training_set DataFrame max exists round seed set_seed compute_metrics train_and_eval ceil append train_test_split prod range to_pickle get 
update format replace seed_init mean manual_seed sample detlefsen_uci_baseline pop int join remove print fit variance index read_pickle transform makedirs columns apply Categorical idxmax DataFrame max idxmin columns append Method sort_values reset_index string_table copy set mean unique sort_index index fn std drop idxmin subplots replace concatenate squeeze image_reshape set_yticks subplots_adjust imshow set_xticks set_ylabel savefig unique len join listdir generative_plots replace glob read_pickle append DataFrame shuffle batch prefetch load configure_data_set to_string precision_prior_params save_weights DataFrame max exists seed set_seed posterior_predictive_checks append sum to_pickle range update format replace nanargmax concatenate manual_seed compile join remove load_data_set deepcopy print fit index isnan history read_pickle detlefsen_vae_baseline makedirs ELU Sequential add Dense Input BatchNormalization Flatten ELU Sequential add Dense Input BatchNormalization Flatten ELU Sequential add Dense Input BatchNormalization Flatten reduce_variance concat as_numpy_iterator choice clip_by_value append gather next range fit set_title plot generate_toy_data scatterplot fill_between idxmax set_yticklabels add_subplot set_xlabel make_clean_method_names add_gridspec append range plot set_xticklabels set_xlim lineplot unique remove regression_subplot generate_toy_data subplots_adjust set_ylabel figure set_ylim join toy_regression_plot glob set savefig read_pickle append DataFrame union update join glob dict read_pickle append DataFrame keys exists join remove urlretrieve list print mkdir keys pop join list glob warn read_excel to_numpy range read_csv normal data_std data_mean uniform linspace float64 cast model warn linspace max compute_metrics expand_dims sum get update from_tensor_slices nanargmax predictive_moments_and_samples sqrt compile batch min isnan prior_params bool fit detlefsen_toy_baseline pi argmax squeeze MeanVarianceLogger unique zip deepcopy generate_toy_data to_numpy Dense InputLayer Sequential add print fit subplots plot suptitle squeeze set_xlabel scatterplot set_ylabel legend fill_between set_ylim apply reset_index Algorithm Prior Categorical sort_values convert_dtypes DataFrame max list len organize_regression_table append sort_values range string_table copy mean unique enumerate items sort_index index fn to_numpy std find iterrows columns concat astype DataFrame range len log pi lgamma log pi append Normal unstack Independent add_argument_group parse_args add_argument ArgumentParser RBF optimize reshape pi flatten dot sqrt mean GPRegression constrain_positive log predict normalize_y Sequential zero_grad device ReLU tensor cuda Adam set_postfix chain to sum next update close batchify Linear backward tqdm parameters Softplus step Sequential zero_grad ReLU device cuda Adam set_postfix to next update close mean batchify Linear backward tqdm parameters step Dropout normalize_y Sequential zero_grad normal_log_prob device ReLU tensor n_models cuda Adam set_postfix append chain to sum next range update close mean sqrt stack batchify Linear backward tqdm parameters Softplus step n_clusters normalize_y batch_size model KMeans locality_sampler2 zero_grad device tensor round cuda argmax Adam gen_Qw ceil chain to next append concatenate eval item batchify GPNNModel int norm iters backward print min cluster_centers_ float32 parameters train step fit john argparser format SparseGPRegression RBF optimize inducing reshape min pi flatten dot sqrt mean constrain_positive log predict 
normal_log_prob sqrt flatten normal_log_prob mcmc range sqrt normalize_y sample_net var losses float32 placeholder mean sqrt normal_log_prob reset_default_graph sum n_clusters normalize_y Reciprocal PosLinear KMeans Sequential zero_grad normal_log_prob device ReLU tensor cuda Adam set_postfix chain to next sum update close sqrt batchify Linear RBF backward min cluster_centers_ float32 tqdm parameters step fit n_clusters normalize_y PosLinear KMeans Sequential zero_grad Sigmoid normal_log_prob device ReLU tensor cuda Adam set_postfix OneMinusX chain to next sum update concatenate close sqrt Norm2 batchify Linear RBF backward min cluster_centers_ float32 tqdm parameters step fit fit_transform PCA sqrt reshape t_likelihood int list reshape john shape ceil fit query bincount KDTree flatten choice unique append randint len choice unique append randint len min maximum PCA astype gen_Qw int32 fit_transform uniform f linspace randn gen_Qw int32 astype GaussianLikelihood format model backward print zero_grad Adam ExactMarginalLogLikelihood GPModel parameters eval item train step format NNModel model backward print zero_grad Adam parameters eval item step format NNModel model backward print zero_grad Adam mean parameters item step format NNModel model backward print zero_grad Adam parameters eval item step range int HamiltonianMonteCarlo std print reshape make_log_joint_fn array numpy sample_chain unsqueeze sum range get_pseupoch Tensor model KMeans locality_sampler2 zero_grad tensor Adam gen_Qw append chain sum range format concatenate mean eval stack item GPNNModel backward print cluster_centers_ parameters get_pseupoch Tensor train step fit str subplots set_fontsize concatenate get_yticklabels set_xlabel f axis get_xticklabels ravel set_ylabel savefig fill numpy str subplots set_fontsize plot get_yticklabels set_xlabel get_xticklabels isfinite set_ylabel savefig nan legend zip numpy print john float32 mean sqrt to t norm matmul hist scatter add_subplot range permutation isinstance ndarray log pi isinstance ndarray log pi sum log logsumexp mean std join reshape _loader _loader str reshape _unpickle zeros array range flatten transpose loadmat logmeanexp reshape pi
astirn/VariationalVariance
1,460
asudomoeva/Audio-Tagging
['audio tagging']
['General-purpose Tagging of Freesound Audio with AudioSet Labels: Task Description, Dataset, and Baseline']
final-project/py-script/accuracy_calc.py final-project/py-script/vae.py final-project/py-script/play_generated_sample.py final-project/py-script/run_example.py final-project/py-script/ppca.py assign_class calculate_probability play_sample train_ppca run_gen_example run_example train_cnn make_cnn_encoder single_vae mixture_vae predict_estimators predict_from_estimators _softplus_inverse fit_cnn_vae create_y_testdf mixvae_accuracy cnn_model_fn make_cnn_decoder list mean eval Normal append array range append argmax display print transpose Audio title figure waveplot randint array show list global_variables_initializer softplus minimize Variable ones plot make_log_joint_fn target AdamOptimizer reset_default_graph variational_model range load show format display print stft Audio amplitude_to_db title figure waveplot specshow abs load show format display print stft Audio amplitude_to_db title figure waveplot specshow abs Conv1D Sequential partial Conv1D Sequential partial to_float make_cnn_encoder reduce_logsumexp decoder get_or_create_global_step minimize audio log encoder mean cosine_decay reduce_mean AdamOptimizer sample MultivariateNormalDiag log_prob scalar make_cnn_decoder Estimator getattr nn train range predict pop format train_cnn print astype float32 copy numpy_input_fn unique append list train_cnn astype float32 numpy_input_fn DataFrame predict drop astype codes to_frame numpy_input_fn array predict_from_estimators array append drop print accuracy_score format
# Audio Tagging System with Probabilistic Programming ## Project Overview ### Description Due to the vastness of sounds we experience in the real world, no reliable automatic general-purpose audio tagging system currently exists. Many people still believe that audio tagging presents one of the hardest machine learning problems today. We decided to challenge that perspective by tackling audio classification with probabilistic programming. **Goal:** Develop an automatic, general-purpose audio tagging system capable of accurately classifying sound collections for a wide range of real-world environments. **Data:** The original dataset is taken from Kaggle [1]. The samples (20,000 WAV files) are generated from Freesound's library and include things like musical instruments, domestic sounds, and animals [2]. Each input is a WAV file with a corresponding label. There are 41 labels overall, each drawn from Google’s AudioSet ontology. The dataset also includes a boolean column indicating whether the label was manually verified. ### Approach To achieve the goal, we cycled through Box’s loop [3]. Our journey consisted of three iterations: Probabilistic PCA and two versions of a CNN VAE. ![](final-project/plots/journey.png) For details on model accuracies, generated samples, etc., please consult the *final-project* folder.
1,461
atharva-18/Neural-Style-Transfer
['style transfer']
['A Neural Algorithm of Artistic Style']
nst.py Evaluator gram_matrix eval_loss_and_grads content_loss total_variation_loss style_loss dot transpose batch_flatten permute_dimensions gram_matrix square reshape astype f_outputs
# Neural-Style-Transfer ### Keras implementation of Neural Style Transfer [Gatys et al., 2015]. <br> Neural style transfer is a method of transferring characteristics of an image to another image using a pretrained deep convolutional neural network. ``` Author - Atharva Pusalkar Date created - 10th August 2019 ``` ### <b>Usage</b> ```
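The functions listed above (`gram_matrix`, `content_loss`, `style_loss`, `total_variation_loss`) follow the definitions in Gatys et al.; the sketch below re-derives the first three with the Keras backend as an illustration and is not copied from `nst.py`:

```python
# Content/style losses as defined by Gatys et al. -- an illustrative sketch using
# the Keras backend, not the repo's exact code.
from keras import backend as K

def gram_matrix(x):
    # x: a single feature map of shape (H, W, C); returns the C x C Gram matrix.
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))

def content_loss(base, combination):
    return K.sum(K.square(combination - base))

def style_loss(style, combination, img_h, img_w, channels=3):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    size = img_h * img_w
    return K.sum(K.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))
```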
1,462
atharvacc/SigmaNewsProject
['stock price prediction']
['Neural networks for stock price prediction']
main.py SVR_trainer/SVR.py preprocess_data normalizeY loadDataFiles main preprocess_data normalizeY loadDataFiles regularize load print open asarray reset_index print to_datetime merge append date fillna drop print loadDataFiles preprocess_data normalizeY head drop check_call print amin amax
# SigmaNewsProject ## 11/9 to 11/12 - Play with Dataset - Come up with graphs - Find methods to be implemented on the dataset. ### Atharva - Looking at Graphs and reading https://arxiv.org/abs/1805.11317?fbclid=IwAR3kMH47tMtSn6RIt7HSxQDoXTm3_HO4-83QMH7t9BoSVr5XXhbe52Ac5LE (Done) ## 11/22-11/24 - Run RNNs on Full Dataset (done) - Try training ANN using Keras (done)
1,463
atomistic-ml/ani-al
['active learning']
['Automated discovery of a robust interatomic potential for aluminum']
readers/example_data_sampler.py readers/lib/pyanitools.py datapacker anidataloader
### ANI-Al This repository will contain the companion data and model for the paper _Automated discovery of a robust interatomic potential for aluminum_, by J. S. Smith et al. [[arxiv:2003.04934](https://arxiv.org/abs/2003.04934)]. In particular, we plan to release the final ANI-Al potential and the DFT calculations that comprise its training dataset. ### Required software Python 3.5 or newer, NumPy, h5py ### Included extraction software pyanitools.py - Contains a class called "anidataloader" for loading
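A typical read loop with the included `anidataloader` might look like the sketch below; the dataset key names (`coordinates`, `energies`, ...) and the `cleanup()` call are assumptions about how the HDF5 files are packed, so inspect one record's keys first:

```python
# Hypothetical usage of the bundled pyanitools.anidataloader; key names are
# assumptions -- print the keys to see what each HDF5 group actually stores.
from pyanitools import anidataloader

loader = anidataloader('ani_al_data.h5')      # path to one of the released HDF5 files
for record in loader:                         # each record is a dict of numpy arrays
    print(sorted(record.keys()))
    coords = record.get('coordinates')        # e.g. (n_frames, n_atoms, 3)
    energies = record.get('energies')         # e.g. (n_frames,)
loader.cleanup()                              # close the underlying HDF5 file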
1,464
atreyasha/sentiment-argument-mining
['argument mining']
['Yes, we can! Mining Arguments in 50 Years of US Presidential Campaign Debates']
predict_UNSC.py utils/arg_metav_formatter.py pre_process_USElectionDebates.py train_USElectionDebates.py utils/model_utils.py pre_process_UNSC.py read_data_UNSC load_saved_model summary_info_UNSC_pred simplify_results pred_model_UNSC load_UNSC project_to_ids_UNSC corpus2tokenids_UNSC summary_info_UNSC basic_text_cleaning write_to_json post_process project_to_ids_US summary_info_US corpus2char corpus2tokenids_US flatten initialize_bert_tokenizer tokenize read_us_election_corpus char_tag correct_periods getCurrentTime read_data_US grid_train single_train mean_labels arg_metav_formatter create_model fetch_bert_layer class_acc class_report learning_rate_scheduler glob join load str fetch_bert_layer load_model load read_data_UNSC join str load_saved_model save argmax predict list extend Counter dict append keys enumerate read_data_US read_data_UNSC list summary_info_UNSC_pred tqdm keys range tolist search tqdm span sub enumerate convert_tokens_to_ids extend copy tqdm append range len project_to_ids_UNSC save initialize_bert_tokenizer str list load_UNSC sent_tokenize append sum range preprocess summary_info_UNSC info basic_text_cleaning tokenize enumerate join preprocess_text tqdm split len list zip glob tqdm info append enumerate split list len tqdm info append range enumerate append enumerate split search range len list preprocess_text Counter zip append range len append list enumerate zip write_to_json flatten read_us_election_corpus char_tag correct_periods join fetch_google_albert_model FullTokenizer convert_tokens_to_ids extend tqdm append range len extend Counter dict append enumerate post_process search flatten span save initialize_bert_tokenizer read_us_election_corpus correct_periods str list sent_tokenize append train_test_split sum range char_tag info tokenize enumerate join project_to_ids_US tqdm summary_info_US len glob join load str float read_data_US clear_session list getCurrentTime str fetch_bert_layer create_model class_report makedirs learning_rate_scheduler fit mean_labels argmax keys predict len read_data_US getCurrentTime clear_session argmax str list learning_rate_scheduler len mean_labels predict update fetch_bert_layer create_model keys enumerate remove ParameterGrid class_report fit makedirs flatten from_params join fetch_google_albert_model albert_params l_bert build Model load_albert_weights summary Input compile
# Sentiment Analysis and Argumentation Mining in UNSC Speeches [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4813013.svg)](https://doi.org/10.5281/zenodo.4813013) ## Overview This project entails sentiment analysis and argumentation mining of the recently published UN Security Council speeches (UNSC) corpus, which is publicly accessible [here](https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/KGVSYH). The UNSC corpus contains ~65,000 UN Security Council speeches from ~5,000 Security Council meetings held between 1995 and 2017. Each meeting is split up into the various speeches given by member countries. Furthermore, speeches are annotated with dates, topics, and overall meeting outcomes. The UNSC corpus is, however, not annotated for argumentation structures and sentiment polarities. In this project, we attempt to produce automatic machine-driven sentiment and argumentation annotations for the UNSC corpus, which could aid future human-driven annotations. To find out more about our methodologies, read the next parts of this readme. Additionally, a list of documents detailing our methodologies can be found below: * [Preliminary presentation](/docs/prelim_presentation/main.pdf) * [Progress-update presentation](/docs/progress_presentation/main.pdf) * [Final Report](/docs/final_report/main.pdf) ## Dependencies
1,465
attapol/conll16st
['discourse parsing']
['CoNLL 2016 Shared Task on Multilingual Shallow Discourse Parsing']
validator.py sample_parser.py report.py conn_head_mapper.py tira_eval.py aligner.py partial_scorer.py threading_timer_decorator_exit.py scorer.py tira_sup_eval.py sample_sup_parser.py confusion_matrix.py _arg_pos_alignment_score compute_score_matrix is_overlap align_relations _recurs_align_relations _separate_by_doc_id rel_alignment_score arg2_alignment_score compute_f1_span arg1_alignment_score save_alignment _align Alphabet matrix_to_string ConfusionMatrix ConnHeadMapper evaluate_args evaluate_rel_arg_whole_rel compute_prf evaluate_arg_partial_match evaluate_sense evaluate_arg_tokenwise main partial_evaluate main DiscourseParser DiscourseParser evaluate spans_exact_matching evaluate_argument_extractor combine_spans span_exact_matching connective_head_matching evaluate_sense _link_gold_predicted compute_binary_eval_metric main evaluate_connectives d print countdown c b main a exit_after cdquit main write_results write_proto_text write_partial_match_results main use_gold_standard_types check_connective check_type check_span identify_valid_senses validate_file identify_language check_sense validate_relation_list check_args list extend set _separate_by_doc_id keys _align compute_score_matrix set _recurs_align_relations append len zeros alignment_score_fn enumerate remove add is_overlap _arg_pos_alignment_score is_overlap is_overlap compute_f1_span deepcopy write dumps close append open float intersection len append defaultdict str isinstance tuple index len print evaluate_args evaluate_rel_arg_whole_rel identify_valid_senses compute_micro_average_f1 align_relations evaluate_sense print_summary compute_prf evaluate_arg_partial_match compute_f1_span compute_f1_span Alphabet NEGATIVE_CLASS ConfusionMatrix add print add_argument ArgumentParser partial_evaluate parse_args cutoff join read glob namelist findall ZipFile print_summary print evaluate_argument_extractor compute_micro_average_f1 get_prf evaluate_sense evaluate_connectives compute_binary_eval_metric compute_binary_eval_metric zip map_raw_connective identify_valid_senses _link_gold_predicted enumerate Alphabet enumerate ConfusionMatrix add matching_fn enumerate evaluate pop _print flush print interrupt_main format flush print print sleep print sleep print range sleep print range sleep d countdown c b a write get_prf write_proto_text write_proto_text write_results exit close write_partial_match_results identify_language validate_relation_list open print exit zip sorted use_gold_standard_types check_connective print check_type check_args loads check_sense enumerate open check_connective check_type check_args check_sense enumerate print check_span check_span identify_language
# Official Github Repo for CoNLL 2016 Shared Task ## Validator The validator is provided to make sure the discourse parser output is in the right format. In this version of the task, language must be specified when validating the output. Sample usage: ``` python2.7 validator.py en tutorial/output.json ``` If you would like to see what the error messages look like, try running: ```
1,466
aub-mind/Robust-Seizure-Prediction
['data augmentation']
['Augmenting DL with Adversarial Training for Robust Prediction of Epilepsy Seizures']
utils/load_signals.py utils/save_load.py inspect_AE.py models/helping_functions.py main.py models/model_ae.py utils/load_results.py calc_metrics train_val_test_split generate_adversarial collect_results train_val_cv_split gn_augment shuffle_data next_batch plot_eeg auc_results calculate_fpr summary_results load_results data_prep PrepData load_signals_FB load_signals_CHBMIT FB_notch_filter savefile save_hickle_file load_hickle_file load_ae argmax confusion_matrix roc_auc_score append randn arange shuffle concatenate print shuffle_data range len int concatenate print shuffle_data floor ceil len show format use plot xlabel minorticks_on grid ylabel title savefig linspace figure str all print reshape x_noise feed_forward adversarial run randint round array plot_eeg dict load count_nonzero array append len array concatenate append data_prep round calculate_fpr print mean append argmax std range len notch_filter join list int exists concatenate print read_raw_edf len ch_names to_data_frame as_matrix pick_channels range read_csv strcv str join sorted reset_index int concatenate print loadtxt reshape set strcv shape read_csv append range exists len notch_filter zeros range shape print dump print load isfile dump print load isfile
## Augmenting DL with Adversarial Training for Robust Prediction of Epilepsy Seizures This repository contains the code used for the journal paper titled "Augmenting DL with Adversarial Training for Robust Prediction of Epilepsy Seizures" by Hussein A., Djandji M., et al., which was published in ACM Transactions on Computing for Healthcare. The paper can be found here: https://dl.acm.org/doi/abs/10.1145/3386580. ## Requirements * h5py (2.9.0) * hickle (3.4.5) * matplotlib (3.1.1) * mne (0.11.0) * pandas (0.25.1) * scikit-learn (0.21.3) * scipy (1.1.0)
1,467
austnbell/TextSummarizationFederatedLearning
['text summarization', 'document summarization', 'extractive summarization']
['SummaRuNNer: A Recurrent Neural Network based Sequence Model for Extractive Summarization of Documents']
app.py Programs/SumaRuNNer/Vocab.py Programs/SumaRuNNer/DataPrep.py Programs/__init__.py Programs/Application/Pipeline.py Programs/SumaRuNNer/Model.py Programs/SumaRuNNer/__init__.py Programs/Evaluation/Evaluator.py Programs/TopicModelling/NBSVM_TopicModel.py Summarize index SumPipeline Evaluator gen_keras_input SummaRuNNer Vocab model_20ng RunPipeline zip print tolist split append range enumerate Bidirectional pos_embed_dim avg_pooler floor rel_embed_layer Input sent_len doc_len Embedding Model append sent_LSTM range concatenate AveragePooling1D TimeDistributed doc_pooler abs_embed_layer compile embed_layer hidden_sz LSTM word_LSTM len
# TextSummarizationFederatedLearning A text summarization tool developed with federated learning. For full information, read the complete blog post found here: Other projects from my work in Federated Learning: * Introduction to Federated Learning: * Federated Learning in Healthcare (Hospital Readmissions) * Blog * [Code](https://github.com/austnbell/FederatedLearningHealthcare/blob/master/README.md) ### Summary: Develop a text summarization tool in which distributed data is incorporated through federated learning. Federated learning allows training on distributed datasets and guarantees data privacy by sharing model parameters rather than raw data throughout the training process. The text summarizer is implemented with a SummaRuNNer model. The paper is located in the "Papers" directory, or the arXiv version can be found [here](https://arxiv.org/pdf/1611.04230.pdf)
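The summary above hinges on clients sharing model parameters rather than raw data; a generic federated-averaging step (written from that description, not taken from this repo's training code) can be sketched as:

```python
# Generic FedAvg-style aggregation: clients train locally, only their weights are
# shared and averaged (weighted by local dataset size). Illustrative only.
import numpy as np

def federated_average(client_weight_lists, client_sizes):
    """client_weight_lists: one list of per-layer numpy arrays per client."""
    total = float(sum(client_sizes))
    averaged = []
    for layer_weights in zip(*client_weight_lists):   # iterate layer by layer
        weighted = [w * (n / total) for w, n in zip(layer_weights, client_sizes)]
        averaged.append(np.sum(weighted, axis=0))
    return averaged

# Usage with Keras models (assumed):
#   new_weights = federated_average([m.get_weights() for m in client_models],
#                                   [len(d) for d in client_datasets])
#   global_model.set_weights(new_weights)
```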
1,468
automan000/Convolution_LSTM_pytorch
['weather forecasting', 'video prediction']
['Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting']
convolution_lstm.py ConvLSTMCell ConvLSTM
# Convolution_LSTM_pytorch Thanks for your attention. I have not had time to maintain this repo for a long time. I recommend this [repo](https://github.com/Hzzone/Precipitation-Nowcasting), which provides an excellent implementation. # Usage A multi-layer convolutional LSTM module. A PyTorch implementation of [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](https://arxiv.org/abs/1506.04214) ```python clstm = ConvLSTM(input_channels=512, hidden_channels=[128, 64, 64], kernel_size=5, step=9, effective_step=[2, 4, 8]) lstm_outputs = clstm(cnn_features)
1,469
automl/GenericWrapper4AC
['experimental design']
['Pitfalls and Best Practices in Algorithm Configuration']
test/test_calls/test_checker.py test/test_resources/pi.py genericWrapper4AC/generic_wrapper.py examples/SGD/sgd_ta.py test/test_resources/mem_str.py test/test_calls/test_calls.py genericWrapper4AC/argparser/parse.py examples/artificial_example/target_algorithm.py setup.py examples/dummy_wrapper/dummy_wrapper.py examples/artificial_example/wrapper.py genericWrapper4AC/data/data.py test/test_resources/test_limits.py genericWrapper4AC/domain_specific/satwrapper.py examples/SGD/SGDWrapper.py examples/MiniSAT/MiniSATWrapper.py InstallRunsolver ArtWrapper DummyWrapper MiniSATWrapper SGDWrapper AbstractWrapper signalHandler parse parse_config_old get_extended_parser parse_config_new get_parser Data SatWrapper TestCalls TestChecker TestResourceLimits exit add_argument ArgumentParser add_argument mem_limit parse_config_old max_quality parse_known_args get_extended_parser runsolver parse_config_new tmp_dir int Data min dict float cutoff seed int Data min dict instance cutoff
# Generic Wrapper for Algorithm Configuration The generic wrapper is a base class to easily implement your own interface between your algorithm and an algorithm configurator (such as ParamILS or SMAC). The generic wrapper is developed and maintained by members of the [AutoML](http://www.automl.org) Group at the University of Freiburg and the [Beta Lab](http://www.cs.ubc.ca/labs/beta/) of the University of British Columbia. Status for master branch: [![Build Status](https://travis-ci.org/automl/GenericWrapper4AC.svg?branch=master)](https://travis-ci.org/automl/GenericWrapper4AC) Status for dev branch: [![Build Status](https://travis-ci.org/automl/GenericWrapper4AC.svg?branch=dev)](https://travis-ci.org/automl/GenericWrapper4AC) ## INSTALLATION We provide a `setup.py` script which can be used to install the generic wrapper as a package and which also installs all dependencies (including `runsolver`). ``` python setup.py install
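Implementing your own interface then amounts to subclassing `AbstractWrapper` and overriding two callbacks, one that builds the target algorithm's command line and one that parses its output; the sketch below mirrors the shipped `MiniSATWrapper`/`SGDWrapper` examples, but the exact method signatures and result keys are assumptions and should be verified against `genericWrapper4AC/generic_wrapper.py`:

```python
# Sketch of a custom wrapper (method names, signatures, and result keys are assumed
# from the shipped examples; verify against genericWrapper4AC/generic_wrapper.py).
from genericWrapper4AC.generic_wrapper import AbstractWrapper

class MySolverWrapper(AbstractWrapper):

    def get_command_line_args(self, runargs, config):
        # runargs: instance, seed, cutoff, ...; config: parameter name -> value
        params = " ".join("%s %s" % (name, value) for name, value in config.items())
        return "./my_solver %s --seed %d %s" % (runargs["instance"], runargs["seed"], params)

    def process_results(self, filepointer, exit_code):
        # Parse the target algorithm's output and report it back to the configurator.
        output = str(filepointer.read())
        status = "SUCCESS" if "SOLVED" in output else "TIMEOUT"
        return {"status": status, "quality": 0, "misc": ""}

if __name__ == "__main__":
    wrapper = MySolverWrapper()
    wrapper.main()
```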
1,470
automl/Squirrel-Optimizer-BBO-NeurIPS20-automlorg
['automl']
['Squirrel: A Switching Hyperparameter Optimizer']
squirrel-optimizer/smac_init_optim.py squirrel-optimizer/utils/inc_collection.py example_submissions/nevergrad/optimizer.py squirrel-optimizer/de_optim.py squirrel-optimizer/GaussianProcess/OWCK.py example_submissions/turbo/optimizer.py squirrel-optimizer/utils/warmstart_helper.py squirrel-optimizer/GaussianProcess/setup.py example_submissions/pysot/optimizer.py squirrel-optimizer/GaussianProcess/gpr.py example_submissions/hyperopt/optimizer.py squirrel-optimizer/GaussianProcess/cma_es.py squirrel-optimizer/random_search.py squirrel-optimizer/GaussianProcess/boundary_handling.py squirrel-optimizer/GaussianProcess/kernel.py squirrel-optimizer/GaussianProcess/function.py squirrel-optimizer/utils/inc_rank.py squirrel-optimizer/GaussianProcess/__init__.py squirrel-optimizer/cma_es_optim.py example_submissions/random-search/optimizer.py squirrel-optimizer/utils/__init__.py example_submissions/skopt/optimizer.py squirrel-optimizer/init_design.py squirrel-optimizer/GaussianProcess/OWCK_slave.py squirrel-optimizer/GaussianProcess/gpr.old.py example_submissions/opentuner/optimizer.py squirrel-optimizer/smac_optim.py squirrel-optimizer/GaussianProcess/trend.py squirrel-optimizer/GaussianProcess/utils.py squirrel-optimizer/optimizer.py squirrel-optimizer/GaussianProcess/gprhao.py squirrel-optimizer/points_min_disc.py only HyperoptOptimizer dummy_f NevergradOptimizer ClippedParam OpentunerOptimizer PySOTOptimizer RandomOptimizer ScikitOptimizer TurboOptimizer copula_standardize order_stats handle_box_constraint vector_to_configspace CMA DEOptimizer InitialDesign SwitchingOptimizer PointsMinDisc RandomOpt SMACInit SMAC4EPMOpimizer RunHistory2EPM4GaussianCopulaCorrect boundary_handling cma_es constant matern generalized_exponential linear squared_exponential GaussianProcess l1_cross_distances quadratic pure_nugget absolute_exponential cubic l1_cross_distances GaussianProcess my_dot GaussianProcess_extra my_dot Kernel CompositeKernel matern generalized_exponential KernelSum StationaryKernel squared_exponential ConstantKernel Matern pure_nugget HammingKernel KernelProduct absolute_exponential cubic train_model train_modelstar OWCK constant linear quadratic_trend BasisExpansionTrend constant_trend quadratic Trend linear_trend NonparametricTrend get_design_sites MSLL SMSE plot_contour_gradient plot_surface_contour load_config_spaces plot_LR_hull create_order plot_LR_hull_and_topN load_model find_param_matches find_matches _clip_to_range _sanitize_null_configs warmstart_load read_local_warmstarts get_cs_dimensions load_configs _dict_to_configspace get_partial_configs cumsum unique ppf asarray nan_to_num true_divide order_stats len T asarray atleast_2d isfinite bitwise_and atleast_1d shape floor zeros abs arange astype upper lower sample_configuration get_hyperparameters enumerate isfinite bitwise_and shape floor zeros abs ones asarray asarray hstack shape asarray range hstack asarray exp reshape sqrt fill gamma sum abs asarray asarray asarray exp reshape size hstack abs zeros asarray asarray reshape size abs prod arange check_array shape zeros abs range zeros sum range fit var mean append range len var log mean append array range len get int time atleast_2d mod Halton rand i4_sobol lhs getpid zeros range grid floor linspace save log clabel set_title set_xlabel quiver meshgrid sum set_xlim sqrt contour reshape set_ylabel figure array set_ylim len grid floor linspace save max log clabel set_title Triangulation set_xlabel plot_trisurf quiver meshgrid sum set_xlim sqrt contour reshape min set_ylabel figure array 
set_ylim len listdir dropna DataFrame extend yscale show load_model plot xscale to_numpy mean scatter log10 legend append distance_matrix ConvexHull show set_xscale subplots plot suptitle distance_matrix set_yscale to_numpy mean scatter log10 legend append create_order range ConvexHull load_model argmin extend range sample mean log10 append distance_matrix argmax drop_duplicates ConvexHull len isinstance tolist extend dropna DataFrame enumerate list format isinstance print index append DataFrame keys items find_param_matches list extend Counter load_config_spaces append items list extend choice uniform randint DataFrame load_configs items list choice DataFrame clip get_partial_configs find_matches _clip_to_range extend load_configs dropna get sorted CategoricalHyperparameter sort OrdinalHyperparameter ConfigurationSpace UniformFloatHyperparameter add_hyperparameter keys UniformIntegerHyperparameter get_hyperparameter sample_configuration int isinstance arange min tolist choice _sanitize_null_configs warmstart_load to_numpy get_cs_dimensions DataFrame len
# Squirrel: A Switching Hyperparameter Optimizer Motivated by the fact that different optimizers work well on different problems, our approach switches between different optimizers. Since the team names on the competition's leaderboard were randomly generated, consisting of an adjective and an animal with the same initial letter, we called our approach the Switching Squirrel, or Squirrel for short. In our Squirrel framework, we switched between the following components: 1. An initial design (for known hyperparameter spaces: found by meta-learning; otherwise: selected by differential evolution) (3 batches); 2. Optimization using Bayesian optimization by integrating the SMAC optimizer with a portfolio of different triplets of surrogate model, acquisition function, and output space transformation (8 batches); and 3. Optimization using Differential Evolution with parameter adaptation (5 batches) ## Results Our Squirrel **ranked 3rd** with a **score of 92.551** on the [official leaderboard](https://bbochallenge.com/leaderboard), and also won **1st place** on the [alternate leaderboard](https://bbochallenge.com/altleaderboard) (with a score of **94.845476** and the organizers' bootstrap analysis showing 100% confidence in this 1st-place ranking). ## Run Squirrel locally We used the [Bayesmark](https://github.com/uber/bayesmark) benchmark framework for the local experiments with Squirrel. See the Bayesmark [documentation](https://bayesmark.readthedocs.io/en/latest/) for the details.
1,471
autonise/CRAFT-Remade
['scene text detection']
['Character Region Awareness for Text Detection']
src/utils/check_dataset.py src/craft_model.py train_synth/dataloader.py src/utils/data_structure_ic15.py src/utils/utils.py train_synth/synthesize.py train_synth/test.py src/UNET_ResNet.py train_weak_supervision/dataloader.py src/generic_model.py train_synth/train.py src/utils/running_mean.py train_weak_supervision/__init__.py src/utils/data_structure_ic13.py main.py train_weak_supervision/trainer.py train_synth/config.py src/vgg16bn.py src/utils/data_manipulation.py src/utils/data_structure_ic17.py config.py src/utils/utils_old.py train_weak_supervision/config.py src/utils/merger.py src/utils/parallel.py seed weak_supervision synthesize test_synth pre_process main train_synth CRAFT DoubleConv Criterian hard_negative_mining UpBlockForUNetWithResNet50 ConvBlock Bridge UNetWithResnet50Encoder init_weights VGG16BN return_height_width two_char_bbox_to_affinity add_character_others generate_target generate_target_others denormalize_mean_variance resize_generated generate_affinity_others four_point_transform add_affinity_others add_affinity normalize_mean_variance resize generate_affinity add_character icdar2013_train icdar2013_test icdar2015_test icdar2015_train clean_annots merge CallbackContext allreduce AllReduce DataParallelModel _criterion_parallel_apply execute_replication_callbacks DistributedDataParallelModel Reduce patch_replication_callback DataParallelCriterion get_weighted_character_target weighing_function calculate_fscore order_points poly_to_rect calc_iou generate_word_bbox_batch _init_fn link_to_word_bbox cutter calculate_batch_fscore scale_bbox resize_bbox get_smooth_polygon generate_word_bbox DataLoaderSYNTH DataLoaderEval synthesize generator_ main synthesize_with_score generate_next_targets main test save main train save DataLoaderMIX DataLoaderEvalOther test change_lr train save get_initial_model_optimizer save_model generate_target manual_seed_all manual_seed seed main main int save_model generate_target get_initial_model_optimizer print test start_iteration train range print main icdar2015_test icdar2013_train print exit icdar2013_test icdar2015_train numpy mse_loss data isinstance fill_ Conv2d xavier_uniform_ normal_ zero_ BatchNorm2d Linear reshape boxPoints copy minAreaRect astype float32 uint8 astype copy mean array getPerspectiveTransform warpPerspective threshold_point uint8 ones astype mean shape max uint8 ones astype mean shape resize max range len four_point_transform min float32 astype maximum max four_point_transform astype maximum float32 copy mean array mean reshape copy two_char_bbox_to_affinity transpose copy zeros range add_character zeros range add_character_others len transpose copy add_affinity append zeros range len add_affinity_others copy zeros range enumerate len join listdir join listdir join listdir join listdir listdir join merge_gt merge_image makedirs join isinstance _worker len start is_grad_enabled append range Lock list hasattr __data_parallel_replicate__ modules enumerate len replicate argsort array seed generate_word_bbox uint8 max resize order_points two_char_bbox_to_affinity float32 zeros range cutter weighing_function enumerate len threshold roll max clip RETR_TREE connectedComponentsWithStats argmin link_to_word_bbox MORPH_RECT shape append minAreaRect range findContours astype sqrt scale_bbox dilate int uint8 CHAIN_APPROX_SIMPLE getStructuringElement reshape boxPoints min zeros array order_points boxPoints area buffer append minAreaRect zeros enumerate append generate_word_bbox range area buffer calc_iou len astype float32 zeros sum 
range enumerate calculate_fscore range len int64 repeat astype concatenate get_weighted_character_target join uint8 drawContours threshold_fscore denormalize_mean_variance generate_target_others transpose unknown astype float32 copy tolist weight_threshold resize generate_word_bbox max imsave load join use_cuda synthesize UNetWithResnet50Encoder CRAFT DataParallelModel DataLoader load_state_dict DataLoaderEval cuda makedirs load use_cuda UNetWithResnet50Encoder CRAFT DataParallelModel DataLoaderEvalOther DataLoader load_state_dict cuda synthesize_with_score makedirs str denormalize_mean_variance transpose float32 range numpy imsave makedirs print test DataLoaderSYNTH DataParallelCriterion Criterian sum logical_or DataLoaderSYNTH_Train_Synthesis model zero_grad pretrained set_description clf save str step savefig append optimizer_iteration cat plot save_path change_lr generate_word_bbox_batch mean calculate_batch_fscore item enumerate int use_cuda backward tqdm numpy len pretrained clf pretrained_path save tolist Adam copyfile savefig plot save_path int parameters train imwrite threshold_character threshold_affinity_upper generate_word_bbox threshold_affinity scale_character threshold_character_upper threshold_word enumerate drawContours uint8 scale_affinity param_groups print DataLoader Criterian DataParallelCriterion sum DataLoaderMIX copy print optimizer_iterations empty_cache array split str save_path DataLoaderEvalOther DataLoader makedirs load use_cuda UNetWithResnet50Encoder CRAFT DataParallelModel Adam parameters load_state_dict cuda generator_ empty_cache str plot save_path clf savefig save makedirs
autonise/CRAFT-Remade
1,472
autonomousvision/data_aggregation
['imitation learning', 'autonomous driving']
['Exploring Data Aggregation in Policy Learning for Vision-Based Urban Autonomous Driving', 'DART: Noise Injection for Robust Imitation Learning']
carla08/driving_benchmark/experiment_suites/corl_2017.py coil_core/run_entropy.py network/models/building_blocks/__init__.py carla08/image_converter.py configs/namer.py logger/tensorboard_logger.py drive/CoILBaseline.py drive/__init__.py carla/planner/planner.py tools/batch_rename.py coilutils/checkpoint_schedule.py modules/noiser.py carla/agent/agent.py plotter/plotting_params/eccv_online_offline_plots.py carla08/client.py tools/move_data_fast.py drive/coil_agent.py plotter/plot_on_map.py modules/data_writer.py tools/post_process.py carla08/agent/modules/waypointer.py drive/suites/nocrash_training_suite.py coilutils/exporter.py coil_core/save_activations.py modules/collision_checker.py tools/plot_on_map.py plotter/metrics.py model_view/carla08interface.py drive/suites/nocrash_new_weather_suite.py carla/planner/graph.py network/loss.py carla/tcp.py carla08/planner/__init__.py carla/agent/__init__.py dataset_configurations/coil_training_dataset.py carla/sensor.py carla08/driving_benchmark/driving_benchmark.py carla08/agent/modules/controllers.py logger/monitorer.py drive/suites/nocrash_new_weather_town_suite.py tools/viewer.py carla08/util.py coil_core/train.py coilutils/drive_utils.py carla08/planner/map.py logger/json_formatter.py carla08/driving_benchmark/experiment_suites/basic_experiment_suite.py coil_core/validate_single_model.py input/coil_dataset.py carla08/driving_benchmark/experiment_suites/longcontrol_2018.py tools/filter_dagger_data.py carla08/agent/modules/utils.py coilutils/attribute_dict.py carla/agent/modules/controllers.py logger/printer.py coilutils/general.py coiltraine.py coil_core/run_drive.py carla/planner/city_track.py network/__init__.py carla/agent/forward_agent.py carla08/planner/city_track.py carla/image_converter.py plotter/plotter.py carla/settings.py carla08/planner/astar.py multi_gpu_collection.py carla08/planner/converter.py carla/agent/modules/waypointer.py carla/agent/modules/__init__.py carla08/transform.py carla08/driving_benchmark/results_printer.py plotter/__init__.py tools/create_plots.py tools/plot_infractions.py drive/suites/nocrash_new_town_suite.py tools/copy_data_fast.py carla08/planner/graph.py logger/coil_logger.py coil_core/validate.py network/models/coil_icra.py network/models/building_blocks/conv.py plotter/scatter_plotter.py carla08/driving_benchmark/experiment_suites/__init__.py network/optimizer.py network/models/__init__.py configs/__init__.py carla08/settings.py carla08/agent/forward_agent.py carla/client.py tools/create_video.py carla08/driving_benchmark/metrics.py carla/agent/lane_follower.py carla/agent/command_follower.py carla/planner/grid.py coilutils/experiment_schedule.py coilutils/checking.py logger/carla_metrics_parser.py carla/carla_game.py carla/planner/converter.py carla/planner/map.py plotter/plotting_params/plotting_all_cameras.py coilutils/__init__.py carla/util.py carla08/agent/command_follower.py carla08/agent/lane_follower.py carla/agent/modules/obstacle_avoidance.py carla08/agent/human_agent.py plotter/plotting_params/sample_plot.py tools/filter_dagger_data_var.py coil_core/adabound.py modules/__init__.py network/models/building_blocks/resnet.py input/data_parser.py carla08/agent/__init__.py input/augmenter.py coil_core/__init__.py carla08/driving_benchmark/__init__.py plotter/data_reading.py input/scheduler.py carla08/agent/modules/__init__.py input/__init__.py carla08/agent/modules/obstacle_avoidance.py carla08/sensor.py carla/planner/bezier.py carla08/driving_benchmark/experiment.py coil_core/executer.py 
modules/screen_manager.py carla/agent/human_agent.py network/models/building_blocks/join.py carla08/planner/grid.py configs/coil_global.py carla/planner/__init__.py carla08/tcp.py carla08/planner/planner.py network/models/building_blocks/branching.py carla/planner/astar.py carla/transform.py carla/carla_server_pb2.py carla08/agent/agent.py carla08/planner/bezier.py tools/count_time.py carla08/driving_benchmark/experiment_suites/experiment_suite.py collect.py logger/__init__.py coil_core/grad_cam.py network/coil_model.py network/loss_functional.py coil_core/adamaio.py coil_core/validate_for_expert.py carla08/carla_server_pb2.py carla08/driving_benchmark/recording.py carla/agent/modules/utils.py input/coil_sampler.py input/splitter.py model_view/carla09interface.py network/models/building_blocks/fc.py reach_timeout collect make_controlling_agent suppress_logs reset_episode update_controls get_directions new_episode check_episode_has_noise main inject_dart_noise get_normalized_covariance_mat calculate_timeout Arguments collect_loop execute_collector open_carla vector_to_degrees CarlaGame Timer make_carla_client _make_sensor_parsers CarlaClient labels_to_cityscapes_palette to_bgra_array depth_to_array depth_to_logarithmic_grayscale depth_to_local_point_cloud to_rgb_array labels_to_array Image Lidar SensorData Camera LidarMeasurement _append_extension Sensor PointCloud CarlaSettings TCPConnectionError TCPClient Transform make_connection print_over_same_line to_hex_str StopWatch Agent CommandFollower ForwardAgent HumanAgent LaneFollower Controller ObstacleAvoidance get_vec_dist get_angle sldist Waypointer angle_between AStar Cell bezier_curve bernstein_poly CityTrack Converter string_to_floats sldist Graph sldist3 string_to_node Grid angle_between CarlaMap color_to_angle sldist angle_between Planner signal compare make_carla_client _make_sensor_parsers CarlaClient labels_to_cityscapes_palette to_bgra_array depth_to_array depth_to_logarithmic_grayscale depth_to_local_point_cloud to_rgb_array labels_to_array Image Lidar SensorData Camera LidarMeasurement _append_extension Sensor PointCloud CarlaSettings TCPConnectionError TCPClient Transform make_connection print_over_same_line to_hex_str StopWatch Agent CommandFollower ForwardAgent HumanAgent LaneFollower Controller ObstacleAvoidance get_vec_dist get_angle sldist Waypointer angle_between sldist get_vec_dist DrivingBenchmark run_driving_benchmark Experiment Metrics Recording print_summary BasicExperimentSuite CoRL2017 ExperimentSuite LongitudinalControl2018 AStar Cell bezier_curve bernstein_poly CityTrack Converter string_to_floats sldist Graph sldist3 string_to_node Grid angle_between CarlaMap color_to_angle sldist angle_between Planner signal compare AttributeDict _is_tensor_image do_assert is_callable _is_numpy_image is_single_number is_hdf5_prepared check_loss_validation_stopped is_ready_to_save is_next_checkpoint_ready maximun_checkpoint_reach is_open get_next_checkpoint validation_stale_point get_latest_saved_checkpoint get_latest_evaluated_checkpoint checkpoint_parse_configuration_file get_gpu_resources get_remainig_exps execvec_to_names allocate_gpu_resources mount_experiment_heap dict_to_namevec export_csv_separate export_csv export_status erase_logs erase_wrong_plotting_summaries plot_test_image alphanum_key camelcase_to_snakecase write_data_point_control_summary compute_average_std command_number_to_index static_vars compute_average_std_separatetasks create_exp_path create_log_folder write_header_control_summary get_validation_datasets 
erase_validations get_latest_path snakecase_to_camelcase softmax unique tryint send_email get_driving_environments sort_nicely AdaBoundW AdaBound AdamAIO execute_drive execute_train execute_validation folder_execute assign_gpus execute driving_benchmark start_carla_simulator find_free_port parse_remove_configuration write_regular_output CoILDataset get_episode_weather extract_branch execute write_waypoints_output parse_remove_configuration write_regular_output CoILDataset get_episode_weather execute write_waypoints_output execute execute write_waypoints_output write_regular_output execute write_waypoints_output write_regular_output get_names merge_with_yaml _merge_a_into_b _decode_cfg_value set_type_of_process _check_and_coerce_cfg_value_type generate_name parse_split_configuration get_dropout_sum make_carla_settings CoILBaseline distance_vehicle CoILAgent NocrashNewTown NocrashNewWeather NocrashNewWeatherTown NocrashTraining Augmenter get_episode_weather parse_remove_configuration CoILDataset PreSplittedSampler LogitSplittedSampler RandomSampler FixedSampler get_rank SubsetSampler UniformDaggerSampler BatchSequenceSampler forward_speed get_speed orientation_vector check_available_measurements soft hard_harder medium TextureRandomization soft_harder high medium_harder ColorRandomization TexturePatchRandomization TextureAlphaRandomization remove_traffic_lights get_boost_pedestrian_vehicle_traffic_lights partition_keys_by_percentiles label_split remove_angle order_sequence parse_split_configuration remove_angle_traffic_lights split_lateral_noise_longitudinal_noise split_brake split_pedestrian_vehicle_traffic_lights_move split_speed_module_throttle convert_measurements split_left_central_right select_balancing_strategy split_sequence float_split split_speed_module get_inverse_freq_weights select_data_sequence split_pedestrian_vehicle_traffic_lights get_averaged_metrics erase_csv write_on_error_csv add_message add_image recover_loss_window close check_finish write_on_csv create_log write_stop add_scalar logger closeFileLogger readJSONlog streamlogger JSONFormatter filelogger get_episode_number get_status get_latest_checkpoint get_number_episodes_completed get_latest_checkpoint_drive get_error_summary get_summary get_latest_checkpoint_validation get_latest_output print_validation_summary print_train_summary plot_folder_summaries print_drive_summary print_folder_process_names Logger make_carla_settings ColorText game_loop HUD CameraManager FadingText KeyboardControl HelpText World PlayerMeasurements Listener FadingText LaneInvasionSensor World game_loop find_weather_presets get_actor_display_name Camera HUD CollisionSensor CameraManager ColorText KeyboardControl HelpText SensorCollector Measurements CollisionChecker add_data_point write_sensor_data add_metadata delete_episode make_dataset_path add_episode_metadata write_json_measurements Noiser get_average_over_interval draw_pt ScreenManager draw_vbar_on calc_lookahead_offset perspective_tform get_average_over_interval_stride draw_path calc_curvature draw_path_on generate_ncolors CoILModel branched_loss l1 l2 Loss l1_attention compute_branches_masks weight_decay_l1 weight_decay_l2 l1_loss normalize l2_loss adjust_learning_rate_auto adjust_learning_rate adjust_learning_rate_cosine_annealing CoILICRA Branching Conv FC Join ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 read_control_csv get_speed_ground_truth get_camera_labels _read_data read_summary_csv get_ground_truth augment_steering _read_step_data 
_read_control_data read_summary_tasks_csv compute_steering_error compute_displacement compute_steering_error_filter_gt compute_id compute_steering_avg_l1 compute_km_per_infraction compute_control_accuracy compute_steering_accuracy compute_count_errors_weighted compute_steering_avg_mse_filter_gt compute_step compute_and_aggregate compute_correlation compute_steering_classification_error compute_count_cumulative_displacement compute_experiment compute_control_average_completion aggregate_metric compute_control_success_rate compute_relative_error_smoothed compute_steering_accuracy_filter_gt compute_steering_avg_l1_speed compute_steering_avg_mse compute_cumulative_displacement compute_displacement_steer compute_count_errors_weighted_speed filter_data read_data plot_scatter process_data compute_metric sldist split_episodes plot_test_image get_causes_of_end plot_on_map plot_point plot_episodes_tracks plot plot_analysis compute_lims copy_episodes main filter_data get_required_episodes_thres main get_required_episodes filter_episode copy_episodes sldist split_episodes plot_test_image get_causes_of_end plot_on_map plot_point plot_episodes_tracks process_fn purge tryint alphanum_key join_classes_for join_classes sort_nicely reshape_images Control alphanum_key join_classes_for tryint sort_nicely join_classes reach_timeout collect make_controlling_agent suppress_logs reset_episode update_controls get_directions new_episode check_episode_has_noise main inject_dart_noise get_normalized_covariance_mat calculate_timeout Arguments collect_loop execute_collector open_carla vector_to_degrees CarlaGame Timer make_carla_client _make_sensor_parsers CarlaClient labels_to_cityscapes_palette to_bgra_array depth_to_array depth_to_logarithmic_grayscale depth_to_local_point_cloud to_rgb_array labels_to_array Image Lidar SensorData Camera LidarMeasurement _append_extension Sensor PointCloud CarlaSettings TCPConnectionError TCPClient Transform make_connection print_over_same_line to_hex_str StopWatch Agent CommandFollower ForwardAgent HumanAgent LaneFollower Controller ObstacleAvoidance get_vec_dist get_angle sldist Waypointer angle_between AStar Cell bezier_curve bernstein_poly CityTrack Converter string_to_floats sldist Graph sldist3 string_to_node Grid angle_between CarlaMap color_to_angle sldist angle_between Planner signal compare make_carla_client _make_sensor_parsers CarlaClient labels_to_cityscapes_palette to_bgra_array depth_to_array depth_to_logarithmic_grayscale depth_to_local_point_cloud to_rgb_array labels_to_array Image Lidar SensorData Camera LidarMeasurement _append_extension Sensor PointCloud CarlaSettings TCPConnectionError TCPClient Transform make_connection print_over_same_line to_hex_str StopWatch Agent CommandFollower ForwardAgent HumanAgent LaneFollower Controller ObstacleAvoidance get_vec_dist get_angle sldist Waypointer angle_between sldist get_vec_dist DrivingBenchmark run_driving_benchmark Experiment Metrics Recording print_summary BasicExperimentSuite CoRL2017 ExperimentSuite LongitudinalControl2018 AStar Cell bezier_curve bernstein_poly CityTrack Converter string_to_floats sldist Graph sldist3 string_to_node Grid angle_between CarlaMap color_to_angle sldist angle_between Planner signal compare AttributeDict _is_tensor_image do_assert is_callable _is_numpy_image is_single_number is_hdf5_prepared check_loss_validation_stopped is_ready_to_save is_next_checkpoint_ready maximun_checkpoint_reach is_open get_next_checkpoint validation_stale_point get_latest_saved_checkpoint 
get_latest_evaluated_checkpoint checkpoint_parse_configuration_file get_gpu_resources get_remainig_exps execvec_to_names allocate_gpu_resources mount_experiment_heap dict_to_namevec export_csv_separate export_csv export_status erase_logs erase_wrong_plotting_summaries plot_test_image alphanum_key camelcase_to_snakecase write_data_point_control_summary compute_average_std command_number_to_index static_vars compute_average_std_separatetasks create_exp_path create_log_folder write_header_control_summary get_validation_datasets erase_validations get_latest_path snakecase_to_camelcase softmax unique tryint send_email get_driving_environments sort_nicely AdaBoundW AdaBound AdamAIO execute_drive execute_train execute_validation folder_execute assign_gpus execute driving_benchmark start_carla_simulator find_free_port parse_remove_configuration write_regular_output CoILDataset get_episode_weather extract_branch execute write_waypoints_output parse_remove_configuration write_regular_output CoILDataset get_episode_weather execute write_waypoints_output execute write_waypoints_output write_regular_output get_names merge_with_yaml _merge_a_into_b _decode_cfg_value set_type_of_process _check_and_coerce_cfg_value_type generate_name parse_split_configuration get_dropout_sum make_carla_settings CoILBaseline distance_vehicle CoILAgent NocrashNewTown NocrashNewWeather NocrashNewWeatherTown NocrashTraining Augmenter get_episode_weather parse_remove_configuration CoILDataset PreSplittedSampler LogitSplittedSampler RandomSampler FixedSampler get_rank SubsetSampler UniformDaggerSampler BatchSequenceSampler forward_speed get_speed orientation_vector check_available_measurements soft hard_harder medium TextureRandomization soft_harder high medium_harder ColorRandomization TexturePatchRandomization TextureAlphaRandomization remove_traffic_lights get_boost_pedestrian_vehicle_traffic_lights partition_keys_by_percentiles label_split remove_angle order_sequence parse_split_configuration remove_angle_traffic_lights split_lateral_noise_longitudinal_noise split_brake split_pedestrian_vehicle_traffic_lights_move split_speed_module_throttle convert_measurements split_left_central_right select_balancing_strategy split_sequence float_split split_speed_module get_inverse_freq_weights select_data_sequence split_pedestrian_vehicle_traffic_lights get_averaged_metrics erase_csv write_on_error_csv add_message add_image recover_loss_window close check_finish write_on_csv create_log write_stop add_scalar logger closeFileLogger readJSONlog streamlogger JSONFormatter filelogger get_episode_number get_status get_latest_checkpoint get_number_episodes_completed get_latest_checkpoint_drive get_error_summary get_summary get_latest_checkpoint_validation get_latest_output print_validation_summary print_train_summary plot_folder_summaries print_drive_summary print_folder_process_names Logger make_carla_settings ColorText game_loop HUD CameraManager FadingText KeyboardControl HelpText World PlayerMeasurements Listener FadingText LaneInvasionSensor World game_loop find_weather_presets get_actor_display_name Camera HUD CollisionSensor CameraManager ColorText KeyboardControl HelpText SensorCollector Measurements CollisionChecker add_data_point write_sensor_data add_metadata delete_episode make_dataset_path add_episode_metadata write_json_measurements Noiser get_average_over_interval draw_pt ScreenManager draw_vbar_on calc_lookahead_offset perspective_tform get_average_over_interval_stride draw_path calc_curvature draw_path_on generate_ncolors 
CoILModel branched_loss l1 l2 Loss l1_attention compute_branches_masks weight_decay_l1 weight_decay_l2 l1_loss normalize l2_loss adjust_learning_rate_auto adjust_learning_rate adjust_learning_rate_cosine_annealing CoILICRA Branching Conv FC Join ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 read_control_csv get_speed_ground_truth get_camera_labels _read_data read_summary_csv get_ground_truth augment_steering _read_step_data _read_control_data read_summary_tasks_csv compute_steering_error compute_displacement compute_steering_error_filter_gt compute_id compute_steering_avg_l1 compute_km_per_infraction compute_control_accuracy compute_steering_accuracy compute_count_errors_weighted compute_steering_avg_mse_filter_gt compute_step compute_and_aggregate compute_correlation compute_steering_classification_error compute_count_cumulative_displacement compute_experiment compute_control_average_completion aggregate_metric compute_control_success_rate compute_relative_error_smoothed compute_steering_accuracy_filter_gt compute_steering_avg_l1_speed compute_steering_avg_mse compute_cumulative_displacement compute_displacement_steer compute_count_errors_weighted_speed filter_data read_data plot_scatter process_data compute_metric sldist split_episodes plot_test_image get_causes_of_end plot_on_map plot_point plot_episodes_tracks plot plot_analysis compute_lims copy_episodes main filter_data get_required_episodes_thres main get_required_episodes filter_episode copy_episodes sldist split_episodes plot_test_image get_causes_of_end plot_on_map plot_point plot_episodes_tracks process_fn purge tryint alphanum_key join_classes_for join_classes sort_nicely reshape_images Control alphanum_key join_classes_for tryint sort_nicely join_classes transform get_next_command start_episode set load_settings choice randint get_shortest_path_distance time NumberOfPedestrians make_carla_settings NumberOfVehicles POSITIONS choice new_episode set_of_weathers start_timer Planner set_objective calculate_timeout initialize_game join str getpid mkdir open multivariate_normal execute throttle brake steer compute_noise add_data_point make_controlling_agent CollisionChecker delete_episode add_episode_metadata run_step check_episode_has_noise number_of_episodes str sensors_frequency read_data data_configuration_name suppress_logs __import__ update_controls send_control render draw_traffic_lights long_noise_percent episode_number draw_vehicles update asarray debug model_checkpoint non_player_agents CoILAgent copy get_directions load int time lat_noise_percent reset_episode CarlaGame add_metadata zfill data_path make_dataset_path forward_speed Planner Noiser transform inject_dart_noise get_normalized_covariance_mat gpu draw_pedestrians basicConfig add_argument port ArgumentParser info host parse_args kill container_name call port open_carla town_name gpu start Process communicate Popen error type SensorDefinition reshape frombuffer raw_data to_bgra_array zeros list labels_to_array items dot astype float32 to_bgra_array depth_to_array ones log shape clip tan height depth_to_array reshape inv identity pi where delete dot width fov array connect client_type columns write max flush len sqrt array atan2 dot array linspace len split split items list print zip zeros range len File close sleep exists stat st_size join EXPERIMENT_BATCH_NAME EXPERIMENT_NAME sort_nicely listdir join EXPERIMENT_BATCH_NAME EXPERIMENT_NAME get_latest_evaluated_checkpoint exists print get_latest_evaluated_checkpoint index join 
EXPERIMENT_NAME EXPERIMENT_BATCH_NAME exists join EXPERIMENT_NAME EXPERIMENT_BATCH_NAME exists append append isinstance items list append dict_to_namevec heappush execvec_to_names dict_to_namevec join isdir print read_summary_csv listdir update join argmax isdir print read_summary_csv append listdir join listdir exp max set sort sub upper join list split fromarray save join mkdir join mkdir join isdir add set listdir join isdir add set listdir join remove listdir isdir update join remove print loadtxt listdir len print join remove listdir glob join sort_nicely SMTP sendmail MIMEText as_string quit update items list print zip zeros sum range len join print close write open join print close write open update items list print zip zeros sum range len start Process create_exp_path start Process create_exp_path update Process start create_exp_path append allocate_gpu_resources update join get_gpu_resources execute_drive execute_train print execute_validation mount_experiment_heap heappop allocate_gpu_resources plot_folder_summaries sleep append listdir assign_gpus add_message communicate print find_free_port Popen add_message weathers start_carla_simulator write_data_point_control_summary plot_episodes_tracks str compute_average_std_separatetasks call PROCESS_NAME range get_latest_path CoILAgent run_driving_benchmark load join kill print build_experiments len USE_ORACLE experiment_suite_module add_message camelcase_to_snakecase FINISH_ON_VALIDATION_STALE open str len __import__ is_next_checkpoint_ready getpid getattr sleep write_header_control_summary get_latest_evaluated_checkpoint PROCESS_NAME range get_next_checkpoint mkdir unique validation_stale_point TEST_SCHEDULE join driving_benchmark print merge_with_yaml build_experiments set_type_of_process split print list keys OrderedDict print BATCH_SIZE min write_on_csv max range write_on_csv range len command_number_to_index LongTensor squeeze stack type DataLoader save cuda branches dataset_name CoILDataset MODEL_CONFIGURATION CoILModel load_state_dict extract_branch append Augmenter save_path stack MODEL_TYPE checkpoint load var train numpy array get_prefinal_layer eval zip time zeros data model zero_grad Loss DataParallel AUGMENTATION PRELOAD_MODEL_CHECKPOINT abs recover_loss_window squeeze tolist Adam check_finish PRELOAD_MODEL_BATCH sum state_dict adjust_learning_rate_auto select_balancing_strategy PRELOAD_MODEL_ALIAS TRAIN_DATASET_NAME is_ready_to_save add_image write_on_error_csv criterion backward LOSS_FUNCTION parameters randint get_latest_saved_checkpoint step add_scalar forward_branch write_regular_output write_waypoints_output write_stop unsqueeze transpose matmul DART_MODEL_CHECKPOINT DART_COVMAT_DATA cpu split generate_name _merge_a_into_b update join merge_with_yaml immutable listdir join add_message LOG_SCALAR_WRITING_FREQUENCY EXPERIMENT_BATCH_NAME immutable LOG_IMAGE_WRITING_FREQUENCY EXPERIMENT_NAME create_log mkdir PROCESS_NAME deepcopy list items _decode_cfg_value _check_and_coerce_cfg_value_type literal_eval str list ndarray isinstance tuple eval type array print list keys OrderedDict SPLIT USE_FULL_ORACLE USE_NOISE_DATA parse_split_configuration set_rotation set_position CarlaSettings randomize_seeds add_sensor set Camera set_image_size len array deg2rad dot array orientation_vector update join list glob keys float Sequential float Sequential float Sequential float Sequential Sequential float min float Sequential append sum len append add_message range len append SEQUENCE_STRIDE range NUMBER_IMAGES_SEQUENCE int list 
isinstance set select_data_sequence append range len order_sequence partition_keys_by_percentiles convert_measurements print range append len items list fromkeys array append keys convert_measurements convert_measurements convert_measurements convert_measurements convert_measurements convert_measurements convert_measurements print append len DataLoader list BATCH_SIZE parse_split_configuration splitter_function getattr intersection range PreSplittedSampler SPLIT set measurements FixedSampler UniformDaggerSampler get_inverse_freq_weights NUMBER_IMAGES_SEQUENCE RandomSampler array len items list print zip append zeros range len join filelogger Logger isfile join closeFileLogger info join join str join str join join remove str join str scalar_summary transpose image_summary append get_cmap numpy range setFormatter getLogger addHandler JSONFormatter setLevel FileHandler getLogger close copy removeHandler flush StreamHandler append filterfunction loads read_summary_csv join read_summary_csv join range len range len join list sorted EXPERIMENT_BATCH_NAME set difference EXPERIMENT_NAME sort_nicely listdir PROCESS_NAME join readline print EXPERIMENT_BATCH_NAME loadtxt close EXPERIMENT_NAME split expand_dims PROCESS_NAME open readJSONlog join open print print join time get_episode_number print exists USE_ORACLE print_train_summary get_names print_drive_summary print_validation_summary str sorted list append range get_latest_path get_status listdir join print system immutable merge_with_yaml len join print merge_with_yaml immutable sort_nicely listdir EXPERIMENT_GENERATED_NAME output_folder init makedirs compile join get_world port run_step KeyboardControl host HWSURFACE tick render get_agent_sensor width DOUBLEBUF set_mode World height show_image_mini Client Clock mkdir get_attentions flip print latest_image get_forward_speed apply_control parse_events HUD set_timeout get_command join list items save_to_disk zfill makedirs join mkdir join write_sensor_data write_json_measurements mkdir rmtree join int range hsv_to_rgb append randint float range append sum range len append sum range len perspective_tform draw_pt zip pi arcsin calc_curvature tan clip calc_lookahead_offset arange draw_path compute_branches_masks update loss_function range sum requires_grad min add parameters float abs requires_grad min add parameters float sum append cuda cat append range len append abs range len LEARNING_RATE_DECAY_INTERVAL print param_groups LEARNING_RATE LEARNING_RATE_DECAY_LEVEL max LEARNING_RATE_THRESHOLD param_groups count_steps_without_decrease_robust LEARNING_RATE LEARNING_RATE_DECAY_LEVEL max count_steps_without_decrease param_groups pi cos LEARNING_RATE update ResNet load_url load_state_dict state_dict update ResNet load_url load_state_dict state_dict update ResNet load_url load_state_dict state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict radians fabs min atan max update readline loadtxt close open expand_dims split update readline print loadtxt len close set open expand_dims range append split update readline loadtxt close open expand_dims split update join loadtxt update join loadtxt update join loadtxt update loadtxt get_ground_truth join read_control_csv update int join str sorted items OrderedDict range len items list aggregate_metric metric_func append percentile ones mean isscalar nan float sum array len append items float list print sum array abs array digitize float array astype digitize float array astype digitize float array astype abs array fabs set_printoptions append 
sum range len sum multiply isnan float abs len abs multiply absolute isnan sqrt float sum len append sum range len print join _read_control_data _read_data items list get_camera_labels print where OrderedDict getattr metric_func compute_and_aggregate list items filter_data compute_metric update join list items read_data plot plot_analysis print debug get_names strftime rmtree gmtime process_data makedirs astype plot_square range pop readline loadtxt len close open range append split readline concatenate loadtxt len close where open zeros split load join split_episodes str asarray plot_test_image astype get_causes_of_end convert_to_pixel plot_on_map rescale mkdir open CarlaMap append makedirs min max std set_cmap subplots set_yticklabels reduce list sorted set_xlabel title scatter savefig hash legend sum plot set_xticklabels set_xlim ScalarMappable Normalize get_cmap keys items print rc locator_params set_ylabel to_rgba array set_ylim len set_cmap subplots set_yticklabels reduce list set_xlabel title scatter savefig sum set_xticklabels set_xlim compute_lims items print rc locator_params argsort set_ylabel array set_ylim join sorted mkdir copy2 listdir join sorted abs close mkdir copy2 float listdir join sorted time int Process list items print dict Manager start mkdir append copy2 float listdir join sorted target_dir mkdir source_dir copy2 listdir load preload print var_file len argsort shape append max range split load preload var_file print len append max range split target_dir get_required_episodes_thres move print print join remove listdir search items list copy range copy join imread imsave imresize join remove purge delete_semantic_segmentation print glob close delete_depth sort_nicely reshape_images open
## Exploring Data Aggregation for Urban Driving This repository contains the code for the CVPR 2020 paper [Exploring Data Aggregation in Policy Learning for Vision-based Urban Autonomous Driving](http://www.cvlibs.net/publications/Prakash2020CVPR.pdf). It is built on top of the [COiLTRAiNE](https://github.com/felipecode/coiltraine) and [CARLA 0.8.4 data-collector](https://github.com/carla-simulator/data-collector) frameworks. If you find this code useful, please cite: ``` @inproceedings{Prakash2020CVPR, title = {Exploring Data Aggregation in Policy Learning for Vision-based Urban Autonomous Driving}, author = {Prakash, Aditya and Behl, Aseem and Ohn-Bar, Eshed and Chitta, Kashyap and Geiger, Andreas}, booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2020} }
1,473
autumntoney/ValNorm
['word embeddings']
['ValNorm Quantifies Semantics to Reveal Consistent Valence Biases Across Languages and Over Centuries']
ValNorm.py computeEffectSizeAndPVal effectSize cs_sim findDeviation getNullDistribution WordEmbeddingFactualAssociationTestVocab removeCategoryWordsIfNotInDictionary getWordEmbedding calculateCumulativeProbability sort mean sqrt range len cdf sort findDeviation mean int list shuffle mean range findDeviation print list keys append effectSize print getNullDistribution range calculateCumulativeProbability len cs_sim print zeros shuffle range removeCategoryWordsIfNotInDictionary abs getWordEmbedding len
# ValNorm Code and validation datasets used to generate ValNorm scores from ValNorm: A New Word Embedding Intrinsic Evaluation Method Reveals Valence Biases are Consistent Across Languages and Over Decades [on arXiv](https://arxiv.org/abs/2006.03950). @article{toney2020valnorm, title={ValNorm: A New Word Embedding Intrinsic Evaluation Method Reveals Valence Biases are Consistent Across Languages and Over Decades}, author={Toney, Autumn and Caliskan, Aylin}, journal={arXiv preprint arXiv:2006.03950}, year={2020} } ## ValNorm.py To generate ValNorm scores use the WordEmbeddingFactualAssociation() function and provide the word embedding set (semanticModel) and the vocabulary list to test (vocabToTest). WordEmbeddingFactualAssociation() returns a table with each row containing a vocabulary word, the effect size (ValNorm score), and the p-value.
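The readme above describes the `WordEmbeddingFactualAssociation()` call only in prose, so a minimal usage sketch follows. The import path, the dictionary-of-vectors representation of `semanticModel`, the toy vocabulary, and the printed return format are all assumptions made for illustration; the actual argument types and output of `ValNorm.py` may differ.

```python
# Hypothetical usage sketch; import path, argument types, and return format
# are assumptions based on the readme, not confirmed against ValNorm.py.
import numpy as np
from ValNorm import WordEmbeddingFactualAssociation  # assumed import

# Toy word-embedding set: word -> vector (a real run would load pre-trained vectors).
rng = np.random.default_rng(0)
semanticModel = {w: rng.normal(size=300)
                 for w in ["happy", "horrible", "sunshine", "disaster", "pleasant"]}

# Vocabulary to score for valence association.
vocabToTest = ["sunshine", "disaster", "pleasant"]

# Per the readme: one row per vocabulary word, with its effect size
# (the ValNorm score) and p-value.
table = WordEmbeddingFactualAssociation(semanticModel, vocabToTest)
print(table)
```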
1,474
awasthiabhijeet/PIE
['optical character recognition', 'grammatical error correction']
['Parallel Iterative Edit Models for Local Sequence Transduction']
word_edit_model.py opcodes.py errorify/errorifier.py modified_modeling.py get_seq2edits.py errorify/parse_verbs.py tokenize_input.py modeling.py apply_opcode.py optimization.py wem_utils.py get_edit_vocab.py spellcheck_utils.py transform_suffixes.py errorify/error.py utils.py seq2edits_utils.py tokenization.py config fix_apos_break join_subwords apply_opcodes split_and_convert_to_ints add_arguments key_from_val merge_dicts update_dicts get_ins_dels add_arguments segregate_insertions add_arguments diffs_to_edits seq2edits embedding_lookup reshape_from_matrix dropout assert_rank reshape_to_matrix layer_norm_and_dropout get_shape_list gelu create_initializer BertConfig attention_layer get_activation layer_norm embedding_postprocessor transformer_model create_attention_mask_from_input_mask get_assignment_map_from_checkpoint BertModel get_mask_ids get_shape_list get_mid_position_embeddings embedding_postprocessor create_attention_mask_from_input_mask reshape_from_matrix reshape_to_matrix assert_rank layer_norm_and_dropout attention_layer layer_norm create_initializer get_assignment_map_from_checkpoint embedding_lookup dropout gelu BertConfig transformer_model get_activation create_input_rep_ins_attention_mask BertModel Opcodes create_optimizer AdamWeightDecayOptimizer edit_distance_backpointer get_opcodes_from_bp_table SequenceMatcher lowest_cost_action edit_distance ndiff highest_match_action containsMultiCapital can_spellcheck containsNumber BasicTokenizer contains_ampersand convert_ids_to_tokens printable_text WordpieceTokenizer containsNumber _is_whitespace check_alternate_in_vocab spell_check _is_punctuation end_with_dotcom checkAlternateDots last_dot_first_capital convert_by_vocab convert_tokens_to_ids contains_percent containsMultiCapital do_not_split convert_to_unicode _is_control contains_square_brackets whitespace_tokenize check_smilies FullTokenizer contains_slash starts_with_www load_vocab contains_at_rate add_arguments write_output get_tuple SuffixTransform is_append_suffix append_suffix transform_suffix ApplySuffixTransorm remove_suffix is_transform_suffix apply_transform assert_fileexists dump_text_to_list read_file_lines generator_based_read_file pretty open_r do_pickle open_w bcolors read_file custom_tokenize genealised_cross_entropy edit_embedding_loopkup timer expand_embedding_matrix list_embedding_lookup list_to_ids GECInputExample replacement_minus_replaced_logits gec_create_model gec_file_based_convert_examples_to_features gec_convert_single_example edit_word_embedding_lookup gec_model_fn_builder GECInputFeatures gec_file_based_input_fn_builder main GECProcessor get_file_length PaddingInputExample DataProcessor errorify_file flush_queue errorify readn Errorifier expand_dict add_argument input_tokens output_tokens edit_ids str join_subwords print write extend exit capitalize apply_suffix_transform lower _run_spell_check FAIL ENDC enumerate append split pop format print exit capitalize append enumerate list map split defaultdict merge_dicts ndiff custom_tokenize defaultdict print append convert_tokens_to_ids custom_tokenize ndiff diffs_to_edits print DEL exit CPY match append enumerate sqrt erf lower name group OrderedDict match list_variables layer_norm dropout one_hot reshape get_shape_list matmul expand_dims get_variable one_hot reshape get_shape_list layer_norm_and_dropout matmul assert_less_equal get_variable ones reshape get_shape_list float32 cast dense dropout multiply get_shape_list reshape transpose float32 matmul transpose_for_scores expand_dims sqrt cast softmax 
float reshape_to_matrix int get_shape_list append reshape_from_matrix range reshape_to_matrix as_list assert_rank name shape append enumerate reshape ndims get_shape_list name integer_types ndims isinstance to_int32 not_equal zeros concat to_float get_shape_list concat logical_not eye zeros create_attention_mask_from_input_mask trainable_variables list constant get_or_create_global_step gradients clip_by_global_norm group float32 apply_gradients cast int32 zip polynomial_decay CrossShardOptimizer AdamWeightDecayOptimizer min float max action_function range len action_function get_opcodes_from_bp_table range len append reverse len get_opcodes SequenceMatcher format append match match compile match compile match compile spell upper lower isinstance PY3 PY2 isinstance PY3 PY2 OrderedDict append strip split category category startswith startswith category ord register join do_spell_check convert_tokens_to_ids _run_spell_check custom_tokenize split join write range len len len print format len print format len print format len ApplySuffixTransorm dump ok start fname open start fname exit ok start fname fail fail start exit fname exit fail tokenize append convert_tokens_to_ids split embedding_lookup format one_hot print reduce_sum matmul embedding_lookup one_hot matmul to_float pow less_equal reduce_sum expand_dims tile time next join list isinstance print len exit map GECInputFeatures guid info append edit_sequence split segment_ids join format create_int_feature TFRecordWriter close gec_convert_single_example write SerializeToString OrderedDict Example input_sequence input_mask info edit_sequence enumerate print join format get_matching_files embedding_lookup one_hot matmul format dropout print reshape get_shape_list len edit_word_embedding_lookup get_sequence_output embedding_table edit_embedding_loopkup concat word_embedded_input vocab_size get_variable hidden_size BertModel expand_dims reduce_sum matmul sum do_eval get_train_examples gec_file_based_convert_examples_to_features TPUClusterResolver path_inserts set_random_seed Open TPUEstimator set_verbosity output_dir do_train do_predict from_json_file get_file_length eval_batch_size create_predict_tf_records tpu_name data_dir convert_tokens_to_ids max_seq_length bert_config_file append chain PaddingInputExample use_tpu predict create_train_tf_records format predict_batch_size gec_model_fn_builder MakeDirs num_train_epochs info gec_file_based_input_fn_builder random_seed type INFO load FullTokenizer join int warmup_proportion evaluate print path_multitoken_inserts get_dev_examples PER_HOST_V2 get_test_examples train GECProcessor list_to_ids train_batch_size RunConfig flush_queue Errorifier put append flush_queue cpu_count close Manager Queue Pool open difference list union
# PIE: Parallel Iterative Edit Models for Local Sequence Transduction Fast Grammatical Error Correction using BERT Code and pre-trained models accompanying our paper "Parallel Iterative Edit Models for Local Sequence Transduction" (EMNLP-IJCNLP 2019) PIE is a BERT-based architecture for local sequence transduction tasks like Grammatical Error Correction. Unlike the standard approach of modeling GEC as translation from "incorrect" to "correct" language, we pose GEC as a local sequence editing task. We further reduce the local sequence editing problem to a sequence labeling setup in which BERT non-autoregressively labels input tokens with edits. We rewire the BERT architecture (without retraining) specifically for the task of sequence editing. We find that PIE models for GEC are 5 to 15 times faster than existing state-of-the-art architectures while maintaining competitive accuracy. For more details, please see our [EMNLP-IJCNLP 2019 paper](https://www.aclweb.org/anthology/D19-1435.pdf) ``` @inproceedings{awasthi-etal-2019-parallel, title = "Parallel Iterative Edit Models for Local Sequence Transduction", author = "Awasthi, Abhijeet and Sarawagi, Sunita and
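The readme frames grammatical error correction as labeling each input token with an edit rather than generating a corrected sentence autoregressively. The toy sketch below only illustrates that framing: the label set (`COPY`/`DELETE`/`APPEND_*`/`REPLACE_*`) and the `apply_edits` helper are illustrative assumptions, not PIE's actual edit vocabulary or code (see `apply_opcode.py` and `seq2edits_utils.py` in the repository for the real implementation).

```python
# Toy illustration of correction-as-token-labeling; not PIE's actual edit space.
def apply_edits(tokens, edits):
    """Apply one edit label per input token and return the corrected sequence."""
    out = []
    for tok, edit in zip(tokens, edits):
        if edit == "COPY":
            out.append(tok)
        elif edit == "DELETE":
            continue
        elif edit.startswith("APPEND_"):   # keep the token, then insert a new one after it
            out.extend([tok, edit[len("APPEND_"):]])
        elif edit.startswith("REPLACE_"):  # swap the token for another
            out.append(edit[len("REPLACE_"):])
    return out

tokens = ["he", "go", "to", "to", "school"]
edits  = ["COPY", "REPLACE_goes", "COPY", "DELETE", "COPY"]
print(apply_edits(tokens, edits))   # ['he', 'goes', 'to', 'school']
```

Because every token gets its label in parallel, a single non-autoregressive pass over the sentence (optionally iterated a few times) replaces token-by-token decoding, which is where the reported speedup comes from.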
1,475
awebson/congressional_adversary
['information retrieval']
['Are "Undocumented Workers" the Same as "Illegal Aliens"? Disentangling Denotation and Connotation in Vector Spaces']
src/preprocess_CR/S5_proxy_grounded.py src/utils/improvised_typing.py src/preprocess_PN/S1_tokenize_with_NER.py src/preprocess_PN/S1.5_deduplicate.py src/preprocess_CR/S4_ideal_grounded.py src/preprocess_CR/S1_qsub_CoreNLP.py src/preprocess_PN/S1_tokenize.py src/preprocess_CR/S3_underscore_phrases.py src/utils/trace_memory_allocation.py src/utils/experiment.py src/preprocess_PN/S4_export_train_data.py src/models/ideal_grounded.py src/data.py src/preprocess_PN/S3_export_plain_text.py src/preprocess_CR/S5_plain_text.py src/models/proxy_grounded.py src/preprocess_CR/S2_extract_phrases.py src/preprocess_CR/S4_plain_text_bill_mentioned.py src/preprocess_CR/search_bill_mentions.py src/preprocess_CR/S0_partition_corpus.py setup.py src/preprocess_PN/S2_underscore_phrases.py GroundedWord LabeledDoc Sentence Decomposer IdealGroundedExperiment LabeledSentences IdealGroundedConfig Recomposer main LabeledDocuments ProxyGroundedConfig ProxyGroundedRecomposer ProxyGroundedExperiment main ProxyGroundedDecomposer main partition_corpus partition_jsonl_corpus speech_length_histogram parse extract_named_entities load_parsing_result sort_frequency_and_write compute_collocation main aggregate_phrases extract_noun_and_verb_phrases main underscored_token underscore_phrases build_vocabulary faux_sent_tokenize wrap Sentence process_sentences export_sampled_frequency_by_party subsampling build_vocabulary faux_sent_tokenize partition export_sorted_frequency balance_classes export_sorted_frequency_by_party main _export_sorted_frequency_by_party Speaker Speech export_bill_mentions main Bill main main parse_xml partition partition underscore_NER parse_xml Sentence main LabeledDoc main main build_vocabulary subsampling main Experiment ExperimentConfig Vector Scalar _check_rank Matrix R3Tensor R5Tensor R4Tensor display_top IdealGroundedExperiment IdealGroundedConfig ProxyGroundedConfig ProxyGroundedExperiment join decimate enumerate print join decimate enumerate subplots write tqdm savefig legend distplot list partition_corpus print range makedirs split run join list append_to_filtered_sentences write tqdm append range Counter sort isinstance append lower sort_frequency_and_write compile join list heuristic subtrees fromstring extend leaves add sort_frequency_and_write compile apply_ngram_filter join score_ngrams apply_freq_filter len from_words extend raw_freq range apply_word_filter sort load_counters join words set tqdm aggregate_phrases union join list map escape copy translate sub append compile print bill shuffle party getattr sum range enumerate print items len append sqrt sum items DefaultDict len append items sort append sort append sort build_vocabulary _export_sorted_frequency_by_party tqdm print sample min len sentences subsampled_tokens freq most_common values open build_vocabulary subsampling add append sum GroundedWord update close shuffle mkdir sample items faux_sent_tokenize numerical_tokens text write Sentence len join list text write map escape search group compile tokens normalized_tokens tuple search Counter lower compile join attrib parse print strip add getroot append itertext LabeledDoc partition parse_xml processor Path home Pipeline enumerate join print text append enumerate split stanza_processor raw_freq apply_word_filter from_words MWETokenizer score_ngrams tokenize underscored_tokens apply_freq_filter join filter_traces statistics print strip sum enumerate
awebson/congressional_adversary
1,476
awslabs/neural-retrieval
['passage retrieval']
['Embedding-based Zero-shot Retrieval through Query Generation']
data/data.py models/bart_utils.py data/__init__.py data_processing/nq_preprocess.py utils/__init__.py examples/neural_retrieval.py utils/logger_utils.py models/siamese_model.py models/bart_model.py setup.py examples/bart_qg.py SimpleDataset padding_util ICT_batchify ICTDataset SiameseDataset seq2seq_padding_util file_stream batchify load_test_data get_linear_schedule_with_warmup load_train_data read_qrels do_sample generate read_corpus_jsonl read_corpus save_model set_seed generate_retrieval evaluation train doc_embed MultiheadAttention LearnedPositionalEmbedding LayerNorm Seq2SeqState TransformerDecoder TransformerModel EncoderLayer TransformerEncoder DecoderLayer Config weight_init fill_with_neg_inf build_future_mask top_k_top_p_filtering collect_representation make_positions BertEmbed SeqPooler get_logger join ICTDataset SiameseDataset DataLoader info range len ones min len max enumerate ones min len max enumerate ones min index append zeros max enumerate len ones min append zeros max enumerate len text itertuples read_csv qid str itertuples qid id append label read_csv encode SimpleDataset append encode append eval eval seed manual_seed_all manual_seed save state_dict eval train len save_model tuple zero_grad save str list epochs passagefile file_stream GradScaler CrossEntropyLoss state_dict update range resume info enumerate int join isdir backward set_epoch tqdm ckpt_dir evaluation step makedirs save_model DataParallel DataLoader DataFrame values list device_count doc_embed_file load_state_dict SequentialSampler to eval info keys load SimpleDataset BertEmbed to_csv zeros len topk list dump query_embed_file transpose nlargest tqdm dot keys doc_embed_file info open retrieval_outputfile to_dict read_csv enumerate is_available list view size expand index_select is_available cuda data constant_ ConvTranspose3d BatchNorm3d Conv3d normal_ BatchNorm1d xavier_normal_ GRUCell GRU BatchNorm2d ConvTranspose1d LSTMCell Conv1d ConvTranspose2d Linear isinstance orthogonal_ Conv2d parameters LSTM triu zeros size fill_with_neg_inf int type_as cumsum sort size min clone scatter softmax max __setitem__ __delitem__ __getitem__ get stdout setFormatter getLogger addHandler StreamHandler Formatter DEBUG setLevel
# Neural Retrieval [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) Embedding-based Zero-shot Retrieval through Query Generation leverages query synthesis over large corpuses of unlabeled text (such as Wikipedia) to pre-train siamese neural retrieval models. The resulting models significantly improve over previous BM25 baselines as well as state-of-the-art neural methods. This package provides support for leveraging BART-large for query synthesis as well as code for training and finetuning a transformer based neural retriever. We also provide pre-generated synthetic queries on Wikipedia, and relevant pre-trained models that are obtainable through our download scripts. <p align=center><img src="neural-retrieval.png" width="650px"></p> **Paper:** Davis Liang\*, Peng Xu\*, Siamak Shakeri, Cicero Nogueira dos Santos, Ramesh Nallapati, Zhiheng Huang, Bing Xiang, [Embedding-based Zero-shot Retrieval through Query Generation](https://arxiv.org/pdf/2009.10270.pdf), 2020. ## Getting Started #### dependencies: `pip install torch torchvision transformers tqdm
1,477
ayaabdelsalam91/Input-Cell-Attention
['time series']
['Input-Cell Attention Reduces Vanishing Saliency of Recurrent Neural Networks']
Scripts/BoxStat.py Scripts/plotSaliency.py Scripts/saliency.py Scripts/trainModels.py Scripts/cell.py Scripts/net.py Scripts/createSimulationData.py Scripts/accuracyMethods.py Scripts/Helper.py createReferenceSample getBoxInfo rescale_ changeProbToClass normalizeAtoB getIndexOfMaxValues plotSample getEnrichmentScore rescale getIndexOfImpValues getJaccardSimilarityScore getWeightedJaccardSimilarityScore getNumberOfImportantFeatures main parse_arguments LSTMWithInputCellAttention Dim createSample createDataset parse_arguments main strTovalue str2bool checkAccuracyOnTestLstm load_CSV save_intoCSV reOrderLabels CustomRNN main plotHeatMapExampleWise MidpointNormalize parse_arguments main parse_arguments getModelType VanillaSaliency main parse_arguments zeros range len range len zeros shape getNumberOfImportantFeatures createReferenceSample zeros normalizeAtoB range shape minmax_scale flatten getIndexOfMaxValues getBoxInfo plotHeatMapExampleWise intersect1d sum setdiff1d union1d rescale_ input_size getJaccardSimilarityScore getWeightedJaccardSimilarityScore abs roc_auc_score str data_dir tolist getEnrichmentScore getIndexOfImpValues append range importance getIndexOfMaxValues astype mean save_intoCSV unique enumerate norm print load_CSV reshape sequence_length reOrderLabels zeros DataName array len add_argument ArgumentParser isinstance replace split normal range len zeros range createSample randint NumFeatures ImpTimeSteps StartImpFeatures createDataset NumTestingSamples multipleBox ImpFeatures EndImpFeatures EndImpTimeSteps StartImpTimeSteps NumTimeSteps NumTrainingSamples strTovalue print reshape read_csv values print reshape DataFrame to_csv append list index set eval show subplots set_title text transpose axis tight_layout imshow savefig plotHeatMapExampleWise DataName DataLoader getModelType from_numpy shape TensorDataset checkAccuracyOnTestLstm to VanillaSaliency load generate_gradients Variable transform StandardScaler fit d_a num_layers zero_grad LSTMdropout save num_classes Adam step hidden_size1 double CrossEntropyLoss netUniLstmCellAtten format item attention_hops train num_epochs long learning_rate criterion backward parameters rnndropout
# Input-Cell Attention Code implementing the architecture introduced in "Input-Cell Attention Reduces Vanishing Saliency of Recurrent Neural Networks" by Aya Abdelsalam Ismail, Mohamed Gunady, Luiz Pessoa, Hector Corrada Bravo*, Soheil Feizi*. ![alt text](Images/cellAttentionLstm.png) ## Overview: Recent efforts to improve the interpretability of deep neural networks use saliency to characterize the importance of input features in predictions made by models. Work on interpretability using saliency-based methods on Recurrent Neural Networks (RNNs) has mostly targeted language tasks, and their applicability to time series data is less understood. In this work we analyze saliency-based methods for RNNs, both classical and gated cell architectures. We show that RNN saliency vanishes over time, biasing detection of salient features toward later time steps and therefore making these models incapable of reliably detecting important features at arbitrary time intervals. To address this vanishing saliency problem, we propose a novel RNN cell structure (input-cell attention), which can extend any RNN cell architecture. At each time step, instead of only looking at the current input vector, input-cell attention uses a fixed-size matrix embedding, each row of the matrix attending to different inputs from current or previous time steps. Using synthetic data, we show that the saliency map produced by the input-cell attention RNN is able to faithfully detect important features regardless of their occurrence in time. We also apply the input-cell attention RNN to a neuroscience task analyzing functional Magnetic Resonance Imaging (fMRI) data for human subjects performing a variety of tasks. In this case, we use saliency to characterize brain regions (input features) for which activity at specific time intervals is important to distinguish between tasks. We show that standard RNN architectures are only capable of detecting important brain regions in the last few time steps of the fMRI data, while the input-cell attention model is able to detect important brain region activity across time without biases toward later time steps. ## Prerequisites: * Python 3.6.3 or higher * NumPy * Pytorch
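As a rough sketch of the matrix-embedding attention described in the overview — each row attending over the inputs seen so far — the PyTorch snippet below computes a fixed-size r × d summary of the input history at one time step. The layer names, sizes, and the absence of the surrounding LSTM gates are simplifications for illustration; the repository's `cell.py` defines the actual `LSTMWithInputCellAttention` used in the paper.

```python
# Minimal sketch (assumed shapes/names): fixed-size attention summary of the inputs
# seen up to time t, in the spirit of input-cell attention. Not the repo's cell.py.
import torch
import torch.nn as nn

class InputAttentionSummary(nn.Module):
    def __init__(self, input_dim, hidden_dim=64, r=8):
        super().__init__()
        self.w1 = nn.Linear(input_dim, hidden_dim, bias=False)
        self.w2 = nn.Linear(hidden_dim, r, bias=False)   # r rows of the matrix embedding

    def forward(self, x_history):                        # (batch, t, input_dim): inputs up to time t
        scores = self.w2(torch.tanh(self.w1(x_history))) # (batch, t, r)
        attn = torch.softmax(scores, dim=1)              # each row sums to 1 over time steps
        return attn.transpose(1, 2) @ x_history          # (batch, r, input_dim) matrix embedding

x_hist = torch.randn(4, 10, 32)   # 4 sequences, 10 steps observed so far, 32 features
print(InputAttentionSummary(32)(x_hist).shape)   # torch.Size([4, 8, 32])
```

The resulting matrix (rather than just the current input vector) is what the modified cell consumes at each step, which is why saliency can flow back to early time steps.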
1,478
ayeshakhtar209/fashion-mnist
['data augmentation']
['DENSER: Deep Evolutionary Network Structured Representation']
utils/helper.py configs.py benchmark/convnet.py app.py benchmark/runner.py utils/argparser.py svm.py utils/mnist_reader.py visualization/project_zalando.py start_s3_sync get_json_logger touch touch_dir _get_logger main cnn_model_fn PredictJob JobWorker JobManager get_args_request parse_arg get_args_cli now_int upload_result_s3 get_sprite_image invert_grayscale create_sprite_image vector_to_matrix_mnist UploadS3Thread load_mnist UploadS3Thread start Event dirname makedirs makedirs setFormatter touch_dir DEBUG getLogger addHandler StreamHandler Formatter touch setLevel INFO FileHandler setFormatter getLogger addHandler Formatter touch setLevel INFO FileHandler dense max_pooling2d dropout one_hot minimize reshape GradientDescentOptimizer conv2d softmax_cross_entropy asarray evaluate print Estimator shuffle labels images numpy_input_fn train range read_data_sets int append items list defaultdict utcfromtimestamp info int isinstance ones sqrt ceil array range vector_to_matrix_mnist invert_grayscale join
# Fashion-MNIST [![GitHub stars](https://img.shields.io/github/stars/zalandoresearch/fashion-mnist.svg?style=flat&label=Star)](https://github.com/zalandoresearch/fashion-mnist/) [![Gitter](https://badges.gitter.im/zalandoresearch/fashion-mnist.svg)](https://gitter.im/fashion-mnist/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link) [![Readme-CN](https://img.shields.io/badge/README-中文-green.svg)](README.zh-CN.md) [![Readme-JA](https://img.shields.io/badge/README-日本語-green.svg)](README.ja.md) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Year-In-Review](https://img.shields.io/badge/%F0%9F%8E%82-Year%20in%20Review-orange.svg)](https://hanxiao.github.io/2018/09/28/Fashion-MNIST-Year-In-Review/) <details><summary>Table of Contents</summary><p> * [Why we made Fashion-MNIST](#why-we-made-fashion-mnist) * [Get the Data](#get-the-data)
1,479
ayumiymk/aster.pytorch
['optical character recognition', 'scene text recognition']
['ASTER: An Attentional Scene Text Recognizer with Flexible Rectification']
lib/tools/create_sub_lmdb.py lib/utils/logging.py lib/evaluation_metrics/metrics.py lib/trainers.py lib/utils/serialization.py lib/__init__.py lib/utils/labelmaps.py lib/loss/__init__.py lib/models/tps_spatial_transformer.py lib/datasets/dataset.py lib/utils/meters.py lib/evaluation_metrics/__init__.py lib/utils/__init__.py lib/models/model_builder.py lib/tools/create_svtp_lmdb.py lib/models/resnet_aster.py lib/models/attention_recognition_head.py lib/evaluators.py lib/utils/visualization_utils.py lib/models/__init__.py main.py demo.py lib/datasets/concatdataset.py lib/models/stn_head.py lib/loss/sequenceCrossEntropyLoss.py config.py lib/utils/osutils.py get_args main DataInfo image_process get_data main get_dataloader get_dataset BaseEvaluator Evaluator Trainer BaseTrainer ConcatDataset AlignCollate LmdbDataset test ResizeNormalize RandomSequentialSampler EditDistance_with_lexicon EditDistance _lexicon_search get_str_list RecPostProcess Accuracy_with_lexicon Accuracy _normalize_text names factory _assert_no_grad SequenceCrossEntropyLoss to_contiguous AttentionRecognitionHead AttentionUnit DecoderUnit ModelBuilder conv1x1 AsterBlock ResNet_ASTER conv3x3 get_sinusoid_encoding conv3x3_block STNHead compute_partial_repr TPSSpatialTransformer grid_sample build_output_control_points names create writeCache createDataset _is_difficult writeCache checkImageIsValid char2id labels2strs get_vocabulary id2char TFLogger Logger AverageMeter make_symlink_if_not_exists mkdir_if_missing load_checkpoint copy_state_dict read_json save_checkpoint write_json stn_vis _save_plot_pool recognition_vis to_numpy to_torch parse_args int size convert BILINEAR floor div_ resize float max model get_str_list voc_type DataParallel unsqueeze device cuda seed fill_ image_path ModelBuilder load_state_dict to manual_seed_all format DataInfo eval resume manual_seed set_default_tensor_type print load_checkpoint image_process isinstance ConcatDataset print LmdbDataset DataLoader append isinstance ConcatDataset print LmdbDataset append list permutation ConcatDataset print SubsetRandomSampler DataLoader len workers real_logs_dir batch_size keep_ratio Trainer get_data MultiStepLR Logger num_test max test_data_dir logs_dir vis_dir epochs dirname normpath TFLogger width range height inf num_train debug Evaluator close vars join time evaluate Adadelta max_len parameters synthetic_train_data_dir evaluation_metric make_symlink_if_not_exists train step makedirs fromarray show uint8 zip print char2id size labels2strs LmdbDataset DataLoader eval permute item id2char input to_numpy enumerate join list size append to_numpy keys range len asarray argmin eval append _normalize_text get_str_list sum len get_str_list sum append len get_str_list sum get_str_list sum append exp get_str_list size min map append to_numpy sum log enumerate len is_contiguous arange cos pow unsqueeze sin float BatchNorm2d Sequential ReLU Conv2d fill_ masked_fill_ size log view concatenate ones stack linspace Tensor imdecode fromstring IMREAD_GRAYSCALE join print len encode writeCache range open list ascii_lowercase append ascii_letters digits join unsqueeze append to_numpy range makedirs format system makedirs dirname mkdir_if_missing join print run_on_remote copy make_dirs dirname save mkdir_if_missing load format print run_on_remote shift isfile data items list isinstance print set copy_ add keys state_dict fromarray join uint8 format get_str_list permute save zip to_numpy enumerate fromarray uint8 save set_yticklabels get_str_list axis unsqueeze open show seek 
imshow scatter savefig permute append range format set_xticklabels size astype close zip enumerate int uint8 BytesIO join figure to_numpy is_tensor
# ASTER: Attentional Scene Text Recognizer with Flexible Rectification This repository implements ASTER in PyTorch. The original software can be found [here](https://github.com/bgshih/aster). ASTER is an accurate scene text recognizer with a flexible rectification mechanism. The research paper can be found [here](https://ieeexplore.ieee.org/abstract/document/8395027/). ![ASTER Overview](overview.png) ## Installation ``` conda env create -f environment.yml ``` ## Train [**NOTE**] Some users report that they cannot reproduce the reported performance with minor modifications, like [1](https://github.com/ayumiymk/aster.pytorch/issues/17#issuecomment-527380815) and [2](https://github.com/ayumiymk/aster.pytorch/issues/17#issuecomment-528718596). I haven't tried other settings, so I can't guarantee the same performance with different settings. Users should run the following script without any modification to reproduce the results.
1,480
ayushais/DBLiDARNet
['semantic segmentation']
['DeepTemporalSeg: Temporally Consistent Semantic Segmentation of 3D LiDAR Scans']
python_scripts/network_layers.py python_scripts/DBLidarNet.py python_scripts/train_seg.py python_scripts/test.py python_scripts/utils.py DBLidarNet conv_2d_depth_separable conv_2d transpose_conv_2d add_layer add_block main main record_read_and_decode prepare_dataset conv_2d_depth_separable conv_2d dropout batch_norm relu array print DEFINE_integer FLAGS DEFINE_string gpu trainable_variables build_graph batch_size get_next DBLidarNet Saver save path_to_store_models run get_shape str global_variables DEFINE_float add_summary range format group train_record_filename FileWriter tf_record_iterator local_variables_initializer InteractiveSession log_dir prepare_dataset graph validation_record_filename model_name int32 global_variables_initializer len decode_raw one_hot reshape squeeze float32 cast int32 parse_single_example TFRecordDataset total_epochs batch_size map shuffle repeat prefetch batch
# DeepTemporalSeg This repository contains code to learn a model for semantic segmentation of 3D LiDAR scans <img src="http://deep-temporal-seg.informatik.uni-freiburg.de/ezgif.com-video-to-gif_small.gif" width="580" height="394" align="center" /> ## 1. License This software is released under GPLv3. If you use it in academic work, please cite: ``` @article{dewan-deeptemporalseg, author = {Ayush Dewan and Wolfram Burgard}, title = {DeepTemporalSeg: Temporally Consistent Semantic Segmentation of 3D LiDAR Scans}, booktitle = {https://arxiv.org/abs/1906.06962},
1,481
ayushbits/Semi-Supervised-LFs-Subset-Selection
['text classification']
['Semi-Supervised Data Programming with Subset Selection']
Data/SMS/obtain_embeddings.py ss_generic.py youtube/youtube_unsup_sub_selection_entropy_filter.py logistic_regression.py youtube/yt_unsup_sub_selection.py youtube/yt_sup_sub_selection.py trec/trec_ss.py Data/SMS/generate_data.py combine_lfs.py sms/sms_rand_selection.py sms/sms_sup_sub_selection.py sms/sms_ss1.py meta_layers.py trec/trec_sup_sub_selection.py rewt_generic.py sms/sms_unsup_sub_selection.py preprocess_recsys.py census/census_sup_sub_selection.py sms_ss.py census_ss.py synthetic_all.py utils_rec.py generic_unsup_sub_selection.py mitr/mitr_rewt.py find_labels.py ss_iono_noise_induce.py recommend/reco_ss.py mitr/mitr_sup_sub_selection.py generic_sup_sub_selection.py weighted_cage.py mitr/mitr_unsup_sub_selection_entropy_filtering.py sms/sms_unsup_sub_selection_entropy_filter.py trec/trec_rand_selection.py youtube/yotube_rewt.py trec/trec_unsup_sub_selection.py census/census_rand_selection.py mitr/mitr_ss.py census/census_ss.py census/census_unsup_sub_selection_entropy_filter.py sms_rewt.py sms/sms_rewt.py spam_random.py model.py recommend/recommend_ss.py losses.py mitr/mitr_rand_selection.py rewt_ss_generic.py youtube/youtube_unsup_sub_selection.py sms/sms_ss.py youtube/youtube_ss.py reduce_validation_data.py synthetic_semisupervised.py youtube/yt_rand_selection.py deep_net.py generic_rand_sub_selection.py cage.py ss_audit.py ss_kl_generic.py mitr/mitr_unsup_sub_selection.py census/census_unsup_sub_selection.py mitr/mitr_unsup_sub_selection_no_firing.py probability probability_s_given_y_l calculate_normalizer probability_y phi probability_l_y precision_loss log_likelihood_loss_supervised log_likelihood_loss DeepNet find_indices get_similarity_kernel find_indices find_indices LogisticRegression entropy_pre entropy getDiverseInstances vat_loss kl_div_with_logit svm_supervised kl_divergence cross_entropy MetaConvTranspose2d MetaBatchNorm1d MetaLinear MetaBatchNorm2d MetaModule MetaConv2d to_var MetaConvTranspose2d LeNet LogisticRegression MetaLinear MetaBatchNorm2d MetaModule MetaConv2d shared_first_author stars_in_review polarity_positive subjectivity_positive polarity_negative textblob_polarity get_features lsnork_to_l_m rewt_lfs load_pickle find_indices download_and_process_data precision_batch process_interactions_data get_n_epochs maybe_download_files load_small_sample split_data f1_batch recall_batch load_data get_timestamp process_books_data process_reviews_data save_small_sample probability probability_s_given_y_l calculate_normalizer probability_y phi probability_l_y precision_loss log_likelihood_loss_supervised log_likelihood_loss find_indices rewt_lfs get_similarity_kernel find_indices find_indices get_similarity_kernel find_indices find_indices get_similarity_kernel find_indices get_similarity_kernel find_indices get_similarity_kernel find_indices get_similarity_kernel find_indices load_data Generate_data load_rules sentences_to_elmo_sentence_embs find_indices rewt_lfs get_similarity_kernel find_indices find_indices get_similarity_kernel find_indices get_similarity_kernel find_indices get_similarity_kernel find_indices get_similarity_kernel find_indices rewt_lfs find_indices get_similarity_kernel find_indices find_indices get_similarity_kernel find_indices find_indices exp exp ones phi shape range exp calculate_normalizer phi zeros sum range exp Beta squeeze t range range double probability_s_given_y_l probability_l_y t probability exp view ones phi zeros double range tensordot append mode zeros transpose unique len matmul double tensor sum max model Variable 
_l2_normalize backward zero_grad normal_ kl_div_with_logit cpu range detach mean log_softmax softmax int list print extend sample keys intersection len review_text TextBlob isinstance blob blob blob astype values Parameter Adam register_parameter download_and_process_data concat sample drop_duplicates merge download makedirs dict datetime split astype map dict load_data DataFrame size astype map index dict load_data DataFrame dict map DataFrame load_data train_test_split drop list map maybe_download_files merge rename info process_books_data process_reviews_data values process_interactions_data sum round sum round recall_batch precision_batch view norm clamp list read_csv zip ConfigProto ERROR set_verbosity Module
### Refer to the [SPEAR Library](http://github.com/decile-team/spear) for a well-documented implementation of this paper. # Requirements This code has been developed with - python 3.6 - numpy 1.17.4 - torch 1.1.0 # Data Description The dataset directory contains data for the following 3 datasets: * IMDB
1,482
azadis/MC-GAN
['style transfer']
['Multi-Content GAN for Few-Shot Font Style Transfer']
options/train_options.py data/image_folder.py train_Stack.py data/data_loader.py train.py util/kernel_size.py util/image_pool.py util/png.py test_Stack.py models/base_model.py models/models.py models/StackGAN_model.py util/html.py data/base_data_loader.py options/base_options.py test.py util/util.py util/plot_loss.py test_video.py models/networks.py options/test_options.py util/visualizer.py models/cGAN_model.py BaseDataLoader Data normalize_stack PartialData CreateDataLoader FlatData StackDataLoader DataLoader PartialDataLoader BaseModel cGANModel create_model InputTransformation define_G_3d get_norm_layer GANLoss ResnetGenerator ResnetDecoder ResnetBlock define_D weights_init conv_norm_relu_module ResnetEncoder define_Dec define_G ResnetGenerator_3d_conv convTranspose_norm_relu_module define_Enc NLayerDiscriminator define_preNet print_network StackGANModel BaseOptions TestOptions TrainOptions HTML ImagePool conv2d convTranspose2d main parse_args moving_average plot_loss encode print_numpy varname diagnose_network VerticalFlip mkdirs HorizontalFlip mkdir info save_image tensor2im Visualizer size Compose initialize partial StackDataLoader stack DataLoader PartialDataLoader initialize model print cGANModel name StackGANModel hasattr fill_ print bias normal_ __name__ print BatchNorm2d partial InstanceNorm2d apply cuda ResnetGenerator_3d_conv get_norm_layer print ResnetGenerator UnetGenerator apply cuda get_norm_layer print UnetEncoder apply ResnetEncoder cuda get_norm_layer print ResnetDecoder apply UnetDecoder cuda get_norm_layer print apply NLayerDiscriminator cuda get_norm_layer InputTransformation print apply cuda print parameters floor add_argument ArgumentParser cumsum int moving_average strip readlines append float open subplots set_title plot print plot_loss logRoot avg savefig append parse_args array range len transpose numpy print parameters fromarray reshape squeeze astype save zeros range print join search print float64 flatten astype mkdir makedirs
# MC-GAN in PyTorch <img src="https://people.eecs.berkeley.edu/~sazadi/MCGAN/datasets/ft51_1_fake_B.gif" width="90%"/> This is the implementation of the [Multi-Content GAN for Few-Shot Font Style Transfer](https://arxiv.org/abs/1712.00516). The code was written by [Samaneh Azadi](https://github.com/azadis). If you use this code or our [collected font dataset](https://github.com/azadis/AdobeFontDropper#mc-gan-traintest) for your research, please cite: Multi-Content GAN for Few-Shot Font Style Transfer; [Samaneh Azadi](https://people.eecs.berkeley.edu/~sazadi/), [Matthew Fisher](https://research.adobe.com/person/matt-fisher/), [Vladimir Kim](http://vovakim.com/), [Zhaowen Wang](https://research.adobe.com/person/zhaowen-wang/), [Eli Shechtman](https://research.adobe.com/person/eli-shechtman/), [Trevor Darrell](https://people.eecs.berkeley.edu/~trevor/), in arXiv, 2017. ## Prerequisites: - Linux or macOS - Python 2.7 - CPU or NVIDIA GPU + CUDA CuDNN ## Getting Started
1,483
azarafrooz/FTNPL
['imitation learning']
['Follow the Neurally-Perturbed Leader for Adversarial Training']
gail-application/a2c_ppo_acktr/algo/ppo.py gail-application/a2c_ppo_acktr/algo/correlator.py gail-application/a2c_ppo_acktr/arguments.py matchingPennies-game-application/utils.py gail-application/a2c_ppo_acktr/storage.py gail-application/a2c_ppo_acktr/algo/cor_gail.py gail-application/evaluation.py matchingPennies-game-application/ftpl.py gail-application/a2c_ppo_acktr/algo/gail.py gail-application/a2c_ppo_acktr/envs.py gail-application/a2c_ppo_acktr/model.py matchingPennies-game-application/ftnpl.py gail-application/a2c_ppo_acktr/algo/__init__.py gym-added-environments/gym-synthetic2Dplane/gym_synthetic2Dplane/envs/synthetic2Dplane_env.py gail-application/save_synthetic_traj.py gan-application/ftnpl_gan.py matchingPennies-game-application/ftnpl_nonconvex.py gail-application/a2c_ppo_acktr/algo/kfac.py matchingPennies-game-application/ftpl_nonconvex.py gail-application/a2c_ppo_acktr/distributions.py gym-added-environments/gym-synthetic2Dplane/gym_synthetic2Dplane/envs/__init__.py gail-application/a2c_ppo_acktr/algo/regret_gail.py matchingPennies-game-application/ftrl_nonconvex.py gail-application/a2c_ppo_acktr/utils.py gail-application/save_coinrun_expert_traj.py matchingPennies-game-application/ftrl.py gail-application/main.py gail-application/visualize_training_dynamics.py gail-application/a2c_ppo_acktr/algo/a2c_acktr.py gail-application/save_expert_traj.py gym-added-environments/gym-synthetic2Dplane/setup.py gym-added-environments/gym-synthetic2Dplane/gym_synthetic2Dplane/__init__.py evaluate main generate_expert_traj generate_expert_traj generate_expert_traj get_args Bernoulli FixedCategorical FixedBernoulli Categorical DiagGaussian FixedNormal TransposeImage VecPyTorch make_env MaskGoal PytorchEpisodeRewardWrapper add_final_pytorch_wrappers TransposeObs CoinRunVecPyTorch VecNormalize VecPyTorchFrameStack make_vec_envs TimeLimitMask CNNBase MLPBase Policy NNBase RolloutStorage _flatten_helper queue_update get_vec_normalize AddBias update_linear_schedule cleanup_log_dir init get_render_func conv2d_size_out Flatten A2C_ACKTR NNBase CNNBase MLPBase weights_init_ Correlator CNNBase CorDiscriminator MLPBase NNBase NNBase CNNBase MLPBase Discriminator conv2d_size_out ExpertDataset Flatten _extract_patches compute_cov_g compute_cov_a KFACOptimizer SplitBias update_running_stat PPO NoRegretDiscriminator MLPBase NNBase Mediator Generator weights_init Discriminator Synthetic2DPlane Mediator Player weights_init_ Mediator Player weights_init_ Player Player Player Player queue_update format print close get_vec_normalize mean eval reset recurrent_hidden_state_size make_vec_envs zeros tensor step append len ppo_epoch queue_update update_linear_schedule recurrent_hidden_state_size max_grad_norm save seed _obfilt algo env_name manual_seed_all clip_param use_proper_time_limits act gail_experts_dir mean manual_seed join time check_point evaluate get_vec_normalize predict_reward use_gae median step num_levels num_processes DataLoader save_dir cor_gail NoRegretDiscriminator append expanduser to after_update num_steps insert mixed_update lower eval optimizer gail_epoch value_loss_coef continue_ppo_training ExpertDataset ob_rms len shape use_linear_lr_decay Discriminator range state_dict update get_args PPO cleanup_log_dir num_env_steps embed_size gamma num_mini_batch load log_dir print set_num_threads action_space reset gae_lambda insert_embedding copy_ device queue_size gail FloatTensor make_vec_envs Correlator no_regret_gail format compute_returns deque A2C_ACKTR int CorDiscriminator entropy_coef 
RolloutStorage Policy zeros makedirs imwrite warn COLOR_RGB2BGR recurrent_hidden_state_size device list squeeze tolist step shape dirname make_vec_envs append update format close eval item n load join items print dumps observation_space get_vec_normalize tqdm reset zeros numpy array cvtColor makedirs save make render stochastic_synthetic_policy parse_args recurrent_policy add_argument ArgumentParser ShmemVecEnv VecPyTorch add_final_pytorch_wrappers make_general_env CoinRunVecPyTorch setup_and_load VecNormalize DummyVecEnv VecPyTorchFrameStack GAME_TYPE PytorchEpisodeRewardWrapper hasattr hasattr isinstance param_groups float data weight_init bias_init makedirs append isinstance bias xavier_uniform_ weight constant_ Linear data view size contiguous unfold _extract_patches view ones size mean div_ cuda is_cuda view size contiguous mul_ sum data normal_ __name__ constant_ pop
# FTNPL PyTorch implementation of [Follow the Neurally-Perturbed Leader for Adversarial Training](https://arxiv.org/pdf/2002.06476.pdf) ![alt text](https://github.com/azarafrooz/FTNPL/blob/master/FTNPL.001.png) Each directory contains its own readme. The easiest way to understand the algorithm is to start with its application to the zero-sum Matching Pennies game, then move to the GAN application, and finally to GAIL.
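As a hedged, illustrative sketch of the Matching Pennies starting point mentioned above (not code from this repository), the classic Follow-the-Perturbed-Leader baseline can be written in plain NumPy; the paper's FTNPL variant replaces the random perturbation with a learned, neural one, and the payoff matrix, function names, and exponential noise below are my own assumptions.

```python
# Illustrative sketch only: classic Follow-the-Perturbed-Leader (FTPL) on
# Matching Pennies, the baseline that the neurally-perturbed variant builds on.
import numpy as np

# Row player's payoff for actions 0=Heads, 1=Tails (column player gets the negative).
PAYOFF = np.array([[1, -1],
                   [-1, 1]])

def ftpl_action(cum_payoff, eta=1.0, rng=np.random):
    """Play the action whose perturbed cumulative payoff is largest."""
    perturbed = cum_payoff + eta * rng.exponential(size=cum_payoff.shape)
    return int(np.argmax(perturbed))

def play(rounds=1000, seed=0):
    rng = np.random.default_rng(seed)
    cum_row = np.zeros(2)   # counterfactual payoff of each pure action, row player
    cum_col = np.zeros(2)   # same for the column player
    for _ in range(rounds):
        a = ftpl_action(cum_row, rng=rng)
        b = ftpl_action(cum_col, rng=rng)
        cum_row += PAYOFF[:, b]      # what each row action would have earned
        cum_col += -PAYOFF[a, :]     # what each column action would have earned
    return cum_row, cum_col

if __name__ == "__main__":
    print(play())
```

With suitably scaled perturbations this is a standard no-regret scheme, so in self-play the time-averaged behaviour of both players approaches the 50/50 mixed equilibrium of Matching Pennies.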
1,484
azhar0100/VASNet
['video summarization']
['Summarizing Videos with Attention']
main_transformer_cat.py vasnet_model.py sys_utils.py main_improved.py cpd_nonlin.py layer_norm.py knapsack.py cpd_auto.py config.py main_transformer.py vsum_tools.py main.py create_split.py main_transformer_ax.py HParameters estimate_vmax cpd_auto eval_score eval_cost centering calc_scatters cpd_nonlin create split_random write_json mkdir_if_missing test_knapsack_dp test_knapsack knapsack check_inputs knapsack_ortools knapsack_dp LayerNorm eval_split AONet parse_splits_filename weights_init lookup_weights_splits_file train eval_split AONet parse_splits_filename weights_init lookup_weights_splits_file train eval_split AONet parse_splits_filename weights_init lookup_weights_splits_file train eval_s eval_split AONet parse_splits_filename weights_init lookup_weights_splits_file train print_table list_files get_image_list run_command torch_summarize del_file ge_pkg_versions get_video_list print_pkg_versions MultiVASNet MultiVASNetWithPageRank SelfAttention CatMultiVASNet VASNet evaluate_summary evaluate_user_summaries generate_summary arange argmin zeros float log cpd_nonlin centering trace list range len float log len list T cumsum reshape astype zeros diag int inf calc_scatters print ones reshape min argmin copy shape zeros max range makedirs dirname mkdir_if_missing list choice append range enumerate save_dir dataset list train_percent append save_name ceil range format num_splits close write_json keys int join print File split_random len zeros max range print knapsack sort check_inputs append zeros range print knapsack_dp int tolist astype Init Solve array bias xavier_uniform_ weight __name__ constant_ splitext split glob format print lookup_weights_file initialize splits AONet load_datasets select_split print load_model print_table test_keys mean eval verbose load_split_file append range len parse_splits_filename output_dir load_split_file open str initialize splits AONet len range format close datasets splitext flush join get_dataset_by_name load_datasets select_split print system write split makedirs append update eval_split format splits print_table print HParameters append train enumerate literal_load_from_args print sort isdir remove Popen split run_command __version__ version platform isfile print items ge_pkg_versions items tuple _addindent __repr__ sum __name__ pop format print sum enumerate len int concatenate ones tolist astype delete mean floor int32 knapsack_ortools append zeros float range len argmax concatenate astype float32 mean shape append zeros sum max range len argmax astype float32 mean shape append sum max range
# Video Summarization with Attention A PyTorch implementation of our paper [Video Summarization with Attention](https://arxiv.org/abs/1812.01969) by Jiri Fajtl, Hajar Sadeghi Sokeh, Vasileios Argyriou, Dorothy Monekosso and Paolo Remagnino. This paper was presented at the [ACCV 2018](http://accv2018.net/program/) [AIU2018 workshop](http://www.sys.info.hiroshima-cu.ac.jp/aiu2018/). ## Installation The development and evaluation were done on the following configuration: ### System configuration - Platform : Linux-4.15.0-43-generic-x86_64-with-Ubuntu-16.04-xenial - Display driver : NVRM version: NVIDIA UNIX x86_64 Kernel Module 384.130 Wed Mar 21 03:37:26 PDT 2018 GCC version: gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.10)
1,485
azpoliak/eco
['word embeddings']
['Efficient, Compositional, Order-sensitive n-gram Embeddings']
skipEmbeds/word2vec.py evaluations/phraseSim/eval_phrase_sim.py evaluations/supervisedPPDB/supervised.ppdb.py skipEmbeds/eco.py embed_baseline setup evaluate load_vocab make_plot load_baseline_embeds embed_cocoon embed_cocoon2 cosine_similarity plot_embds load_embeds main project spearman_score get_best_alpha get_feat_hash tictoc tolerant_print crossval mean create_feature_matrix main_job undesirable fit_and_test train_process VocabItem __init_process sigmoid Vocab save init_net train UnigramTable train_process VocabItem __init_process sigmoid Vocab save init_net train UnigramTable range len print str split open str print readlines close load_baseline_embeds open range split Set text axis close add title scatter savefig append range len make_plot fit_transform TSNE list len iter zeros next keys range split len zeros len range split int embed_baseline str cosine_similarity print strip embed_cocoon open float split Set strip add open split int load_embeds str evaluate print load_vocab load_embeds OrderedDict set zeros embed len print time stderr print getattr hasattr items list sorted defaultdict print get_best_alpha spearman_score dim_divide print crossval mean append dim embed fit Array as_ctypes _type_ uniform zeros strip indices list seek code value word_count close mean zip float flush enumerate bytes write dot path sigmoid split zeros array len join pack print word len write close zip open open time list Value print map encode_huffman Vocab save init_net range Pool UnigramTable len max min randint
# Efficient, Compositional, Order-Sensitive n-gram Embeddings A suite for creating & evaluating phrasal embeddings via the `ECO` model based on [Efficient, Compositional, Order-Sensitive n-gram Embeddings](https://www.cs.jhu.edu/~apoliak1/papers/ECO--EACL-2017.pdf) (EACL 2017). ### Data: The Skip-Embeddings and English Wikipedia used to generate the skip-embeddings can be downloaded [here](https://zenodo.org/record/439387#.WOERYxIrKRs). ### Directories: 1. `evaluations`: data and scripts for different evaluation tasks to evaluate the embeddings. 1. `skipEmbeds`: the script used to generate the `ECO Skip-Embeddings` and vanilla `word2vec` embeddings. ⋅⋅1. We extended Debora Sujono's [python version of word2vec](https://github.com/deborausujono/word2vecpy). ⋅⋅2. We also have a local C version that is not tested. ⋅⋅3. The embeddings used in the paper and released were created using the python version.
1,486
azuki-miho/SceneEncoder
['semantic segmentation']
['SceneEncoder: Scene-Aware Semantic Segmentation of Point Clouds with A Learnable Scene Descriptor']
scannet/scannetv2_seg_dataset_rgb21c_pointid.py tf_ops/3d_interpolation/tf_interpolate_op_test.py tf_ops/3d_interpolation/visu_interpolation.py tf_ops/grouping/tf_grouping_op_test.py tf_ops/sampling/tf_sampling.py utils/pointconv_util.py scannet/scannet_dataset_rgb.py utils/provider_shapenet.py scannet/eulerangles.py PointConv.py scannet/pc_util.py scannet/util.py tf_ops/grouping/tf_grouping.py shapenet/part_dataset_all_normal.py evaluate_scannet.py models/scene_encoder_rsl.py utils/tf_util.py scannet/visualize/visualize_labels_on_mesh.py scannet/visualize/my_visualize_labels_on_mesh.py scannet/visualize/util.py train_shapenet_IoU.py visualize_part.py tf_ops/3d_interpolation/tf_interpolate.py scannet/visualize/util_3d.py scannet/scannet_dataset_sw_rgb.py train_scannet_IoU.py utils/provider.py models/scene_encoder_rsl_shapenet.py visualize_scene.py add_vote eval_one_epoch log_string evaluate nonlinear_transform weight_net_hidden feature_decoding_layer_depthwise weight_net feature_decoding_layer placeholder_inputs feature_encoding_layer get_batch_wdp get_learning_rate get_batch eval_scene_one_epoch log_string train_scene_one_epoch train get_bn_decay eval_whole_scene_one_epoch get_batch get_learning_rate eval_scene_one_epoch eval_one_epoch log_string train_scene_one_epoch train_one_epoch train get_bn_decay create_output get_batch get_learning_rate visualize_all eval_scene_one_epoch log_string create_color_palette get_bn_decay visualize_instance add_vote create_output evaluate eval_one_epoch log_string create_color_palette visualize_instance placeholder_scene_inputs knn1 get_scene_model get_mask get_scene_loss placeholder_scene_inputs knn1 get_loss get_scene_model get_mask placeholder_inputs get_scene_loss get_model quat2euler euler2quat mat2euler angle_axis2euler euler2angle_axis euler2mat read_ply point_cloud_to_volume_v2_batch point_cloud_to_image_batch point_cloud_three_views_demo point_cloud_label_to_surface_voxel_label draw_point_cloud write_ply_rgb point_cloud_label_to_surface_voxel_label_fast read_ply_rgba point_cloud_to_volume_batch volume_to_point_cloud point_cloud_to_volume_v2 write_ply getColor write_ply_label write_ply_color write_ply_label2 pyplot_draw_point_cloud point_cloud_to_volume point_cloud_three_views pyplot_draw_volume point_cloud_to_image gen_pickle gen_label_map remove_unano ScannetDataset ScannetDatasetWholeScene ScannetDatasetWholeScene_evaluation visualize_label_image read_scene_types_mapping print_error read_label_mapping represents_int create_color_palette visualize_instance_image visualize visualize_label_image read_scene_types_mapping print_error read_label_mapping represents_int create_color_palette visualize_instance_image load_ids export_ids export_instance_ids_for_eval Instance read_instance_prediction_file transform_points read_mesh_vertices get_instances visualize pc_normalize PartNormalDataset three_nn three_interpolate _three_interpolate_grad GroupPointTest fun query_ball_point group_point select_top_k _group_point_grad knn_point GroupPointTest farthest_point_sample gather_point _gather_point_grad prob_sample knn_kdtree kernel_density_estimation kernel_density_estimation_ball sampling grouping random_jitter_rgb rotate_point_cloud shuffle_points loadDataFile getDataFiles load_h5 rotate_point_cloud_z rotate_point_cloud_by_angle shuffle_data jitter_point_cloud rotate_perturbation_point_cloud shift_point_cloud random_scale_point_cloud loadDataFile_with_normal rotate_point_cloud load_h5_data_label_seg rotate_point_cloud_with_normal loadDataFile getDataFiles 
load_h5 rotate_point_cloud_by_angle shuffle_data jitter_point_cloud loadDataFile_with_seg random_point_dropout conv2d_transpose fully_connected conv3d max_pool3d batch_norm_template conv2d conv1d _variable_with_weight_decay batch_norm_for_conv1d dropout batch_norm_for_conv2d avg_pool3d max_pool2d batch_norm_template_unused _variable_on_cpu reduce_sum2d_conv avg_pool2d batch_norm_for_fc batch_norm_for_conv3d print write flush restore eval_one_epoch log_string ConfigProto Session range str add_vote join print min log_string now run zeros argmax range len int32 float32 placeholder exponential_decay maximum minimum exponential_decay zeros random range zeros range int str sum arange get_batch log_string shuffle now rotate_point_cloud_z add_summary run float argmax max range len arange rotate_point_cloud_z argmax max run str list sum range get_batch log_string mean float int now histogram add_summary zeros array len str sum list concatenate log_string now astype float32 mean histogram add_summary run zeros float argmax array range len int str sum arange get_batch log_string shuffle now jitter_point_cloud add_summary run float argmax range len jitter_point_cloud arange list sorted append sum seg_classes get_batch astype mean float keys int int32 add_summary array sorted append seg_classes astype keys min int32 reshape hstack savetxt str create_output print exit create_color_palette zeros range len visualize_instance mkdir join visualize shape int32 float32 placeholder value dropout reduce_mean conv1d feature_decoding_layer feature_encoding_layer transpose float32 cast tile expand_dims equal get_mask squeeze transpose square reduce_sum matmul top_k reduce_max where div add_n stop_gradient gather argmax multiply squeeze get_collection reduce_sum cast append range sigmoid_cross_entropy categorical_crossentropy softmax tile cond equal constant print reshape sigmoid reduce_mean int32 scalar value dropout conv1d feature_decoding_layer feature_encoding_layer print sparse_softmax_cross_entropy get_collection reduce_mean softmax add_n scalar append array cos sin eps asarray atan2 sqrt flat cos sin angle_axis2mat min array unique ceil zeros max range len ceil min max unique squeeze point_cloud_to_volume flatten append expand_dims range zeros float astype append vstack array range append point_cloud_to_volume_v2 expand_dims range tuple astype choice pad vstack append zeros float array range append expand_dims range point_cloud_to_image tuple astype choice pad vstack append zeros float array range data read array data read array write array describe write array describe describe min astype write array append max range append array range getColor describe astype write require array int exp abs transpose min mean sqrt argsort round argwhere zeros sum max range euler2mat concatenate draw_point_cloud fromarray uint8 read_ply save point_cloud_three_views set_xlabel add_subplot scatter set_ylabel figure set_zlabel pyplot_draw_point_cloud volume_to_point_cloud write astype close max range open where print zeros range index join read dump print astype float32 close open int32 append range array gen_label_map remove_unano len str write exit int dict represents_int dict splitlines zeros create_color_palette imwrite enumerate imwrite create_color_palette unique zeros enumerate print_error astype write create_color_palette int32 len transpose matmul concatenate splitlines array join mkdir dirname unique zeros join int format print_error float splitlines dirname abspath isabs split append Instance to_dict unique load_ids 
mean sqrt sum max value print reshape slice reduce_sum select_top_k tile query zeros KDTree range farthest_point_sample gather_point reshape concat gather_nd set_shape int32 tile range py_func arange shuffle arange shuffle len reshape cos pi dot shape uniform sin zeros array range reshape cos pi dot shape uniform sin zeros array range reshape cos dot shape sin zeros array range randn reshape dot shape zeros range array clip shape uniform shape clip randn shape uniform range shape uniform range File concatenate reshape cos pi dot shape uniform sin zeros array range random range File File multiply add_to_collection xavier_initializer _variable_on_cpu l2_loss truncated_normal_initializer
# SceneEncoder: Scene-Aware Semantic Segmentation of Point Clouds with A Learnable Scene Descriptor by Jiachen Xu*, Jingyu Gong*, Jie Zhou, Xin Tan, Yuan Xie and Lizhuang Ma. (*=equal contribution) <p align="center"> <img src="imgs/framework.png" width="80%"> </p> ## Introduction This project is based on our IJCAI2020 paper. You can find the [arXiv](https://arxiv.org/abs/2001.09087) version here. ``` @article{xu2020sceneencoder, title={SceneEncoder: Scene-Aware Semantic Segmentation of Point Clouds with A Learnable Scene Descriptor}, author={Xu, Jiachen and Gong, Jingyu and Zhou, Jie and Tan, Xin and Xie, Yuan and Ma, Lizhuang}, journal={arXiv preprint arXiv:2001.09087},
1,487
b05902062/TDConvED
['video captioning']
['Temporal Deformable Convolutional Encoder-Decoder Networks for Video Captioning']
src/data_loader.py src/build_vocab.py src/encoder.py src/decoder.py src/train.py src/acquire_images.py src/generate_caption.py sample_image download_and_sample_msrvtt download_video __download_and_sample build_word msr_vtt_dataset Shifted_conv sig_gate beam_state TDconvD TDconvE frame_feature_extraction video_feature_extraction ResTDconvE sig_gate get_images generate_caption train get_sentence get_BLEU join starmap list remove close file rmtree repeat mkdir output_dir zip listdir Pool join remove download_video sample_image download YouTube join print mkdir probe float next range run join list image_dir len file mkdir output_dir append min_count listdir keys split int Sigmoid sig load extend encoder eval load_state_dict device sample_image to get_sentence predict join transpose from_numpy zeros listdir enumerate batch_size clip_grad_norm_ zero_grad DataLoader save device open list image_dir Adam test_vocab load_state_dict encoder to CrossEntropyLoss range epoch ckp_dir train_vocab eval msr_vtt_dataset load join enumerate log_dir decoder criterion backward print reshape write ckp_path parameters step encoder append item
b05902062/TDConvED
1,488
b4shy/Medical
['semantic segmentation']
['A large annotated medical image dataset for the development and evaluation of segmentation algorithms']
Task02_Heart/eval.py Task02_Heart/utils.py Task02_Heart/mriHandler.py Task02_Heart/train.py Task02_Heart/model.py SegNetBasic MRIHandler show_prediction_and_label pixel_accuracy create_prediction_and_label expand_dims argmax squeeze sum logical_and range show clear draw add_subplot imshow waitforbuttonpress figure ion range masked_where
# Medical Atrium Segmentation based on MRI Images. (A large annotated medical image dataset for the development and evaluation of segmentation algorithms: https://arxiv.org/abs/1902.09063) ![](atrium.gif)
1,489
babajide07/Redundant-Feature-Pruning-Pytorch-Implementation
['network pruning']
['Building Efficient ConvNets using Redundant Feature Pruning']
utils.py main_vgg16.py VGG19X cluster_weights_agglo test torch_summarize train format_time init_params progress_bar get_mean_and_std items list print tuple _addindent __repr__ sum __name__ data criterion backward print progress_bar zero_grad step max net enumerate len data criterion print progress_bar eval mkdir save max net enumerate len time T concatenate cumsum len argsort array fcluster unique split normalize diff linkage fclusterdata print DataLoader div_ zeros range len normal constant isinstance kaiming_normal Conv2d bias modules BatchNorm2d weight Linear int time join format_time write append range flush len int
# Redundant-Feature-Pruning-Pytorch-Implementation Ayinde, Babajide O., Tamer Inanc, and Jacek M. Zurada. "Redundant feature pruning for accelerated inference in deep neural networks." Neural Networks (2019). Ayinde, B.O. and Zurada, J.M., 2018. Building Efficient ConvNets using Redundant Feature Pruning. arXiv preprint arXiv:1802.07653.
1,490
backtime92/CRAFT-Reimplementation
['scene text detection']
['Character Region Awareness for Text Detection']
metrics/eval_det_iou.py data/imgproc.py data/load_icdar.py utils/inference_boxes.py data/pointClockOrder.py eval.py loss/mseloss.py data/imgaug.py craft.py data/boxEnlarge.py gaussianMap/gaussian.py data/SynData.py basenet/vgg16_bn.py trainSynth.py data/dataset.py gaussianMap/imgproc.py watershed.py CRAFT double_conv main copyStateDict str2bool adjust_learning_rate watershed1 copyStateDict str2bool test_net crop_image_by_bbox watershed init_weights vgg16_bn enlargebox pointDistance getX lineBiasAndK sidePoint pointAngle SynthTextDataLoader random_crop crop_area random_select is_poly_outside_rect random_scale EastRandomCropData is_poly_in_rect split_regions padding_image region_wise_random_select normalizeMeanVariance cvt2HeatmapImg resize_aspect_ratio loadImage denormalizeMeanVariance load_icdar2015_gt load_icdar2013_gt load_icdar2015_gt distance antipodal_pairs mep craftDataset GaussianTransformer normalizeMeanVariance cvt2HeatmapImg resize_aspect_ratio loadImage denormalizeMeanVariance Maploss DetectionIoUEvaluator test_net join list items OrderedDict startswith test_net test_folder print low_text mag_ratio text_threshold combine_results load_icdar2013_gt link_threshold canvas_size evaluate_image zip append cuda loadImage poly print param_groups int norm float32 getPerspectiveTransform warpPerspective array threshold area roll max connectedComponentsWithStats ones argmin waitKey morphologyEx imshow append COLORMAP_JET minAreaRect range connectedComponents COLOR_RGB2GRAY subtract copy dilate cvtColor uint8 Polygon print applyColorMap reshape boxPoints THRESH_BINARY MORPH_OPEN DIST_L2 threshold polylines roll max watershed argmin morphologyEx waitKey MORPH_RECT imshow append normalize COLORMAP_JET minAreaRect WINDOW_NORMAL range connectedComponents COLOR_BGR2GRAY subtract NORM_MINMAX distanceTransform copy dilate cvtColor uint8 namedWindow getStructuringElement applyColorMap print reshape boxPoints min THRESH_BINARY erode array MORPH_OPEN time normalizeMeanVariance cvt2HeatmapImg resize_aspect_ratio Variable print copy shape unsqueeze permute numpy cuda data isinstance fill_ Conv2d xavier_uniform_ normal_ zero_ BatchNorm2d Linear pointAngle abs pointDistance min cos sin lineBiasAndK atan max pointAngle lineBiasAndK sidePoint array array min max resize zeros max resize min array int32 padding_image append randint max range len array array append range min max clip choice int list min choice append max len random_select min astype int32 split_regions zeros max range region_wise_random_select COLOR_GRAY2RGB imread array cvtColor astype float32 uint8 astype copy shape max zeros resize applyColorMap uint8 astype COLORMAP_JET join norm replace polylines reshape readlines mep append imread listdir array range split join replace readlines append imread listdir split append distance range len range compute_parallelogram antipodal_pairs len adjustResultCoordinates hstack getDetBoxes range len
# CRAFT-Reimplementation # Note: If you have any problems, please comment, or join our WeChat group; the QR code will be updated in issue #49. # Update: The refactoring work has started. Since all of it is done outside working hours, the refactoring and training cycle is relatively long. The new Gaussian-map generation method has already been validated experimentally; comparing against the figures in the authors' paper and drawing on earlier experiments and questions to the authors, it now follows the same approach as the authors'. The training part for the synthetic data is expected to be released around the 20th. Because the company's connection to GitHub is slow, the Gaussian-map generation part cannot be uploaded for now. # Update 2: Fully supervised training on the synthetic data is currently underway, and the weakly supervised part is being coded and tested in parallel. If training on the synthetic data reaches the expected performance, that training code is expected to be released on Friday. ## Reimplementation: Character Region Awareness for Text Detection Reimplementation based on PyTorch ## Character Region Awareness for Text Detection Youngmin Baek, Bado Lee, Dongyoon Han, Sangdoo Yun, Hwalsuk Lee (Submitted on 3 Apr 2019) The full paper is available at: https://arxiv.org/pdf/1904.01941.pdf ## Install Requirements:
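As a minimal, hedged sketch of the Gaussian region-score generation mentioned in the update above (plain NumPy/OpenCV; the function names, sizes, and sigma are illustrative assumptions, not this repository's `GaussianTransformer`): an isotropic 2D Gaussian is warped onto each character quadrilateral with a perspective transform and accumulated into the score map.

```python
# Minimal sketch of CRAFT-style region-score generation: warp a canonical
# isotropic 2D Gaussian onto a character quadrilateral. Names/sizes are assumptions.
import cv2
import numpy as np

def base_gaussian(size=512, sigma_ratio=0.25):
    """Square isotropic Gaussian peaking at 1.0 in the centre."""
    ax = np.arange(size, dtype=np.float32) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    sigma = size * sigma_ratio
    return np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))

def add_char_region(score_map, char_box, gaussian):
    """Warp the canonical Gaussian onto one character box (4x2 corners, TL,TR,BR,BL order)."""
    size = gaussian.shape[0]
    src = np.float32([[0, 0], [size, 0], [size, size], [0, size]])
    M = cv2.getPerspectiveTransform(src, char_box.astype(np.float32))
    h, w = score_map.shape
    warped = cv2.warpPerspective(gaussian, M, (w, h))
    np.maximum(score_map, warped, out=score_map)  # keep the strongest response per pixel
    return score_map

# usage: accumulate one Gaussian per character annotation
score = np.zeros((256, 256), dtype=np.float32)
g = base_gaussian()
score = add_char_region(score, np.array([[40, 40], [90, 45], [88, 110], [38, 105]]), g)
```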
1,491
bai-shang/CRNN_CTC_Tensorflow
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
tools/train_crnn_ctc.py data/create_synth90k_tfrecord.py crnn_model/model.py tools/inference_crnn_ctc.py tools/eval_crnn_ctc.py tools/create_crnn_ctc_tfrecord.py CRNNCTCNetwork _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _sparse_matrix_to_list _int_to_string _read_tfrecord main _eval_crnn_ctc main _sparse_matrix_to_list _int_to_string _inference_crnn_ctc _sparse_matrix_to_list _int_to_string _read_tfrecord _train_crnn_ctc main load char_map_json_file append open data_dir join makedirs load int char_map_json_file len shuffle shuffle_list _write_tfrecord validation_split_fraction open _convert_dataset load join list _int_to_string ones char_map_json_file dense_shape len indices append keys enumerate values open load list char_map_json_file keys open read TFRecordReader string_input_producer float32 set_shape cast int32 parse_single_example decode_jpeg ctc_beam_search_decoder batch_size model_dir Saver open _read_tfrecord data_dir placeholder latest_checkpoint char_map_json_file tf_record_iterator ConfigProto CRNNCTCNetwork batch load join float32 sparse_placeholder int32 _eval_crnn_ctc load ctc_beam_search_decoder latest_checkpoint char_map_json_file placeholder model_dir Saver int32 CRNNCTCNetwork open _inference_crnn_ctc ctc_beam_search_decoder localtime model_dir Saver exponential_decay open decay_rate str _read_tfrecord data_dir ctc_loss decay_steps get_collection merge_all placeholder strftime cast format char_map_json_file create_global_step ConfigProto CRNNCTCNetwork batch load join time learning_rate float32 sparse_placeholder edit_distance reduce_mean UPDATE_OPS int32 global_variables_initializer scalar makedirs _train_crnn_ctc
# crnn_ctc_ocr_tf This software implements the Convolutional Recurrent Neural Network (CRNN), a combination of a CNN, an RNN and a CTC loss, for image-based sequence recognition tasks such as scene text recognition and OCR. https://arxiv.org/abs/1507.05717 More details on CRNN and the CTC loss (in Chinese): https://zhuanlan.zhihu.com/p/43534801 ![](https://github.com/bai-shang/crnn_ctc_ocr_tf/blob/master/Arch.jpg?raw=true) ***The crnn+seq2seq+attention OCR code can be found at [bai-shang/crnn_seq2seq_ocr_pytorch](https://github.com/bai-shang/crnn_seq2seq_ocr_pytorch)*** # Dependencies The following dependencies should be installed: * Python3 * tensorflow==1.15.0
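As a hedged illustration of the CTC piece described above (shown with PyTorch's `nn.CTCLoss` rather than this repository's TensorFlow 1.x `tf.nn.ctc_loss`; the time steps, batch size, vocabulary size, and label length are assumptions, not values from the repo):

```python
# Illustrative only: how a CRNN-style per-timestep output feeds a CTC loss.
import torch
import torch.nn as nn

T, N, C = 50, 4, 37   # time steps (image width after the CNN), batch, classes (36 chars + blank)
log_probs = torch.randn(T, N, C, requires_grad=True).log_softmax(dim=2)  # stand-in for RNN outputs

targets = torch.randint(1, C, (N, 10), dtype=torch.long)   # label indices, 0 reserved for blank
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), 10, dtype=torch.long)

ctc = nn.CTCLoss(blank=0, zero_infinity=True)
loss = ctc(log_probs, targets, input_lengths, target_lengths)
loss.backward()   # in a real CRNN the gradient flows back through the RNN and CNN
print(loss.item())
```

CTC is what lets the network be trained end-to-end on unsegmented text images: the loss marginalises over all alignments between the per-column predictions and the target character sequence.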
1,492
bai-shang/OCR_TF_CRNN_CTC
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
tools/train_crnn_ctc.py data/create_synth90k_tfrecord.py crnn_model/model.py tools/inference_crnn_ctc.py tools/eval_crnn_ctc.py tools/create_crnn_ctc_tfrecord.py CRNNCTCNetwork _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _sparse_matrix_to_list _int_to_string _read_tfrecord main _eval_crnn_ctc main _sparse_matrix_to_list _int_to_string _inference_crnn_ctc _sparse_matrix_to_list _int_to_string _read_tfrecord _train_crnn_ctc main load char_map_json_file append open data_dir join makedirs load int char_map_json_file len shuffle shuffle_list _write_tfrecord validation_split_fraction open _convert_dataset load join list _int_to_string ones char_map_json_file dense_shape len indices append keys enumerate values open load list char_map_json_file keys open read TFRecordReader string_input_producer float32 set_shape cast int32 parse_single_example decode_jpeg ctc_beam_search_decoder batch_size model_dir Saver open _read_tfrecord data_dir placeholder latest_checkpoint char_map_json_file tf_record_iterator ConfigProto CRNNCTCNetwork batch load join float32 sparse_placeholder int32 _eval_crnn_ctc load ctc_beam_search_decoder latest_checkpoint char_map_json_file placeholder model_dir Saver int32 CRNNCTCNetwork open _inference_crnn_ctc ctc_beam_search_decoder localtime model_dir Saver exponential_decay open decay_rate str _read_tfrecord data_dir ctc_loss decay_steps get_collection merge_all placeholder strftime cast format char_map_json_file create_global_step ConfigProto CRNNCTCNetwork batch load join time learning_rate float32 sparse_placeholder edit_distance reduce_mean UPDATE_OPS int32 global_variables_initializer scalar makedirs _train_crnn_ctc
# crnn_ctc_ocr_tf This software implements the Convolutional Recurrent Neural Network (CRNN), a combination of a CNN, an RNN and a CTC loss, for image-based sequence recognition tasks such as scene text recognition and OCR. https://arxiv.org/abs/1507.05717 More details on CRNN and the CTC loss (in Chinese): https://zhuanlan.zhihu.com/p/43534801 ![](https://github.com/bai-shang/crnn_ctc_ocr_tf/blob/master/Arch.jpg?raw=true) ***The crnn+seq2seq+attention OCR code can be found at [bai-shang/crnn_seq2seq_ocr_pytorch](https://github.com/bai-shang/crnn_seq2seq_ocr_pytorch)*** # Dependencies The following dependencies should be installed: * Python3 * tensorflow==1.15.0
1,493
bai-shang/crnn_ctc_ocr.Tensorflow
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
tools/train_crnn_ctc.py data/create_synth90k_tfrecord.py crnn_model/model.py tools/inference_crnn_ctc.py tools/eval_crnn_ctc.py tools/create_crnn_ctc_tfrecord.py CRNNCTCNetwork _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _sparse_matrix_to_list _int_to_string _read_tfrecord main _eval_crnn_ctc main _sparse_matrix_to_list _int_to_string _inference_crnn_ctc _sparse_matrix_to_list _int_to_string _read_tfrecord _train_crnn_ctc main load char_map_json_file append open data_dir join makedirs load int char_map_json_file len shuffle shuffle_list _write_tfrecord validation_split_fraction open _convert_dataset load join list _int_to_string ones char_map_json_file dense_shape len indices append keys enumerate values open load list char_map_json_file keys open read TFRecordReader string_input_producer float32 set_shape cast int32 parse_single_example decode_jpeg ctc_beam_search_decoder batch_size model_dir Saver open _read_tfrecord data_dir placeholder latest_checkpoint char_map_json_file tf_record_iterator ConfigProto CRNNCTCNetwork batch load join float32 sparse_placeholder int32 _eval_crnn_ctc load ctc_beam_search_decoder latest_checkpoint char_map_json_file placeholder model_dir Saver int32 CRNNCTCNetwork open _inference_crnn_ctc ctc_beam_search_decoder localtime model_dir Saver exponential_decay open decay_rate str _read_tfrecord data_dir ctc_loss decay_steps get_collection merge_all placeholder strftime cast format char_map_json_file create_global_step ConfigProto CRNNCTCNetwork batch load join time learning_rate float32 sparse_placeholder edit_distance reduce_mean UPDATE_OPS int32 global_variables_initializer scalar makedirs _train_crnn_ctc
# crnn_ctc_ocr_tf This software implements the Convolutional Recurrent Neural Network (CRNN), a combination of a CNN, an RNN and a CTC loss, for image-based sequence recognition tasks such as scene text recognition and OCR. https://arxiv.org/abs/1507.05717 More details on CRNN and the CTC loss (in Chinese): https://zhuanlan.zhihu.com/p/43534801 ![](https://github.com/bai-shang/crnn_ctc_ocr_tf/blob/master/Arch.jpg?raw=true) ***The crnn+seq2seq+attention OCR code can be found at [bai-shang/crnn_seq2seq_ocr_pytorch](https://github.com/bai-shang/crnn_seq2seq_ocr_pytorch)*** # Dependencies The following dependencies should be installed: * Python3 * tensorflow==1.15.0
1,494
bai-shang/crnn_ctc_ocr_tf
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
tools/train_crnn_ctc.py data/create_synth90k_tfrecord.py crnn_model/model.py tools/inference_crnn_ctc.py tools/eval_crnn_ctc.py tools/create_crnn_ctc_tfrecord.py CRNNCTCNetwork _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _string_to_int _int64_feature _write_tfrecord _convert_dataset _bytes_feature main _sparse_matrix_to_list _int_to_string _read_tfrecord main _eval_crnn_ctc main _sparse_matrix_to_list _int_to_string _inference_crnn_ctc _sparse_matrix_to_list _int_to_string _read_tfrecord _train_crnn_ctc main load char_map_json_file append open data_dir join makedirs load int char_map_json_file len shuffle shuffle_list _write_tfrecord validation_split_fraction open _convert_dataset load join list _int_to_string ones char_map_json_file dense_shape len indices append keys enumerate values open load list char_map_json_file keys open read TFRecordReader string_input_producer float32 set_shape cast int32 parse_single_example decode_jpeg ctc_beam_search_decoder batch_size model_dir Saver open _read_tfrecord data_dir placeholder latest_checkpoint char_map_json_file tf_record_iterator ConfigProto CRNNCTCNetwork batch load join float32 sparse_placeholder int32 _eval_crnn_ctc load ctc_beam_search_decoder latest_checkpoint char_map_json_file placeholder model_dir Saver int32 CRNNCTCNetwork open _inference_crnn_ctc ctc_beam_search_decoder localtime model_dir Saver exponential_decay open decay_rate str _read_tfrecord data_dir ctc_loss decay_steps get_collection merge_all placeholder strftime cast format char_map_json_file create_global_step ConfigProto CRNNCTCNetwork batch load join time learning_rate float32 sparse_placeholder edit_distance reduce_mean UPDATE_OPS int32 global_variables_initializer scalar makedirs _train_crnn_ctc
# crnn_ctc_ocr_tf This software implements the Convolutional Recurrent Neural Network (CRNN), a combination of a CNN, an RNN and a CTC loss, for image-based sequence recognition tasks such as scene text recognition and OCR. https://arxiv.org/abs/1507.05717 More details on CRNN and the CTC loss (in Chinese): https://zhuanlan.zhihu.com/p/43534801 ![](https://github.com/bai-shang/crnn_ctc_ocr_tf/blob/master/Arch.jpg?raw=true) ***The crnn+seq2seq+attention OCR code can be found at [bai-shang/crnn_seq2seq_ocr_pytorch](https://github.com/bai-shang/crnn_seq2seq_ocr_pytorch)*** # Dependencies The following dependencies should be installed: * Python3 * tensorflow==1.15.0
1,495
baidu-research/NCRF
['whole slide images']
['Cancer Metastasis Detection With Neural Conditional Random Field']
wsi/bin/plot_W.py wsi/data/image_producer.py wsi/model/layers.py wsi/bin/probs_map.py wsi/bin/tissue_mask.py wsi/model/__init__.py wsi/model/resnet.py wsi/bin/Evaluation_FROC.py wsi/bin/nms.py wsi/bin/patch_gen.py wsi/bin/camelyon16xml2json.py wsi/bin/train.py wsi/data/wsi_producer.py wsi/data/annotation.py main run computeEvaluationMask readCSVContent compute_FP_TP_Probs computeFROC plotFROC computeITCList main run main process run main main run Formatter Annotation Polygon GridImageDataset GridWSIPatchDataset CRF ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 json_path xml_path camelyon16xml2json parse_args basicConfig run open_slide distance_transform_edt pow label zeros binary_fill_holes array read_region amax pow append range regionprops int readlines len append float range split str append zeros range amax len sorted asarray set append float sum len show plot suptitle xlabel ylabel figure load int format probs_map_path gaussian write close coord_path where shape pow level max range open int join str patch_size convert save wsi_path OpenSlide patch_path join basicConfig map copyfile enumerate mkdir split Pool coords_path append patch_path load subplot show reshape imshow ckpt_path rgb2hsv npy_path transpose convert logical_not array save wsi_path OpenSlide ResNet ResNet ResNet ResNet ResNet
![Baidu Logo](/doc/baidu-research-logo-small.png) - [NCRF](#ncrf) - [Prerequisites](#prerequisites) - [Data](#data) - [Whole slide images](#whole-slide-images) - [Annotations](#annotations) - [Patch images](#patch-images) - [Model](#model) - [Training](#training) - [Testing](#testing)
1,496
baidu/Senta
['sentiment analysis', 'multi label classification']
['SKEP: Sentiment Knowledge Enhanced Pre-training for Sentiment Analysis']
senta/data/field_reader/generate_label_field_reader.py pretraining.py senta/training/trainer_config.py senta/data/vocabulary.py senta/data/data_set_reader/ernie_onesentclassification_dataset_reader_ch.py senta/inference/inference.py senta/utils/__init__.py senta/data/data_set_reader/ernie_twosentclassification_dataset_reader_en.py senta/modules/text_embedder.py senta/data/data_set_reader/roberta_skep_pretrain_dataset_reader_en.py senta/data/field_reader/text_field_reader.py senta/data/tokenizer/tokenizer.py senta/data/field_reader/ernie_classification_field_reader.py senta/modules/token_embedding/custom_fluid_embedding.py senta/models/roberta_language_model.py senta/models/ernie_one_sent_classification_ch.py setup.py senta/data/tokenizer/__init__.py senta/data/tokenizer/tokenization_spm.py senta/utils/multi_process_eval.py senta/data/field_reader/base_field_reader.py senta/data/data_set_reader/roberta_pretrain_dataset_reader_en.py senta/modules/bert.py senta/modules/transformer_encoder.py senta/training/__init__.py senta/data/util_helper.py senta/common/register.py senta/data/data_set_reader/basic_dataset_reader.py senta/data/field_reader/scalar_array_field_reader.py senta/data/data_set_reader/ernie_pretrain_dataset_reader.py senta/training/custom_trainer.py senta/metrics/metrics.py senta/modules/elmo.py senta/models/roberta_skep_language_model.py senta/data/field_reader/ernie_seqlabel_label_field_reader.py senta/train.py senta/models/ernie_two_sent_classification_ch.py senta/data/field_reader/ernie_text_field_reader.py senta/__init__.py senta/data/tokenizer/tokenization_wp.py senta/metrics/sklearn_metrics.py senta/data/data_set.py senta/models/ernie_language_model.py senta/data/field_reader/custom_text_field_reader.py senta/data/field_reader/scalar_field_reader.py senta/models/__init__.py senta/models/ernie_two_sent_classification_en.py senta/data/data_set_reader/basic_dataset_reader_without_fields.py senta/data/data_set_reader/ernie_onesentclassification_dataset_reader_en.py senta/data/reader_config.py senta/data/data_set_reader/ernie_skep_pretrain_dataset_reader.py senta/models/ernie_crf_sequence_label.py senta/data/data_set_reader/base_dataset_reader.py senta/models/roberta_classification.py senta/training/base_trainer.py senta/models/ernie_classification.py senta/data/data_set_reader/roberta_twosentclassification_dataset_reader_en.py senta/models/roberta_one_sent_classification_en.py senta/modules/token_embedding/ernie_embedding.py senta/common/__init__.py senta/modules/ernie.py senta/data/data_set_reader/ernie_twosentclassification_dataset_reader_ch.py train.py senta/metrics/__init__.py senta/utils/init.py senta/common/rule.py senta/utils/args.py senta/data/__init__.py senta/data/tokenizer/custom_tokenizer.py senta/data/tokenizer/tokenization_utils.py infer.py lanch.py senta/data/field.py senta/utils/params.py senta/metrics/glue_eval.py senta/utils/log.py senta/models/ernie_one_sent_classification_en.py senta/models/ernie_skep_multil_task_language_model.py senta/modules/__init__.py senta/common/args.py senta/utils/util_helper.py senta/modules/token_embedding/base_token_embedding.py senta/models/ernie_multil_task_language_model.py senta/training/glue_task_trainer.py senta/models/model.py build_inference model_from_params dataset_reader_from_params main start_procs main Readers PretrainingTrainer build_trainer model_from_params dataset_reader_from_params untar get_http_url build_trainer dataset_reader_from_params download_data Senta model_from_params ArgumentGroup str2bool 
print_arguments import_new_module RegisterSet import_modules Register FleetMode EmbeddingType DataShape FieldLength InstanceName MaxTruncation DataSet Field ReaderConfig structure_fields_dict padding_batch_data convert_text_to_id get_field_length generate_pad_batch_data convert_texts_to_ids mask_batch_data pad_batch_data Vocabulary BaseDataSetReader BasicDataSetReader TaskBaseReader OneSentClassifyReaderCh OneSentClassifyReaderEn multi_sent_sorted prepare_batch_data shuffle_entity ErniePretrainDataReader mask pad_batch_data FluidDataType multi_sent_sorted prepare_batch_data shuffle_entity mask pad_batch_data FluidDataType ErnieSkepPretrainDataReader TwoSentClassifyReaderCh TwoSentClassifyReaderEn prepare_batch_data RobertaPretrainDataReaderEnglish mask pad_batch_data FluidDataType prepare_batch_data RobertaSkepPretrainDataReaderEnglish mask pad_batch_data FluidDataType RobertaTwoSentClassifyReaderEn BaseFieldReader CustomTextFieldReader ErnieClassificationFieldReader ErnieSeqlabelLabelFieldReader ErnieTextFieldReader GenerateLabelFieldReader ScalarArrayFieldReader ScalarFieldReader TextFieldReader CustomTokenizer validate_case_matches_checkpoint preprocess_text encode_ids encode_pieces clean_text FullTokenizerSpm convert_by_vocab bytes_to_unicode get_pairs convert_ids_to_tokens printable_text convert_tokens_to_ids load_vocab whitespace_tokenize BpeEncoder PreTrainedWordpieceTokenizer convert_to_unicode _is_whitespace PreTrainedBasicTokenizer _is_control _is_punctuation PreTrainedFullTokenizer FullTokenizer SentencePieceTokenizer BasicTokenizer GptBpeTokenizer WordsegTokenizer SentencepieceTokenizerErnie WordPieceTokenizer WSSPTokenizer CharTokenizer Tokenizer Inference simple_accuracy matthews_corrcoef pearson_and_spearman acc_and_f1 pgd_loss scale_l2 evaluate_mrr f1_score evaluate_map Ppl LmPpl F1 Acc Recall chunk_eval calculate_f1 Pn Auc Chunk Metrics Precision SKLearnClassify SequenceLabelEvaluate SimNetEvaluate ErnieClassification ErnieCrfSeqLabel ErnieLM ErnieMTLM ErnieOneSentClassificationCh ErnieOneSentClassificationEn ErnieSkepMTLM ErnieTwoSentClassificationCh ErnieTwoSentClassificationEn Model RobertaClassification RobertaLM RobertaOneSentClassificationEn RobertaSkepLM BertModel dropout elmo_encoder encoder_wrapper weight_layers lstmp_encoder ErnieConfig ErnieModel EmbeddingLayer pre_post_process_layer encoder_outer_share gelu multi_head_attention positionwise_feed_forward encoder encoder_layer encoder_inner_share BaseTokenEmbedding CustomFluidTokenEmbedding ErnieTokenEmbedding BaseTrainer create_master_params_grads optimization master_param_to_train_param append_cast_op apply_dynamic_loss_scaling linear_warmup_decay copy_to_master_param update_loss_scaling CustomTrainer GlueTaskTrainer TrainerConfig ArgumentGroup str2bool print_arguments build_common_arguments init_pretraining_params cast_fp32_to_fp16 init_checkpoint init_log MultiProcessEvalForMrc MultiProcessEval from_file parse_file _get_dict_from_environ_or_json_or_file evaluate_file replace_none array2tensor parse_data_config is_whitespace printable_text check_cuda save_infer_data_meta whitespace_tokenize is_punctuation is_control parse_version_code convert_to_unicode truncation_words strdecode truncate_seq_pair build DataSet get model_class __getitem__ Inference update split_log_path wait training_script_args current_node_ip copy close nproc_per_node print_config open info append node_id range Popen len print_config start_procs print_arguments load join deepcopy log_dir task_group_path ernie_config_path model_class 
tokenizer_class Readers init_log trainer_class train_and_eval vars config_class open int epoch update batch_size getenv info get_num_examples get trainer_class __getitem__ get disable_warnings extractall open str remove get_http_url int time untar move exists _get_abs_path vars iteritems sorted info join replace Register dirname abspath append listdir ALL_MODULES import_module import_module max_seq_len convert_tokens_to_ids tokenizer need_convert truncation_type truncation_words tokenize split max array expand_dims max array expand_dims max array expand_dims max array list convert_tokens_to_ids truncation_words pad_batch_data append tokenize range len error index choice append enumerate rand shuffle max enumerate len reshape rand len append randint max range enumerate deepcopy shuffle_entity zeros_like reshape astype mask pad_batch_data append range ensure_str match group _is_whitespace _is_control replace append join lower normalize ensure_text split ensure_binary replace isinstance printable_text extend clean_text append ensure_text EncodeAsPieces SampleEncodeAsPieces encode_pieces int strip OrderedDict open enumerate split text_type isinstance PY3 PY2 text_type isinstance PY3 PY2 append strip split category category startswith category ord append list range add set reduce_max reduce_sum sqrt pow abs append_backward gaussian_random var task_fc_fn _word_emb_name shape _build_model default_main_program scale_l2 range sqrt sum array sum array array simple_accuracy f1_score array array range len extract_bio_chunk tolist max range len fc dynamic_lstmp format slice squeeze append range lstmp_encoder len sums softmax create_parameter zip append split embedding dropout sequence_reverse concat encoder_wrapper append weight_layers range len pow tanh sqrt pi scaled_dot_product_attention fc concat __combine_heads __split_heads __compute_qkv fc gelu dropout dtype layer_norm cast dropout post_process_layer positionwise_feed_forward multi_head_attention append encoder_layer range append encoder_layer range range enc_fn extend pre_process_layer append_op get name Parameter fill_constant concat isfinite reduce_sum fill_constant create_global_var update_loss_scaling enumerate rstrip minimize name distributed_optimizer backward apply_dynamic_loss_scaling Adam noam_decay create_master_params_grads dict OrderedDict apply_gradients master_param_to_train_param set_gradient_clip exclude_from_weight_decay linear_warmup_decay create_global_var all_parameters parse_args ArgumentGroup add_arg ArgumentParser place uint16 view get_tensor name set info find_var array all_parameters load_vars format cast_fp32_to_fp16 info load_vars format cast_fp32_to_fp16 info setFormatter getLogger addHandler StreamHandler Formatter dirname TimedRotatingFileHandler setLevel makedirs get string_types read isinstance eval exists _get_dict_from_environ_or_json_or_file evaluate_file loads int list items chr isinstance print decode print exit join float insert append pop len category category startswith startswith category ord PaddleTensor dumps
[English](https://github.com/baidu/Senta/blob/master/README.en.md)|Simplified Chinese # <p align=center>`Senta`</p> ## Contents - [Introduction](#简介) - [SKEP](#SKEP) - [Code structure](#代码结构) - [One-click tool](#一键化工具) - [Detailed usage instructions](#详细使用说明) - [Demo dataset description](#Demo数据集说明) - [Reproducing the paper's results](#论文效果复现)
1,497
bairdzhang/smallhardface
['face detection']
['Robust Face Detection via Learning Small Faces on Hard Images']
caffe/python/caffe/classifier.py caffe/python/caffe/test/test_net.py caffe/scripts/split_caffe_proto.py caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py lib/datasets/imdb.py caffe/tools/extra/resize_and_crop_images.py lib/test.py caffe/examples/pycaffe/caffenet.py lib/layers/generate_anchors.py lib/layers/multi_layer_anchor_layer.py external/marcopede-face-eval-f2870fd85d48/plot_AP.py caffe/src/caffe/test/test_data/generate_sample_data.py lib/setup.py external/marcopede-face-eval-f2870fd85d48/VOCpr.py lib/wider_eval_tools/wider_eval.py lib/utils/ipdb.py caffe/python/caffe/coord_map.py lib/datasets/factory.py lib/layers/merge_prediction.py caffe/python/caffe/test/test_nccl.py caffe/python/detect.py lib/datasets/afw.py caffe/tools/extra/summarize.py caffe/python/caffe/detector.py caffe/python/draw_net.py lib/utils/test_utils.py caffe/python/train.py caffe/examples/finetune_flickr_style/assemble_data.py external/marcopede-face-eval-f2870fd85d48/util.py lib/train.py lib/datasets/pascalface.py train_test.py caffe/tools/extra/extract_seconds.py external/marcopede-face-eval-f2870fd85d48/database.py external/marcopede-face-eval-f2870fd85d48/loadData.py external/marcopede-face-eval-f2870fd85d48/getColorLabel.py lib/datasets/wider.py lib/nms/py_cpu_nms.py lib/utils/bbox_transform.py caffe/python/caffe/io.py caffe/python/caffe/test/test_layer_type_list.py caffe/python/caffe/__init__.py caffe/examples/pycaffe/layers/pyloss.py caffe/examples/web_demo/app.py lib/utils/timer.py caffe/python/classify.py lib/prototxt/manipulate.py caffe/python/caffe/draw.py caffe/examples/pycaffe/tools.py lib/nms/nms_wrapper.py caffe/python/caffe/test/test_draw.py caffe/scripts/download_model_binary.py caffe/python/caffe/test/test_python_layer_with_param_str.py caffe/tools/extra/parse_log.py lib/datasets/general.py lib/utils/blob.py caffe/python/caffe/net_spec.py caffe/examples/web_demo/exifutil.py caffe/python/caffe/test/test_python_layer.py caffe/python/caffe/test/test_solver.py lib/layers/proposal_layer.py caffe/scripts/cpp_lint.py lib/datasets/fddb.py caffe/scripts/copy_notebook.py lib/roi_data_layer/layer.py caffe/python/caffe/test/test_io.py lib/utils/get_config.py lib/roi_data_layer/minibatch.py lib/utils/tensorboard.py caffe/python/caffe/pycaffe.py external/marcopede-face-eval-f2870fd85d48/plot_AP_fddb.py caffe/python/caffe/test/test_coord_map.py caffe/python/caffe/test/test_net_spec.py parser download_image make_net max_pool caffenet conv_relu fc_relu CaffeSolver SimpleTransformer print_info check_params PascalMultilabelDataLayerSync load_pascal_annotation BatchLoader EuclideanLossLayer start_tornado start_from_terminal embed_image_html classify_upload index allowed_file ImagenetClassifier classify_url open_oriented_im apply_orientation main main main parse_args train solve time Classifier coord_map UndefinedMapException conv_params coord_map_from_to AxisMismatchException inverse crop_params compose crop Detector get_edge_label draw_net get_layer_label get_pydot_graph choose_color_by_layertype get_pooling_types_dict draw_net_to_file Transformer blobproto_to_array datum_to_array array_to_blobproto array_to_datum resize_image arraylist_to_blobprotovector_str blobprotovector_str_to_arraylist load_image oversample Layers Function Parameters Top NetSpec assign_proto param_name_dict to_proto _Net_blobs _Net_forward_all _Net_set_input_arrays _Net_backward _Net_params _Net_forward _Net_outputs _Net_forward_backward_all _Net_blob_loss_weights _Net_batch _Net_get_id_name _Net_inputs _Net_layer_dict TestCoordMap 
coord_net_spec getFilenames TestDraw TestBlobProtoToArray TestArrayToDatum TestLayerTypeList TestNCCL TestLevels TestStages simple_net_file TestNet TestAllInOne lenet TestNetSpec silent_net anon_lenet exception_net_file parameter_net_file SimpleLayer phase_net_file TestPythonLayer ParameterLayer PhaseLayer python_net_file ExceptionLayer SimpleParamLayer TestLayerWithParam python_param_net_file TestSolver ParseNolintSuppressions CheckVlogArguments CheckSectionSpacing FindNextMultiLineCommentEnd ReplaceAll CheckForFunctionLengths _SetOutputFormat _IsTestFilename _VerboseLevel CheckBraces RemoveMultiLineComments ResetNolintSuppressions CheckForNonStandardConstructs _SetVerboseLevel PrintUsage _NestingState CheckIncludeLine CheckAccess _CppLintState Search CheckInvalidIncrement RemoveMultiLineCommentsFromRange CleansedLines CheckForBadCharacters UpdateIncludeState FindPreviousMatchingAngleBracket CheckEmptyBlockBody FindNextMultiLineCommentStart Match _NamespaceInfo CheckMakePairUsesDeduction CheckCheck IsBlankLine _SetFilters ProcessLine _FunctionState CheckPosixThreading GetLineWidth GetHeaderGuardCPPVariable IsCppString _IncludeState CheckSpacing _ClassInfo CheckForCopyright IsErrorSuppressedByNolint ProcessFileData CheckForMultilineCommentsAndStrings CloseExpression _PreprocessorInfo _OutputFormat CheckForIncludeWhatYouUse CheckSpacingForFunctionCall FindEndOfExpressionInLine FindNextMatchingAngleBracket _SetCountingStyle ProcessFile _IncludeError CleanseRawStrings CheckAltTokens CheckForNewlineAtEOF ParseArguments CheckForNonConstReference PrintCategories _Filters main FilesBelongToSameModule CheckCStyleCast FileInfo _BlockInfo CheckForHeaderGuard CheckCaffeDataLayerSetUp ReverseCloseExpression CleanseComments _DropCommonSuffixes _ClassifyInclude CheckStyle CheckCaffeAlternatives FindStartOfExpressionInLine _ShouldPrintError CheckComment Error _GetTextInside CheckLanguage CheckCaffeRandom GetPreviousNonBlankLine reporthook parse_readme_frontmatter model_checks_out valid_dirname get_start_time extract_seconds extract_datetime_from_line get_log_created_year write_csv parse_log fix_initial_nan_learning_rate save_csv_files main parse_args parse_line_for_net_output ResizeCropImagesMapper PILResizeCrop OpenCVResizeCrop print_table printed_len summarize_net main read_net format_param getbboxVOC06 getbboxVOC07 getRecord VOC06Data AFLW VOC07Data imageData LFW AFW PASCALfaces getColorLabel loadDetections loadDetectionsCSV loadDetectionsShen loadDetectionsPascalFormat loadDetectionsRamanan loadDetectionsYann load_fddb load myimread drawModel inclusion drawDef drawDeform boxHOG myinclusion save overlapx box overlap evaluate_optim VOColdap drawPrfast VOCprRecord transf_dets cmpscore filterdet VOCprRecordOptim VOCap find_in_path customize_compiler_for_nvcc custom_build_ext locate_cuda bbox_vote get_testing_roidb inference_worker detect forward_net demo test_net train_net worker get_training_roidb SolverWrapper afw get_imdb fddb general imdb pascalface wider generate_anchors _scale_enum _whctrs _ratio_enum _shift_anchor _mkanchors MergePrediction _softmax _compute_targets _softmax _unmap MultiLayerAnchorLayer _filter_boxes ProposalLayer nms py_cpu_nms _add_dimension_reduction manipulate_test manipulate_train _apply_mult_lr _simple_relu_layer manipulate_solver _simple_conv_layer RoIDataLayer _project_im_rois _crop_blobs _get_image_blob get_minibatch _get_resize_range clip_boxes bbox_transform bbox_transform_inv _distortion _brightness prep_im_for_blob im_list_to_blob _contrast _hue _saturation cfg_dump 
cfg_from_list cfg_table cfg_from_file _sort_dict _merge_a_into_b cfg_print get_output_dir ForkedPdb grace TBExp Tensorboard TimeoutError Fake Timeout _compute_scaling_factor _get_image_blob Timer wider_eval _VOCap _norm_score _image_pr_info _boxoverlap _evaluation _dataset_pr_info _image_evaluation _read_pred add_argument ArgumentParser imread urlretrieve Convolution InnerProduct Data SoftmaxWithLoss LRN Accuracy max_pool InnerProduct conv_relu fc_relu Dropout join list getElementsByTagName get_data_from_tag csr_matrix dict zip zeros float range enumerate len print format get read info load_image classify_image StringIO join replace info secure_filename save filename open_oriented_im classify_image fromarray replace astype save resize StringIO items list listen HTTPServer format print start WSGIContainer update start_tornado add_option OptionParser debug port parse_args ImagenetClassifier forward run hasattr _getexif astype float32 tile apply_orientation open transpose model_def endswith ArgumentParser save mean_file channel_swap output_file dirname expanduser parse_args input_file predict Classifier set_mode_cpu load time isdir print add_argument set_mode_gpu pretrained_model gpu len DataFrame Detector format to_hdf detect_selective_search mean set_index to_csv detect_windows read_csv add_argument ArgumentParser read NetParameter output_image_file rankdir Merge TRAIN draw_net_to_file TEST Process str join init_log start append new_uid range log len before_backward layers display add_callback after_backward after_forward Timer append before_forward range len max_iter restore time set_solver_count set_solver_rank add_callback set_device set_multiprocess SGDSolver after_backward set_mode_gpu layer_wise_reduce step bcast NCCL len get params array get params array crop_params conv_params pop collect_bottoms add fn coord_map compose coord_map_from_to items list DESCRIPTOR batch_size str num_output get_pooling_types_dict add_edge get_edge_label list Dot exclude get_layer_label add_node values choose_color_by_layertype Edge Node bottom append type layer include top data array diff shape BlobProto extend flat extend BlobProtoVector ParseFromString BlobProtoVector extend tostring shape Datum flat data len astype float32 tile zoom tuple resize fill empty array concatenate shape tile empty array LayerParameter list NetParameter _to_proto extend Counter OrderedDict values iteritems hasattr isinstance extend add getattr setattr list OrderedDict _blobs _blob_names zip list _blob_loss_weights OrderedDict _blob_names zip _layer_names list layers OrderedDict zip OrderedDict list keys list keys iteritems layers index set outputs _forward len iteritems _backward layers inputs index set len iteritems asarray extend copy next _batch itervalues forward len iteritems asarray backward extend copy next _batch itervalues zip_longest zip forward len ascontiguousarray concatenate itervalues zeros next range len data Pooling pool Convolution NetSpec Deconvolution conv Input join walk dirname abspath NamedTemporaryFile str close write data Pooling pool1 conv2 pool2 ip1 relu1 SoftmaxWithLoss Convolution NetSpec DummyData ip2 ReLU InnerProduct label conv1 Pooling SoftmaxWithLoss Convolution DummyData ReLU InnerProduct data NetSpec DummyData Silence data2 error search add group clear compile compile compile SetOutputFormat SetCountingStyle SetFilters _Filters startswith IsErrorSuppressedByNolint _ShouldPrintError write IncrementErrorCount replace append Match group find startswith endswith range error 
FindNextMultiLineCommentEnd RemoveMultiLineCommentsFromRange FindNextMultiLineCommentStart rstrip find range len FindEndOfExpressionInLine range len FindStartOfExpressionInLine error min search I range len FileInfo RepositoryName sep sub ParseNolintSuppressions error startswith split GetHeaderGuardCPPVariable enumerate error enumerate error len error replace count error find error find error find error find error Search error match InnermostClass replace error escape Match Search error group Search Check error lines Count End group Begin NumLines Match raw_lines range Search error match group error Match group pop group append Search pop group append Search elided replace CheckSpacingForFunctionCall rfind error len group min CloseExpression NumLines sub find CheckComment Match range Search lines_without_raw_strings error group starting_linenum Match range Search error rfind len group ReverseCloseExpression Search Match CloseExpression find error Match CloseExpression find elided error strip group FindEndOfExpressionInLine find Match range CloseExpression len error Match finditer normalize isinstance PY2 GetLineWidth int InnermostClass CheckCheck error CheckAltTokens CheckBraces CheckSpacing CheckSectionSpacing CheckEmptyBlockBody CheckAccess GetHeaderGuardCPPVariable lines_without_raw_strings _DropCommonSuffixes RepositoryName match split CheckNextIncludeOrder CanonicalizeAlphabeticalOrder FileInfo error search group SetLastHeader match _ClassifyInclude Match pop end search set itervalues append M rstrip replace CheckCStyleCast error _GetTextInside CheckIncludeLine search group lstrip startswith Match ResetSection Search split rfind error group ReverseCloseExpression lstrip findall Match range Search ReplaceAll error Match Search endswith replace setdefault group search CleanseComments open list FilesBelongToSameModule error search copy sub NumLines FullName keys range error search CheckPosixThreading ParseNolintSuppressions CheckVlogArguments CheckMakePairUsesDeduction CheckCaffeDataLayerSetUp CheckLanguage CheckInvalidIncrement CheckCaffeRandom CheckForNonConstReference check_fn Update CheckForNonStandardConstructs CheckStyle raw_lines CheckForMultilineCommentsAndStrings CheckCaffeAlternatives CheckForFunctionLengths CleansedLines _NestingState CheckForBadCharacters CheckForNewlineAtEOF _IncludeState RemoveMultiLineComments CheckForCopyright ResetNolintSuppressions CheckForHeaderGuard NumLines CheckCompletedBlocks CheckForIncludeWhatYouUse range ProcessLine _FunctionState Error rstrip endswith len write ProcessFileData _SetVerboseLevel range split write exit join write exit _VerboseLevel int getopt _SetOutputFormat set _SetVerboseLevel PrintCategories _SetFilters _OutputFormat PrintUsage _SetCountingStyle split getreader ParseArguments ResetErrorCounts stderr exit verbose_level PrintErrorCounts StreamReaderWriter ProcessFile getwriter PY2 int time write flush load join index int rfind datetime split getctime year strip extract_datetime_from_line get_start_time total_seconds strip write get_log_created_year close extract_datetime_from_line open float get_log_created_year compile fix_initial_nan_learning_rate search group OrderedDict append float join basename write_csv print excel parse_log save_csv_files output_dir logfile_path NetParameter decay_mult format name lr_mult append print zip len get join str format convolution_param list setdefault param kernel_size map set top bottom append type module layer enumerate print_table filename summarize_net read_net int readlines open append 
enumerate find data int getElementsByTagName parse append getTotal getBBox getFacial min getPose zeros getImageName range print replace print loadDetectionsCSV loadDetectionsShen loadDetectionsPascalFormat loadDetectionsRamanan loadDetectionsYann sorted append float loadmat range append sorted loadmat enumerate sorted reader open append next enumerate sorted readlines open append float enumerate split show sorted myimread input draw imshow eval clf append box float loadmat range len genfromtxt basename plot print sort getColorLabel splitext find open imread ascontiguousarray imresize dump close open close open subplot drawHOG9 axis imshow boxHOG drawHOG range len subplot exp Ellipse set_xlim axis add_artist drawDef set_facecolor fill range clip set_ylim len ioff exp Ellipse add_artist set_facecolor fill range clip len abs max min abs max min min sqrt float abs max float abs max axis plot plot clf show imshow input sum range getImageByName2 eval box float enumerate ioff print sort draw figure overlapx zeros overlap len clf show imshow append input sum range getImageByName2 eval box float enumerate ioff print sort draw figure overlapx zeros overlap len concatenate sum max range len max array linspace show plot VOColdap cumsum xlabel grid set_xlim ylabel draw title VOCap set_ylim append min print enumerate append plot print drawPrfast transf_dets mean VOCprRecordOptim range pathsep pjoin exists split find_in_path items list pjoin pathsep dirname sep append _compile compiler_so data int format reshape pad warning MAX_RESOLUTION LEVEL tile ceil forward array append FLIP vstack forward_net nms basename shape tic _get_image_blob append imread range NMS_THRESH concatenate MAX_SIZE astype copy flush toc bbox_vote _compute_scaling_factor isinstance float32 len minimum max sum maximum delete row_stack tile zeros empty array str int format num_classes GPU_ID print set_device average_time add_scalar_value put set_mode_gpu Net MODEL detect range TEST image_path_at str imwrite GPU_ID set_device set_mode_gpu Net MODEL detect rectangle IMAGE imread range TEST join int format Process isinstance evaluate_detections name GPU_ID inference_worker NAME start Queue info ceil range exists append len prepare_roidb info data set_solver_count set_random_seed Timer str list GPU_ID set_device tic set_iter layer_wise_reduce iter after_backward format snapshot add_callback set_solver_rank SolverWrapper average_time set_multiprocess add_scalar_value timedelta float get_solver NCCL flush toc int items join RNG_SEED write accuracy set_mode_gpu step bcast len prepare_roidb filter_roidb roidb USE_FLIPPED append_flipped_images info Process str join init_log start info append new_uid range log len vstack _ratio_enum array hstack sqrt _whctrs round _mkanchors meshgrid T _whctrs _mkanchors transpose exp fill empty append maximum minimum ClearField ITERSIZE STEPVALUE BASELR append SolverParameter STEPSIZE WEIGHT_DECAY _add_dimension_reduction join NetParameter ENABLE _apply_mult_lr draw_net_to_file add_image _add_dimension_reduction join NetParameter ENABLE draw_net_to_file add_image append extend ClearField LayerParameter append LayerParameter param lr_mult min BACKBONE_MULT HEAD_MULT layer enumerate extend min ClearField int copy _crop_blobs pad SHORT_SIDE _get_image_blob MAX_RESOLUTION ceil randint empty array len im_list_to_blob debug prep_im_for_blob MAX_SIZE copy PIXEL_MEANS append median imread array range len int UPPER clip MAX_TRIES range KEEP_ONLY_CENTER_INSIDE uniform randint round LOWER exp arange EPS min max log 
transpose log dtype exp astype shape zeros minimum maximum transpose zeros max range len _distortion min ENABLE astype float32 shape sqrt resize float max _brightness _contrast COLOR_HSV2BGR COLOR_BGR2HSV randint _saturation round _hue clip cvtColor uniform DELTA clip clip uniform LOWER UPPER clip uniform LOWER UPPER uniform DELTA OrderedDict items sorted __sort join EXP_DIR str abspath ROOT_DIR makedirs print _sort_dict dumps dump _sort_dict split items list ndarray isinstance type array EasyDict load _merge_a_into_b literal_eval zip split max float min ORIG_SIZE astype float32 int format debug zeros loadmat range inf min copy max range len minimum inf maximum _boxoverlap zeros array range len zeros range zeros range hstack max range _image_pr_info reduce copy _dataset_pr_info vstack _image_evaluation zeros loadmat range _VOCap format _norm_score map _evaluation _read_pred Pool range len
# Robust Face Detection via Learning Small Faces on Hard Images

## Performance on WIDER FACE val, FDDB, Pascal Faces and AFW

[Link to the trained model](https://livejohnshopkins-my.sharepoint.com/:u:/g/personal/zzhang99_jh_edu/EV4rn_lxo45Lj5VYEwljqncBIhwbJns4zTJ_BHwjwPa05g?e=hR6QnT)

| WIDER FACE val easy | WIDER FACE val medium | WIDER FACE val hard | FDDB | Pascal Faces | AFW |
|:-------|:-------|:-------|:-------|:-------|:-------|
| 95.7 | 94.9 | 89.7 | 98.7 | 99.0 | 99.6 |

## Build source code

1. Clone this repository to `$ROOT`
1. Install python library `cd $ROOT; pip install -r requirements.txt`
1. Install graphviz `apt-get install -y graphviz`
1,498
baiwenjia/CIMAS
['semantic segmentation']
['Automatic 3D bi-ventricular segmentation of cardiac images by a shape-refined multi-task deep learning approach']
seg.py config.py cimas.py segment_data join int format read print system mkdir len
# CIMAS

Cardiac Image Multi-Atlas Segmentation pipeline (CIMAS)

## What is CIMAS

CIMAS is a pipeline for cardiac MR image segmentation using the multi-atlas segmentation method. It assumes that the target image (the image under segmentation) shares a similar anatomy with the atlas image (an image with a corresponding segmentation, or label map) and that the difference between target and atlas can be described by a spatial transformation. To segment the target image, image registration is performed to estimate this spatial transformation, and the atlas label map is then propagated onto the target image to form the segmentation. To improve robustness and accuracy, multiple atlases are used in this process. Each atlas acts as an expert and provides a segmentation result. The segmentation results from the multiple atlases are combined in a label fusion process (illustrated by the sketch at the end of this README).

## System requirements

A few binary files are pre-compiled on Ubuntu 16.04, so it is recommended to run the pipeline on an Ubuntu 16.04 machine. I have also tested that these binaries work on Ubuntu 14.04.

## Software and data dependencies

We use the [MIRTK](https://github.com/BioMedIA/MIRTK) library to perform image registration, and 20 3D MR images acquired at Hammersmith Hospital, Imperial College London, as the atlas set. The 20 atlases have been manually segmented by experienced radiologists. To initialise image registration, we use six landmarks and perform point-based registration, followed by image-based registration. The landmarks are defined as in the [placing the landmarks](http://wp.doc.ic.ac.uk/wbai/data) section and are manually selected using the [rview](https://www.doc.ic.ac.uk/~dr/software/download.html) software. Alternatively, the landmarks can be detected automatically using the [stratified decision forests](https://www.doc.ic.ac.uk/~oo2113/publication/TMI_stratified/) method developed by Ozan Oktay.

## Installation steps
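The label fusion step described under "What is CIMAS" can be sketched in a few lines of Python. This is an illustration only, not part of the CIMAS code: it assumes the atlas label maps have already been warped into the target image space (e.g. by MIRTK) and saved as NIfTI files, and the function name `fuse_labels`, the file names, and the use of `nibabel`/`numpy` are all hypothetical.

```python
# Illustrative majority-vote label fusion (not part of the CIMAS repository).
# Assumes the atlas label maps have already been propagated (warped) into the
# target image space, e.g. by MIRTK, and saved as NIfTI files.
import numpy as np
import nibabel as nib


def fuse_labels(warped_label_paths, output_path):
    """Combine propagated atlas label maps by per-voxel majority voting."""
    images = [nib.load(p) for p in warped_label_paths]
    # Shape: (X, Y, Z, n_atlases)
    data = np.stack([np.asarray(img.dataobj, dtype=np.int16) for img in images], axis=-1)

    # Count, for every voxel, how many atlases vote for each label value,
    # then keep the label with the most votes.
    labels = np.unique(data)
    votes = np.stack([(data == lab).sum(axis=-1) for lab in labels], axis=-1)
    fused = labels[np.argmax(votes, axis=-1)].astype(np.int16)

    nib.save(nib.Nifti1Image(fused, images[0].affine), output_path)


# Hypothetical usage with 20 warped atlas label maps:
# fuse_labels([f"warped_label_{i:02d}.nii.gz" for i in range(20)], "fused_seg.nii.gz")
```

Per-voxel majority voting is the simplest fusion rule; the actual pipeline may use a more sophisticated fusion scheme.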
1,499