repo stringlengths 8-116 | tasks stringlengths 8-117 | titles stringlengths 17-302 | dependencies stringlengths 5-372k | readme stringlengths 5-4.26k | __index_level_0__ int64 0-4.36k |
---|---|---|---|---|---|
VegB/Text_Infilling | ['text infilling', 'text generation'] | ['Text Infilling'] | texar/losses/mle_losses.py texar/modules/encoders/rnn_encoders.py texar/modules/decoders/rnn_decoders.py texar/modules/qnets/__init__.py texar/utils/shapes.py texar/modules/embedders/embedders.py texar/utils/__init__.py texar/data/__init__.py texar/evals/bleu.py texar/agents/seq_pg_agent.py texar/evals/__init__.py texar/modules/decoders/beam_search_decode.py texar/utils/average_recorder.py texar/data/data/multi_aligned_data.py texar/modules/decoders/rnn_decoder_base.py texar/models/__init__.py text_infilling/self_attn_hyperparams.py texar/models/seq2seq/seq2seq_base.py texar/modules/decoders/beam_search_decode_test.py text_infilling/seq2seq.py bin/make_vocab.py texar/data/data/scalar_data_test.py texar/modules/connectors/connectors.py bin/average_checkpoints.py texar/modules/embedders/position_embedders.py texar/data/data/dataset_utils.py texar/models/seq2seq/basic_seq2seq.py texar/losses/mle_losses_test.py texar/modules/networks/network_base.py texar/modules/decoders/template_transformer_decoder.py texar/core/optimization.py texar/custom/__init__.py texar/agents/agent_test.py text_infilling/bleu_tool.py texar/run/executor.py text_infilling/config.py texar/losses/pg_losses.py texar/core/layers_test.py texar/losses/rl_losses.py setup.py texar/agents/agent_utils_test.py texar/modules/qnets/qnets.py texar/utils/variables.py texar/modules/encoders/encoder_base.py texar/__init__.py texar/modules/memory/__init__.py texar/agents/seq_pg_agent_test.py texar/run/__init__.py texar/core/optimization_test.py texar/data/data/paired_text_data_test.py texar/data/embedding.py texar/utils/beam_search.py texar/data/data/__init__.py texar/modules/classifiers/__init__.py texar/utils/exceptions.py docs/conf.py texar/modules/decoders/rnn_decoder_helpers.py texar/modules/decoders/rnn_decoders_test.py texar/modules/policies/policy_nets.py texar/modules/embedders/embedders_test.py texar/agents/ac_agent.py texar/context.py texar/core/replay_memories.py texar/data/data/mono_text_data.py texar/losses/dqn_losses.py texar/modules/connectors/__init__.py texar/data/embedding_test.py texar/agents/pg_agent.py texar/losses/adv_losses.py texar/data/vocabulary.py texar/evals/bleu_test.py texar/modules/__init__.py texar/modules/classifiers/rnn_classifiers.py texar/modules/connectors/connector_base.py texar/modules/encoders/conv_encoders.py texar/core/layers.py texar/agents/seq_agent_base.py texar/modules/classifiers/conv1d_discriminator.py bin/train.py texar/modules/networks/conv_networks_test.py texar/data/data/paired_text_data.py texar/modules/encoders/hierarchical_encoders.py texar/modules/classifiers/classifier_base.py texar/utils/average_recorder_test.py texar/utils/utils.py texar/evals/bleu_moses.py texar/modules/classifiers/conv_classifiers.py texar/modules/networks/networks.py texar/data/data/data_iterators.py texar/utils/utils_test.py texar/modules/memory/memory_network.py texar/data/data/text_data_base.py text_infilling/gan.py texar/modules/classifiers/conv_classifiers_test.py texar/data/data_utils.py texar/utils/transformer_utils.py texar/modules/networks/networks_test.py texar/data/vocabulary_test.py texar/hyperparams_test.py texar/context_test.py texar/core/attentions.py texar/agents/pg_agent.bak.py texar/modules/classifiers/rnn_classifiers_test.py texar/agents/__init__.py texar/losses/__init__.py texar/losses/losses_utils.py texar/losses/rewards.py text_infilling/seq2seq_hyperparams.py texar/modules/policies/policy_nets_test.py 
texar/module_base.py texar/hyperparams.py texar/modules/connectors/connectors_test.py texar/evals/metrics.py texar/modules/encoders/conv_encoders_test.py texar/modules/networks/conv_networks.py texar/data/data/multi_aligned_data_test.py texar/modules/policies/__init__.py text_infilling/self_attn.py texar/data/data/data_iterators_test.py texar/modules/networks/__init__.py texar/data/data/scalar_data.py texar/modules/embedders/__init__.py texar/modules/encoders/__init__.py texar/models/seq2seq/__init__.py texar/data/data_decoders.py texar/modules/encoders/transformer_encoders.py texar/modules/embedders/embedder_utils.py texar/utils/shapes_test.py texar/modules/embedders/embedder_base.py text_infilling/gan_hyperparams.py texar/utils/mode_test.py texar/agents/agent_base.py texar/core/distributions.py texar/losses/rewards_test.py texar/data/data/mono_text_data_test.py texar/utils/utils_io.py texar/data/data/data_base.py texar/losses/adv_losses_test.py bin/utils/make_vocab.py bin/utils/average_checkpoints.py texar/modules/encoders/rnn_encoders_test.py texar/run/executor_test.py texar/agents/agent_gym_utils.py texar/core/explorations.py texar/data/data_utils_test.py texar/utils/dtypes.py texar/agents/dqn_agent.py texar/models/model_base.py texar/agents/episodic_agent_base.py texar/core/__init__.py texar/utils/transformer_utils_test.py texar/modules/embedders/embedder_utils_test.py text_infilling/data_utils.py texar/losses/entropy.py texar/utils/mode.py texar/agents/agent_base.bak.py texar/agents/agent_utils.py texar/modules/decoders/transformer_decoders.py texar/modules/encoders/hierarchical_encoders_test.py texar/modules/decoders/__init__.py texar/data/data/dataset_utils_test.py main main main _get_run_config _process_config main main global_mode_train global_mode_predict valid_modes global_mode_eval global_mode ContextTest HParams _type_name HParamsTest ModuleBase ActorCriticAgent AgentBase AgentBase get_gym_env_config convert_gym_space Space EnvConfig SpaceTest DQNAgent EpisodicAgentBase PGAgent PGAgent SeqAgentBase SeqPGAgent SeqPGAgentTest _ones_matrix_band_part _combine_heads attention_bias_ignore_padding _split_heads layer_normalize multihead_attention attention_bias_local attention_bias_lower_triangle sample_gaussian EpsilonLinearDecayExploration ExplorationBase MergeLayer _ReducePooling1D default_flatten_kwargs multihead_attention get_rnn_cell_trainable_variables default_conv3d_transpose_kwargs default_max_pooling1d_kwargs default_dense_kwargs default_separable_conv2d_kwargs default_average_pooling1d_kwargs default_average_pooling3d_kwargs default_conv2d_transpose_kwargs default_conv3d_kwargs get_regularizer default_batch_normalization_kwargs MaxReducePooling1D default_conv1d_kwargs combine_heads SequentialLayer default_max_pooling2d_kwargs default_rnn_cell_hparams ones_matrix_band_part AverageReducePooling1D get_constraint_fn get_activation_fn get_layer default_average_pooling2d_kwargs get_initializer split_heads _compute_concat_output_shape default_dropout_kwargs default_max_pooling3d_kwargs get_pooling_layer_hparams layer_normalize _common_default_conv_dense_kwargs default_regularizer_hparams get_rnn_cell default_conv2d_kwargs GetLayerTest GetActivationFnTest MergeLayerTest ReducePoolingLayerTest SequentialLayerTest GetRNNCellTest get_gradient_clip_fn get_train_op get_learning_rate_decay_fn get_optimizer_fn default_optimization_hparams OptimizationTest DequeReplayMemory ReplayMemoryBase ScalarDataDecoder VarUttTextDataDecoder _append_token TextDataDecoder _download 
_download_from_google_drive create_dir_if_needed _extract_google_drive_file_id read_words count_file_lines make_vocab get_files maybe_download CountFileLinesTest load_word2vec Embedding load_glove EmbeddingTest _make_defaultdict Vocab SpecialTokens VocabularyTest maybe_tuple name_prefix_fn _make_length_filter_fn random_shard_dataset make_combined_transformation _make_combined_filter_fn _DataSpec make_chained_transformation _connect_name make_partial _make_smaller_batch_filter_fn TransformationTest DataBase FeedableDataIterator TrainTestDataIterator TrainTestFeedableDataIterator DataIterator DataIteratorBase DataIteratorTest _LengthFilterMode MonoTextData _default_mono_text_dataset_hparams VarUttMonoTextDataTest MonoTextDataTest _default_dataset_hparams MultiAlignedData _DataTypes _is_text_data _is_scalar_data MultiAlignedDataTest _default_paired_text_dataset_hparams PairedTextData PairedTextDataTest ScalarData _default_scalar_dataset_hparams ScalarDataTest TextDataBase _maybe_str_to_list _get_ngrams corpus_bleu _lowercase sentence_bleu corpus_bleu_moses sentence_bleu_moses _maybe_list_to_str _parse_multi_bleu_ret BLEUTest accuracy binary_clas_accuracy binary_adversarial_losses AdvLossesTest l2_loss _get_entropy sequence_entropy_with_logits entropy_with_logits reduce_dimensions mask_and_reduce reduce_batch_time binary_sigmoid_cross_entropy_with_clas sequence_softmax_cross_entropy sequence_sigmoid_cross_entropy binary_sigmoid_cross_entropy sequence_sparse_softmax_cross_entropy MLELossesTest pg_loss_with_logits pg_loss_with_log_probs discount_reward _discount_reward_tensor_1d _discount_reward_py_1d _discount_reward_tensor_2d _discount_reward_py_2d RewardTest reinforce_loss reinforce_loss_with_MCtree ModelBase BasicSeq2seq Seq2seqBase ClassifierBase CNN Conv1DClassifier Conv1DClassifierTest UnidirectionalRNNClassifier UnidirectionalRNNClassifierTest _get_tensor_depth _mlp_transform StochasticConnector ReparameterizedStochasticConnector _assert_same_size MLPTransformConnector ForwardConnector ConstantConnector TestConnectors ConnectorBase _get_initial_state beam_search_decode BeamSearchDecodeTest BasicRNNDecoderOutput AttentionRNNDecoderOutput AttentionRNNDecoder BasicPositionalRNNDecoder BasicRNNDecoder BasicRNNDecoderTest AttentionRNNDecoderTest RNNDecoderBase compute_output_shape default_helper_infer_hparams _get_training_helper GumbelSoftmaxEmbeddingHelper default_helper_train_hparams SoftmaxEmbeddingHelper get_helper TemplateTransformerDecoder TransformerDecoderOutput TransformerDecoder WordEmbedder EmbedderTest EmbedderBase default_embedding_hparams soft_embedding_lookup get_embedding GetEmbeddingTest PositionEmbedder SinusoidsSegmentalPositionEmbedder SinusoidsPositionEmbedder Conv1DEncoder Conv1DEncoderTest EncoderBase HierarchicalRNNEncoder HierarchicalRNNEncoderTest _default_output_layer_hparams _build_dense_output_layer BidirectionalRNNEncoder _forward_single_output_layer _apply_rnn_encoder_output_layer UnidirectionalRNNEncoder _forward_output_layers RNNEncoderBase _apply_dropout UnidirectionalRNNEncoderTest BidirectionalRNNEncoderTest TransformerEncoder MemNetSingleLayer default_embedder_fn MemNetBase MemNetRNNLike _to_list Conv1DNetwork Conv1DNetworkTest FeedForwardNetwork FeedForwardNetworkTest _build_layers FeedForwardNetworkBase CategoricalPolicyNet PolicyNetBase CategoricalPolicyNetTest CategoricalQNet QNetBase Executor ExecutorTest AverageRecorder _SingleAverageRecorder AverageRecorderTest compute_batch_indices compute_topk_scores_and_seq log_prob_from_logits _unmerge_beam_dim 
beam_search get_state_shape_invariants _expand_to_beam_size _merge_beam_dim is_callable _maybe_list_to_array is_str get_tf_dtype is_placeholder maybe_hparams_to_dict compat_as_text TexarError is_train_mode_py is_train_mode is_predict_mode is_eval_mode is_predict_mode_py switch_dropout maybe_global_mode is_eval_mode_py UtilsTest mask_sequences flatten _mask_sequences_py _mask_sequences_tensor get_rank get_batch_size transpose_batch_time shape_list ShapesTest smoothing_cross_entropy parse_segment generate_equal_length_mask PadRemover _pad_array_list update_template_pack embedding_to_padding _batching_scheme fill_template _split_template generate_prediction_segment_ids _merge_segments prepare_template _bucket_boundaries _get_start_end_pos generate_dynamic_mask generate_prediction_offsets _prepare_squeezed_template _parse_template test_split_template test_merge_segments test_fill_template test_generate_random_mask load_hyperparams test_prepare_template test_generate_equal_length_mask Hyperparams test_fill_template_with_tensor dict_fetch straight_through strip_eos uniquify_str get_instance_with_redundant_kwargs get_instance_kwargs strip_special_tokens strip_token ceildiv get_function get_default_arg_values check_or_get_class get_args get_class map_ids_to_strs get_instance check_or_get_instance_with_redundant_kwargs call_function_with_redundant_kwargs str_join flatten_dict _expand_name dict_patch dict_pop default_str dict_lookup strip_bos check_or_get_instance load_config_single _load_config_yaml _load_config_python write_paired_text load_config UtilsTest add_variable get_unique_named_variable_scope collect_trainable_variables UnicodeRegex _get_ngrams bleu_wrapper bleu_tokenize compute_bleu transform_input_with_is_missing_token prepare_data _main load_hyperparams Hyperparams _main load_hyperparams Hyperparams _main load_hyperparams Hyperparams all_model_checkpoint_paths Saver set_verbosity ArgumentParser output_dir get_variable iteritems global_variables list_variables append parse_args info INFO int join load_checkpoint add_argument zeros len get_files files make_vocab load get dump config_paths dict_patch model_hparams dir mkdtemp data_hparams_eval getattr info data_hparams_train load_config ConfigProto GPUOptions RunConfig Executor train_and_evaluate check_or_get_instance_with_redundant_kwargs _process_config _get_run_config get_collection_ref placeholder_with_default append global_mode global_mode global_mode Discrete Box isinstance _ones_matrix_band_part reshape transpose T constant all ones reshape tri float32 matrix_band_part int64 cast shape exp multiply random_normal MultiRNNCell todict get_instance default_rnn_cell_hparams HighwayWrapper is_str switch_dropout DropoutWrapper append HParams ResidualWrapper range isinstance todict get_instance is_str default_regularizer_hparams HParams type get_instance todict isinstance is_str todict isinstance get_function get_function get items get_class isinstance endswith get_instance get_constraint_fn get_activation_fn get_regularizer get_initializer HParams get pop todict isinstance copy update _common_default_conv_dense_kwargs update _common_default_conv_dense_kwargs reshape transpose T constant all ones reshape tri float32 matrix_band_part int64 cast HParams Optimizer check_or_get_class isinstance to_int32 HParams todict get_function todict isinstance get_function HParams args get get_default_arg_values get_gradient_clip_fn optimize_loss __init__ get_learning_rate_decay_fn get_optimizer_fn HParams default_optimization_hparams MakeDirs join 
_download _download_from_google_drive create_dir_if_needed endswith extractall _extract_google_drive_file_id is_zipfile is_tarfile info append enumerate join format urlretrieve st_size print stat get join format print _extract_google_drive_file_id _get_confirm_token Session Glob isinstance items list sorted Counter dict zip range len sum defaultdict zip tuple ceildiv linspace update _default_mono_text_dataset_hparams _is_scalar_data _default_scalar_dataset_hparams update _default_mono_text_dataset_hparams tuple range Counter len is_str _maybe_str_to_list _get_ngrams exp Counter _lowercase compat_as_text zip float sum range len isinstance search group float32 join mkdtemp realpath rmtree compat_as_text dirname abspath max range dtype cast to_float ones_like zeros_like size accuracy reduce_mean discriminator_fn isinstance sigmoid_cross_entropy_with_logits reduce_sum softmax log reduce_sum append _get_entropy reduce_dimensions get_rank mask_and_reduce _get_entropy get_rank reduce_batch_time reduce_sum mask_sequences reduce_mean _transpose_batch_time to_float reduce_mean reduce_sum reduce_mean squeeze reduce_sum clas_fn isinstance stop_gradient sparse_softmax_cross_entropy_with_logits stop_gradient mask_and_reduce reduce_mean reduce_sum _discount_reward_tensor_1d std _discount_reward_py_1d ndim mean _discount_reward_tensor_2d sqrt _discount_reward_py_2d is_tensor array moments asarray arange ones mask_sequences array tile max sequence_mask ones concat reduce_max mask_sequences cumprod expand_dims mask_sequences range copy cumsum transpose scan mask_sequences reverse constant reshape sample_fn local_reward_fn shape global_reward_fn _mask_sequences log flatten assert_same_structure zip as_list isinstance TensorShape concat fully_connected reshape flatten split pack_sequence_as sum prod enumerate len tile_batch zero_state clone isinstance convert_to_tensor isinstance _get_initial_state size float32 output_layer _get_beam_search_cell RNNCell variable_scope TensorShape with_rank_at_least update TFTrainingHelper update todict isinstance num_layers activation _to_list get_layer final_layer_activation append layer_size range reshape concat output_layer array prod variational_dropout is_train_mode isinstance _forward_single_output_layer dropout_layer_ids mask_sequences _to_list _apply_dropout enumerate flatten pack_sequence_as partial zip word_embedder embedding WordEmbedder _layer_names uniquify_str name _layers append pop shape_list shape_list expand_dims ndims as_list range len reshape range compute_batch_indices stack top_k map_structure gather constant bool reduce_any ones while_loop where shape_list set_shape int32 tile map_structure zeros expand_dims _expand_to_beam_size callable isinstance isinstance _recur_convert flatten as_list asarray ndim shape is_tensor len is_tensor dtype sequence_mask to_int32 _transpose_batch_time expand_dims range asarray arange transpose ndim tile expand_dims array range reshape concat convert_to_tensor as_list shape append enumerate abs reduce_sum append int max max min _bucket_boundaries append pad amax enumerate pop tolist extend zip append _pad_array_list len reshape py_func dtype zeros_like Variable transpose append _prepare_squeezed_template range py_func Variable _prepare_squeezed_template dynamic_partition _fill_mask fill cast parse_segment Variable ones_like present_rate reshape parse_segment where shape blank_num generate_dynamic_mask cast generate_prediction_segment_ids fill generate_prediction_offsets equal append enumerate append tolist zip _parse range 
extend len _merge_segments tolist _transpose _split_template zip append ones_like _fill_segment zeros_like Variable where _get_start_end_pos parse_segment shape fill _prepare_squeezed_template equal int max_seq_length add_argument Hyperparams ArgumentParser parse_args log Variable generate_equal_length_mask Variable prepare_template load_hyperparams fill_template array Variable prepare_template load_hyperparams Variable generate_random_mask get_class is_str join locate get_instance get_class set is_str keys args get_instance_with_redundant_kwargs args items get_class set locate join is_callable args items set getargspec getargspec defaults len update isinstance deepcopy items list keys todict isinstance MutableMapping items isinstance extend OrderedDict _fields zip append range len _recur_strip compat_as_text _recur_strip compat_as_text _recur_strip compat_as_text _strip_eos_ strip_token compat_as_text _strip_bos_ _recur_join compat_as_text compat_as_text strip_special_tokens map_ids_to_tokens_py str_join rstrip import_module getattr dir update items isinstance endswith _load_config_yaml _load_config_python join list load_config_single isinstance strip ListDirectory IsDirectory append split format append isinstance add_variable trainable_variables xrange _get_ngrams exp dict xrange zip sum sub splitlines ones_like print concat where shape fill equal remove maybe_download embedder smoothing_cross_entropy sample_id FeedableDataIterator get_next set_segment_id Saver classifier to_float BasicPositionalRNNDecoder load_hyperparams placeholder update_template_pack ForwardConnector append encoder state_size MonoTextData ones_like size collect_trainable_variables UnidirectionalRNNEncoder GumbelSoftmaxEmbeddingHelper ConfigProto logits WordEmbedder enumerate minimum prepare_template embedding position_embedder decoder sigmoid_cross_entropy_with_logits connector Variable minimize Conv1DClassifier get_train_op AdamOptimizer reduce_mean SinusoidsSegmentalPositionEmbedder hidden_dim max_train_epoch batch_size abspath train_file present_rate data_dir blank_num mask_rate format valid_file filename_prefix join log_dir print log_disk_dir learning_rate_strategy hidden_dim filename_suffix makedirs TemplateTransformerDecoder max_decode_len generate_prediction_segment_ids dynamic_decode TrainTestDataIterator generate_prediction_offsets affine_bias beam_width max_decode_len deepcopy | # Source Code and Dataset for Text Infilling This repository contains the source code and dataset for [Text Infilling](https://arxiv.org/abs/1901.00158). The implementation is based on [Texar](https://github.com/asyml/texar). ## Repository Structure This repository contains two branches: - `master` branch - This branch contain the code for conducting the experiments of *Varying Mask Rates and #Blanks*. - `ShowCases` branch - This branch is used for *Preposition Infilling* and *Long Content Infilling*. ## Install | 1,100 |
VictorProkhorov/KL_Text_VAE | ['text generation'] | ['On the Importance of the Kullback-Leibler Divergence Term in Variational Autoencoders for Text Generation'] | Scripts/Model/model_wngt.py Scripts/Experiments/experiment_figure_1.py Scripts/Experiments/experiment_table_2.py Scripts/Experiments/preprocessing_reconstruction.py Scripts/Experiments/rouge.py Scripts/Experiments/bleu.py Scripts/Experiments/experiment_table_1.py Scripts/Experiments/experiment_table_4.py Scripts/Experiments/experiment_table_3.py _get_ngrams compute_bleu calc_log_var calc_bleu_scores restore_sentences_batch sentence_reconstruct calc_log_var calc_au calc_rouge_scores interpolate_g_n_k_all_models interpolate_g_n_k interpolate sample_sentences_from_prior make_corpus_parallel get_syntactic_zs get_accuracy_for_syntactic_case_adversary_mean read_file get_accuracy_for_syntactic_case_mean truncate_the_eos bucketing load_vocab map_idx_to_words preprocess _len_lcs _get_ngrams rouge rouge_l_summary_level rouge_n _recon_lcs _split_into_words _lcs rouge_l_sentence_level _union_lcs _f_p_r_lcs _get_word_ngrams Decoder_RNN get_vocab pad_sequences Decoder_CNN evaluate_nnl_and_rate_batch Encoder ConvLM load_dataset last_relevant create_dataset preprocess_sentence train Sentence_VAE LanguageIndex tuple range Counter len _get_ngrams exp Counter zip float sum range len mean numpy concatenate print compute_bleu print sum len count_nonzero to_float reduce_sum mean last_relevant cast embeddings rnn count_nonzero concatenate concat tolist vectorize mean last_relevant softmax top_k embeddings _sampling expand_dims log_var out range rnn str from_tensor_slices close open batch enumerate convert_to_tensor epsilon constant base_dtype concatenate log numpy top_k linspace embeddings softmax append clip_by_value expand_dims nucleus_threshold out range rnn top_k linspace clip_by_value log append expand_dims range convert_to_tensor concatenate softmax embeddings rnn constant base_dtype print nucleus_threshold numpy out epsilon print str join enumerate concat top_k clip_by_value random_normal log tolist gather_nd topk_renormalise expand_dims range convert_to_tensor concatenate multinomial softmax embeddings rnn constant base_dtype reshape dict nucleus_threshold vectorize out epsilon str join int len write close index dict zip ceil range open convert_to_tensor print concat expand_dims len dict vectorize truncate_the_eos bucketing load_vocab map_idx_to_words add set _split_into_words _lcs dict max range _recon tuple _lcs map intersection _get_word_ngrams len _len_lcs _split_into_words len _recon_lcs set _split_into_words union len _split_into_words len mean map zip asarray tuple full append max enumerate len set vocab get_vocab max_length print pad_sequences create_dataset LanguageIndex len gather int range reshape time format print shuffle evaluate_nnl_and_rate_batch range assign_add save numpy enumerate batch len vae | # [On the Importance of the Kullback-Leibler Divergence Term in Variational Autoencoders for Text Generation](https://arxiv.org/abs/1909.13668) ## Table of contents 1. [Replicate Results](#replicated-results) 2. [Usage](#usage) 3. [Citing](#citing) 4. [Licence](#licence) 5. [Contact info](#contact-info) ## Replicate Results We provide pretrained models (all models were trained on a GPU) and data used in the paper. To replicate the results: ### Step 1: | 1,101 |
VideoObjectSearch/ALFNet | ['pedestrian detection'] | ['Learning Efficient Single-stage Pedestrian Detectors by Asymptotic Localization Fitting'] | keras_alfnet/model/model_3step.py keras_alfnet/config.py evaluation/eval_script/eval_MR_multisetup.py keras_alfnet/data_generators.py evaluation/cocoapi/PythonAPI/pycocotools/cocoeval.py keras_alfnet/parallel_model.py keras_alfnet/nms/py_cpu_nms.py keras_alfnet/bbox_process.py train.py evaluation/eval_script/coco.py keras_alfnet/model/mobilenet_v1.py keras_alfnet/losses.py keras_alfnet/model/model_alf.py keras_alfnet/model/FixedBatchNormalization.py keras_alfnet/model/resnet50.py keras_alfnet/bbox_transform.py keras_alfnet/model/model_2step.py evaluation/cocoapi/PythonAPI/pycocotools/mask.py test.py keras_alfnet/model/model_1step.py keras_alfnet/utils/timer.py evaluation/cocoapi/PythonAPI/pycocotools/coco.py keras_alfnet/utils/__init__.py evaluation/cocoapi/PythonAPI/pycocotools/__init__.py keras_alfnet/model/base_model.py demo.py evaluation/cocoapi/PythonAPI/setup.py evaluation/eval_script/eval_demo.py keras_alfnet/utils/blob.py keras_alfnet/nms_wrapper.py keras_alfnet/model/__init__.py keras_alfnet/data_augment.py generate_data.py COCO _isArrayLike Params COCOeval encode decode area toBbox Config _brightness augment calc_target_multilayer _scale_enum _whctrs _ratio_enum get_target get_anchors _mkanchors _ratio_enum2 cls_loss regr_loss nms ParallelModel build_model Base_model FixedBatchNormalization nn_base _depthwise_conv_block _conv_block relu6 DepthwiseConv2D Model_1step Model_2step Model_3step create_alf prior_probability alf_2nd alf_pred alf_1st alf_3rd identity_block nn_base conv_block py_cpu_nms im_list_to_blob prep_im_for_blob Timer shape uniform COLOR_RGB2HSV cvtColor where deepcopy _brightness int asarray max astype copy uniform resize randint imread flip len hstack sqrt _whctrs round _mkanchors ones _whctrs round _mkanchors len _whctrs _mkanchors asarray arange append concatenate reshape transpose vstack meshgrid zeros expand_dims array range _ratio_enum2 len argmax concatenate box_op ascontiguousarray copy classifier_regr_std compute_targets zeros expand_dims sum bbox_overlaps range len calc_target_multilayer concatenate astype float32 shuffle expand_dims append augment array to_float maximum reduce_sum where less abs constant maximum binary_crossentropy reduce_sum reset_default_graph Input int int _depthwise_conv_block _conv_block array alf_pred alf_pred alf_pred alf_3rd alf_2nd alf_1st str str conv_block identity_block append maximum minimum transpose zeros max range len min astype float32 shape resize float max | # Learning Efficient Single-stage Pedestrian Detectors by Asymptotic Localization Fitting Keras implementation of [ALFNet](./docs/2018ECCV-ALFNet.pdf) accepted in ECCV 2018. ## Introduction This paper is a step forward pedestrian detection for both speed and accuracy. Specifically, a structurally simple but effective module called Asymptotic Localization Fitting (ALF) is proposed, which stacks a series of predictors to directly evolve the default anchor boxes step by step into improving detection results. As a result, during training the latter predictors enjoy more and better-quality positive samples, meanwhile harder negatives could be mined with increasing IoU thresholds. On top of this, an efficient single-stage pedestrian detection architecture (denoted as ALFNet) is designed, achieving state-of-the-art performance on CityPersons and Caltech. 
For more details, please refer to our [paper](./docs/2018ECCV-ALFNet.pdf).  ### Dependencies * Python 2.7 * Numpy * Tensorflow 1.x * Keras 2.0.6 | 1,102 |
Vikrant-Deshmukh/Scene-Text-Recognition-OpenCV | ['optical character recognition', 'scene text detection', 'curved text detection'] | ['EAST: An Efficient and Accurate Scene Text Detector'] | test.py text_recognition.py decode_predictions decode_predictions int cos sin append range | # Scene Text Recognition using OpenCV and Tesseract ### Text detection and text recognition using OpenCV, Python, and Tesseract. Implementation done using Zhou et al.’s 2017 paper, [EAST: An Efficient and Accurate Scene Text Detector](https://arxiv.org/abs/1704.03155) **Requirements** - OpenCV 3.4.2 or OpenCV 4 - argparse | 1,103
VinACE/trans-vsumm | ['video summarization'] | ['Summarizing Videos with Attention'] | vasnet_model.py aladdin.py layer_norm.py knapsack.py seq_summ.py test_aladding_1024.py create_split.py attention_model.py sys_utils.py vasnet_seq2seq_summ.py cpd_nonlin.py seq_sum_old.py vsumm_transform_working_aladdin.py cpd_auto.py vasnet_seq2seq_summ_v2.py seq_sum_v1.py test_aladding_1024_atten.py aladdin_org.py seq_sum copy.py seq_sum_v2.py vsumm_weight_transform.py vsum_tools.py main.py tvsumm_transform_v1.py seq_sum.py tvsumm_transform.py config.py aladdin_test.py Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock HParameters estimate_vmax cpd_auto eval_score eval_cost centering calc_scatters cpd_nonlin create split_random write_json mkdir_if_missing test_knapsack_dp test_knapsack knapsack check_inputs knapsack_ortools knapsack_dp LayerNorm eval_split AONet parse_splits_filename weights_init lookup_weights_splits_file train DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer print_table list_files get_image_list run_command torch_summarize del_file ge_pkg_versions get_video_list print_pkg_versions Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock SelfAttention VASNet DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer DecoderLayer Seq2Seq PositionwiseFeedforwardLayer Decoder MultiHeadAttentionLayer Encoder EncoderLayer Transformer Decoder SelfAttention Encoder TransformerBlock DecoderBlock _get_clones TransformerNet PositionalEncoding TransformerEncoder TransformerEncoderLayer _get_activation_fn evaluate_summary evaluate_user_summaries generate_summary arange argmin zeros float log cpd_nonlin centering trace list range len float log len list T cumsum reshape astype zeros diag int inf calc_scatters print ones reshape min argmin copy shape zeros max range makedirs dirname mkdir_if_missing append range enumerate choice int join format num_splits append print File save_name train_percent close write_json save_dir ceil dataset keys range split_random len zeros max range print knapsack sort check_inputs append zeros range print knapsack_dp int tolist astype Init Solve array bias xavier_uniform_ weight __name__ constant_ splitext split glob format print lookup_weights_file initialize splits AONet load_datasets select_split print load_model print_table test_keys mean eval verbose load_split_file append range len parse_splits_filename output_dir load_split_file open str initialize splits AONet len range format close datasets splitext flush join get_dataset_by_name load_datasets select_split print system 
write split makedirs set_trace print sort isdir remove Popen split run_command __version__ version platform isfile print items ge_pkg_versions items tuple _addindent __repr__ sum __name__ pop format print sum enumerate len int concatenate ones tolist astype delete mean floor int32 knapsack_ortools append zeros float range len argmax concatenate astype float32 mean shape append zeros sum max range len argmax astype float32 mean shape append sum max range | # Video Summarization with Attention A PyTorch implementation of our paper [Video Summarization with Attention](https://arxiv.org/abs/1812.01969) by Jiri Fajtl, Hajar Sadeghi Sokeh, Vasileios Argyriou, Dorothy Monekosso and Paolo Remagnino. This paper was presented at [ACCV 2018](http://accv2018.net/program/) [AIU2018 workshop](http://www.sys.info.hiroshima-cu.ac.jp/aiu2018/). ## Installation The development and evaluation was done on the following configuration: ### System configuration - Platform : Linux-4.15.0-43-generic-x86_64-with-Ubuntu-16.04-xenial - Display driver : NVRM version: NVIDIA UNIX x86_64 Kernel Module 384.130 Wed Mar 21 03:37:26 PDT 2018 GCC version: gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.10) | 1,104 |
VincLee8188/GMAN-PyTorch | ['image dehazing', 'traffic prediction'] | ['GMAN: A Graph Multi-Attention Network for Traffic Prediction'] | utils/utils_.py model/model_.py model/test.py model/train.py main.py spatialAttention transformAttention STEmbedding gatedFusion temporalAttention conv2d_ GMAN FC STAttBlock test train load time model_file batch_size log_string metric mean shape num_pred load_data ceil range append batch_size model zero_grad save loss_criterion max_epoch shape randperm load_state_dict ceil append range state_dict log_string eval is_available float time model_file backward print min load_data empty_cache step | # PyTorch implementation of GMAN: A Graph Multi-Attention Network for Traffic Prediction This is a testing PyTorch version implementation of Graph Multi-Attention Network in the following paper: Chuanpan Zheng, Xiaoliang Fan, Cheng Wang, and Jianzhong Qi. "[GMAN: A Graph Multi-Attention Network for Traffic Prediction](https://arxiv.org/abs/1911.08415)", AAAI2020. ## Requirements * Python * PyTorch * Pandas * Matplotlib * Numpy ## Dataset The datasets could be unzipped and load from the data directory in this repository. | 1,105 |
VincLee8188/Gman_Ptorch_version | ['image dehazing', 'traffic prediction'] | ['GMAN: A Graph Multi-Attention Network for Traffic Prediction'] | utils/utils_.py model/model_.py model/test.py model/train.py main.py spatialAttention transformAttention STEmbedding gatedFusion temporalAttention conv2d_ GMAN FC STAttBlock test train load time model_file batch_size log_string metric mean shape num_pred load_data ceil range append batch_size model zero_grad save loss_criterion max_epoch shape randperm load_state_dict ceil append range state_dict log_string eval is_available float time model_file backward print min load_data empty_cache step | # PyTorch implementation of GMAN: A Graph Multi-Attention Network for Traffic Prediction This is a testing PyTorch version implementation of Graph Multi-Attention Network in the following paper: Chuanpan Zheng, Xiaoliang Fan, Cheng Wang, and Jianzhong Qi. "[GMAN: A Graph Multi-Attention Network for Traffic Prediction](https://arxiv.org/abs/1911.08415)", AAAI2020. ## Requirements * Python * PyTorch * Pandas * Matplotlib * Numpy ## Dataset The datasets could be unzipped and load from the data directory in this repository. | 1,106 |
VinceMarron/style_transfer | ['style transfer'] | ['A Neural Algorithm of Artistic Style'] | vgg_styletrans.py basic_styletrans.py main make_parser calc_l2wass_dist TransferStyle calc_2_moments pool_func describe_style error style TransferStyle infer_loss output make_parser synthesize_image subject parse_args vggnet open add_argument ArgumentParser avg_pool max_pool reshape float32 matmul shape reduce_prod reduce_mean cast square matmul maximum reduce_sum sqrt | # Style Transfer as Optimal Transport ## An algorithm that transfers the distribution of visual characteristics, or *style*, of a reference image onto a subject image via an Optimal Transport plan.  # How it Works: * For a general description and pictures: [style-transfer-theory.pdf](style-transfer-theory.pdf). * For an explanation of the use of L2-Wasserstein distance instead of the loss function from the [Gatys et. al](https://arxiv.org/abs/1508.06576) framework see [why_wasserstein.ipynb](why_wasserstein.ipynb). * [Demo video](https://vimeo.com/284207984) tl;dr - Passes a subject and style image through the convolution layers of the vgg19 network. Extracts probabalistic descriptions (first two moments) of the convolution filter activations. Calculates L2-Wasserstein distance between these probability distributions and then modifies the subject image optimally to minimize this distance. # Running Requires: | 1,107 |
VincentK1991/BERT_summarization_1 | ['text summarization'] | ['Automatic Text Summarization of COVID-19 Medical Research Articles using BERT and GPT-2'] | helperGPT2.py train_GPT2.py GPT2_preprocessing.py GPT2_summarize.py main tag_pull_abstract load_words main write_last_token write_lm_labels write_torch_tensor write_mc_labels write_token_type_ids write_input_ids execute_tokenization shuffle_batch main train evaluate int word_tokenize list join sort choice randint array len values format reset_index load_words print default_timer output range TensorDataset save input zeros tag_pull_abstract execute_tokenization read_csv from_pretrained decode add_special_tokens read encode resize_token_embeddings close open writelines generate model_directory tensor to input_file append len pad_token_id shuffle encode max append len append eos_token_id index list map zip append len shuffle array arange unsqueeze write_last_token write_lm_labels write_torch_tensor write_mc_labels write_token_type_ids write_input_ids shuffle_batch get_linear_schedule_with_warmup DataLoader train_data save_vocabulary to_json_file state_dict val_data enumerate load evaluate AdamW RandomSampler parameters model_name train epochs max_norm lm_coef model backward clip_grad_norm_ zero_grad parameters grad_accumulation mc_coef step eval | # The pre-print article is out! please visit, and suggest if you want to see any changes. I thanks our co-authors/collaborators Bowen Tan and Yiming Niu from Rockefeller University. https://arxiv.org/abs/2006.01997 --- # command line interface I added a more user friendly command line pre-processing/training/summarization codes for the GPT2. These are the GPT2_preprocessing.py, trainGPT2.py, and GPT2_summarizer.py. To use it, first you'd need Huggingface's transformer package, and a folder where you'd want to save your fine-tuned model on. For the training and validation dataset, refer to the notebook *pre-processing-text-for-GPT2-fine-tuning*. (Update on Aug 21 2020) ## setting up the environment To install from the Pipfile | 1,108 |
ViniciusMikuni/ABCNet | ['graph attention'] | ['GAPNet: Graph Attention based Point Neural Network for Exploiting Local Feature of Point Cloud'] | models/gapnet_QG.py utils/tf_util.py models/gapnet_PU.py segmentation/train.py classification/evaluate.py segmentation/evaluate.py utils/provider.py classification/train.py models/gat_layers.py get_batch eval_one_epoch convert_label_to_one_hot printout eval get_batch get_learning_rate eval_one_epoch log_string train_one_epoch train get_bn_decay get_model gap_block get_loss placeholder_inputs get_model gap_block get_loss placeholder_inputs attn_feature eval_one_epoch eval get_batch printout get_batch get_learning_rate eval_one_epoch log_string train_one_epoch train get_bn_decay loadDataFile getDataFiles load_h5 shuffle_data load_add conv2d_transpose pairwise_distance fully_connected conv3d max_pool3d pairwise_distanceR batch_norm_template conv2d conv1d _variable_with_weight_decay batch_norm_for_conv1d dropout batch_norm_for_conv2d knn max_pool2d avg_pool3d _variable_on_cpu avg_pool2d conv2d_nobias batch_norm_dist_template get_neighbors batch_norm_for_fc batch_norm_for_conv3d print write zeros range arange subplots grid save argmax max roc_auc_score run semilogy name squeeze set_xlabel savefig plot_path sum range format get_batch concatenate float join print roc_curve load_h5 hist set_ylabel zeros len print write flush exponential_decay maximum minimum exponential_decay str join sum arange get_batch squeeze log_string shuffle now load_h5 shuffle_data add_summary run float argmax range len str log_string time now shuffle_data add_summary int32 float32 placeholder attn_feature concat reduce_max append range value dropout pairwise_distance reshape concat knn avg_pool2d conv2d cond tile expand_dims pairwise_distanceR gap_block reduce_mean sparse_softmax_cross_entropy_with_logits get_shape print fully_connected one_hot softmax_cross_entropy value leaky_relu squeeze transpose matmul activation conv2d_nobias conv2d softmax tile get_neighbors expand_dims bias_add arange shuffle len list File keys remove format print File astype len multiply add_to_collection xavier_initializer _variable_on_cpu l2_loss truncated_normal_initializer get_shape value reshape squeeze gather expand_dims range squeeze transpose square reduce_sum matmul expand_dims ones_like constant greater_equal squeeze transpose square where pi matmul shape reduce_sum tile fill expand_dims abs equal top_k | # ABCNet: An attention-based method for particle tagging. This is the main repository for the [ABCNet paper](https://arxiv.org/abs/2001.05311). The implementation uses a modified version of [GAPNet](https://arxiv.org/abs/1905.08705) to suit the High Energy Physics needs. This repository is divided into two main folders: classification and segmentation, for the quark-gluon tagging and pileup mitigation applications, respectively. The input ```.h5``` files are expected to have the following structure: * **data**: [N,P,F], * **label**:[N,P] * **pid**: [N] * **global**: [N,G] N = Number of events | 1,109 |
VisualComputingInstitute/2D_lidar_person_detection | ['human detection'] | ['DR-SPAAM: A Spatial-Attention and Auto-regressive Model for Person Detection in 2D Range Data', 'Self-Supervised Person Detection in 2D Range Data using a Calibrated Camera', 'Deep Person Detection in 2D Range Data'] | dr_spaam/tests/test_detector.py dr_spaam/dr_spaam/datahandle/jrdb_handle.py dr_spaam/bin/train.py dr_spaam/dr_spaam/model/_common.py dr_spaam/dr_spaam/pipeline/optim.py dr_spaam/tests/test_jrdb_handle_mayavi.py dr_spaam_ros/scripts/drow_data_converter.py dr_spaam/dr_spaam/pipeline/logger.py dr_spaam/bin/plotting/plot_clustering.py dr_spaam/dr_spaam/pseudo_labels.py dr_spaam/dr_spaam/dataset/__init__.py dr_spaam/dr_spaam/utils/plotting.py dr_spaam/bin/plotting/get_pseudo_label_videos.py dr_spaam/tests/test_detr_dataloader.py dr_spaam_ros/src/dr_spaam_ros/dr_spaam_ros.py dr_spaam/dr_spaam/dataset/builder.py dr_spaam/bin/plotting/get_eer_thresh.py dr_spaam/bin/plotting/analyze_pseudo_labels.py dr_spaam/dr_spaam/pipeline/__init__.py dr_spaam/dr_spaam/utils/jrdb_utils.py dr_spaam/dr_spaam/model/losses.py dr_spaam/dr_spaam/model/dr_spaam.py dr_spaam/dr_spaam/utils/precision_recall.py dr_spaam/bin/setup_jrdb_dataset.py dr_spaam/dr_spaam/dataset/drow_dataset.py dr_spaam/dr_spaam/utils/pytorch_nms/setup.py dr_spaam/dr_spaam/pipeline/pipeline.py dr_spaam/dr_spaam/utils/jrdb_transforms.py dr_spaam/setup.py dr_spaam/dr_spaam/model/drow_net.py dr_spaam/dr_spaam/model/dr_spaam_fn.py dr_spaam/dr_spaam/model/get_model.py dr_spaam/dr_spaam/datahandle/drow_handle.py dr_spaam/dr_spaam/utils/pytorch_nms/src/nms/__init__.py dr_spaam/dr_spaam/datahandle/_pypcd.py dr_spaam_ros/setup.py dr_spaam_ros/scripts/node.py dr_spaam/dr_spaam/dataset/jrdb_dataset.py dr_spaam/tests/test_inference_speed.py dr_spaam/dr_spaam/pipeline/trainer.py dr_spaam/dr_spaam/detector.py dr_spaam/dr_spaam/utils/utils.py dr_spaam/dr_spaam/datahandle/jrdb_handle_det3d.py dr_spaam/dr_spaam/model/__init__.py dr_spaam/dr_spaam/datahandle/__init__.py dr_spaam/tests/test_jrdb_handle.py dr_spaam/tests/test_dataloader.py dr_spaam/tests/test_drow_handle.py _match_pc_im_laser_one_sequence match_pc_im_laser _laser_idx_to_fname extract_laser_from_rosbag run_training run_evaluation _plot_pseudo_labels _write_file_make_dir _get_bounding_box_plotting_vertices _plot_frame display_evaluation_result evaluate_pseudo_labels generate_pseudo_labels _get_bounding_box_plotting_vertices _distance_to_bgr_color plot_color_bar plot_pseudo_label_for_all_frames _plot_frame_im _plot_frame_pts _plot_pseudo_labels _write_file_make_dir _get_bounding_box_plotting_vertices _plot_frame _distance_to_bgr_color generate_pseudo_labels Detector get_regression_target_using_bounding_boxes DROWHandle JRDBHandle _SequenceHandle JRDBHandleDet3D _SequenceHandle save_xyz_label make_xyz_label_point_cloud write_header save_txt decode_rgb_from_pcl parse_binary_compressed_pc_data cat_point_clouds save_xyz_intensity_label point_cloud_to_path save_point_cloud_bin parse_header add_fields build_ascii_fmtstr make_xyz_rgb_point_cloud _metadata_is_consistent update_field parse_binary_pc_data point_cloud_from_path point_cloud_to_buffer encode_rgb_for_pcl point_cloud_from_fileobj make_xyz_point_cloud PointCloud point_cloud_from_buffer parse_ascii_pc_data _build_dtype save_point_cloud_bin_compressed point_cloud_to_fileobj save_point_cloud get_dataloader DROWDataset _closest_detection _get_regression_target JRDBDataset _get_regression_target _get_regression_target_from_pseudo_labels _mixup_samples DrowNet DrSpaam 
_SpatialAttentionMemory get_model SelfPacedLearningLoss binary_focal_loss BinaryFocalLoss PartiallyHuberisedBCELoss SymmetricBCELoss FocalLoss _conv1d _conv1d_3 _conv1d_1 _create_logger Logger _ExpDecayScheduler Optim Pipeline Trainer transform_pts_upper_velodyne_to_base _get_R_z transform_pts_lower_velodyne_to_base transform_pts_base_to_lower_velodyne transform_pts_laser_to_base transform_pts_base_to_stitched_im transform_pts_base_to_upper_velodyne transform_pts_laser_to_stitched_im transform_pts_base_to_laser box_to_kitti_string kitti_string_to_box _cls_to_color _plot_scan _plot_target _plot_detection plot_one_batch_detr plot_one_batch _plot_annotation_detr plot_one_frame _plot_prediction _create_figure get_precision_recall _eer _prec_rec_2d kitti_string_to_drow_detection get_precision_recall_one_hot _peakf1 _plot_prec_rec_wps_only _prettify_pr_curve plot_pr_curve _eval_prec_rec drow_detection_to_kitti_string evaluate_drow _increment_dist_hist_count _plot_prec_rec evaluate_drow_one_hot _lbplt_fatlegend get_displacement_from_odometry rphi_to_xy nms_2d scans_to_polar_grid canonical_to_global rphi_to_xy_torch generate_pseudo_labels _phi_to_rotation_matrix xy_to_rphi get_drow_laser_phi get_jrdb_laser_phi get_velocity_from_odometry ravel_hash_vec nms_predicted_center_torch scans_to_cutout_torch canonical_to_global_xy group_predicted_center nms_predicted_center canonical_to_global_torch scans_to_cutout_original scans_to_cutout global_to_canonical data_augmentation get_unique_rows nms _test_dataloader _plot_sample _plot_sample_light test_detector _plot_annotation _test_detr_dataloader _plot_annotation _plot_sequence test_inference_speed_on_drow test_inference_speed_on_jrdb _get_pts_color _test_loading_speed _plot_sequence _test_loading_speed _plot_sequence sequence_to_bag load_odoms load_scans read_publisher_param detections_to_pose_array read_subscriber_param detections_to_rviz_marker DrSpaamROS _match_pc_im_laser_one_sequence match_pc_im_laser _laser_idx_to_fname extract_laser_from_rosbag run_training run_evaluation _plot_pseudo_labels _write_file_make_dir _get_bounding_box_plotting_vertices _plot_frame display_evaluation_result evaluate_pseudo_labels generate_pseudo_labels _get_bounding_box_plotting_vertices _distance_to_bgr_color plot_color_bar plot_pseudo_label_for_all_frames _plot_frame_im _plot_frame_pts _plot_pseudo_labels _write_file_make_dir _plot_frame generate_pseudo_labels Detector get_regression_target_using_bounding_boxes DROWHandle JRDBHandle _SequenceHandle JRDBHandleDet3D save_xyz_label make_xyz_label_point_cloud write_header save_txt decode_rgb_from_pcl parse_binary_compressed_pc_data cat_point_clouds save_xyz_intensity_label point_cloud_to_path save_point_cloud_bin parse_header add_fields build_ascii_fmtstr make_xyz_rgb_point_cloud _metadata_is_consistent update_field parse_binary_pc_data point_cloud_from_path point_cloud_to_buffer encode_rgb_for_pcl point_cloud_from_fileobj make_xyz_point_cloud PointCloud point_cloud_from_buffer parse_ascii_pc_data _build_dtype save_point_cloud_bin_compressed point_cloud_to_fileobj save_point_cloud get_dataloader DROWDataset _closest_detection _get_regression_target JRDBDataset _get_regression_target_from_pseudo_labels _mixup_samples DrowNet DrSpaam _SpatialAttentionMemory get_model SelfPacedLearningLoss binary_focal_loss BinaryFocalLoss PartiallyHuberisedBCELoss SymmetricBCELoss FocalLoss _conv1d _conv1d_3 _conv1d_1 _create_logger Logger _ExpDecayScheduler Optim Pipeline Trainer transform_pts_upper_velodyne_to_base _get_R_z 
transform_pts_lower_velodyne_to_base transform_pts_base_to_lower_velodyne transform_pts_laser_to_base transform_pts_base_to_stitched_im transform_pts_base_to_upper_velodyne transform_pts_laser_to_stitched_im transform_pts_base_to_laser box_to_kitti_string kitti_string_to_box _cls_to_color _plot_scan _plot_target _plot_detection plot_one_batch_detr plot_one_batch _plot_annotation_detr plot_one_frame _plot_prediction _create_figure get_precision_recall _eer _prec_rec_2d kitti_string_to_drow_detection get_precision_recall_one_hot _peakf1 _plot_prec_rec_wps_only _prettify_pr_curve plot_pr_curve _eval_prec_rec drow_detection_to_kitti_string evaluate_drow _increment_dist_hist_count _plot_prec_rec evaluate_drow_one_hot _lbplt_fatlegend get_displacement_from_odometry rphi_to_xy nms_2d scans_to_polar_grid canonical_to_global rphi_to_xy_torch generate_pseudo_labels _phi_to_rotation_matrix xy_to_rphi get_drow_laser_phi get_jrdb_laser_phi get_velocity_from_odometry ravel_hash_vec nms_predicted_center_torch scans_to_cutout_torch canonical_to_global_xy group_predicted_center nms_predicted_center canonical_to_global_torch scans_to_cutout_original scans_to_cutout global_to_canonical data_augmentation get_unique_rows nms _test_dataloader _plot_sample _plot_sample_light test_detector _plot_annotation _test_detr_dataloader _plot_annotation _plot_sequence test_inference_speed_on_drow test_inference_speed_on_jrdb _get_pts_color _test_loading_speed _plot_sequence sequence_to_bag load_odoms load_scans read_publisher_param detections_to_pose_array read_subscriber_param detections_to_rviz_marker DrSpaamROS _match_pc_im_laser_one_sequence match_pc_im_laser _laser_idx_to_fname extract_laser_from_rosbag run_training run_evaluation _plot_pseudo_labels _write_file_make_dir _get_bounding_box_plotting_vertices _plot_frame display_evaluation_result evaluate_pseudo_labels generate_pseudo_labels _distance_to_bgr_color plot_color_bar plot_pseudo_label_for_all_frames _plot_frame_im _plot_frame_pts _plot_pseudo_labels _write_file_make_dir _plot_frame generate_pseudo_labels Detector get_regression_target_using_bounding_boxes DROWHandle JRDBHandle _SequenceHandle JRDBHandleDet3D save_xyz_label make_xyz_label_point_cloud write_header save_txt decode_rgb_from_pcl parse_binary_compressed_pc_data cat_point_clouds save_xyz_intensity_label point_cloud_to_path save_point_cloud_bin parse_header add_fields build_ascii_fmtstr make_xyz_rgb_point_cloud _metadata_is_consistent update_field parse_binary_pc_data point_cloud_from_path point_cloud_to_buffer encode_rgb_for_pcl point_cloud_from_fileobj make_xyz_point_cloud PointCloud point_cloud_from_buffer parse_ascii_pc_data _build_dtype save_point_cloud_bin_compressed point_cloud_to_fileobj save_point_cloud get_dataloader DROWDataset _closest_detection _get_regression_target JRDBDataset _get_regression_target_from_pseudo_labels _mixup_samples DrowNet DrSpaam _SpatialAttentionMemory get_model SelfPacedLearningLoss binary_focal_loss BinaryFocalLoss PartiallyHuberisedBCELoss SymmetricBCELoss FocalLoss _conv1d _conv1d_3 _conv1d_1 _create_logger Logger _ExpDecayScheduler Optim Pipeline Trainer transform_pts_upper_velodyne_to_base _get_R_z transform_pts_lower_velodyne_to_base transform_pts_base_to_lower_velodyne transform_pts_laser_to_base transform_pts_base_to_stitched_im transform_pts_base_to_upper_velodyne transform_pts_laser_to_stitched_im transform_pts_base_to_laser box_to_kitti_string kitti_string_to_box _cls_to_color _plot_scan _plot_target _plot_detection plot_one_batch_detr plot_one_batch 
_plot_annotation_detr plot_one_frame _plot_prediction _create_figure get_precision_recall _eer _prec_rec_2d kitti_string_to_drow_detection get_precision_recall_one_hot _peakf1 _plot_prec_rec_wps_only _prettify_pr_curve plot_pr_curve _eval_prec_rec drow_detection_to_kitti_string evaluate_drow _increment_dist_hist_count _plot_prec_rec evaluate_drow_one_hot _lbplt_fatlegend get_displacement_from_odometry rphi_to_xy nms_2d scans_to_polar_grid canonical_to_global rphi_to_xy_torch generate_pseudo_labels _phi_to_rotation_matrix xy_to_rphi get_drow_laser_phi get_jrdb_laser_phi get_velocity_from_odometry ravel_hash_vec nms_predicted_center_torch scans_to_cutout_torch canonical_to_global_xy group_predicted_center nms_predicted_center canonical_to_global_torch scans_to_cutout_original scans_to_cutout global_to_canonical data_augmentation get_unique_rows nms _test_dataloader _plot_sample _plot_sample_light test_detector _plot_annotation _test_detr_dataloader _plot_sequence test_inference_speed_on_drow test_inference_speed_on_jrdb _get_pts_color _test_loading_speed sequence_to_bag load_odoms load_scans read_publisher_param detections_to_pose_array read_subscriber_param detections_to_rviz_marker DrSpaamROS join format ranges read_messages print to_sec _laser_idx_to_fname close rmtree Bag savetxt mkdir append listdir array exists enumerate len join basename reshape loadtxt argmin append abs array range len join format print _match_pc_im_laser_one_sequence listdir enumerate len get_dataloader train evaluate get_dataloader evaluate rphi_to_xy _get_bounding_box_plotting_vertices add_subplot axis GridSpec canonical_to_global max clip set_aspect set_title set_xlabel imshow scatter savefig dirname transform_pts_laser_to_stitched_im plot set_xlim close add_artist stack zip float join Circle set_ylabel repeat figure array set_ylim makedirs rphi_to_xy axis add_subplot pi max clip set_aspect logical_and imshow scatter savefig dirname transform_pts_laser_to_stitched_im Axes close add_axes sqrt stack mean zip empty enumerate int join set_size_inches reshape set_axis_off makedirs figure zeros array len dirname makedirs rphi_to_xy _plot_frame exists _plot_pseudo_labels _write_file_make_dir list len logical_and drow_detection_to_kitti_string sleep sum range get_dataloader astype stack enumerate join int items tqdm rmtree logical_or array makedirs join zip evaluate_drow_one_hot _write_file_make_dir join listdir isfile COLOR_HSV2RGB reshape astype float32 cvtColor rphi_to_xy _get_bounding_box_plotting_vertices axis logical_and _distance_to_bgr_color imshow ylim scatter savefig dirname transform_pts_laser_to_stitched_im Axes plot close add_axes stack xlim join set_size_inches figure makedirs rphi_to_xy add_subplot set_aspect set_xlabel scatter savefig dirname set_xlim close add_artist nms_predicted_center zip join Circle reshape set_ylabel figure array set_ylim makedirs get_dataloader _plot_frame_pts load_ckpt print tqdm eval Logger _plot_frame_im get_model cuda range enumerate len int join set_size_inches set_axis_off close _distance_to_bgr_color add_axes imshow repeat savefig linspace figure Axes _distance_to_bgr_color _get_regression_target_from_pseudo_labels generate_pseudo_labels str list int len map warn match lower split join str format map copy append print append dtype list extend zip append type count extend zip read itemsize decompress read itemsize calcsize fromstring unpack zeros range len decode print parse_binary_pc_data parse_ascii_pc_data _build_dtype parse_binary_compressed_pc_data startswith append 
parse_header close StringIO point_cloud_from_fileobj join pack write_header names compress build_ascii_fmtstr write tostring lower savetxt get_metadata pc_data append len StringIO point_cloud_to_fileobj get_metadata get_metadata list names extend pc_data get_metadata zip append empty PointCloud len concatenate get_metadata points width PointCloud update dtype view astype float32 PointCloud update squeeze PointCloud uint32 astype float32 array uint32 asarray zeros copy dtype astype float32 fromarrays PointCloud JRDBDataset DROWDataset JRDBDeTrDataset list global_to_canonical zip zeros _closest_detection enumerate len cdist T hstack array rphi_to_xy ones reshape argmin logical_and stack hypot ones xy_to_rphi logical_and stack _get_regression_target logical_or zeros logical_or beta DrSpaam SelfPacedLearningLoss partial DrowNet PartiallyHuberisedBCELoss binary_cross_entropy_with_logits SymmetricBCELoss log join basicConfig setFormatter addHandler StreamHandler Formatter DEBUG setLevel cos pi arctan2 transform_pts_laser_to_base ones strip zip zeros len int append float array split Circle _plot_scan rphi_to_xy _plot_target _plot_detection _plot_prediction add_artist zip array _create_figure append len range plot_one_frame _plot_scan _plot_target len _plot_annotation_detr append _plot_prediction range _create_figure repeat set_aspect set_title set_xlabel set_xlim add_subplot set_ylabel figure set_ylim scatter rphi_to_xy scatter canonical_to_global _cls_to_color rphi_to_xy scatter _cls_to_color canonical_to_global scatter rphi_to_xy Circle rphi_to_xy zip add_artist ones strip zip zeros len int append float array split get_precision_recall join remove replace concatenate print glob append listdir array join get_precision_recall_one_hot replace concatenate print glob append listdir array ones _prec_rec_2d _eval_prec_rec len _plot_prec_rec_wps_only savefig reshape min _increment_dist_hist_count zeros float max range len hypot argmin at defaultdict linear_sum_assignment logical_not reversed argsort nan unique full_like append zeros sum array enumerate len abs argmin first_nonzero_idx subplots _prettify_pr_curve suptitle plot _lbplt_fatlegend subplots _prettify_pr_curve suptitle plot _lbplt_fatlegend plot set_xlabel set_xlim FuncFormatter set_ylabel set_major_formatter set_ylim legendHandles set_linewidth set_alpha legend get_linewidth radians radians cos sin cos arctan2 cos atan2 canonical_to_global update int arctan reshape astype maximum pi mean shape logical_or take int32 ceil _clip max view clamp pi where mean shape logical_xor item expand_as gather atan int list arange arctan squeeze shape pad round _clip float empty max range int astype float32 range shape int32 zeros empty clip rphi_to_xy arange where canonical_to_global maximum_filter hypot argmin int64 append sum range concatenate copy mean GaussianBlur int float32 at zeros len rphi_to_xy ones reshape square canonical_to_global_xy canonical_to_global sqrt zeros range len long T _phi_to_rotation_matrix reshape matmul array T _phi_to_rotation_matrix reshape matmul eye append maximum minimum minimum norm fill_diagonal reshape maximum mean append max uint64 min astype zeros range unique ravel_hash_vec set_aspect Circle rphi_to_xy set_xlabel set_xlim add_artist set_ylabel scatter zip array set_ylim cla set_aspect Circle rphi_to_xy set_title set_xlabel set_xlim add_artist canonical_to_global set_ylabel scatter zip array set_ylim cla get_dataloader join show mpl_connect pause add_subplot len rmtree savefig figure _plot_sample range exists enumerate 
makedirs Circle rphi_to_xy add_artist zip array rphi_to_xy add_subplot exists clip set_aspect show set_title Detector set_xlabel set_laser_fov scatter savefig d mpl_connect set_xlim add_artist zip enumerate JRDBHandle join T Circle pause makedirs rmtree set_ylabel figure transform_pts_base_to_laser set_ylim cla rphi_to_xy add_subplot canonical_to_global exists set_aspect show set_title set_xlabel len scatter savefig range get_dataloader mpl_connect set_xlim add_artist zip enumerate join Circle pause makedirs rmtree set_ylabel figure set_ylim cla rphi_to_xy add_subplot DROWHandle exists set_aspect show set_title set_xlabel _plot_annotation scatter savefig mpl_connect set_xlim enumerate join pause makedirs rmtree set_ylabel figure set_ylim cla time Detector print set_laser_fov DROWHandle mean zip append randint detector len JRDBHandle time Detector print set_laser_fov mean zip append randint detector len hypot repeat clip JRDBHandle list time print sample range len axis GridSpec box_is_on_ground max transform_pts_lower_velodyne_to_base imshow uniform append box_from_jrdb transform_pts_upper_velodyne_to_base plot transform_pts_base_to_stitched_im stack zip float draw_fpv draw_bev JRDBHandle transform_pts_laser_to_base array tuple points3d to_corners boxes_to_corners plot3d concatenate associate_points_and_boxes genfromtxt genfromtxt astype float32 radians LaserScan angle_min angle_max TransformStamped Marker pi Point ADD stack LINE_LIST linspace zip append range len append Pose zip PoseArray get_param get_param | # Person Detection in 2D Range Data This repository implements DROW3 ([arXiv](https://arxiv.org/abs/1804.02463)) and DR-SPAAM ([arXiv](https://arxiv.org/abs/2004.14079)), real-time person detectors using 2D LiDARs mounted at ankle or knee height. Also included are experiments from *Self-Supervised Person Detection in 2D Range Data using a Calibrated Camera* ([arXiv](https://arxiv.org/abs/2012.08890)). Pre-trained models (using PyTorch 1.6) can be found in this [Google drive](https://drive.google.com/drive/folders/1Wl2nC8lJ6s9NI1xtWwmxeAUnuxDiiM4W?usp=sharing).  ## News [06-03-2021] Our work has been accepted to ICRA'21! Checkout the presentation video [here](https://www.youtube.com/watch?v=f5U1ZfqXtc0). ## Quick start First clone and install the repository ``` | 1,110 |
VisualComputingInstitute/DR-SPAAM-Detector | ['human detection'] | ['DR-SPAAM: A Spatial-Attention and Auto-regressive Model for Person Detection in 2D Range Data'] | dr_spaam/src/dr_spaam/detector.py dr_spaam/bin/train.py dr_spaam/hyperopt/objective_functions.py dr_spaam/src/dr_spaam/utils/pytorch_nms/setup.py dr_spaam_ros/scripts/drow_data_converter.py dr_spaam/src/dr_spaam/utils/prec_rec_utils.py dr_spaam/src/dr_spaam/utils/dataset.py dr_spaam_ros/src/dr_spaam_ros/dr_spaam_ros.py dr_spaam/src/dr_spaam/utils/train_utils.py dr_spaam/hyperopt/generate_inference_result.py dr_spaam/src/dr_spaam/model/loss_utils.py dr_spaam/src/dr_spaam/utils/utils.py dr_spaam/src/dr_spaam/utils/eval_utils.py dr_spaam/setup.py dr_spaam/src/dr_spaam/utils/pytorch_nms/src/nms/__init__.py dr_spaam_ros/setup.py dr_spaam/bin/demo.py dr_spaam_ros/scripts/node.py dr_spaam/bin/eval.py dr_spaam/src/dr_spaam/utils/logger.py dr_spaam/src/dr_spaam/model/drow.py play_sequence_with_tracking inference_time play_sequence eval eval_dir objective _TrackingExtension Detector _conv1x1 TemporalDROW _conv3x3 _SpatialAttention _TemporalAttention DROW SpatialDROW _conv FocalLoss BinaryFocalLoss binary_focal_loss DROWDataset create_test_dataloader create_dataloader eval_batch eval_epoch_with_output model_fn eval_prec_rec plot_prec_rec compute_prec_rec cfg_to_model eval_epoch create_logger create_tb_logger eer _prettify_pr_curve peakf1 eval_prec_rec plot_prec_rec plot_prec_rec_wps_only prec_rec_2d _lbplt_fatlegend checkpoint_state load_checkpoint Trainer save_checkpoint LucasScheduler closest_detection rphi_to_xy data_augmentation get_regression_target nms_predicted_center_torch xy_to_rphi scans_to_cutout_torch scans_to_polar_grid group_predicted_center nms_predicted_center canonical_to_global scan_to_xy canonical_to_global_torch global_to_canonical scans_to_cutout_original get_laser_phi rphi_to_xy_torch scans_to_cutout nms sequence_to_bag load_odoms load_scans read_publisher_param detections_to_pose_array read_subscriber_param detections_to_rviz_marker DrSpaamROS time Detector print mean zip append set_laser_spec range detector genfromtxt rphi_to_xy add_subplot axis detector set_aspect set_title Detector ones len matmul scatter range plot mpl_connect set_xlim add_artist stack set_laser_spec T Circle pause figure get_laser_phi array set_ylim cla genfromtxt rphi_to_xy get_tracklets add_subplot axis detector set_aspect set_title Detector ones len matmul scatter range plot mpl_connect set_xlim add_artist stack zip set_laser_spec T Circle pause figure get_laser_phi array set_ylim cla join eval_epoch_with_output create_test_dataloader join glob print load_checkpoint eval cuda cfg_to_model eval_prec_rec group_predicted_center zip get_laser_phi compute_prec_rec array append enumerate log DROWDataset DataLoader DROWDataset DataLoader TemporalDROW DROW PolarDROW SpatialDROW FConvDROW ne view model squeeze cls_loss sigmoid mse_loss mean item float long update model_fn nms_predicted_center zip append numpy enumerate len eval_batch items list rphi_to_xy concatenate eval_prec_rec tqdm mean keys eval append compute_prec_rec sum array enumerate len ones len savefig plot_prec_rec_wps_only join list items flush add_image reshape tostring_rgb makedirs close draw fromstring transpose astype float32 plot_prec_rec eval_epoch add_scalar join basicConfig setFormatter addHandler StreamHandler Formatter DEBUG setLevel defaultdict linear_sum_assignment logical_not reversed argsort nan unique full_like append zeros sum array enumerate len abs argmin 
first_nonzero_idx subplots _prettify_pr_curve suptitle plot _lbplt_fatlegend subplots _prettify_pr_curve suptitle plot _lbplt_fatlegend plot set_xlabel set_xlim FuncFormatter set_ylabel set_major_formatter set_ylim legendHandles set_linewidth set_alpha legend get_linewidth DataParallel isinstance state_dict format save info load get format isfile print load_state_dict info cos sin cos arctan2 cos atan2 update list closest_detection global_to_canonical zip zeros enumerate len cdist T hstack array int arctan reshape astype maximum mean shape logical_or take int32 ceil _clip max view clamp where mean shape logical_xor item expand_as gather atan int list arange arctan squeeze shape pad round _clip float empty max range int astype float32 range shape int32 zeros empty clip rphi_to_xy arange where canonical_to_global maximum_filter hypot argmin int64 append sum range concatenate copy mean GaussianBlur int float32 at zeros len rphi_to_xy ones reshape square canonical_to_global sqrt zeros range len long genfromtxt genfromtxt astype float32 radians LaserScan angle_min angle_max TransformStamped Marker pi Point ADD stack LINE_LIST linspace zip append range len append Pose zip PoseArray get_param get_param | ## Check out our new repo at https://github.com/VisualComputingInstitute/2D_lidar_person_detection ---- This repository contains the implementation of *DR-SPAAM: A Spatial-Attention and Auto-regressive Model for Person Detection in 2D Range Data* to appear in IROS'20 ([arXiv](https://arxiv.org/abs/2004.14079), [video](https://www.youtube.com/watch?v=fACppMBEiQo)). # DR-SPAAM Detector DR-SPAAM is a deep learning based person detector that detects persons in 2D range sequences obtained from a laser scanner.  Although DR-SPAAM is a detector, it can generate simple tracklets, based on its spatial similarity module.  To interface with many robotic applications, an example ROS node is included. | 1,111 |
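The row above lists a `Detector` class in `dr_spaam/src/dr_spaam/detector.py` and its README describes running the detector on 2D range scans and exposing it through a ROS node. Below is a minimal usage sketch of how such a detector is typically driven one scan at a time; the constructor keywords, the `set_laser_spec` arguments, the checkpoint filename, and the scan length are illustrative assumptions, not values taken from this dump.

```python
# Minimal sketch (assumptions flagged below), not the repository's verbatim quick-start.
import numpy as np

from dr_spaam.detector import Detector  # class listed in dr_spaam/src/dr_spaam/detector.py

# Hypothetical checkpoint path; a real pre-trained model file is required.
ckpt_file = "dr_spaam_ckpt.pth"

# Constructor keywords are assumed for illustration; check the class definition for the exact API.
detector = Detector(model_name="DR-SPAAM", ckpt_file=ckpt_file, gpu=False, stride=1)

# Describe the scanner: angular increment (rad) and points per scan.
# The 0.5-degree / 450-point values are placeholders for a typical 225-degree scanner.
detector.set_laser_spec(angle_inc=np.radians(0.5), num_pts=450)

# One scan is a 1D array of range measurements in meters; random values stand in for real data.
scan = np.random.rand(450)

# Assumed return values: detection centers (x, y), confidence scores, and a per-point instance mask.
dets_xy, dets_cls, instance_mask = detector(scan)
print(dets_xy.shape, dets_cls.shape, instance_mask.shape)
```

In a ROS setup, the same call would sit inside a `LaserScan` subscriber callback, with the returned centers republished as poses or markers, as suggested by the `dr_spaam_ros` files listed in the row.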
W4ngatang/sent-bias | ['word embeddings'] | ['On Measuring Social Biases in Sentence Encoders'] | scripts/print_table.py sentbias/encoders/elmo.py scripts/generate_basic_contexts.py scripts/print-name-distances.py scripts/wordcount.py sentbias/main.py scripts/glove2h5.py sentbias/encoders/bow.py sentbias/data.py sentbias/encoders/gensen.py sentbias/weat.py scripts/convert_to_jsonl.py sentbias/encoders/bert.py sentbias/encoders/infersent.py scripts/compare-txt-json-tests.py convert_file fill_template pluralize singularize truncate_lists main main norm2 main mean_vector main holm_bonferroni read_weat_words load_json save_encodings load_jiant_encodings load_encodings test_sort_key maybe_make_dir handle_arguments ModelName main split_comma_and_check cossim s_wAB run_test stdev_s_wAB mean_s_wAB s_XAB p_val_permutation_test effect_size construct_cossim_lookup convert_keys_to_ints s_XYAB encode load_model get_glove encode get_word_dict get_vecs encode GenSenSingle Encoder encode GenSen build_vocab encode load_infersent print format endswith endswith min len items basicConfig format join add_argument pluralize singularize truncate_lists split input_paths ArgumentParser info parse_args len asarray special_dtype input_path close File create_dataset append open sorted values print mean_vector dict load_word2vec_format model_path items sorted enumerate update p_values_only header lstrip holm_bonferroni correct_within_groups join load items open info dict end finditer add_argument_group value add_argument ArgumentParser split makedirs glove_path log_file load_json vocab_expansion seed infersent_dir load_model load_infersent addHandler data_dir time_combine_method encode GenSen build_vocab load_jiant_encodings debug load_encodings layer_combine_method results_path bert_version save_encodings GenSenSingle ConfigProto gensen_version run_test split_comma_and_check FileHandler Module maybe_make_dir handle_arguments cove_encs exp_dir openai_encs use_cpu zeros cossim s_wAB binom warning list s_XAB array append range s_XYAB format concatenate shuffle mean info sf shapiro int combinations std len mean_s_wAB list stdev_s_wAB update copy p_val_permutation_test effect_size info construct_cossim_lookup convert_keys_to_ints from_pretrained eval model convert_tokens_to_ids tensor numpy tokenize len split append zeros array len len info get_glove get_word_dict get_vecs concatenate ElmoEmbedder embed_sentence mean sum max get_representation range append word_tokenize load join set_glove_path dict info | # sent-bias This repository contains the code and data for the paper "[On Measuring Social Biases in Sentence Encoders](https://arxiv.org/abs/1903.10561)" by Chandler May, Alex Wang, Shikha Bordia, Samuel R. Bowman and Rachel Rudinger. ## Setup ### Environment setup First, install Anaconda and a C++ compiler (for example, `g++`) if you do not have them. #### Using the prespecified environment Use `environment.yml` to create a conda environment with all necessary code dependencies: ``` | 1,112 |
WHUIR/DAZER | ['sentiment analysis', 'text classification', 'word embeddings'] | ['A Deep Relevance Model for Zero-Shot Document Filtering'] | get_label.py model.py get_word2id get_label_index get_labels BaseNN DAZER DataGenerator get_word2id list zip append range len Unicode tag tag tag | # DAZER The Tensorflow implementation of our ACL 2018 paper: ***A Deep Relevance Model for Zero-Shot Document Filtering, Chenliang Li, Wei Zhou, Feng Ji, Yu Duan, Haiqing Chen*** Paper url: http://aclweb.org/anthology/P18-1214 <p align="center"> <img src='https://github.com/WHUIR/DAZER/blob/master/model-img.png' width="800" align="center"> </p> ### Requirements - Python 3.5 - Tensorflow 1.2 | 1,113 |
WHUQZhang/SSGN | ['denoising'] | ['Hybrid Noise Removal in Hyperspectral Imagery With a Spatial-Spectral Gradient Network'] | utils.py batch_PSNR data_augmentation weights_init_kaiming data constant clamp_ __name__ kaiming_normal range astype float32 transpose rot90 flipud | # SSGN Q.Zhang, Q. Yuan, J. li, X. Liu, H. Shen, and L. Zhang, "Hybrid Noise Removal in Hyperspectral Imagery With a Spatial-Spectral Gradient Network," Accepted by IEEE TGRS, in press, 2019. https://ieeexplore.ieee.org/document/8734833 | 1,114 |
WING-NUS/scisumm-corpus | ['information retrieval', 'document summarization', 'text summarization'] | ['Overview and Results: CL-SciSumm Shared Task 2019', 'The CL-SciSumm Shared Task 2018: Results and Key Insights', 'ScisummNet: A Large Annotated Corpus and Content-Impact Models for Scientific Paper Summarization with Citation Networks'] | CLSciSumm_2020_Evaluation/15-CIST/setup/run36/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run58/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run55/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run13/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run15/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run72/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run50/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run10/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run11/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run109/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run22/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run10/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run57/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run37/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run43/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run9/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run9/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run28/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run107/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run14/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run47/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run45/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run109/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run110/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run1/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top2/program/task_2_preprocess.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run32/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run53/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run13/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run25/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run78/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run27/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run41/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run3/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run106/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run73/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run45/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run31/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run22/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run56/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/MMR2/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run9/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run52/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run18/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run12/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run17/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run29/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run31/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run4/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run64/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run44/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run60/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run16/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run107/program/task1_rouge_eval.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run62/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run17/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run10/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run82/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run21/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF2/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run109/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run14/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run47/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run50/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run81/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run7/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run48/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run32/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run27/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run7/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run105/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run24/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run55/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_3_field/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run30/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run19/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run42/program/task_2_preprocess.py 
CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run36/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run10/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run19/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run25/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run13/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run22/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run25/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run56/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run7/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run5/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_2_field/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run28/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run31/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run6/program/task_2_preprocess.py 2018-evaluation-script/createSetup.py CLSciSumm_2020_Evaluation/15-CIST/setup/run58/program/create_setting_task2_rouge.py 2018-evaluation-script/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run1/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run4/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run108/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run68/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run17/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run4/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run30/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run20/program/task1_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_2_field/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run5/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run66/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run107/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run33/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run105/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run24/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run12/program/task1_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run8/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run20/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run42/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run46/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run21/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run54/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclberte_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run16/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run78/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run82/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run52/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run70/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run70/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run55/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run15/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_2_field/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run32/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run63/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run106/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run72/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run18/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run16/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run79/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run46/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run26/program/task1_rouge_eval.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run60/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run103/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run15/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run6/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run19/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run22/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run61/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run77/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run49/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run31/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run26/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run78/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run35/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run19/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run66/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run71/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run105/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run11/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run46/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run17/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run48/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run59/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run4/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run15/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run11/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run25/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run4/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run37/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run56/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run105/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run24/program/task2_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run15/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run23/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run47/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run50/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run5/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run51/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run10/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run4/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run72/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run28/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run79/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run109/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run24/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run12/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run35/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run57/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run44/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run5/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run51/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run74/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run16/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_2_field/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run47/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run36/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run49/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run53/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run80/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run14/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top3/program/task2_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run61/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_3_field/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run42/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run57/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run13/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run51/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run38/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run74/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run26/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run38/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run5/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run14/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run22/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run41/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run69/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run109/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run8/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run43/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run65/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclberte_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/MMR2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run1/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run103/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run42/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run11/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_2_field/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run101/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top2/program/task1_rouge_eval.py 
CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run17/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run62/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run38/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run20/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run8/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run7/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run22/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run15/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run56/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run68/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run18/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run58/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run41/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run18/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run106/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run105/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run24/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run103/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run10/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run52/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run14/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run11/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run21/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run29/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run62/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run80/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run22/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run23/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run30/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top3/program/create_setting_task2_rouge.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run1/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run20/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run1/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run66/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/MMR2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_2_field/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run107/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run60/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run76/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run103/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run25/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run77/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_3_field/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run102/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run33/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run38/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run27/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run75/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run18/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run104/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run33/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run7/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top3/program/task2_eval.py 2018-evaluation-script/rename.py 2018-evaluation-script/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run14/program/task1_rouge_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run48/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run60/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run40/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_3_field/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run24/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run11/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run35/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run63/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run102/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run21/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run80/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run26/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run75/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run1/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run82/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run19/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run59/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run79/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run106/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run44/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run7/program/create_setting_task2_rouge.py 
CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run110/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run30/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run6/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run18/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run10/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run49/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run14/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run83/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run39/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run8/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run12/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run39/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run8/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_2_field/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run63/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run15/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run2/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run102/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run59/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run27/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run6/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run41/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run9/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run53/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run14/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run11/program/task2_eval.py 
CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run43/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run9/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run23/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run83/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run25/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run110/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run70/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run75/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run75/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_3_field/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run39/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run55/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run10/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run11/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run42/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run106/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run33/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run108/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run5/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run22/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run73/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run83/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run9/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run101/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run12/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run16/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run54/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run32/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF/program/task1_eval.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run25/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run23/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run1/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run26/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run84/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/MMR2/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run8/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run64/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run81/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run65/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run45/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run20/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run40/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run79/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run59/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run53/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_2_field/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run21/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run60/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run13/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run29/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run44/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run19/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run34/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run48/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run11/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run34/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run34/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run84/program/task1_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run16/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run32/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run7/program/task1_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run67/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run37/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run83/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_2_field/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run22/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run76/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run21/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run17/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run12/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run19/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run49/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run54/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run15/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run24/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run71/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run19/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run84/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run51/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run15/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run4/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run68/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run64/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run7/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run23/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run69/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run21/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run16/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run71/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_3_field/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run5/program/task1_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run53/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run77/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run65/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run16/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run66/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantE2/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run2/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run14/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run18/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run82/program/task1_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_3_field/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_3_field/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS2/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run5/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run68/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_2_field/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run12/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run24/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run0/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run40/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run58/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run62/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run9/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run74/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run101/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run101/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run61/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run28/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run73/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run1/program/task2_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run19/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run4/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run81/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run10/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run37/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run13/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run110/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run21/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run57/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run59/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run72/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run50/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run84/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run80/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run74/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run67/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run56/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run1/program/task1_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_3_field/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run25/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_3_field/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run18/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run82/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_2_field/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run13/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run76/program/task1_eval.py 2018-evaluation-script/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top2/program/task1_eval.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run23/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run17/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run20/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run1/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run23/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run11/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run18/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run9/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run11/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run13/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run16/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run6/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run78/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run61/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run67/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run20/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run1/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run26/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run17/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclberte_top3/program/task2_eval.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top3/program/task_2_preprocess.py 2018-evaluation-script/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run20/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run6/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run78/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run68/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run52/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_2_field/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run8/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run25/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run7/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_2_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run37/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run108/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run23/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run0/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run10/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run0/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run62/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run22/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run38/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run69/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run33/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run8/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run43/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run6/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run4/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_top2/program/task1_rouge_eval.py 
CLSciSumm_2020_Evaluation/15-CIST/setup/run18/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run69/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run1/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_raw_padding_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run104/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run67/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run108/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run6/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run30/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run48/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run61/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run12/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run0/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run0/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclberte_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_unused_token_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run79/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run8/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run80/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run12/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run40/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run103/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run64/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run35/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_3_field/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run23/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_3_field/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run19/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run12/program/create_setting_task2_rouge.py 
CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run11/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_scibert_all_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run43/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run65/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run8/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run16/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run50/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run71/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run76/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run36/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run17/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run12/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run58/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run29/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run73/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run12/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run28/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run35/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run25/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run41/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run7/program/task1_eval.py 2018-evaluation-script/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run10/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run104/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run26/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantS2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run108/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX2/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_2_field/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run39/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run67/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run49/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run5/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run11/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA2/program/task1_eval.py 
CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_3_field/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run10/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run34/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run9/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run57/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run55/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run11/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run6/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_2_field/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run21/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_sembert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run6/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run70/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run75/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_bert_proba_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run23/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run39/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run71/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run83/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run54/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/MMR2/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run51/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run11/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run1/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run20/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run1/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run63/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run77/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run24/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run31/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run29/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/negative_only_3_field/program/task2_eval.py 
CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run110/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/49-uniHD/setup/intersection_2_field/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run46/program/task2_eval.py CLSciSumm_2020_Evaluation/49-uniHD/setup/with_truth_3_field/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run15/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run69/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_fake_token_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run10/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run24/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_aclbert_scibert_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run81/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run101/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run66/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run4/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run64/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run72/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run76/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run27/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_1_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run81/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run20/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run3/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run104/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run107/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run52/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run13/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run40/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run12/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run45/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run65/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_aclbert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run45/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_aclbert_top3/program/task_2_preprocess.py 
CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run26/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_direct_token_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantU/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run47/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run73/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run2/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run21/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run36/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top3/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_aclbert_top3/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run34/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run63/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run17/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_aclbert_scibert_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lr_bert_proba_top3/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run102/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run104/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run14/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run74/program/task1_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclberte_top3/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_lgbm_scibert_top2/program/task1_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run12/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run54/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run46/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run84/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top2/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/15-CIST/setup/run77/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/15-CIST/setup/run70/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantF/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run1/program/task2_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run44/program/task1_eval.py CLSciSumm_2020_Evaluation/15-CIST/setup/run13/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_scibert_all_top2/program/task2_eval.py CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run10/program/task1_rouge_eval.py 
CLSciSumm_2020_Evaluation/54-CMU(CiteQA)/setup/run102/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run10/program/create_setting_task2_rouge.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run12/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_randomforest_scibert_top3/program/task_2_preprocess.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_catboost_aclbert_scibert_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantA2/program/task2_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run5/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/43-IITBH-IITP/setup/variantX2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/18-NLP-PINGAN-TECH/setup/run_only_scibert_sp_token_top2/program/task1_rouge_eval.py CLSciSumm_2020_Evaluation/50-IITP-AI-NLP-ML/setup/run9/program/task_2_preprocess.py calculate parse evaluate dictify main parse_csv main dictify do_rouge evaluate main main process attrib findall text copy append str replace print endswith strip fromstring BeautifulSoup dictify startswith append split str reader replace print endswith fromstring BeautifulSoup dictify startswith zip append enumerate open print float calculate parse_csv join remove sum evaluate print startswith append float listdir exists len format print wait startswith float Popen split do_rouge print append float sum len int split strip range startswith len process | # README ** LaySumm is NOT covered by CC BY 4.0 licence. Please do not email us about Elsevier's LaySumm. We are unable to respond** Shield: [![CC BY 4.0][cc-by-shield]][cc-by] This work is licensed under a [Creative Commons Attribution 4.0 International License][cc-by] **EXCEPT** for the following files which are closed source under strict copyright laws enforced by Elsevier labs. 
We hold no accountability for these: * https://github.com/WING-NUS/scisumm-corpus/blob/master/README_Laysumm.md * and files under the directory: https://github.com/WING-NUS/scisumm-corpus/tree/master/data/LAYSUMM_SAMPLE [![CC BY 4.0][cc-by-image]][cc-by] [cc-by]: http://creativecommons.org/licenses/by/4.0/ [cc-by-image]: https://i.creativecommons.org/l/by/4.0/88x31.png | 1,115 |
Walleclipse/AGPC | ['text generation'] | ['Automatic Generation of Personalized Comment Based on User Profile'] | prep_data.py model/encoder.py model/PCGN_beamsearch.py model/cell.py utils/infer_utils.py model/PCGN_model.py utils/model_utils.py infer_PCGN.py utils/config_utils.py train_PCGN.py utils/data_utils.py model/PCGN_attention.py parse_args infer data_to_token_ids initialize_vocabulary sentence_to_token_ids create_vocabulary basic_tokenizer main main parse_args create_rnn_cell single_cell build_encoder PCGNWrapper PCGNWrapperState PCGNBeamSearchDecoder _check_maybe _get_scores _maybe_tensor_gather_helper BeamSearchDecoderOutput _tensor_gather_helper _beam_search_step _length_penalty tile_batch BeamSearchDecoderState _mask_probs FinalBeamSearchDecoderOutput _tile_batch PCGNModel get_pcgn_training_config get_pcgn_infer_config get_pcgn_model_config get_pcgn_batch read_data calc_bleu2 batch_token_to_str featinds2df token_to_str create_vocab_tables setup_workpath save_model load_model add_summary add_argument ArgumentParser get_pcgn_model_config calc_bleu2 concat batch_token_to_str DataFrame GPUOptions Session values run get_pcgn_batch str read_data load_model PCGNModel range format saver int print to_csv featinds2df global_variables_initializer get_pcgn_infer_config create_vocab_tables len extend split print sorted len dict Exists basic_tokenizer tokenizer items time list reset_index initialize_vocabulary iterrows concatenate print sentence_to_token_ids append values data_to_token_ids concatenate print create_vocabulary unique read_csv setup_workpath save_model get_pcgn_model_config GPUOptions Session run get_pcgn_batch read_data load_model PCGNModel append range compute_perplexity format FileWriter get_pcgn_training_config saver time learning_rate graph add_summary global_variables_initializer train len BasicLSTMCell LayerNormBasicLSTMCell GRUCell DropoutWrapper MultiRNNCell single_cell dynamic_rnn embedding_lookup tuple create_rnn_cell range convert_to_tensor concatenate reshape concat shape set_shape tile expand_dims ndims flatten isinstance TensorArray mod to_int32 _tensor_gather_helper BeamSearchDecoderState top_k set_shape map_structure to_int64 shape expand_dims constant_value convert_to_tensor finished one_hot _get_scores log_softmax BeamSearchDecoderOutput lengths _mask_probs cond equal minimum not_equal log_probs logical_or _length_penalty convert_to_tensor set_shape constant_value to_float expand_dims one_hot _check_maybe concatenate ones choice array append max len load print append flush open load_vocab append join join list int DataFrame concat argmax enumerate sentence_bleu SmoothingFunction zip int restore format print get_checkpoint_state model_checkpoint_path join format print save flush makedirs format mkdir Summary | ## Automatic Generation of Personalized Comment Based on User Profile - This is the code for ACL 2019 SRW paper *[AGPC: Automatic Generation of Personalized Comment Based on User Profile](https://arxiv.org/pdf/1907.10371v1.pdf)* ### Requirements * Python 3.5 * tensorflow 1.4 ### Preprocessing ``` python prep_data.py ``` We provide the sample data in sample_data/sample_data.csv | 1,116 |
Walleclipse/ChineseAddress_OCR | ['scene text detection'] | ['Detecting Text in Natural Image with Connectionist Text Proposal Network'] | ctpn/lib/utils/timer.py ctpn/lib/text_connector/other.py ctpn/lib/datasets/__init__.py ctpn/lib/rpn_msr/generate_anchors.py ctpn/lib/fast_rcnn/config.py ctpn/lib/roi_data_layer/layer.py ctpn/lib/rpn_msr/proposal_layer_tf.py ctpn/lib/networks/VGGnet_test.py ctpn/text_detect.py ctpn/lib/utils/setup_cpu.py ctpn/lib/utils/blob.py ctpn/lib/__init__.py ctpn/lib/backup/fast_rcnn/config.py ctpn/lib/fast_rcnn/bbox_transform.py ctpn/prepare_training_data/split_label.py ctpn/lib/datasets/pascal_voc.py ctpn/lib/roi_data_layer/minibatch.py stupid_addrs_rev.py demo_final.py ctpn/lib/backup/text_connector/detectors.py ctpn/lib/backup/text_connector/text_proposal_graph_builder.py ctpn/lib/fast_rcnn/nms_wrapper.py ctpn/lib/roi_data_layer/__init__.py ctpn/lib/text_connector/text_proposal_graph_builder.py ctpn/lib/datasets/factory.py ctpn/lib/backup/fast_rcnn/train.py densenet/keys.py ctpn/lib/utils/__init__.py ctpn/lib/text_connector/text_proposal_connector_oriented.py ctpn/lib/backup/text_connector/text_proposal_connector.py ctpn/lib/backup/text_connector/text_proposal_connector_oriented.py ctpn/lib/backup/fast_rcnn/bbox_transform.py run_flask.py ctpn/lib/backup/text_connector/text_connect_cfg.py ctpn/lib/backup/fast_rcnn/__init__.py ctpn/lib/datasets/ds_utils.py ctpn/lib/networks/network.py ctpn/lib/networks/VGGnet_train.py ctpn/ctpn/demo.py ctpn/lib/networks/__init__.py ctpn/lib/backup/text_connector/other.py ctpn/lib/utils/boxes_grid.py ctpn/lib/text_connector/detectors.py ctpn/lib/fast_rcnn/train.py ctpn/lib/fast_rcnn/test.py ocr_whole.py ctpn/ctpn/train_net.py ctpn/lib/backup/text_connector/__init__.py ctpn/lib/rpn_msr/anchor_target_layer_tf.py ctpn/lib/text_connector/text_proposal_connector.py densenet/model.py ctpn/lib/networks/factory.py ctpn/lib/utils/setup.py ctpn/lib/roi_data_layer/roidb.py ctpn/lib/backup/fast_rcnn/nms_wrapper.py ctpn/lib/backup/fast_rcnn/test.py ctpn/lib/datasets/imdb.py ctpn/prepare_training_data/ToVoc.py ctpn/lib/text_connector/text_connect_cfg.py ctpn/lib/text_connector/__init__.py densenet/densenet.py is_alphabet demo_flask dumpRotateImage model sort_box charRec Pic_str download_txt api_upload allowed_file download_img test_stupid re_prep may_cut_messy stupid_stroke_sims stupid_revise stupid_revise_split stupid_match_single text_detect draw_boxes ctpn resize_im load_tf_model resize_im draw_boxes ctpn clip_boxes bbox_transform bbox_transform_inv cfg_from_list cfg_from_file _merge_a_into_b get_log_dir get_output_dir nms test_ctpn _get_image_blob _get_blobs train_net get_training_roidb get_data_layer SolverWrapper TextDetector clip_boxes normalize threshold Graph Config TextProposalConnector TextProposalConnector TextProposalGraphBuilder unique_boxes xywh_to_xyxy validate_boxes xyxy_to_xywh filter_small_boxes _selective_search_IJCV_top_k get_imdb list_imdbs imdb pascal_voc _which clip_boxes bbox_transform bbox_transform_inv cfg_from_list cfg_from_file _merge_a_into_b get_log_dir get_output_dir nms py_cpu_nms test_ctpn _get_image_blob _get_blobs train_net get_training_roidb get_data_layer SolverWrapper get_network layer Network VGGnet_test VGGnet_train RoIDataLayer _project_im_rois _sample_rois _vis_minibatch _get_image_blob get_minibatch _get_bbox_regression_labels add_bbox_regression_targets prepare_roidb _compute_targets _unmap _compute_targets anchor_target_layer generate_anchors generate_basic_anchors scale_anchor _filter_boxes 
proposal_layer _filter_irregular_boxes TextDetector clip_boxes normalize threshold Graph Config TextProposalConnector TextProposalConnector TextProposalGraphBuilder im_list_to_blob prep_im_for_blob get_boxes_grid find_in_path customize_compiler_for_nvcc custom_build_ext locate_cuda find_in_path customize_compiler_for_nvcc custom_build_ext Timer generate_xml _is_hard build_voc_dirs conv_block dense_blstm dense_cnn transition_block dense_block decode predict model save exists open list name Grocery predict isdigit format replace close stupid_revise load join time print convert get_load_status extend filter sub train array GroceryTextModel len append sort len warpAffine int min getRotationMatrix2D append max dumpRotateImage int keras_densenet convert degrees atan2 append enumerate cfg_from_file text_detect sort_box charRec join secure_filename create_uuid save filename makedirs join demo_flask isfile print search may_cut_messy sub split range len list replace distance ratio append ord set int sorted nlargest min stupid_stroke_sims append stupid_match_single len time replace print re_prep stupid_revise_split str print split stupid_revise range len float min max checkpoints_path restore format print get_checkpoint_state get_network model_checkpoint_path Saver ConfigProto GPUOptions Session toc format print test_ctpn total_time tic detect TextDetector Timer resize_im int line copy resize zeros range draw_boxes ctpn join imwrite draw_boxes imread transpose log dtype exp astype shape zeros minimum maximum join EXP_DIR name abspath ROOT_DIR makedirs join name strftime localtime LOG_DIR abspath ROOT_DIR makedirs items list ndarray isinstance type array _merge_a_into_b literal_eval zip split USE_GPU_NMS MAX_SIZE min astype float32 SCALES shape resize append im_list_to_blob float max _get_image_blob HAS_RPN array _get_blobs run prepare_roidb print USE_FLIPPED append_flipped_images HAS_RPN RoIDataLayer IS_MULTISCALE HAS_RPN ConfigProto min max threshold dot array unique pascal_voc selective_search_IJCV_roidb print list_imdbs join strip is_exe pathsep split append maximum minimum vstack round _project_im_rois basename BATCH_SIZE ones len shape _get_image_blob range FG_FRACTION hstack astype HAS_RPN empty zeros _sample_rois float32 randint array BBOX_REG minimum size choice append _get_bbox_regression_labels prep_im_for_blob PIXEL_MEANS imread range len zeros BBOX_INSIDE_WEIGHTS shape show uint8 print astype copy add_patch imshow Rectangle range image_index toarray roidb argmax max range image_path_at len EPS BBOX_NORMALIZE_STDS print _compute_targets BBOX_NORMALIZE_MEANS BBOX_NORMALIZE_TARGETS_PRECOMPUTED mean sqrt BBOX_NORMALIZE_TARGETS tile zeros array range len bbox_transform ascontiguousarray zeros argmax bbox_overlaps arange RPN_BBOX_INSIDE_WEIGHTS _unmap argmax max RPN_FG_FRACTION generate_anchors ones transpose bbox_intersections array meshgrid sum RPN_BATCHSIZE format hstack astype ascontiguousarray choice sqrt fill empty RPN_POSITIVE_WEIGHT int EPS print reshape RPN_CLOBBER_POSITIVES _compute_targets zeros bbox_overlaps fill empty zeros array int32 scale_anchor copy append decode generate_anchors RPN_POST_NMS_TOP_N format arange nms print reshape meshgrid transpose clip_boxes bbox_transform_inv _filter_boxes hstack shape RPN_NMS_THRESH RPN_PRE_NMS_TOP_N RPN_MIN_SIZE zeros max range len min astype float32 rand shape RANDOM_DOWNSAMPLE resize float max arange reshape transpose hstack SPATIAL_SCALE dstack ASPECTS sqrt SCALES_BASE floor KERNEL_SIZE repeat meshgrid zeros max range len pathsep pjoin 
exists split find_in_path items list pjoin pathsep dirname sep append _compile compiler_so str int Document lower append float append_xml_node_attr split join mkdir conv_block range concatenate transition_block dense_block append range len int decode ANTIALIAS reshape astype float32 resize | # ChineseAddress_OCR ## Address text recognition from document photos taken in uncontrolled environments Photographing Chinese-Address OCR implemented using CTPN+CTC+Address Correction. This is a project of the 2018 Deecamp 25th group (DRPRG). Thanks to my team members! (This is the project of DeeCamp 2018 group 25, the Depth-Restricted Matting group; many, many thanks to every teammate!) Our Demo: https://www.bilibili.com/video/av30081208 Our Wechat Program (WeChat mini program): OCRdeecamp <img src="https://github.com/Walleclipse/ChineseAddress_OCR/raw/master/demo/demo.png" width="600" > | 1,117
Wang-Shuo/Neural-Attentive-Session-Based-Recommendation-PyTorch | ['session based recommendations'] | ['Neural Attentive Session-based Recommendation', 'Neural Att entive Session-based Recommendation'] | metric.py datasets/preprocess.py dataset.py narm.py utils.py main.py load_data RecSysDataset main validate trainForEpoch get_mrr get_recall evaluate NARM collate_fn process_seqs obtian_tra obtian_tes int len_argsort arange shuffle zip append round len validate RecSysDataset dataset_path DataLoader save topk StepLR Adam load_state_dict trainForEpoch to CrossEntropyLoss range epoch format test lr load print tqdm parameters load_data step time criterion model backward print train zero_grad tqdm item to step enumerate len mean eval float size nonzero expand_as reciprocal data view size nonzero expand_as float topk get_mrr get_recall LongTensor sort transpose append long enumerate print range zip len | # Neural-Attentive-Session-Based-Recommendation-PyTorch A PyTorch implementation of the NARM model in [Neural Attentive Session Based Recommendation](https://arxiv.org/abs/1711.04725) (Li, Jing, et al. "Neural attentive session-based recommendation." Proceedings of the 2017 ACM on Conference on Information and Knowledge Management. ACM, 2017).  # Usage 1. Install required packages from requirements.txt file. ```bash pip install -r requirements.txt ``` 2. Download datasets used in the paper: [YOOCHOOSE](http://2015.recsyschallenge.com/challenge.html) and [DIGINETICA](http://cikm2016.cs.iupui.edu/cikm-cup). Put the two specific files named `train-item-views.csv` and `yoochoose-clicks.dat` into the folder `datasets/` 3. Change to `datasets` fold and run `preprocess.py` script to preprocess datasets. Two directories named after dataset should be generated under `datasets/`. | 1,118 |
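Note: the `process_seqs` helper listed in the dependencies of the NARM row above is presumably the step that turns each click session into next-item training examples. A minimal sketch of that idea (my own illustration; the function name, signature, and details in the repo's `datasets/preprocess.py` may differ):

```python
def split_session(session):
    """Turn one click session (a list of item ids) into (prefix, next-item) pairs."""
    pairs = []
    for i in range(1, len(session)):
        pairs.append((session[:i], session[i]))
    return pairs

# A session [10, 42, 7, 3] yields three training examples:
# [([10], 42), ([10, 42], 7), ([10, 42, 7], 3)]
print(split_session([10, 42, 7, 3]))
```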
WangZesen/GAN-Hinge-Loss | ['text generation'] | ['Geometric GAN'] | utils.py train.py main.py model.py apply_configure Generator Discriminator train plot get_dataset str join append range split learning_rate train_one_step plot print Generator Adam reset_states get_dataset test_step Discriminator Mean range mnist batch_size astype float32 load_data batch join str zeros range imsave | ## GAN with Hinge Loss Implemented with Tensorflow 2.0
An implementation of the hinge version of the GAN loss from Geometric GAN (https://arxiv.org/pdf/1705.02894.pdf)
### Training Loss

### Trained After 0 Epochs

| 1,119 |
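Note: the hinge losses referenced in the GAN-Hinge-Loss row above follow the standard Geometric GAN formulation. A minimal TensorFlow 2 sketch of that formulation (my own illustration, not necessarily how the repo's `train.py` structures its training step):

```python
import tensorflow as tf

def d_hinge_loss(real_logits, fake_logits):
    # Discriminator hinge loss: push real logits above +1 and fake logits below -1.
    real_term = tf.reduce_mean(tf.nn.relu(1.0 - real_logits))
    fake_term = tf.reduce_mean(tf.nn.relu(1.0 + fake_logits))
    return real_term + fake_term

def g_hinge_loss(fake_logits):
    # Generator hinge loss: raise the discriminator's score on generated samples.
    return -tf.reduce_mean(fake_logits)

# In a training step one would typically compute:
#   d_loss = d_hinge_loss(disc(x_real), disc(gen(z)))
#   g_loss = g_hinge_loss(disc(gen(z)))
```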
Wanggcong/Spatial-Temporal-Re-identification | ['person re identification'] | ['Spatial-Temporal Person Re-identification'] | prepare.py gen_rerank_all_scores_mat.py gen_st_model_duke.py test_st_market.py train_market.py test_st_duke.py random_erasing.py gen_st_model_market.py train_duke.py evaluate_st.py evaluate_rerank_market.py re_ranking.py plot_st_distribution.py evaluate_rerank_duke.py model.py compute_mAP evaluate compute_mAP evaluate evaluate compute_mAP gaussian_func2 gauss_smooth2 gaussian_func gauss_smooth evaluate compute_mAP evaluate2 gaussian_func2 gauss_smooth2 gaussian_func gauss_smooth spatial_temporal_distribution get_id spatial_temporal_distribution get_id gaussian_func gauss_smooth ft_net_dense ClassBlock ft_net PCB_test weights_init_classifier ft_net_middle PCB weights_init_kaiming gaussian_func2 gauss_smooth2 gaussian_func gauss_smooth gen_gallery_rename gen_train_all_rename gen_query_rename gen_val_rename parse_frame gen_train_rename RandomErasing k_reciprocal_neigh re_ranking load_network get_id extract_feature fliplr load_network get_id extract_feature fliplr train_model save_network draw_curve train_model save_network draw_curve setdiff1d compute_mAP argsort intersect1d argwhere append flatten argwhere in1d zero_ range len int exp dot zeros range len print sqrt pi pow sqrt power pi transpose gaussian_func2 dot zeros range len transpose gaussian_func2 dot zeros range len int exp dot zeros range len append int int zeros sum range len gaussian_func enumerate data normal constant __name__ kaiming_normal data normal constant __name__ int format range str parse_frame walk copyfile makedirs parse_frame walk copyfile makedirs parse_frame walk copyfile makedirs parse_frame walk copyfile makedirs parse_frame walk copyfile makedirs zeros_like around max exp transpose shape append sum range astype mean unique minimum int print float32 argpartition k_reciprocal_neigh zeros len load join which_epoch load_state_dict index_select long norm use_dense view FloatTensor print Variable size model div cuda zero_ expand_as cpu PCB fliplr range cat data draw_curve Softmax model zero_grad max cuda sm load_state_dict append range state_dict format save_network item time criterion backward print Variable train step join plot savefig legend append join save is_available cuda state_dict | # Spatial-Temporal Person Re-identification ---------- Code for st-ReID(pytorch). We achieve **Rank@1=98.1%, mAP=87.6%** without re-ranking and **Rank@1=98.0%, mAP=95.5%** with re-ranking for market1501.For Duke-MTMC, we achieve **Rank@1=94.4%, mAP=83.9%** without re-ranking and **Rank@1=94.5%, mAP=92.7%** with re-ranking. ## Update and FQA: - 2020.01.08: If you do not want to re-train a model, you can follow this link. https://github.com/Wanggcong/Spatial-Temporal-Re-identification/issues/26#issuecomment-571905649 - 2019.12.26:a demo figure has been added. I am not sure if it works or not because it was written one years ago. I will update this file in the future. - 2019.07.28: Models(+RE) (google drive Link:https://drive.google.com/drive/folders/1FIreE0pUGiqLzppzz_f7gHw0kaXZb1kC) - 2019.07.11: Models (+RE) (baiduyun Link:https://pan.baidu.com/s/1QMp22dVGJvBH45e4XPdeKw password:dn7b) are released. Note that, for market, slightly different from the results in the paper because we use pytorch 0.4.1 to train these models (mAP is slightly higher than paper while rank-1 is slightly lower than paper). We may reproduce the results by Pytorch 0.3 later. 
- 2019.07.11: README.md, python3 prepare --Duke ---> python3 prepare.py --Duke - 2019.06.02: How to add the spatial-temporal constraint into conventional re-id models? You can replace step 2 and step 3 by your own visual feature represenation. | 1,120 |
WanzhengZhu/GRUEN | ['text generation'] | ['GRUEN for Evaluating Linguistic Quality of Generated Text'] | Main.py get_focus_score get_gruen preprocess_candidates get_lm_score get_redundancy_score get_grammaticality_score get_cola_score join strip group search len sent_tokenize append range split from_pretrained tqdm eval append to load_pretrained_cola_model convert_sentence_score_to_paragraph_score evaluate_cola get_lm_score get_cola_score range len compute_sentence_similarity range len preprocess_candidates get_focus_score get_grammaticality_score get_redundancy_score |  [](https://opensource.org/licenses/MIT) # GRUEN for Evaluating Linguistic Quality of Generated Text This repo is the GRUEN metric implementation of [GRUEN for Evaluating Linguistic Quality of Generated Text](https://arxiv.org/pdf/2010.02498.pdf) (Findings of EMNLP 2020). ## Table of Contents - [Introduction](#Introduction) - [Code](#Code) - [Dataset](#Dataset) - [Related Papers](#Related-Papers) - [Citation](#Citation) | 1,121 |
WarBean/tps_stn_pytorch | ['optical character recognition', 'scene text detection', 'scene text recognition'] | ['Robust Scene Text Recognition with Automatic Rectification'] | mnist_make_gif.py mnist_plot_curve.py tps_grid_gen.py mnist_model.py mnist_visualize.py single_visualize.py grid_sample.py mnist_train.py data_loader.py get_test_loader get_train_loader grid_sample CNN STNClsNet ClsNet UnBoundedGridLocNet BoundedGridLocNet get_model train test TPSGridGen compute_partial_repr Variable fill_ print ClsNet STNClsNet format state_dict model backward print nll_loss dataset zero_grad save step cuda enumerate len format model print write eval fsync dataset cuda flush len masked_fill_ size log view | # tps_stn_pytorch PyTorch implementation of Spatial Transformer Network (STN) with Thin Plate Spline (TPS). <img src="https://raw.githubusercontent.com/warbean/tps_stn_pytorch/master/demo/top_1.gif" height = "200"/> <img src="https://raw.githubusercontent.com/warbean/tps_stn_pytorch/master/demo/top_2.gif" height = "200"/> ## Introduction STN is a powerful neural network architecture proposed by DeepMind in [[1]](#ref-1). STN achieves real spatial invariance by automatically rectify input images before they are feeded into a normal classification network. The most amazing part of STN is that it is end-to-end differential and can be directly plugged into existing network architectures (AlexNet, Resnet, etc), **without any extra supervision.** Original STN paper [[1]](#ref-1) experiments on three specific transformation forms: Affine Transformation, Projective Transformation and **Thin Plate Spline Transformation (TPS)**. Among them I think TPS is the most powerful translation because it can warp a image in arbitrary way. As shown below, I can warp my Avatar <img src="https://raw.githubusercontent.com/warbean/tps_stn_pytorch/master/demo/source_avatar.jpg" height = "200"/> into <img src="https://raw.githubusercontent.com/warbean/tps_stn_pytorch/master/demo/target_avatar.jpg" height = "200"/> | 1,122 |
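Note: the `grid_sample` and `TPSGridGen` modules listed for tps_stn_pytorch implement the differentiable warping step of an STN. A minimal sketch of that sampling step using the equivalent functionality shipped with recent PyTorch (an illustration under assumed shapes, not the repo's own implementation):

```python
import torch
import torch.nn.functional as F

# Feature map: (N, C, H, W); sampling grid: (N, H_out, W_out, 2) with coords in [-1, 1].
x = torch.randn(1, 3, 28, 28)

# Identity grid built from an identity affine transform, just for illustration.
theta = torch.tensor([[[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])
grid = F.affine_grid(theta, size=(1, 3, 28, 28), align_corners=False)

warped = F.grid_sample(x, grid, align_corners=False)  # equals x up to interpolation

# In the TPS variant, a localization network predicts control points and a
# TPS grid generator turns them into `grid` before this sampling step.
```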
WayneDW/DeepLight_Deep-Lightweight-Feature-Interactions | ['click through rate prediction'] | ['DeepLight: Deep Lightweight Feature Interactions for Accelerating CTR Predictions in Ad Serving'] | utils/data_preprocess.py utils/plot_criteo_R_matrix.py NFM.py model/DeepFMs.py main_all.py utils/find_the_similar_sparse_dnn.py data/large/preprocess.py load_category_index read_data int open append range split int len split append load_category_index enumerate open | # DeepLight: Deep Lightweight Feature Interactions Deploying the end-to-end deep factorization machines has a critical issue in prediction latency. To handle this issue, we study the acceleration of the prediction by conducting structural pruning for DeepFwFM, which ends up with 46X speed-ups without sacrifice of the state-of-the-art performance on Criteo dataset. [](https://paperswithcode.com/sota/click-through-rate-prediction-on-criteo?p=a-sparse-deep-factorization-machine-for) Please refer to the [arXiv paper](https://arxiv.org/pdf/2002.06987.pdf) if you are interested in the details. ``` @inproceedings{deeplight, title={DeepLight: Deep Lightweight Feature Interactions for Accelerating CTR Predictions in Ad Serving}, author={Wei Deng and Junwei Pan and Tian Zhou and Deguang Kong and Aaron Flores and Guang Lin}, booktitle={International Conference on Web Search and Data Mining (WSDM'21)}, year={2021} | 1,123 |
WayneDW/sDeepFwFM | ['click through rate prediction'] | ['DeepLight: Deep Lightweight Feature Interactions for Accelerating CTR Predictions in Ad Serving'] | utils/data_preprocess.py utils/plot_criteo_R_matrix.py NFM.py model/DeepFMs.py main_all.py utils/find_the_similar_sparse_dnn.py data/large/preprocess.py load_category_index read_data int open append range split int len split append load_category_index enumerate open | # DeepLight: Deep Lightweight Feature Interactions Deploying the end-to-end deep factorization machines has a critical issue in prediction latency. To handle this issue, we study the acceleration of the prediction by conducting structural pruning for DeepFwFM, which ends up with 46X speed-ups without sacrifice of the state-of-the-art performance on Criteo dataset. [](https://paperswithcode.com/sota/click-through-rate-prediction-on-criteo?p=a-sparse-deep-factorization-machine-for) Please refer to the [arXiv paper](https://arxiv.org/pdf/2002.06987.pdf) if you are interested in the details. ``` @inproceedings{deeplight, title={DeepLight: Deep Lightweight Feature Interactions for Accelerating CTR Predictions in Ad Serving}, author={Wei Deng and Junwei Pan and Tian Zhou and Deguang Kong and Aaron Flores and Guang Lin}, booktitle={International Conference on Web Search and Data Mining (WSDM'21)}, year={2021} | 1,124 |
WeiWangTrento/Power-Iteration-SVD | ['denoising'] | ['Backpropagation-Friendly Eigendecomposition'] | PCANorm.py main_cifar100.py models/__init__.py ZCANorm.py torch_utils.py utils.py main.py models/resnet.py isnan train test isnan train test myPCANormSVDPI power_iteration_unstable power_iteration_once calc_mid format_time init_params progress_bar get_mean_and_std ZCANormPIunstable l2normalize ZCANormSVDPI ZCANormSVDunstable conv1x1 resnext50_32x4d ResNet resnet50 resnext101_32x8d Bottleneck resnet152 conv3x3 resnet34 resnet18 print_grad_ BasicBlock resnet101 format criterion backward print add_scalar progress_bar zero_grad exit named_parameters isnan item step max net enumerate len join format add_scalar print eval save len t clamp mm eye print DataLoader div_ zeros range len normal constant isinstance kaiming_normal Conv2d bias modules BatchNorm2d weight Linear int time join format_time write append range flush len int print sleep item load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict ResNet ResNet | # Backpropagation-Friendly Eigendecomposition Eigendecomposition (ED) is widely used in deep networks. However, the backpropagation of its results tends to be numerically unstable, whether using ED directly or approximating it with the Power Iteration method, particularly when dealing with large matrices. While this can be mitigated by partitioning the data in small and arbitrary groups, doing so has no theoretical basis and makes its impossible to exploit the power of ED to the full. We introduce a numerically stable and differentiable approach to leveraging eigenvectors in deep networks. It can handle large symmetric square matrices without requiring to split them. ## Github Code ## Pros & cons Pros: - Numerically Stable. - Can be plugged into any codes easily. Cons: - Could not compute all eigenvalues (for very large matrices) because of the round-off error accumulation. - It is a bit slow as it has for loops inside the deflation process. | 1,125 |
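Note: a standalone sketch of plain power iteration for the leading eigenpair of a symmetric matrix, to illustrate the idea behind the Power-Iteration-SVD row above. The repo's `power_iteration_once` and deflation routines (and their custom backward pass) differ in detail; this is my own simplified version:

```python
import torch

def leading_eigenpair(cov, n_iter=20, eps=1e-7):
    """Plain power iteration for the dominant eigenvalue/eigenvector of a
    symmetric PSD matrix `cov`. Every op here is differentiable, unlike the
    backward of a full eigendecomposition, which becomes unstable when
    eigenvalues are close to each other."""
    v = torch.randn(cov.shape[0], 1, dtype=cov.dtype, device=cov.device)
    v = v / (v.norm() + eps)
    for _ in range(n_iter):
        v = cov @ v
        v = v / (v.norm() + eps)
    lam = (v.t() @ cov @ v).squeeze()
    return lam, v

# Usage: deflate with cov - lam * (v @ v.t()) and repeat to get further eigenpairs,
# which is where the README's note about slow for-loops in the deflation comes from.
```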
Weijian-li/unsupervised_inter_intra_landmark | ['graph learning'] | ['Structured Landmark Detection via Topology-Adapting Deep Graph Learning'] | losses.py train.py MTFAN.py test.py databases.py Train_options.py utils.py extract_data.py model.py SuperDB preparedb loadnet main extractdata getdata matching_loss convertBack MyConv2d gather_feature interpolated_sum_multicontour convertLayer conv3x3 FAN GeoDistill HourGlass ConvBlock train_regressor compute_iod fit_regressor compute_errors NMSE main main train_epoch Options pairs savetorchimgandptsv2 SoftArgmax2D LossNetwork process_image AverageMeter crop savetorchimg shuffle_list init_weights affine_trans savetorchimgandpts saveheatmap HeatMap SuperDB preparedb loadnet main extractdata getdata matching_loss convertBack MyConv2d gather_feature interpolated_sum_multicontour convertLayer conv3x3 FAN GeoDistill HourGlass ConvBlock train_regressor compute_iod fit_regressor compute_errors NMSE main train_epoch Options pairs savetorchimgandptsv2 SoftArgmax2D LossNetwork process_image AverageMeter crop savetorchimg shuffle_list init_weights affine_trans savetorchimgandpts saveheatmap HeatMap setattr init load update apply load_state_dict to state_dict dump format getdata print DataLoader open loadnet SuperDB to len str format print min index core t extractdata db getGPUs parse_args cuda size squeeze sum isinstance Parameter __delattr__ zeros_like isinstance out_channels old_weight eye to register_parameter detach data Parameter __delattr__ hasattr view size mm register_parameter float view detach gather_feature clamp clone unsqueeze floor zip append load epoch reg replace dump reshape f mean open npoints append train_regressor compute_iod fit_regressor reshape NMSE mean sqrt zeros sum range len mean reshape sum sqrt ones reshape transpose inv mean eye max reshape sqrt arange mymodel shuffle_list DataLoader copy2 SuperDB lr_fan _save seed list StepLR angle getcwd tight Adam folder file npts sum range bSize glob eval manual_seed float flip keys s join int lr_gan train_epoch num_workers gc dict parameters step args interpolate save_image forward list folder permute iter expand_dims next range detach update format concatenate copy item _set_batch enumerate items time print step loss circle len shuffle copy zip list remove print range int fromkeys randn FloatTensor type_as from_numpy affine_trans flip floor swapaxes crop clip minimum int min maximum copy resize round max int warpAffine randn concatenate transpose getRotationMatrix2D dot resize abs print apply uint8 imwrite astype COLOR_RGB2BGR cvtColor imwrite circle copy COLOR_RGB2BGR range cvtColor uint8 imwrite applyColorMap astype COLOR_RGB2BGR COLORMAP_JET cvtColor imwrite copy COLOR_RGB2BGR drawMarker cvtColor | # Unsupervised Landmark Learning with Inter-Intra Subject Consistencies [[paper]](https://arxiv.org/pdf/2004.07936.pdf) Weijian Li, Haofu Liao, Shun Miao, Le Lu, and Jiebo Luo IAPR International Conference on Pattern Recognition (ICPR), 2020, Oral Presentation ### Introduction We propose an unsupervised learning approach to image landmark discovery by incorporating the inter-subject landmark consistencies on facial images. <!--  --> <p align="center"> <img src="http://cs.rochester.edu/u/wli69/images/projects/ICPR-20.png" width="75%"/> | 1,126 |
Wendy-Xiao/Extsumm_local_global_context | ['text summarization', 'extractive summarization'] | ['Extractive Summarization of Long Documents by Combining Global and Local Context'] | run.py data.py rouge_papier_v2/setup.py test.py rouge_papier_v2/rouge_papier_v2/util.py models.py rouge_papier_v2/rouge_papier_v2/wrapper.py rouge_papier_v2/rouge_papier_v2/__init__.py utils.py main.py rouge_papier_v2/rouge_papier_v2/generate.py SummarizationDataLoader SummarizationDataset Bsl2 Concatenation Bsl1 ChengAndLapataSentenceExtractor Bsl3 SummaRunnerSentenceExtractor Attentive_context eval_seq2seq_batch eval_seq2seq train_seq2seq train_seq2seq_batch get_meteor build_word2ind make_file_list get_posweight get_rouge get_all_text getEmbeddingMatrix compute_greedy_sequential_pairwise_ranks compute_extract compute_pairwise_ranks compute_greedy_independent_extract compute_greedy_sequential_extract make_simple_config_text TempFileManager convert_output convert_output2 compute_rouge print train enumerate train_seq2seq_batch data model cumsum clamp float backward zero_grad step binary_cross_entropy_with_logits is_available empty_cache to sum cat print get_meteor Series extend to_csv eval Sigmoid get_rouge append eval_seq2seq_batch array __name__ enumerate data model cumsum clamp float squeeze predict numpy permute binary_cross_entropy_with_logits save is_available to sum cat print readlines close Path open readlines close open append Path isinstance print Counter len get zeros items print Series len write close make_simple_config_text compute_rouge append array range open join readlines close len write call append range open append join format join decode concat extend resource_filename append convert_output2 range format findall set_index search append float DataFrame format findall set_index search append float DataFrame | # Extsumm_local_global_context This is the official code for paper 'Extractive summarization of Long Documents by combining local context and global context'(EMNLP-IJCNLP 2019). ## Installation Make sure you have `python 3` and `pytorch` installed. First need to install the tool rouge_papier_v2. Direct to folder 'rouge_papier_v2', and then 'python setup.py install'. <br/> (This is a modified version from https://github.com/kedz/rouge_papier) Other dependencies needed: `nltk->word_tokenizer`, `numpy`, `pandas` The data should in the same form as example-input.json, example-label.json and example-abstract.txt. ## Data The original data: https://github.com/armancohan/long-summarization.<br/> | 1,127 |
Wendy-Xiao/redundancy_reduction_longdoc | ['text summarization'] | ['Systematically Exploring Redundancy Reduction in Summarizing Long Documents'] | run.py data.py unsupervised_mmr.py test.py reward_function.py models.py utils.py main.py SummarizationDataLoader SummarizationDataset ScoreAttention StackedGRU NeuSum Pointer Attentive_context_sr Attentive_context compute_reward greedy_max greedy_nommr make_scores_gain predict_trigram_block eval_seq2seq train_seq2seq_batch_neusum predict_neusum regression_loss predict_redundancy_max train_seq2seq_batch_rl train_seq2seq_batch_newloss eval_seq2seq_batch_rl eval_seq2seq_batch_neusum eval_seq2seq_batch predict train_seq2seq_batch unsupervised_mmr get_rouge_v2 get_redundancy_scores get_meteor build_embedding_matrix output_to_dict check_trigram build_word2ind make_file_list get_posweight get_rouge_single get_rouge rouge_matrix get_all_text get_rouge_python getEmbeddingMatrix argmax join list ones shape unsqueeze append numpy range argmax join list ones shape append numpy range greedy_max greedy_nommr requires_grad_ unsqueeze get_rouge_single append to range len data model cumsum clamp float backward zero_grad step binary_cross_entropy_with_logits is_available empty_cache to sum cat data compute_reward model cumsum zero_grad Sigmoid squeeze permute binary_cross_entropy_with_logits to sum cat pad_packed_sequence is_available float backward clamp empty_cache step data KLDivLoss model cumsum pad_sequence zero_grad regression_loss max to sum cat is_available enumerate make_scores_gain backward clamp empty_cache zeros step len zeros softmax enumerate len KLDivLoss isinstance permute sum crit log data model cumsum zero_grad Sigmoid unsqueeze squeeze permute binary_cross_entropy_with_logits to sum cat pad_packed_sequence stack is_available float bmm backward clamp sigmoid cpu empty_cache step get_redundancy_scores get_meteor Series len extend to_csv eval Sigmoid get_rouge eval_seq2seq_batch_rl eval_seq2seq_batch_neusum append eval_seq2seq_batch sum array __name__ enumerate data model cumsum clamp float pad_packed_sequence squeeze predict_redundancy_max permute binary_cross_entropy_with_logits is_available to sum cat data KLDivLoss model cumsum pad_sequence regression_loss max predict_neusum permute to sum cat is_available zeros enumerate make_scores_gain clamp empty_cache len data model cumsum clamp float squeeze predict predict_trigram_block permute binary_cross_entropy_with_logits is_available to sum cat join len close write append range open join len write close item append range open join len check_trigram write close set append range open join len write close unsqueeze append range open load join cosine_similarity get_redundancy_scores FloatTensor print squeeze mean unsqueeze get_rouge Path permute item append zeros makedirs load range print Path readlines close open append Path isinstance print Counter len zeros tensor append zeros tensor print len write close make_simple_config_text compute_rouge append to_dict range open format replace groups match float compile split print len write close make_simple_config_text compute_rouge append to_dict range open join readlines close len write call append range open transpose mean Rouge eye get_scores range array zeros len get_scores Rouge FloatTensor mean get_scores Rouge array ngrams union set word_tokenize list entropy print ngrams len words readlines extend set strip CountVectorizer append range fit_transform log open | # Redundancy Reduction of Extractive Summarization This is the official code for the 
paper ['Systematically Exploring Redundancy Reduction in Summarizing Long Documents']() (AACL 2020). In this paper, we systematically explored ways to reduce redundancy in extractive summarization of long documents. ## Installation Make sure you have `python 3` and `pytorch` installed. First, install the tool [rouge_papier_v2](https://github.com/Wendy-Xiao/Extsumm_local_global_context/tree/master/rouge_papier_v2). ``` python setup.py install ``` (This is a modified version of https://github.com/kedz/rouge_papier) Other dependencies needed: `nltk->word_tokenizer`, `numpy`, `pandas`. The data should be in the same form as example-input.json, example-label.json and example-abstract.txt. ## Data The original data: https://github.com/armancohan/long-summarization.<br/> | 1,128
WenmuZhou/PAN.pytorch | ['scene text detection'] | ['Efficient and Accurate Arbitrary-Shaped Text Detection with Pixel Aggregation Network'] | data_loader/__init__.py models/loss.py utils/cal_recall/script.py config/default.py models/model.py utils/metrics.py utils/util.py base/base_trainer.py train.py utils/make_trainfile.py data_loader/dataset.py trainer/__init__.py post_processing/pypse.py models/__init__.py models/modules/__init__.py utils/__init__.py trainer/trainer.py utils/schedulers.py utils/cal_recall/__init__.py predict.py config/__init__.py models/modules/segmentation_head.py data_loader/data_utils.py utils/cal_recall/rrc_evaluation_funcs.py base/__init__.py eval.py data_loader/augment.py models/modules/shufflenetv2.py models/modules/resnet.py post_processing/kmeans.py post_processing/__init__.py main decode_clip Pytorch_model main BaseTrainer DataAugment show_pic ImageDataset Batch_Balanced_Dataset augmentation unshrink_offset image_label quadratic generate_rbox check_and_validate_polys get_dataloader get_datalist get_dataset PANLoss Model get_model get_loss conv1x1 resnext50_32x4d ResNet resnet50 resnext101_32x8d Bottleneck resnet152 conv3x3 _resnet resnet34 resnet18 BasicBlock resnet101 FPEM_FFM FPN FPEM SeparableConv2d shufflenet_v2_x2_0 shufflenet_v2_x1_5 InvertedResidual shufflenet_v2_x1_0 _shufflenetv2 channel_shuffle shufflenet_v2_x0_5 ShuffleNetV2 km get_dis pse_py decode decode_dice Trainer runningScore WarmUpLR PolynomialLR ConstantLR show_img cal_kernel_score order_points_clockwise order_points_clockwise_list setup_logger save_json load_json cal_text_score exe_time draw_bbox validate_point_inside_bounds load_folder_file load_zip_file_keys validate_clockwise_points validate_lines_in_file print_help decode_utf8 main_validation get_tl_line_values load_zip_file get_tl_line_values_from_file_contents validate_tl_line main_evaluation cal_recall_precison_f1 evaluate_method default_evaluation_params validate_data join format imwrite print reshape tqdm rmtree savetxt Pytorch_model draw_bbox exists predict makedirs connectedComponents PyclipperOffset uint8 JT_ROUND astype sigmoid contourArea arcLength append minAreaRect AddPath numpy array range ET_CLOSEDPOLYGON Execute get_dataloader get_model Trainer train cuda int line tuple astype copy imshow array append clip contourArea arcLength contourArea sqrt PyclipperOffset int zip fillPoly JT_ROUND astype arcLength contourArea zeros AddPath array enumerate ET_CLOSEDPOLYGON Execute random_scale random_rotate_img_bbox horizontal_flip augmentation random_crop ones min shape generate_rbox check_and_validate_polys resize append array append pop deepcopy Batch_Balanced_Dataset Compose get_datalist get_dataset DataLoader append len print ResNet load_state_dict load_state_dict_from_url size view contiguous ShuffleNetV2 load_state_dict load_state_dict_from_url reshape mean shape append expand_dims fit_predict len get transpose copy put mean shape Queue zeros range connectedComponents items uint8 list reshape transpose boxPoints astype float32 sigmoid shape get_points get_num append minAreaRect pse_cpp numpy range connectedComponents PyclipperOffset uint8 JT_ROUND astype sigmoid contourArea arcLength append minAreaRect AddPath numpy array range ET_CLOSEDPOLYGON Execute setFormatter basicConfig getLogger addHandler StreamHandler info ColoredFormatter DEBUG setLevel imshow expand_dims figure line isinstance tuple astype copy imread update astype int32 get_scores numpy update astype int32 get_scores numpy zeros sum diff sort sorted tolist array 
write exit group match namelist append ZipFile group match namelist append ZipFile append match listdir group decode BOM_UTF8 replace startswith encode validate_tl_line decode_utf8 replace split get_tl_line_values validate_point_inside_bounds int replace validate_clockwise_points reshape group match float replace argsort append get_tl_line_values split default_evaluation_params_fn update list writestr items write dumps close evaluate_method_fn ZipFile makedirs update default_evaluation_params_fn validate_data_fn print exit dict load_folder_file validate_lines_in_file load_folder_file compute_ap namedtuple float area int8 rectangle_to_polygon Rectangle get_tl_line_values_from_file_contents get_intersection append zeros get_intersection_over_union empty polygon_from_points range len main_evaluation | WenmuZhou/PAN.pytorch | 1,129 |
WenmuZhou/PSENet.pytorch | ['optical character recognition', 'scene text detection'] | ['Shape Robust Text Detection with Progressive Scale Expansion Network'] | dataset/augment.py utils/utils.py models/loss.py cal_recall/__init__.py models/model.py cal_recall/rrc_evaluation_funcs.py train.py models/__init__.py dataset/augment_img.py utils/__init__.py pse/__init__.py utils/lr_scheduler.py predict.py models/mobilenetv3.py dataset/data_utils.py models/ShuffleNetV2.py eval.py cal_recall/script.py config.py dataset/__init__.py models/resnet.py print main _get_annotation Pytorch_model train_epoch eval adjust_learning_rate weights_init main validate_point_inside_bounds load_folder_file load_zip_file_keys validate_clockwise_points validate_lines_in_file print_help decode_utf8 main_validation get_tl_line_values load_zip_file get_tl_line_values_from_file_contents validate_tl_line main_evaluation cal_recall_precison_f1 evaluate_method default_evaluation_params validate_data DataAugment show_pic augmentation MyDataset image_label generate_rbox check_and_validate_polys PSELoss hsigmoid Block SeModule hswish MobileNetV3_Small MobileNetV3_Large PSENet ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 shufflenet_v2_x2_0 shufflenet_v2_x1_5 InvertedResidual shufflenet_v2_x1_0 _shufflenetv2 channel_shuffle shufflenet_v2_x0_5 ShuffleNetV2 decode pse_warpper WarmupMultiStepLR show_img load_checkpoint setup_logger save_checkpoint exe_time draw_bbox items join format print reshape predict tqdm rmtree savetxt Pytorch_model exists PSENet makedirs isinstance Conv2d bias weight kaiming_normal_ constant_ warm_up_epoch param_groups lr_gamma lr zero_grad unsqueeze display_output_images display_interval step format size info item net add_image enumerate time display_input_images make_grid criterion backward reshape sigmoid cpu train epochs add_scalar join unsqueeze_ cal_recall_precison_f1 to reshape tqdm rmtree savetxt resize imread max exists makedirs restart_training testroot MultiStepLR gpu_id DataLoader __version__ DataParallel save_checkpoint output_dir device seed trainroot len Adam copyfile apply device_count to lr_decay_step range manual_seed_all SummaryWriter glob PSELoss close setup_logger start_epoch eval manual_seed info checkpoint time remove add_scalar load_checkpoint MyDataset train_epoch __len__ parameters epochs write exit group match namelist append ZipFile group match namelist append ZipFile append match listdir group decode BOM_UTF8 replace startswith encode validate_tl_line decode_utf8 replace split get_tl_line_values validate_point_inside_bounds int replace validate_clockwise_points group match float replace argsort append get_tl_line_values split default_evaluation_params_fn update list writestr items write dumps close evaluate_method_fn ZipFile makedirs update default_evaluation_params_fn validate_data_fn print exit dict load_folder_file validate_lines_in_file load_folder_file compute_ap namedtuple float area int8 rectangle_to_polygon Rectangle get_tl_line_values_from_file_contents get_intersection append zeros get_intersection_over_union empty polygon_from_points range len main_evaluation int line tuple astype copy imshow array append clip contourArea PyclipperOffset int zip fillPoly JT_ROUND astype arcLength contourArea zeros AddPath array ET_CLOSEDPOLYGON Execute random_scale random_rotate_img_bbox horizontal_flip augmentation random_crop_author COLOR_BGR2RGB ones min shape generate_rbox check_and_validate_polys resize append imread array range cvtColor 
ResNet _load_pretrained_model ResNet _load_pretrained_model ResNet _load_pretrained_model ResNet _load_pretrained_model ResNet _load_pretrained_model size view contiguous ShuffleNetV2 load_state_dict load_state_dict_from_url info connectedComponents uint8 astype append pse_cpp array range len boxPoints astype float32 sigmoid mean pse_warpper append minAreaRect numpy imshow expand_dims figure line isinstance tuple astype copy imread setFormatter basicConfig getLogger addHandler StreamHandler info ColoredFormatter DEBUG setLevel save info load load_state_dict info | # Shape Robust Text Detection with Progressive Scale Expansion Network
## Requirements
* pytorch 1.1
* torchvision 0.3
* pyclipper
* opencv3
* gcc 4.9+
## Update
| 1,130 |
WenmuZhou/Segmentation-Free_OCR | ['optical character recognition', 'scene text recognition'] | ['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition'] | src/densenet.py hlp/simple_generate_scene_text.py src/decoding.py hlp/get_captcha.py src/model_info.py hlp/csv_helpers.py train.py src/alexnet.py src/model_tfrecords.py convert_str_2_code.py src/__init__.py src/input_utils.py src/loader.py src/resnet.py src/config.py utils/export_model.py hlp/numbers_mnist_generator.py predict_all.py utils/convert2tfrecords.py src/model_digist.py image_utils2.py test_model.py hlp/generate_text.py hlp/config.py utils/draw_train_log.py hlp/parse_data.py test_inputfn.py utils/convert_cpkt2model.py src/data_handler.py predict_one.py src/googleNet.py get_max_height_index img_show draw_rect_line combine_connected_domain bubble_sort Rect thre find_contours get_height save_image PredictionModel cal_acc1 write_file cal_acc PredictionModel main main Alphabet CONST Params import_params_from_json csv_filtering_chars_from_labels csv_rel2abs_path_convertor write2file_callback write_one write process_str matrix2image Captcha generate_random_image_numbers parse_cvl_data check_file remove_resize parse_car_data write2file_callback write_one write process_str matrix2image alexnet Alphabet CONST Params import_params_from_json random_padding input_fn padding_inputs_width augment_data image_reading preprocess_image_for_prediction data_loader random_rotation get_words_from_chars Batch_Normalization Drop_out DenseNet Concatenation Max_Pooling dense_net Relu Average_pooling conv_layer googlenet1 googlenet inception_v1 inception_v2 input_fn PredictionModel _signature_def_to_tensors biasVar conv2d deep_bidirectional_lstm deep_cnn crnn_fn weightVar biasVar conv2d deep_bidirectional_lstm deep_cnn crnn_fn weightVar biasVar conv2d deep_bidirectional_lstm deep_cnn crnn_fn weightVar building_block conv2d_fixed_padding resnet batch_norm_relu fixed_padding block_layer bottleneck_block _int64_feature recordsCreater recordsReader split_lines _bytes_feature readLines test_reader getFileName convert_cpkt get_steo_acc_lose imwrite max endy RETR_EXTERNAL starty startx append get_height Canny findContours img_show thre dilate endx CHAIN_APPROX_SIMPLE getStructuringElement MORPH_CROSS min Rect endy int get_max_height_index ceil starty append sum len range len endy print min len starty startx max range endx starty endy range len imshow resize imwrite COLOR_GRAY2BGR img_show rectangle thre cvtColor makedirs str imwrite print rmtree mkdir thre exists uint8 threshold bitwise_not THRESH_OTSU THRESH_BINARY erode dilate zeros imread range join print range len print range len RunConfig output_model_dir replace print Estimator input_model_dir ConfigProto Params gpu csv_files_eval makedirs evaluate_every_epoch params_file str preprocess_image_for_prediction LoggingTensorHook export_savedmodel ceil range format import_params_from_json n_epochs csv_files_train int join export_experiment_params evaluate train train_batch_size list range len pop list format print keys join list format tqdm abspath split list fromarray uint8 convert astype threshold Captcha imwrite write_texts image abspath __contains__ THRESH_OTSU append sum range astype crop int join uint8 random_noise convert THRESH_BINARY array len update join remove list write2file_callback print write_one len close shuffle extend tqdm range exists makedirs validation join list next_batch format imsave reshape hstack map test tqdm append 
randint train range read_data_sets makedirs writer close open writer print writerow check_file close splitext listdir open endswith join remove listdir paste open new truetype close Draw text randint read TextLineReader string_input_producer isinstance decode_csv image_reading shuffle_batch resize_images constant augment_data read_file cond equal randint list case divide shape cast int32 round batch_normalization model TFRecordDataset shuffle make_one_shot_iterator get_next repeat prefetch batch get_default_graph truncated_normal constant learning_decay_rate deep_bidirectional_lstm RMSPropOptimizer exponential_decay get_words_from_chars values get_collection identity apply resnet_pool lookup DIMENSION_REDUCTION_W_POOLING cast int64 bincount get get_or_create_global_step mean learning_decay_steps alphabet_codes learning_rate minimize divide accuracy average AdamOptimizer edit_distance UPDATE_OPS int32 ExponentialMovingAverage AdadeltaOptimizer scalar resnet reshape Print deep_cnn batch_normalization relu pad fixed_padding conv2d_fixed_padding batch_norm_relu projection_shortcut conv2d_fixed_padding batch_norm_relu projection_shortcut update items list TFRecordWriter print write SerializeToString split_lines tqdm close Example readLines tobytes flush open read TFRecordReader string_input_producer decode_raw uint8 reshape cast int32 string parse_single_example recordsReader join format replace print preprocess_image_for_prediction Estimator export_savedmodel mkdir Params RunConfig | # Convolutional Recurrent Neural Network in Tensorflow (tf.crnn) this code is fork from <https://github.com/solivr/tf-crnn> and modified. CRNN model in Tensorflow using Estimators Implementation of the Convolutional Recurrent Neural Network (CRNN) for image-based sequence recognition tasks, such as scene text recognition and OCR. Original paper <http://arxiv.org/abs/1507.05717> and code <https://github.com/bgshih/crnn> This version uses the `tf.estimator.Estimator` to build the model. ### Contents * `src/model.py` : definition of the model * `src/data_handler.py` : functions for data loading, preprocessing and data augmentation * `src/config.py` : `class Params` manages parameters of model and experiments * `src/decoding.py` : helper fucntion to transform characters to words | 1,131 |
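Note: CRNN-style models such as the tf.crnn row above emit per-timestep character scores that are collapsed by CTC decoding into a character sequence. A minimal sketch of greedy (best-path) CTC decoding (my own illustration; the repo's `src/decoding.py` may rely on TensorFlow's built-in beam-search decoder instead):

```python
import numpy as np

def ctc_greedy_decode(logits, blank=0):
    """Collapse repeated symbols along the best path, then drop blanks.
    logits: (T, num_classes) per-timestep scores."""
    best_path = logits.argmax(axis=-1)
    out, prev = [], None
    for idx in best_path:
        if idx != prev and idx != blank:
            out.append(int(idx))
        prev = idx
    return out

# A best path [blank, 3, 3, blank, 5, 5, 5] decodes to [3, 5].
print(ctc_greedy_decode(np.eye(6)[[0, 3, 3, 0, 5, 5, 5]]))
```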
WenmuZhou/crnn.pytorch | ['optical character recognition', 'scene text recognition'] | ['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition'] | data_loader/__init__.py modeling/head/CTC.py modeling/losses/__init__.py modeling/neck/sequence_modeling.py modeling/__init__.py utils/util.py base/base_trainer.py train.py modeling/basic.py modeling/backbone/resnet_torch.py utils/get_keys.py data_loader/dataset.py trainer/__init__.py modeling/backbone/resnet.py data_loader/modules/resize.py modeling/modules/seg/resnet_fpn.py data_loader/modules/Text_Image_Augmentation_python/__init__.py modeling/model.py modeling/modules/seg/resnet.py data_loader/modules/Text_Image_Augmentation_python/augment.py modeling/losses/AttnLoss.py utils/__init__.py modeling/neck/__init__.py trainer/trainer.py data_loader/modules/Text_Image_Augmentation_python/demo.py modeling/head/Attn.py predict.py modeling/backbone/feature_extraction.py modeling/trans/TPS.py modeling/losses/CTCLoss.py utils/create_lmdb_dataset.py modeling/backbone/__init__.py utils/label_utils.py data_loader/modules/__init__.py base/__init__.py data_loader/modules/augment.py base/base_dataset.py modeling/backbone/MobileNetV3.py utils/gen_img.py modeling/head/__init__.py modeling/modules/seg/__init__.py modeling/trans/__init__.py data_loader/modules/Text_Image_Augmentation_python/warp_mls.py modeling/modules/seg/unet.py PytorchNet save main init_args BaseDataSet BaseTrainer ImageDataset LmdbDataset Batch_Balanced_Dataset get_dataloader get_transforms get_dataset IaaAugment Resize stretch distort perspective create_gif WarpMLS RandomAug ChannelAttention DWConv BasicBlockV2 SpartialAttention GhostBottleneck _make_divisible DWBlock BasicConv GhostModule CBAM Model build_model DenseNet ResNet _make_transition VGG CNN_lite MobileNetV3 ConvBNACT SEBlock HSwish ResidualUnit HardSigmoid ReaNet BasicBlock ResNet_FeatureExtractor ResNet_MT conv1x1 resnext50_32x4d wide_resnet50_2 ResNet resnet50 resnext101_32x8d Bottleneck resnet152 wide_resnet101_2 conv3x3 _resnet resnet34 resnet18 BasicBlock resnet101 build_backbone Attn AttentionCell CTC build_head AttnLoss CTCLoss build_loss deformable_resnet18 constant_init ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 deformable_resnet50 BasicBlock resnet101 ResNetFPN FPN DownBlock UNet ConvBlock UpBlock Reshape BidirectionalGRU BidirectionalLSTM RNNDecoder CNNDecoder build_neck LocalizationNetwork GridGenerator TPS build_trans Trainer createDataset writeCache checkImageIsValid create_strings_from_dict main margins parse_arguments show_dict get_key split CTCLabelConverter AttnLabelConverter load _load_txt Averager _load_json get_parameter_number get_datalist setup_logger _save_json parse_config punctuation_mend save _save_txt exe_time eval trace load join get_dataloader CTCLabelConverter build_model build_loss Trainer get_batch_max_length character isfile zeros train AttnLabelConverter len parse_args add_argument ArgumentParser append Compose pop deepcopy Batch_Balanced_Dataset get_dataset DataLoader append get_transforms len list WarpMLS arange generate append list WarpMLS arange generate append randint append list WarpMLS generate append mimsave int max pop deepcopy AvgPool2d Sequential add_module Conv2d ReLU BatchNorm2d Dropout ResNet load_state_dict load_state_dict_from_url pop deepcopy weight hasattr constant_ bias print ResNet load_url load_state_dict print ResNet load_url load_state_dict load_url ResNet load_state_dict load_url 
ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict frombuffer imdecode IMREAD_GRAYSCALE print makedirs tqdm encode writeCache open append range len split add_argument ArgumentParser include_symbols random random_sequences use_wikipedia output_dir Pool count list length font_dir include_numbers exit generate_from_tuple terminate parse_arguments create_strings_from_file include_letters create_strings_randomly font input_file language create_strings_from_dict load_dict create_strings_from_wikipedia imap_unordered zip load_fonts thread_count tqdm dict makedirs groupby format print append sum items list sorted plot print title savefig figure sum array split join sorted defaultdict print set show_dict setFormatter getLogger addHandler StreamHandler Formatter info DEBUG setLevel FileHandler Path Path normalize translate extend isinstance pop load open merge sum | Convolutional Recurrent Neural Network ====================================== This software implements the Convolutional Recurrent Neural Network (CRNN) in pytorch. Origin software could be found in [crnn](https://github.com/bgshih/crnn) ## Requirements * pytorch 1.3+ * torchvision 0.4+ ## Data Preparation Prepare a text in the following format ``` | 1,132 |
Westhealth/scipy2020 | ['word embeddings'] | ['Quasi-orthonormal Encoding for Machine Learning Applications'] | cloud_infrastructure/supplemental_code/hub/front_door.py quasiorthonormal/notebooks/for_fun/binary_helpers.py pyvis/paper/got.py quasiorthonormal/notebooks/helpers/tf_qsoftmax.py cloud_infrastructure/supplemental_code/rstudio/front_door.py quasiorthonormal/notebooks/helpers/basis_helper.py cloud_infrastructure/supplemental_code/novnc/auth_plugins.py cloud_infrastructure/supplemental_code/novnc/websockifyserver.py cloud_infrastructure/supplemental_code/bespoke/amazon_auth.py quasiorthonormal/notebooks/helpers/pt_qsoftmax.py pyvis/code/spotify_network.py cloud_infrastructure/supplemental_code/novnc/customrequesthandler.py quasiorthonormal/notebooks/helpers/np_qsoftmax.py quasiorthonormal/notebooks/for_fun/random_helpers.py cloud_infrastructure/supplemental_code/jupyter/logout.py cloud_infrastructure/supplemental_code/jupyter/login.py get_user login_required authenticated login health_check authenticated logout api_notebook api_notebook_status dashboard get_user help LoginHandler LogoutHandler BasicHTTPAuth ExpectOrigin BasePlugin AuthenticationError AmazonAuth ClientCertCNAuth InvalidOriginError CustomRequestHandler WebSockifyServer CompatibleWebSocket WebSockifyRequestHandler sign_out health_check sign_in construct_network binary random_vectors worst_angle worst_dot abs_inner hadamard dedup parse_sphere normalize parse_basis qsoftmax softmax f_qsoftmax qsoftmax qsoftmax f_qsoftmax get get_unverified_headers decode error text warning authenticated print authenticated make_response url_for redirect set_cookie IGNORECASE compile get get_unverified_headers decode format make_response rstudio_cookie str uuid4 error text debug warning redirect abort set_cookie get related list str update value_counts barnes_hut Network show artist nodes edges zip add_edges inherit_edge_colors add_node normal norm list append range append map split append len range split norm apply_along_axis combinations map range exp asarray matmul squeeze constant | # Supplemental Material for WestHealth's Scipy 2020 material There are three posters accepted with accompanying papers: * [Network visualizations with Pyvis and VisJS](pyvis) * [Securing Your Collaborative Jupyter Notebooks in the Cloud using Container and Load Balancing Services](cloud_infrastructure) * [Quasi-orthonormal Encoding for Machine Learning Applications](quasiorthonormal) Supplemental material is found in the directories by clicking the links above | 1,133 |
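Note: the `qsoftmax` helpers listed for the quasi-orthonormal encoding poster above score an activation vector against a set of quasi-orthonormal basis vectors and then apply an ordinary softmax. A minimal NumPy sketch of that idea (the shapes and call signature here are my assumptions, not the notebooks' actual code):

```python
import numpy as np

def softmax(z):
    z = z - z.max()            # shift for numerical stability
    e = np.exp(z)
    return e / e.sum()

def qsoftmax(x, basis):
    """x: (d,) activations; basis: (n_classes, d) quasi-orthonormal vectors.
    Each class is scored by its dot product with x, then softmax-normalized."""
    return softmax(np.asarray(basis) @ np.asarray(x))

# With an exactly orthonormal basis this reduces to an ordinary softmax.
print(qsoftmax([2.0, 0.5, -1.0], np.eye(3)))
```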
WilliamPeng38/Fashion | ['data augmentation'] | ['DENSER: Deep Evolutionary Network Structured Representation'] | utils/helper.py configs.py benchmark/convnet.py app.py benchmark/runner.py utils/argparser.py utils/mnist_reader.py visualization/project_zalando.py start_s3_sync get_json_logger touch touch_dir _get_logger main cnn_model_fn PredictJob JobWorker JobManager get_args_request parse_arg get_args_cli now_int upload_result_s3 get_sprite_image invert_grayscale create_sprite_image vector_to_matrix_mnist UploadS3Thread load_mnist UploadS3Thread start Event dirname makedirs makedirs setFormatter touch_dir DEBUG getLogger addHandler StreamHandler Formatter touch setLevel INFO FileHandler setFormatter getLogger addHandler Formatter touch setLevel INFO FileHandler dense max_pooling2d dropout one_hot minimize reshape GradientDescentOptimizer conv2d softmax_cross_entropy asarray evaluate print Estimator shuffle labels images numpy_input_fn train range read_data_sets int append items defaultdict utcfromtimestamp info int isinstance ones sqrt ceil array range vector_to_matrix_mnist invert_grayscale join | # Fashion-MNIST [](https://github.com/zalandoresearch/fashion-mnist/) [](https://gitter.im/fashion-mnist/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link) [](README.zh-CN.md) [](README.ja.md) [](https://opensource.org/licenses/MIT) [](https://hanxiao.github.io/2018/09/28/Fashion-MNIST-Year-In-Review/) <details><summary>Table of Contents</summary><p> * [Why we made Fashion-MNIST](#why-we-made-fashion-mnist) * [Get the Data](#get-the-data) | 1,134 |
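The Fashion-MNIST table of contents above points to a "Get the Data" section; a minimal usage sketch of the `load_mnist` helper listed under `utils/mnist_reader.py` would look roughly like the following (the data directory is an assumption and the exact signature may differ).

```python
# Assumed usage of the load_mnist helper from utils/mnist_reader.py.
from utils import mnist_reader

# Images come back flattened to 784 values per sample, labels as integers 0-9.
X_train, y_train = mnist_reader.load_mnist("data/fashion", kind="train")
X_test, y_test = mnist_reader.load_mnist("data/fashion", kind="t10k")

print(X_train.shape, y_train.shape)  # expected: (60000, 784) (60000,)
print(X_test.shape, y_test.shape)    # expected: (10000, 784) (10000,)
```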
WilsonWangTHU/POPLIN | ['model based reinforcement learning'] | ['Exploring Model-based Planning with Policy Networks'] | dmbrl/config/gym_ant.py dmbrl/misc/optimizers/__init__.py dmbrl/config/gym_hopper.py dmbrl/misc/logger.py dmbrl/misc/optimizers/policy_network/BC_WA_policy.py dmbrl/controllers/Controller.py dmbrl/config/__init__.py dmbrl/misc/optimizers/policy_network/tf_utils.py dmbrl/modeling/layers/__init__.py dmbrl/config/gym_invertedPendulum.py dmbrl/misc/optimizers/policy_network/BC_A_policy.py dmbrl/controllers/MPC.py dmbrl/modeling/models/__init__.py dmbrl/config/gym_cheetah.py dmbrl/misc/optimizers/policy_network/tf_norm.py dmbrl/modeling/layers/FC.py dmbrl/modeling/models/TFGP.py dmbrl/config/gym_swimmer.py dmbrl/env/pusher.py dmbrl/misc/optimizers/policy_network/BC_WD_policy.py dmbrl/config/view_humanoid.py dmbrl/misc/DotmapUtils.py dmbrl/config/pusher.py dmbrl/misc/optimizers/policy_network/gan_policy.py dmbrl/config/reacher.py dmbrl/modeling/models/BNN.py dmbrl/config/default.py dmbrl/misc/Agent.py dmbrl/misc/optimizers/optimizer.py dmbrl/misc/optimizers/policy_network/gmm_policy.py dmbrl/modeling/models/NN.py scripts/render.py dmbrl/controllers/__init__.py dmbrl/misc/optimizers/cem.py dmbrl/misc/optimizers/POPLIN_P.py dmbrl/misc/optimizers/gbp_rs.py mbexp.py dmbrl/config/halfcheetah.py dmbrl/env/cartpole.py dmbrl/misc/optimizers/policy_network/gmm_util.py dmbrl/env/half_cheetah.py dmbrl/config/gym_acrobot.py dmbrl/misc/optimizers/policy_network/tf_networks.py show_with_test_result.py dmbrl/config/gym_pendulum.py dmbrl/env/reacher.py dmbrl/misc/MBExp.py dmbrl/misc/optimizers/policy_network/whitening_util.py dmbrl/modeling/models/GT_dynamics.py dmbrl/config/template.py scripts/mbexp.py dmbrl/config/gym_reacher.py dmbrl/misc/optimizers/random.py dmbrl/config/gym_cartpole.py show_result.py dmbrl/config/gym_fhopper.py dmbrl/misc/optimizers/policy_network/base_policy.py dmbrl/modeling/utils/__init__.py dmbrl/config/gym_fswimmer.py dmbrl/env/__init__.py dmbrl/misc/optimizers/pgcem.py dmbrl/misc/optimizers/policy_network/wgan_policy.py dmbrl/modeling/utils/TensorStandardScaler.py dmbrl/config/gym_walker2d.py dmbrl/config/reward_util.py dmbrl/misc/optimizers/POPLIN_A.py dmbrl/misc/optimizers/gbp_cem.py main _create_il_config _create_exp_config _create_mb_config _create_ctrl_config _create_gbp_config make_bool create_conditional apply_override _create_cem_config create_read_only create_config GymAcrobotConfigModule AntConfigModule GymCartpoleConfigModule HalfCheetahConfigModule FixedHopperConfigModule FixedSwimmerConfigModule HopperConfigModule GymINVPendulumConfigModule GymPendulumConfigModule ReacherConfigModule SwimmerConfigModule WalkerConfigModule HalfCheetahConfigModule PusherConfigModule ReacherConfigModule _sigmoids tolerance EnvConfigModule Controller MPC CartpoleEnv HalfCheetahEnv PusherEnv Reacher3DEnv Agent get_required_argument _get_path GLOBAL_PATH _MyFormatter set_file_handler MBExperiment CEMOptimizer GBPCEMOptimizer GBPRandomOptimizer Optimizer POPLINAOptimizer POPLINPOptimizer RandomOptimizer base_policy_network limit_action policy_network policy_network policy_network policy_network generate_noise policy_network get_conditional_gaussian get_gmm_posterior MLP normc_initializer_func normc_initializer get_normalizer flatten_feature CNN_RL conv2d get_activation_func W_MLP WZ_MLP GRU weight_variable batch_norm_with_train layer_norm batch_norm_without_train logit_bernoulli_entropy GetFlat gauss_ent gauss_log_prob gauss_selfKL_firstfixed xavier_initializer 
fully_connected slice_2d set_network_weights numel logsigmoid gauss_KL get_weight_decay_loss var_shape SetFromFlat flatgrad get_network_weights l2_loss policy_network generate_noise add_whitening_operator init_whitening_stats copy_whitening_var append_normalized_data_dict update_whitening_stats set_whitening_var FC BNN none_constructor compile_cost GT NN TFGP TensorStandardScaler main main run_experiment DotMap copy MBExperiment MPC exp_cfg ctrl_cfg pformat create_config info logdir makedirs join _create_exp_config exec_module _create_ctrl_config DotMap name SourceFileLoader module_from_spec CONFIG_MODULE apply_override spec_from_loader realpath exp_cfg ctrl_cfg mb_cfg dirname init_var exists NTRAIN_ITERS ENV TASK_HORIZON NROLLOUTS_PER_ITER DotMap UPDATE_FNS _create_gbp_config none_constructor _create_cem_config gbp_cfg obs_postproc _create_il_config PLAN_HOR hasattr obs_postproc2 gp_constructor il_cfg NN_TRAIN_CFG get _create_mb_config GP_NINDUCING_POINTS targ_proc create_conditional mb_cfg obs_ac_cost_fn ac_cost_fn obs_cost_fn RBF model_init_cfg obs_preproc cem_cfg ENV INIT_VAR create_read_only nn_constructor replace split acosh arctanh pi sqrt acos log logical_and _sigmoids where get join _set_path setFormatter format addHandler _MyFormatter strftime abspath info FileHandler makedirs rvs truncnorm sqrt dot T reshape mean predict_proba expand_dims sum array tanh leaky_relu relu layer_norm identity RandomState float32 astype random_normal_initializer normc_initializer orthogonal_initializer constant_initializer get_variable zeros_initializer xavier_initializer random_uniform_initializer truncated_normal_initializer batch_norm batch_norm l2_loss sigmoid logsigmoid stop_gradient map constant exp log pi exp square reduce_sum constant e pi reduce_sum float32 log shape int64 cast reshape constant gradients sqrt sum square maximum run int expert_obs reshape transpose min tile zeros range predict len pprint append | # POPLIN <p align=center> <img src="img/reward.png" width=800> </p> [Arxiv Link](https://arxiv.org/abs/1906.08649) **Abstract** Model-based reinforcement learning (MBRL) with model-predictive control or online planning has shown great potential for locomotion control tasks in terms of both sample efficiency and asymptotic performance. However, the existing planning methods search from candidate sequences that are randomly generated in the action space. We argue that random action search is inefficient in complex high-dimensional environments. In this paper, we propose a novel MBRL algorithm, model-based policy planning (POPLIN), that combines policy networks with online planning. More specifically, we formulate action planning at each time-step as an optimization problem using neural networks. We experiment with both optimization w.r.t. the action sequences initialized from the policy network, and also online optimization directly w.r.t. the parameters of the policy network. We show that POPLIN obtains state-of-the-art performance in the MuJoCo benchmarking environments, being about 3x more sample efficient than the state-of-the-art algorithms, such as PETS, TD3 and SAC. To explain the effectiveness of our algorithm, we show that the optimization surface in parameter space is smoother than in action space. Further more, we found the distilled policy network can be effectively applied without the expansive model predictive control during test time for some environments such as Cheetah. 
### Performance Below are some of the benchmarking results of POPLIN compared with other state-of-the-art algorithms. Result curves: | 1,135
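The POPLIN abstract above describes planning either over action sequences initialized by a policy network or directly over the policy network's parameters. The sketch below illustrates the second idea in miniature: a CEM-style search over the parameters of a tiny linear policy rolled out through a generic learned model. Every name here is an illustrative assumption, not the repository's implementation.

```python
# Minimal CEM-in-parameter-space sketch (illustrative; not the repo's code).
import numpy as np

def plan_with_policy_cem(obs, dynamics, reward_fn, theta_mean, horizon=20,
                         pop_size=64, elites=8, iters=5, sigma=0.1):
    """Search over perturbations of policy parameters instead of raw action sequences."""
    mean = np.zeros_like(theta_mean)
    std = np.full_like(theta_mean, sigma)
    for _ in range(iters):
        noise = np.random.randn(pop_size, *theta_mean.shape) * std + mean
        returns = np.empty(pop_size)
        for i in range(pop_size):
            theta = theta_mean + noise[i]
            s, total = obs, 0.0
            for _ in range(horizon):
                a = np.tanh(theta @ s)      # tiny linear policy
                total += reward_fn(s, a)
                s = dynamics(s, a)          # rollout through the learned model
            returns[i] = total
        elite = noise[np.argsort(returns)[-elites:]]
        mean, std = elite.mean(axis=0), elite.std(axis=0) + 1e-6
    best_theta = theta_mean + mean
    return np.tanh(best_theta @ obs)        # first planned action

# Toy example: dynamics s' = s + a, reward = -||s||^2
if __name__ == "__main__":
    dim = 3
    action = plan_with_policy_cem(
        obs=np.ones(dim),
        dynamics=lambda s, a: s + a,
        reward_fn=lambda s, a: -float(s @ s),
        theta_mean=np.zeros((dim, dim)),
    )
    print(action)
```

Searching in parameter space rather than action space is what the abstract credits with the smoother optimization surface.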
WingsBrokenAngel/Semantics-AssistedVideoCaptioning | ['video captioning'] | ['A Semantics-Assisted Video Captioning Model Trained with Scheduled Sampling'] | scripts/MSR-VTT/generate_eco_feature.py tagging/test.py msvd/train_model.py scn.py scripts/MSR-VTT/generate_res_feature.py scripts/MSVD/generate_tag_gt.py tagging/tag_net.py scripts/MSR-VTT/msrvtt_tagging.py msrvtt/train_model.py tagging/train_tag_net.py scripts/MSR-VTT/prepare_frames.py tagging/evaluate_tag.py scripts/MSVD/generate_res_feature.py scripts/MSVD/generate_feature.py SemanticLSTM F train_part get_batch train_step score print_sents cal_metrics main train_part get_batch train_step score print_sents cal_metrics main generate_feat generate_feat main main allocate_videos generate_feat generate_feat main TagNet main main test_step load_split_data train_step load SemanticLSTM print join enumerate get_batch run stack append zeros max enumerate T format arange train_step print print_sents shuffle append range score squeeze pprint test_sents append range run compute_score zip ones open save resize forward COLOR_BGR2RGB set_device transpose IMREAD_COLOR imread TEST range glob Net tile enumerate join int print float32 set_mode_gpu zeros array cvtColor len basename load_img output preprocess run input glob join extract_frames add_argument outputpath allocate_videos ArgumentParser parse_args videopath listdir enumerate TagNet Session train_step print load_split_data shuffle test_step save append ConfigProto array range run load concatenate | # Semantics-Assisted Video Captioning Model Trained with Scheduled Sampling Strategy      ## Table of Contents 1. [Description](#description) 2. [Dependencies](#dependencies) 3. [Manual](#manul) 4. [Data](#data) 5. [Results](#results) 1. [Comparison on Youtube2Text](#cy) | 1,136 |
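The captioning model in the row above is, per its title, trained with scheduled sampling. As a schematic, framework-agnostic sketch (not the repository's TensorFlow code, and with assumed names), the decoder loop below feeds the ground-truth previous token with a probability that decays over training, and the model's own prediction otherwise.

```python
# Hedged sketch of scheduled sampling for a captioning decoder.
import numpy as np

def sampling_probability(epoch, k=10.0):
    """Inverse-sigmoid decay: early epochs mostly feed ground truth, later epochs feed model samples."""
    return k / (k + np.exp(epoch / k))

def decode_with_scheduled_sampling(step_fn, ground_truth, epoch, rng):
    """step_fn(prev_token, state) -> (logits, state); returns the token sequence produced during training."""
    state = None
    prev = ground_truth[0]                  # e.g. <BOS>
    outputs = []
    for t in range(1, len(ground_truth)):
        logits, state = step_fn(prev, state)
        predicted = int(np.argmax(logits))
        outputs.append(predicted)
        use_truth = rng.random() < sampling_probability(epoch)
        prev = ground_truth[t] if use_truth else predicted
    return outputs

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    dummy_step = lambda tok, state: (rng.normal(size=50), state)  # stand-in decoder step
    caption = [1, 7, 3, 9, 2]
    print(decode_with_scheduled_sampling(dummy_step, caption, epoch=5, rng=rng))
```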
WingsBrokenAngel/Semantics-AssistedVideoCaptioningModelTrainedwithScheduledSamplingStrategy | ['video captioning'] | ['A Semantics-Assisted Video Captioning Model Trained with Scheduled Sampling'] | scripts/MSR-VTT/generate_eco_feature.py tagging/test.py msvd/train_model.py scn.py scripts/MSR-VTT/generate_res_feature.py scripts/MSVD/generate_tag_gt.py tagging/tag_net.py scripts/MSR-VTT/msrvtt_tagging.py msrvtt/train_model.py tagging/train_tag_net.py scripts/MSR-VTT/prepare_frames.py tagging/evaluate_tag.py scripts/MSVD/generate_res_feature.py scripts/MSVD/generate_feature.py SemanticLSTM F train_part get_batch train_step score print_sents cal_metrics main train_part get_batch train_step score print_sents cal_metrics main generate_feat generate_feat main main allocate_videos generate_feat generate_feat main TagNet main main test_step load_split_data train_step load SemanticLSTM print join enumerate get_batch run stack append zeros max enumerate T format arange train_step print print_sents shuffle append range score squeeze pprint test_sents append range run compute_score zip ones open save resize forward COLOR_BGR2RGB set_device transpose IMREAD_COLOR imread TEST range glob Net tile enumerate join int print float32 set_mode_gpu zeros array cvtColor len basename load_img output preprocess run input glob join extract_frames add_argument outputpath allocate_videos ArgumentParser parse_args videopath listdir enumerate TagNet Session train_step print load_split_data shuffle test_step save append ConfigProto array range run load concatenate | # Semantics-Assisted Video Captioning Model Trained with Scheduled Sampling Strategy      ## Table of Contents 1. [Description](#description) 2. [Dependencies](#dependencies) 3. [Manual](#manul) 4. [Data](#data) 5. [Results](#results) 1. [Comparison on Youtube2Text](#cy) | 1,137 |
WonderSeven/ULDA | ['few shot learning', 'data augmentation'] | ['Diversity Helps: Unsupervised Few-shot Learning via Distribution Shift-based Data Augmentation'] | network/Classifiers/MAML.py network/Embeddings/ResNet256F.py network/Embeddings/__init__.py network/Embeddings/Preact_Resnet.py engine/checkpointer.py datasets/DataSets.py network/Embeddings/DropBlockResNet12.py network/Classifiers/MAML_components.py network/Embeddings/ProtoEmbedding.py network/Embeddings/R2D2_embedding.py datasets/few_shot_dataloader.py network/Embeddings/Conv64F1600.py network/Embeddings/rfs_ResNet.py engine/configs/__init__.py engine/ssl_baseline.py datasets/few_shot_dataset.py preprocess/ditribution_distance.py network/Classifiers/ClassificationHead.py network/__init__.py network/Classifiers/__init__.py engine/averagemeter.py compare/autoaugment.py network/Classifiers/Linear.py preprocess/FID.py network/Classifiers/Linear_classifier.py network/inits.py network/Embeddings/Conv64F.py network/Embeddings/dropblock.py preprocess/tools.py network/Embeddings/Global_Conv64F1600.py main/main_base.py network/net_tools.py network/Classifiers/tools.py compare/Rotate_dataloader.py engine/__init__.py engine/fsl_baseline.py engine/ssl_trainer.py engine/configs/parser.py engine/logger.py network/schedulers.py network/Embeddings/ResNet12.py datasets/base_dataset.py engine/fsl_trainer.py engine/configs/registry.py preprocess/mixup.py preprocess/visualize.py CIFAR10Policy ImageNetPolicy SVHNPolicy SubPolicy rotate_with_fill Rotate FewShotDataloader BaseDataSet PIL_loader Gray_loader Default_loader RGB_loader DataSet default_loader RGB_loader FewShotDataloader PIL_loader Gray_loader FewShotDataSet Default_loader RGB_loader AverageMeter ProgressMeter load_model_directly remove_modules_for_DataParallel add_modules_for_DataParallel Checkpointer mean_confidence_interval accuracy test adjust_learning_rate train Trainer create_logger add_filehandler mean_confidence_interval accuracy test adjust_learning_rate train Trainer get_config BaseOptions parse_gpu_ids _register_generic Registry get_embedding_network get_FSL_scheduler get_loss_func get_FSL_dataloader get_ssl_classifier get_ssl_solver get_FSL_dataset get_function get_logger get_normal_dataloader get_normal_dataset get_transforms get_solver get_classifier_head get_classifier one_hot get_norm_layer weights_init_orthogonal weights_init_normal weights_init_xavier Flatten init_weights print_network weights_init_kaiming StepLR NoneLR CosineLR PolyLR WarmUpLRScheduler ClassificationHead LinearHead LinearHead_V1 MetaOptNetHead_SVM_He MetaOptNetHead_SVM_WW ProtoNetHead MetaOptNetHead_SVM_CS MetaOptNetHead_Ridge R2D2Head MAMLHead Linear_fw Conv6NP Conv4NP ConvNetS ConvBlock Flatten Conv6 ResNet18 ResNet50 Conv4S ResNet10 Conv4SNP BatchNorm2d_fw ResNet SimpleBlock ResNet34 Conv4 ConvNet ResNet101 BottleneckBlock init_layer Conv2d_fw ConvNetSNopool ConvNetNopool computeGramMatrix batched_kronecker binv one_hot Conv64F Conv64F1600 DropBlock ResNet conv3x3 resnet12 DropBlock BasicBlock Conv64F PreActBlock PreActResNet50 PreActResNet batch_norm PreActResNet18 PreActResNet152 PreActBottleneck PreActResNet101 PreActResNet34 ConvBlock ProtoNetEmbedding R2D2_conv_block R2D2Embedding ResNet conv3x3 BasicBlock resnet12 ResBlock ResNet256F seresnet18 seresnet50 seresnet24 resnet24 ResNet resnet50 seresnet101 seresnet12 conv3x3 DropBlock resnet12 resnet18 SELayer BasicBlock resnet101 Wasserstein_distance MMD KL_divergence FID calculate_activation_statistics get_activations calculate_fid_given_paths 
imread calculate_frechet_distance _compute_statistics_of_path mixup_criterion TIM_S inter_task_mixup intra_task_uniform__mixup JS_TIM batch_task_mix intra_task_mixup TIM load_class_names pil2numpy write_csv l2_normalize tensor2pil repeat_1d_tensor renormalize pil2tensor load_csv2dict shuffle_channel format_time read_csv parse_reward_file show_reward show_meta_feature parse_accuracy_file show_by_task show_accuracies show_Images show_task_by_count rotate load format add_modules_for_DataParallel print remove_modules_for_DataParallel load_state_dict cuda items list items list cla_net emb_net zero_grad list train_loader update one_hot log_softmax mean item enumerate time criterion backward print reshape AverageMeter accuracy ProgressMeter step len len AverageMeter ProgressMeter param_groups base_lr _ppf len clear setFormatter getLogger addHandler StreamHandler setLevel setFormatter addHandler DEBUG setLevel FileHandler ssl_net TIM TIM_S int print set_device append split lower Embeddings get_function Classifers issubclass kwargs Module print eval lower get_function momentum lower weight_decay float base_lr eval lower kwargs criterion print Compose get_transforms FewShotDataSet test_batch_size FewShotDataloader shuffle episode_train_num num_workers episode_val_num get_FSL_dataset episode_test_num append train_batch_size BaseDataSet get_transforms shuffle num_workers BaseDataloader append get_normal_dataset join remove format embedding config exists name copy create_logger add_filehandler output_dir format_time stage momentum lower weight_decay float base_lr data isinstance Conv2d normal_ BatchNorm2d __name__ constant_ Linear data normal_ xavier_normal_ __name__ constant_ data normal_ kaiming_normal_ __name__ constant_ data print orthogonal_ normal_ __name__ constant_ print apply BatchNorm2d partial InstanceNorm2d print parameters view Size size scatter_ zeros cuda one_hot view Variable computeGramMatrix size transpose reshape expand repeat permute zeros float sum cuda detach bmm binv one_hot view computeGramMatrix size transpose cuda view Variable computeGramMatrix size ones expand float sum cuda cat detach bmm var one_hot view reshape size transpose div unsqueeze repeat permute expand_as one_hot view Variable reshape size computeGramMatrix expand cuda zeros float sum batched_kronecker detach one_hot view ones computeGramMatrix size transpose reshape Variable expand batched_kronecker float sum cuda cat detach fill_ isinstance out_channels Conv2d normal_ sqrt BatchNorm2d float cuda gesv reshape ResNet Sequential MaxPool2d add_module Conv2d BatchNorm2d LeakyReLU Dropout ResNet ResNet ResNet ResNet ResNet ResNet ResNet ResNet ResNet exp size t expand_as mm Tensor numpy isinstance adaptive_avg_pool2d FloatTensor print reshape transpose size tqdm eval type cuda empty array range len atleast_2d print iscomplexobj atleast_1d dot sqrtm trace eye real abs max imag mean cov get_activations load list endswith glob calculate_activation_statistics close Path InceptionV3 cuda calculate_frechet_distance _compute_statistics_of_path randperm cuda beta randperm cuda maximum beta size clone maximum randperm beta cuda range randperm cuda maximum beta randperm cuda beta randperm randint range cuda clip list keys writer writerow close open squeeze renormalize normalize to_tensor as_tensor add_ clone to size device show isinstance suptitle set_xticklabels set_yticklabels tensor2pil add_subplot axis ceil subplots_adjust shape imshow savefig figure cpu Tensor range to_pil_image show plot xlabel ylabel title savefig 
figure legend tick_params show xlabel ylabel title scatter figure tick_params show int plot mean title savefig figure zeros array range len show int plot title savefig figure zeros sum array range len show isinstance suptitle set_xticklabels set_yticklabels squeeze add_subplot axis subplots_adjust shape imshow savefig figure ceil Tensor numpy range | Unsupervised Few-shot Learning via Distribution Shift-based Augmentation --- ### Introduction This repository contains the official implementation of the paper: [Unsupervised Few-shot Learning via Distribution Shift-based Augmentation](https://arxiv.org/abs/2004.05805) Tiexin Qin, Wenbin Li, Yinghuan Shi and Yang Gao. <center> <img src="./figs/framework.png" width="90%" height="50%" /> </center> ### Abstract | 1,138
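The ULDA introduction above names distribution-shift-based augmentation for unsupervised few-shot learning, and the row's code dump lists task-mixing helpers such as `TIM` and `intra_task_mixup`. As a rough sketch of the general idea only (augmentation choices and all names are assumptions), the snippet builds a pseudo few-shot task from unlabeled images by treating each sampled image as its own class and applying different augmentations to the support and query copies, which creates the distribution shift between the two sets.

```python
# Rough sketch of a pseudo few-shot task with a support/query distribution shift.
import random

def build_pseudo_task(unlabeled_images, n_way, weak_aug, strong_aug):
    """Each sampled image becomes its own class; support and query get different augmentations."""
    sampled = random.sample(unlabeled_images, n_way)
    support, query, labels = [], [], []
    for cls, img in enumerate(sampled):
        support.append(weak_aug(img))    # e.g. crop + flip
        query.append(strong_aug(img))    # e.g. color jitter / rotation / mixing -> shifted distribution
        labels.append(cls)
    return support, query, labels

if __name__ == "__main__":
    images = [f"img_{i}" for i in range(100)]   # stand-ins for real image tensors
    s, q, y = build_pseudo_task(images, n_way=5,
                                weak_aug=lambda x: x + "_weak",
                                strong_aug=lambda x: x + "_strong")
    print(list(zip(s, q, y)))
```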
X-BrainLab/PI-ReID | ['person re identification'] | ['Do Not Disturb Me: Person Re-identification Under the Interference of Other Pedestrians'] | data/datasets/dataset_loader.py data/collate_batch.py utils/logger.py utils/re_ranking.py data/build.py data/datasets/veri.py modeling/backbones/__init__.py data/datasets/__init__.py tests/lr_scheduler_test.py layers/__init__.py utils/iotools.py modeling/__init__.py engine/trainer.py config/defaults.py data/__init__.py modeling/Pre_Selection_Model.py engine/inference.py data/datasets/prw.py tests/__init__.py tools/__init__.py layers/center_loss.py modeling/backbones/Query_Guided_Attention.py data/datasets/market1501.py modeling/PISNet.py layers/triplet_loss.py tools/train.py solver/lr_scheduler.py tools/pre_selection.py utils/__init__.py data/transforms/build.py solver/build.py data/datasets/bases.py solver/__init__.py data/datasets/cuhk03.py data/samplers/__init__.py data/transforms/transforms.py config/__init__.py modeling/baseline.py modeling/backbones/resnet.py data/samplers/triplet_sampler.py utils/reid_metric.py data/transforms/__init__.py data/datasets/msmt17.py data/datasets/dukemtmcreid.py data/datasets/eval_reid.py data/datasets/cuhk.py modeling/backbones/pisnet.py make_data_loader_train make_data_loader_val multi_person_training_info_cuhk make_data_loader multi_person_training_info_prw train_collate_fn_pair3 train_collate_fn train_collate_fn_pair val_collate_fn_pair val_collate_fn BaseDataset BaseImageDataset BaseVideoDataset CUHK CUHK03 ImageDataset_pair3 ImageDataset ImageDataset_pair ImageDataset_pair_val read_image DukeMTMCreID process_g_pids eval_func Market1501 MSMT17 PRW VeRi get_names init_dataset RandomIdentitySampler RandomIdentitySampler_alignedreid build_transforms RandomErasing inference create_supervised_evaluator create_supervised_trainer_with_center loss1 euclidean_dist create_supervised_trainer do_train create_supervised_evaluator do_train_with_center CenterLoss hard_example_mining euclidean_dist CrossEntropyLabelSmooth TripletLoss normalize make_loss_with_center make_loss weights_init_classifier weights_init_kaiming Baseline weights_init_classifier weights_init_kaiming PISNet weights_init_classifier Pre_Selection_Model weights_init_kaiming build_model_pre build_model pisnet Bottleneck conv3x3 feature_corruption BasicBlock weights_init_kaiming Query_Guided_Attention _Query_Guided_Attention ResNet conv3x3 BasicBlock Bottleneck make_optimizer make_optimizer_with_center WarmupMultiStepLR MyTestCase main create_supervised_evaluator main train check_isfile read_json write_json mkdir_if_missing setup_logger R1_mAP_pair R1_mAP R1_mAP_reranking pre_selection_index re_ranking join items str append array enumerate join items str choice append enumerate gallery NAMES init_dataset NUM_WORKERS query ImageDataset DataLoader num_train_pids build_transforms train gallery NAMES init_dataset ImageDataset_pair3 train multi_person_training_info_cuhk NUM_WORKERS ImageDataset DataLoader query TRAIN_ANNO num_train_pids ROOT_DIR build_transforms multi_person_training_info_prw Pre_Index_DIR NUM_WORKERS DataLoader ImageDataset_pair_val build_transforms tensor zip tensor FloatTensor zip tensor zip zip zip convert append invert process_g_pids format asarray print cumsum astype float32 argsort shape mean int32 append sum range Compose Normalize items Engine DataParallel to attach DEVICE format getLogger print RE_RANKING info create_supervised_evaluator run sqrt sum ranking_loss euclidean_dist MarginRankingLoss fill_ to DataParallel to DataParallel 
CHECKPOINT_PERIOD init_dataset getLogger create_supervised_trainer Timer create_supervised_evaluator OUTPUT_DIR run EVAL_PERIOD LOG_PERIOD DEVICE add_event_handler NAME info attach EPOCH_COMPLETED NAMES MAX_EPOCHS ModelCheckpoint CHECKPOINT_PERIOD DEVICE Timer create_supervised_trainer_with_center CENTER_LOSS_WEIGHT getLogger add_event_handler MAX_EPOCHS EVAL_PERIOD NAME attach info LOG_PERIOD ModelCheckpoint create_supervised_evaluator OUTPUT_DIR EPOCH_COMPLETED run expand_as t addmm_ expand data ne view size min squeeze expand t eq gather max METRIC_LOSS_TYPE format SAMPLER MARGIN print CrossEntropyLabelSmooth TripletLoss METRIC_LOSS_TYPE format MARGIN print CrossEntropyLabelSmooth TripletLoss CenterLoss affine bias kaiming_normal_ weight __name__ constant_ bias normal_ weight __name__ constant_ NECK_FEAT PISNet LAST_STRIDE NECK NAME PRETRAIN_CHOICE PRETRAIN_PATH NECK_FEAT Pre_Selection_Model NECK LAST_STRIDE NAME PRETRAIN_CHOICE PRETRAIN_PATH cat WEIGHT_DECAY_BIAS named_parameters BASE_LR BIAS_LR_FACTOR WEIGHT_DECAY WEIGHT_DECAY_BIAS SGD named_parameters parameters BASE_LR BIAS_LR_FACTOR WEIGHT_DECAY ArgumentParser make_data_loader opts load_param create_supervised_evaluator OUTPUT_DIR run DEVICE_ID freeze parse_args merge_from_file DEVICE config_file WEIGHT merge_from_list mkdir print add_argument build_model_pre STEPS WARMUP_METHOD GAMMA make_loss do_train cuda values PRETRAIN_PATH make_optimizer make_data_loader_train load_state_dict PRETRAIN_CHOICE state_dict update METRIC_LOSS_TYPE format make_loss_with_center replace build_model IF_WITH_CENTER eval do_train_with_center WARMUP_FACTOR WARMUP_ITERS load items isinstance print named_parameters make_optimizer_with_center WarmupMultiStepLR Tensor format setup_logger info train makedirs makedirs print format isfile dirname mkdir_if_missing setFormatter join getLogger addHandler StreamHandler Formatter DEBUG setLevel FileHandler zeros_like float16 max exp transpose expand append sum range cat size astype mean unique addmm_ minimum print t int32 zeros numpy len | # PI-ReID (ECCV 2020) Official code for ECCV 2020 paper [Do Not Disturb Me: Person Re-identification Under the Interference of Other Pedestrians](https://arxiv.org/abs/2008.06963). If you find this code useful in your research, please consider citing: ``` @inproceedings{zhao2020pireid, title={Do Not Disturb Me: Person Re-identification Under the Interference of Other Pedestrians}, author={Shizhen, Zhao and Changxin, Gao and Jun, Zhang and Hao, Cheng and Chuchu, Han and Xinyang, Jiang and Xiaowei, Guo and Wei-Shi, Zheng and Nong, Sang and Xing, Sun}, booktitle={European Conference on Computer Vision (ECCV)}, year={2020} } | 1,139 |
XL2248/AGDT | ['sentiment analysis', 'aspect based sentiment analysis'] | ['A Novel Aspect-Guided Deep Transition Model for Aspect Based Sentiment Analysis'] | thumt/thumt/scripts/shuffle_corpus.py thumt/thumt/__init__.py thumt/thumt/utils/__init__.py thumt/thumt/scripts/convert_old_model.py thumt/thumt/models/__init__.py thumt/thumt/data/record.py thumt/thumt/interface/model.py thumt/thumt/layers/__init__.py thumt/thumt/scripts/checkpoint_averaging.py thumt/thumt/data/dataset.py thumt/thumt/utils/parallel.py thumt/thumt/layers/attention.py thumt/thumt/utils/hooks.py thumt/thumt/layers/rnn_cell.py thumt/thumt/utils/utils.py thumt/thumt/utils/search.py thumt/thumt/data/__init__.py thumt/thumt/data/vocab.py thumt/thumt/scripts/build_vocab.py thumt/thumt/models/rnnsearch.py thumt/thumt/interface/__init__.py thumt/thumt/scripts/input_converter.py thumt/thumt/scripts/convert_vocab.py thumt/thumt/layers/nn.py thumt/thumt/bin/trainer.py export_params override_parameters decode_target_ids get_learning_rate_decay import_params default_parameters main parse_args collect_params session_config get_initializer merge_parameters batch_examples English get_evaluation_input get_training_input sort_input_file sort_and_zip_files input_pipeline batch_examples get_input_features process_vocabulary get_control_mapping load_vocabulary NMTModel add_timing_signal multihead_attention multiplicative_attention attention_bias additive_attention split_heads combine_heads attention linear smoothed_sigmoid_cross_entropy_with_logits smoothed_softmax_cross_entropy_with_logits maxout layer_norm LegacyGRUCell DL4MTGRULAUTransiLNCell _decoder _process_vocabulary RNNsearch _copy_through _gru_encoder model_graph _load_embedding _encoder get_model save_vocab control_symbols main count_words parse_args main parseargs checkpoint_exists get_checkpoints main parseargs new_keys old_keys convert_to_record load_vocab main to_example write_records parse_args main parseargs _save_checkpoint_def _read_score_record _evaluate _add_to_record EvaluationHook _save_score_record _read_checkpoint_def _get_saver _maybe_repeat shard_features parallel_model GPUParamServerDeviceSetter data_parallelism _create_device_setter create_inference_graph replicate_variables session_run scale_gradients collect_gradients zero_variables add_argument ArgumentParser HParams join abspath join MkDir add_hparam HParams getattr iterkeys iteritems add_hparam HParams setattr values parameters parse process_vocabulary model initializer_gain to_float minimum warmup_steps device_list e s r0 float hidden_size len join OptimizerOptions device_list GraphOptions ConfigProto print append override_parameters get_parameters model output import_params set_verbosity default_parameters collect_params export_params get_model INFO merge_parameters findall join split MosesTokenizer append sorted enumerate sorted close zip append enumerate MosesTokenizer append eos append_eos enumerate linear get get append embedding_path uniform array zero_state read constant output_size while_loop transpose TensorArray stack unstack set_shape zeros zeros output_size rnn_dropout smoothed_sigmoid_cross_entropy_with_logits add_timing_signal reduce_max concat num_transi bias_add use_aspect smoothed_softmax_cross_entropy_with_logits maxout expand_dims DL4MTGRULAUTransiLNCell dropout relu maxnum use_prediction alpha tile _encoder use_capsule_net tanh linear reduce_mean rooting hidden_size len lower items list sorted Counter zip list items sorted zip save_vocab zip limit print corpus control_symbols control 
count_words sum len add_argument ArgumentParser sorted dtype join rstrip replace info global_variables get_tensor load_checkpoint Variable Glob path Copy Saver get_checkpoints list_variables zeros load new_keys old_keys dict input enumerate string_types BytesList iteritems isinstance FloatList integer_types Int64List Feature TFRecordWriter close write info enumerate join TFRecordWriter write shuffle SerializeToString close xrange append to_example num_shards convert_to_record load_vocab shuffle output_dir output_name seed suffix arange min write tolist close SAVERS get_collection append int sorted append int sorted append sorted Graph isinstance iteritems list _maybe_repeat isinstance tuple zip range _create_device_setter len convert_to_tensor iteritems tile append expand_dims range len data_parallelism shard_features top_beams beam_size decode_alpha model_fn decode_length enumerate append isinstance indices assign_add zip append Tensor scatter_add values isinstance indices IndexedSlices append values | # Code and data for [A Novel Aspect-Guided Deep Transition Model for Aspect Based Sentiment Analysis](https://www.aclweb.org/anthology/D19-1559.pdf) ## Introduction The implementation is based on [THUMT](https://github.com/thumt/THUMT). Download [Glove](http://nlp.stanford.edu/data/glove.840B.300d.zip) file and change the path in 'AGDT/thumt/thumt/bin/trainer.py' correspondingly. The dataset we used is from [GCAE](https://github.com/wxue004cs/GCAE). ## Usage Training with the following scripts: + ACSA ``` bash run_train_14.sh bash run_train_large.sh ``` | 1,140 |
XiSHEN0220/WatermarkReco | ['sketch based image retrieval', 'image retrieval'] | ['Large-Scale Historical Watermark Recognition: dataset and a new consistency-based approach'] | featureLearning/train.py featureLearning/dataloader.py model/model.py localMatching/ConcatBriquet.py localMatching/localMatching.py localMatching/AvgPoolBriquet.py classification/outils.py classification/dataloader.py featureLearning/outils.py localMatching/outils.py localMatching/pair_discovery.py classification/train.py localMatching/localMatching_briquet.py featComparisonBaseline/featComparisonBaseline.py localMatching/LocalSimiBriquet.py ValLoader TrainLoader format_time init_params progress_bar get_mean_and_std train FC test getFeat LocalSimi AvgPool Cat LoadImg ImageFolder TrainDataLoader RetrievalRes cropPatch NegaCosineSimilaritytopk DataShuffle CosSimilarity PairPos ResizeImg CosineSimilarityTopK Normalization TrainPair PosCosineSimilaritytop1 saveTrainImgPair PosNegaSimilarity SearchImgFeat CosineSimilarity RandomQueryFeat InfiniteSampler getFeat getFeat getFeat BlurMask ScorePosIdentity ScorePosHough ResizeImg Affine FeatImgRef Prediction MatchPair ScorePosAffine drawPair imgFeat Hough PairDiscovery conv3x3 BasicBlock ResNetLayer4Feat ResNetLayer3Feat DataLoader ImageFolder DataLoader ImageFolder print DataLoader div_ zeros range len normal constant isinstance kaiming_normal Conv2d bias modules BatchNorm2d weight Linear int time join format_time write append range flush len int format criterion backward print progress_bar zero_grad item step max net enumerate len join format state_dict criterion print progress_bar eval outDir save max net enumerate len data cuda net squeeze sum mean sum sum ImageFolder DataLoader float round max loop FloatTensor size convert ResizeImg clone tqdm unsqueeze resize append InfiniteSampler conv2d size expand topk view size conv2d expand_as sum expand_as data ResizeImg size convert clone unsqueeze resize list topk view squeeze len keys tqdm unsqueeze cuda SearchImgFeat CosineSimilarity gather float max range enumerate combinations size convert ResizeImg min tqdm append float range len ResizeImg size convert resize crop join cropPatch format print system mkdir item range exists len permutation astype range len size contiguous min expand conv2d max size contiguous expand int NegaCosineSimilaritytopk PairPos ResizeImg size convert PosCosineSimilaritytop1 conv2d zip append size clone expand squeeze mean normalize view size expand view LongTensor ResizeImg size map expand unsqueeze resize type cuda ResizeImg size cuda join FLIP_LEFT_RIGHT topk LongTensor transpose convert mm scatter_ nonzero item append zeros imgFeat type range len T array ones array reshape T exp sum range len exp arange values choice Prediction sum max range Hough len exp arange Affine choice Prediction sum max range values len ones convolve2d max FLIP_LEFT_RIGHT uint8 imwrite addWeighted applyColorMap size transpose convert min astype COLORMAP_BONE COLOR_RGB2BGR zeros array cvtColor join ScorePosIdentity ScorePosHough convert FeatImgRef ScorePos ScorePosAffine MatchPair | # WatermarkReco Pytorch implementation of Paper "Large-Scale Historical Watermark Recognition: dataset and a new consistency-based approach" [[arXiv](http://arxiv.org/pdf/1908.10254.pdf)] [[Project website](http://imagine.enpc.fr/~shenx/Watermark)] [[YouTube Video (5mins)](https://youtu.be/9Y47oyvjfQ8)] [[Slides](http://imagine.enpc.fr/~shenx/Watermark/watermarkReco.pptx)] <p align="center"> <img 
src="https://github.com/XiSHEN0220/WatermarkReco/blob/master/figure/teaser.jpg" width="800px" alt="teaser"> </p> The project is an extension of [ArtMiner](http://imagine.enpc.fr/~shenx/ArtMiner/). If our project is helpful for your research, please consider citing: ``` @inproceedings{shen2020watermark, title={Large-Scale Historical Watermark Recognition: dataset and a new consistency-based approach}, | 1,141
XiaLiPKU/EMANet | ['semantic segmentation'] | ['Expectation-Maximization Attention Networks for Semantic Segmentation'] | bn_lib/nn/modules/tests/test_sync_batchnorm.py network.py bn_lib/nn/modules/__init__.py bn_lib/utils/data/sampler.py bn_lib/nn/modules/comm.py bn_lib/nn/modules/batchnorm.py bn_lib/nn/modules/tests/test_numeric_batchnorm.py train.py metric.py bn_lib/utils/data/dataloader.py dataset.py bn_lib/utils/data/distributed.py bn_lib/utils/data/dataset.py bn_lib/utils/data/__init__.py bn_lib/nn/parallel/__init__.py bn_lib/utils/__init__.py settings.py bn_lib/nn/__init__.py eval.py bn_lib/nn/modules/replicate.py bn_lib/nn/modules/unittest.py bn_lib/nn/parallel/data_parallel.py bn_lib/metric.py bn_lib/utils/th.py BaseDataset ValDataset fetch pad_inf TrainDataset test_dt pad flip scale crop main Session cal_scores fast_hist EMANet EMAU ResNet CrossEntropyLoss2d Bottleneck resnet ConvBNReLU test_net ensure_dir poly_lr_scheduler main get_params Session cal_scores fast_hist _sum_ft SynchronizedBatchNorm2d _unsqueeze_ft _SynchronizedBatchNorm SynchronizedBatchNorm1d SynchronizedBatchNorm3d SyncMaster FutureResult SlavePipe execute_replication_callbacks CallbackContext DataParallelWithCallback patch_replication_callback TorchTestCase as_numpy handy_var NumericTestCase SyncTestCase handy_var _find_bn mark_volatile as_variable as_numpy DataLoaderIter _set_SIGCHLD_handler default_collate _worker_manager_loop DataLoader _worker_loop ExceptionWrapper pin_memory_batch random_split ConcatDataset Subset TensorDataset Dataset DistributedSampler SubsetRandomSampler WeightedRandomSampler RandomSampler BatchSampler SequentialSampler Sampler asarray MEAN FloatTensor STD unsqueeze SCALES interpolate choice max CROP_SIZE pad STRIDE randint CROP_SIZE ValDataset print TrainDataset mean shape unique range len items inf_batch cal_scores eval load_checkpoints info numpy dataloader Session enumerate reshape N_CLASSES N_CLASSES nanmean dict zip sum diag load ResNet load_state_dict EMANet list named_children randn print model size eval long named_modules isinstance Conv2d makedirs float train_batch step write save_checkpoints poly_lr_scheduler iter train next list hasattr __data_parallel_replicate__ modules enumerate len replicate data isinstance size sum modules isinstance Mapping Sequence isinstance is_tensor Mapping Sequence is_tensor isinstance Sequence Variable Mapping seed init_fn get set_num_threads _set_worker_signal_handlers collate_fn manual_seed get isinstance set_device put pin_memory_batch sum isinstance Sequence new Mapping type zip _new_shared is_tensor Mapping is_tensor isinstance Sequence SIGCHLD signal getsignal randperm sum | # EMANet ## News - The bug in loading the pretrained model is now fixed. I have updated the .pth. To use it, download it again. - EMANet-101 gets *80.99* on the PASCAL VOC dataset (Thanks for Sensetimes' server). So, with a classic backbone(ResNet) instead of some newest ones(WideResNet, HRNet), EMANet still achieves the top performance. - EMANet-101 (OHEM) gets *81.14* in mIoU on Cityscapes val using single-scale inference, and [81.9](https://www.cityscapes-dataset.com/anonymous-results/?id=35eea66e90fef69e336babb009082f9cfda2b98d0327a8186f91d5edaacd7c7f) on test server with multi-scale inference. 
## Background This repository is for [Expectation-Maximization Attention Networks for Semantic Segmentation](https://xialipku.github.io/publication/expectation-maximization-attention-networks-for-semantic-segmentation/) (to appear in ICCV 2019, Oral presentation), by [Xia Li](https://xialipku.github.io/), [Zhisheng Zhong](https://zzs1994.github.io/), [Jianlong Wu](https://jlwu1992.github.io/), [Yibo Yang](https://scholar.google.com.hk/citations?user=DxXXnCcAAAAJ&hl=en), [Zhouchen Lin](http://www.cis.pku.edu.cn/faculty/vision/zlin/zlin.htm) and [Hong Liu](https://scholar.google.com/citations?user=4CQKG8oAAAAJ&hl=en) from Peking University. **The source code is now available!** ### citation | 1,142 |
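The EMANet background above points to the Expectation-Maximization Attention paper, and the row's module list includes an `EMAU` unit. The snippet below is a hedged miniature of an EM attention iteration: it alternates an E-step (responsibilities of K bases for each spatial feature) with an M-step (re-estimating the bases), then reconstructs the feature map from the bases. Shapes and normalization choices are assumptions rather than the repository's exact code.

```python
# Miniature EM attention iteration (illustrative sketch, not the repo's EMAU module).
import torch
import torch.nn.functional as F

def em_attention(x, mu, iters=3):
    """
    x:  (B, C, H, W) feature map
    mu: (B, C, K) bases
    Returns the reconstructed feature map and the updated bases.
    """
    b, c, h, w = x.shape
    feat = x.view(b, c, h * w)                           # (B, C, N)
    for _ in range(iters):
        # E-step: responsibility of each base for each spatial position
        z = torch.einsum('bcn,bck->bnk', feat, mu)       # (B, N, K)
        z = F.softmax(z, dim=2)
        # M-step: bases become responsibility-weighted averages of the features
        z_norm = z / (z.sum(dim=1, keepdim=True) + 1e-6)
        mu = torch.einsum('bcn,bnk->bck', feat, z_norm)  # (B, C, K)
        mu = F.normalize(mu, dim=1)                      # keep bases on the unit sphere
    recon = torch.einsum('bck,bnk->bcn', mu, z).view(b, c, h, w)
    return recon, mu

if __name__ == "__main__":
    x = torch.randn(2, 64, 16, 16)
    mu = F.normalize(torch.randn(2, 64, 8), dim=1)
    out, mu = em_attention(x, mu)
    print(out.shape)   # torch.Size([2, 64, 16, 16])
```

The attention map over only K bases is what keeps the unit's cost linear in the number of pixels rather than quadratic.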
XiaXuehai/faceboxes | ['face detection'] | ['FaceBoxes: A CPU Real-time Face Detector with High Accuracy'] | multibox_layer.py data/handle.py networks.py multibox_loss.py encoderl.py dataset.py inference.py trainvisdom.py ListDataset DataEncoder detect testIm init_model MultiBoxLayer MultiBoxLoss init_model Inception conv_bn_relu FaceBox train decode squeeze shape pad unsqueeze softmax numpy resize float abs net int str imwrite print putText FONT_HERSHEY_COMPLEX exit waitKey shape detect rectangle imshow imread enumerate isinstance Conv2d bias xavier_uniform_ BatchNorm2d weight constant_ init_model BatchNorm2d Conv2d zero_grad MultiStepLR DataLoader abspath save MultiBoxLoss cuda Adam dirname load_state_dict FaceBox range state_dict Visdom format eval mkdir item is_available net enumerate load line criterion backward print parameters ListDataset step len | # Faceboxes faceboxes implement by pytorch.《FaceBoxes: A CPU Real-time Face Detector with High Accuracy》 This repository is forked [lxg2015](https://github.com/lxg2015/faceboxes),and refer to [xiongzihua](https://github.com/xiongzihua/faceboxes) and add the augmentation from SSD,and change the code to pytorch 0.41. # Data Preparation You should download wider face data. Data annotation is like this: ``` data/all/image01468.jpg 1 119 185 139 139 1 data/all/image01449.jpg 2 9 39 74 74 1 409 93 77 77 1 | 1,143 |
XiangLi1999/PosteriorControl-NLG | ['text generation'] | ['Posterior Control of Blackbox Generation'] | src/utils_data.py src/post-process.py src/train.py src/control-measure.py src/conllu_reader.py src/train_helper.py src/labeled_data.py src/reranker.py src/rnn_new.py src/beam_search.py src/controlwb2.py src/labeled_data2.py data/make_wikibio_labedata.py src/post_process2.py src/rnn.py src/temp_others.py src/train_chunk.py experiment.py src/lex_overlap.py shepherd.py src/hsmm.py src/Chunking_Reader.py src/pre_train_elmo.py src/utils.py src/hsmm_chunk.py data/utils.py src/glo.py data/make_e2e_labedata.py src/control-measure-e2e.py src/toy_data.py src/labeled_data3.py src/Chunking_Gen.py src/controle2e2.py src/eval_pos_induction.py src/hsmm_gen.py get_p_template con_train_wb beam_search_wb con_train con_train_e2e_test beam_search _itr_file _get_data _itr_file_list beam_rnn beam_control con_train22 train_permute con_train_e2e_rnn con_train_wbglobal con_train_wbcluster con_train_e2e con_train_e2e_soft collect_tb_info CLSPJobHandler _list_to_dict init_marcc git VarDict MarccJobHandler _download setup init_local CMD sync_only make_dirs post arg_conf get_logger JobHandler init_cluster gs DRY_RUN load_job_info _upload _git _collect_exp_functions shepherd init LocalJobHandler sync MarccInteractJobHandler SPD basic_func grid_search init_clsp_grid _load_conf ALL CMD_LOACL get_wikibio_poswrds get_e2e_poswrds get_e2e_fields get_wikibio_fields beam_action_decode beam_decode BeamSearchNode greedy_decode Chunking_RNN _is_divider Data_Loader_toy Data_Loader_Chunk Data_Loader_ptb Embedding_Weight Data_Loader read_beam_files process_e2e_control replace_control get_span visual_viterb replace_unk2 get_sents get_span read_vit_files read_beam_files3 combine_vit process_full_beam2 process_wibi_control replace_unk process_full_beam replace_control read_beam_files process_full_beam3 get_wiki_mapj combine_beam read_beam_files process_e2e_control replace_control get_span visual_viterb replace_unk2 get_sents get_span read_vit_files read_beam_files3 combine_vit process_full_beam2 process_wibi_control replace_unk process_full_beam replace_control read_beam_files process_full_beam3 get_wiki_mapj combine_beam idx_tags gold_reader eval_full eval_avg sanity_check induction_reader dict_renew purity_measure VarDict get_logger experiment HSMM HSMM_chunk HSMM_generative SentenceCorpus Dictionary SentenceCorpus Dictionary SentenceCorpus Dictionary process_full_beam_e2e visual_viterb replace_unk_full get_sents replace_unk_e2e_ replace_unk_e2e replace_unk22 read_vit_files read_beam_files3 combine_vit replace_unk_ process_full_beam2 replace_unk process_full_beam read_beam_files combine_beam_e2e process_full_beam3 combine_beam print_segment get_sents process_full_beam2 replace_unk see_result read_beam_files combine_beam Embedding_Weight Chunking_Model RNN_cond_Gen RNNLM2 generate_x_ngram generate_all Data_Loader_toy get_grams generate_z generate_x beam_rnn2 _error_break full_lagging_opt gather_stats investigate_viterb_hsmm aggressive_opt get_template beam_corpus visual_viterb get_template_p test_rnn data_generator generate _finish gather_stats_PPL beam_rnn test beam_single control_rnn get_optim _start investigate_viterb segment_num_viterbi beam_corpus_rnn gather_rnn_stats train beam_corpus segment_num_viterbi visual_viterb _error_break beam_rnn gather_rnn_stats gather_stats train test get_template_p beam_single test_rnn get_template control_rnn _finish get_optim _start print_segment get_v1_acc get_f1_score_batch 
gen_detailed_tgt_mask1 logsumexp0 gen_detailed_tgt_mask3 make_bwd_idxs_pre gen_detailed_tgt_mask2_ get_v3_acc print_dict2 logsumexp1 vseq_2_vit_lst_batch get_f1_score editDistance logsumexp2 vit2lst get_segment_ed_batch gen_detailed_tgt_mask2 get_segment_acc bwd_from_fwd_obs_logprobs get_acc_ get_linear_boundary v1_2_v3 make_bwd_idxs_pre2 v1_2_v2 get_segment_acc_batch gen_detailed_src_mask get_acc_seg get_uniq_fields get_kl_p make_combo_targs vseq_2_vit_lst print_dict3 idx2word vseq_2_vvit get_kl_log get_segment_ed gen_detailed_tgt_mask gen_detailed_tgt_mask_pre v2_2_v1 make_masks form_word_embeds vvit_2_vseq print_dict calc_mi get_wikibio_poswrds get_e2e_poswrds get_e2e_fields get_wikibio_fields join print groups match normpath walk compile split grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set submit join _itr_file_list format tmp set ALL grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set grid_search ALL set submit ALL _itr_file_list set join print groups match normpath walk compile compile dict items update map stdout setFormatter basename getLogger addHandler StreamHandler Formatter syslog setLevel FileHandler info decode print strip info Popen info isfile makedirs join argv info workspace getcwd _list_to_dict sys _load_conf add set get_logger usr prfx out log makedirs seed join update setup info init_cluster init_local CMD load_job_info set make_dirs skip latest startswith task_dir listdir split finish LocalJobHandler CLSPJobHandler init_marcc init_clsp_grid set gs list map Random submit est info format arg_conf format CMD set CMD split split _git _download _upload _git _download _upload append defaultdict append join defaultdict split int join split prevNode put unsqueeze BeamSearchNode leng topk sorted view h append range get PriorityQueue LongTensor item wordid isinstance logp min step_ len prevNode put unsqueeze BeamSearchNode leng topk sorted view h append range get PriorityQueue LongTensor size item wordid isinstance logp min step_ len topk decoder LongTensor view size zeros range items join defaultdict get_span print len get_e2e_poswrds literal_eval index mean append range split append replace_control read_beam_files load join format get_sents combine_beam read_beam_files append enumerate load join format read_beam_files3 append enumerate join format get_sents replace_unk read_beam_files append combine_beam items defaultdict print append len join format get_sents read_beam_files replace_control append get_wiki_mapj combine_beam join literal_eval get_wikibio_poswrds index zip append enumerate split join len literal_eval get_wikibio_poswrds index append range enumerate split get_wikibio_poswrds max enumerate print print append format join visual_viterb print append enumerate len append enumerate format unique argmax array items format print mean append len items Counter print defaultdict len print zip append append v_measure_score mean zip v_measure_score defaultdict valid print extend test zip append range len join format get_sents print replace_unk_e2e read_beam_files combine_beam_e2e append replace_unk_full join len literal_eval get_e2e_poswrds index append range enumerate split join literal_eval get_wikibio_poswrds index zip append enumerate split join get_e2e_poswrds literal_eval index split append float range enumerate len float join format len literal_eval get_wikibio_poswrds append float range enumerate split join 
len literal_eval get_wikibio_poswrds index append range enumerate split append enumerate append format print append list range set append append generate_z generate_x_ngram seed str info add __version__ version manual_seed vars info print labeled_states info defaultdict valid format info train print_dict format viterbi view print vseq_2_vvit len get_weights enumerate format viterbi view print vseq_2_vvit len get_weights enumerate sum beam_forward size transpose encode_table beam_forward size transpose encode_table beam_rnn2 valid device cuda defaultdict ngen_types append to range format size get_uniq_fields info control_rnn make_combo_targs flush beam_size items save_out print write pad_idx make_masks train len valid device cuda defaultdict ngen_types append to range format beam_rnn size get_uniq_fields info control_rnn make_combo_targs flush beam_size items save_out print min write pad_idx make_masks train len get_tgt_wembs get_tgt_embs transpose encode_table mean shape append forward range tagset_size data zeros_like copy_ sample_size device log str get_field_embs exp visual_viterb get_tgt_embs fill_ transpose encode_table logsumexp1 shape append to sum range detach get_tgt_wembs forward_with_crf format mean item zip enumerate join print write data zeros_like copy_ sample_size device str get_field_embs exp visual_viterb get_tgt_embs fill_ transpose encode_table shape append to sum range detach get_tgt_wembs forward_with_crf format mean item zip enumerate join time print write print_dict3 defaultdict valid format info train print_dict format print_dict info gather_stats clip_grad_norm_ zero_grad device cuda defaultdict kl_pen print_dict randperm ngen_types to range format size q_max_grad_norm get_uniq_fields info make_combo_targs backward min pad_idx parameters delta_kl make_masks step len list len range choice __next__ batch_size gather_stats clip_grad_norm_ zero_grad max_grad_norm aggressive_opt kl_pen data_generator append lamb_init lamb lamb_max mean q_max_grad_norm enumerate investigate_viterb backward min parameters delta_kl step len defaultdict kl_pen __next__ gather_stats print param_groups backward clip_grad_norm_ zero_grad print_dict mean parameters max_grad_norm q_max_grad_norm append step enumerate test format valid save_out info train word_vecs test get_state_embs log logsumexp1 vseq_2_vit_lst_batch get_word_embs print_dict2 print_dict2 print zeros size range items print range fill_ unique range fill_ unique update min extend max range len update fill_ view min extend max range enumerate len shape range fill_ unique range fill_ unique range fill_ unique append view copy_ size range fill_ exp sum sum log fill_ size copy_ max enumerate size exp view size unsqueeze expand_as sum max log exp view size unsqueeze expand_as sum max log exp expand_as sum max log enumerate sum array vvit_2_vseq len copy_ size range fill_ len get_sample get_score view vseq_2_vvit squeeze get_entr len expand mean shape stack append get_weights enumerate append range len append len append enumerate sum items items append zip enumerate enumerate sum array v1_2_v3 len sum len append len range len enumerate min len append range enumerate get_linear_boundary range editDistance len get_linear_boundary editDistance get_linear_boundary f1_score get_linear_boundary range len | # FSA-RNN Posterior Control of Blackbox Generation -- How to train? run the following commands to generate the scripts, and run the scripts. 
python shepherd.py -d con_train_wbcluster-1 python shepherd.py -d con_train_wbglobal-1 python shepherd.py -d con_train_e2e-1 -- Download and play with the trained model: https://drive.google.com/drive/folders/1e-chqMmCx-NmVenrcqspNyLm8gwCqCOP?usp=sharing is the res file. -- Load trained model: (Dynamic Constraints PR) | 1,144 |
Xiangyu-CAS/AICity2020-VOC-ReID | ['vehicle re identification', 'data augmentation'] | ['VOC-ReID: Vehicle Re-identification based on Vehicle-Orientation-Camera'] | tools/aicity20/weakly_supervised_crop_aug.py lib/modeling/backbones/__init__.py lib/modeling/backbones/osnet.py lib/modeling/backbones/regnet/config.py lib/data/datasets/aicity20_ReColor.py tools/aicity20/vis_result.py lib/data/build.py lib/solver/swa.py tools/test.py lib/utils/iotools.py lib/utils/bbox_utils.py lib/layers/__init__.py lib/data/datasets/dataset_loader.py lib/config/defaults.py lib/layers/metric_learning.py lib/layers/triplet_loss.py lib/layers/pooling.py lib/modeling/backbones/resnet.py lib/utils/vis.py lib/data/datasets/__init__.py lib/data/transforms/transforms.py lib/layers/build.py lib/utils/__init__.py lib/data/transforms/vis_transform.py lib/data/datasets/aicity20.py lib/data/datasets/market1501.py lib/modeling/backbones/res2net.py lib/data/datasets/aicity20_ReCam.py lib/data/samplers/__init__.py lib/solver/build.py lib/solver/__init__.py tools/vis_actmap.py lib/data/datasets/veri.py tools/train.py lib/data/transforms/fmix.py lib/data/datasets/aicity20_trainval.py tools/aicity20/compute_distmat_from_feats.py lib/data/datasets/dukemtmcreid.py tools/aicity20/eval_by_distmat.py lib/data/datasets/aicity20_ReOri.py lib/data/transforms/__init__.py lib/utils/actmap.py tools/aicity20/multi_model_ensemble.py tools/aicity20/submit.py lib/utils/post_process.py lib/modeling/backbones/regnet/regnet.py lib/data/datasets/cuhk03.py lib/data/datasets/msmt17.py lib/modeling/backbones/resnext_ibn_a.py lib/engine/train_net.py lib/solver/lr_scheduler.py lib/modeling/backbones/osnet_ain.py lib/data/collate_batch.py lib/modeling/backbones/resnet_ibn_a.py lib/config/__init__.py lib/data/datasets/bases.py lib/modeling/__init__.py lib/utils/reid_eval.py lib/solver/ranger.py lib/utils/logger.py lib/data/datasets/aicity20_sim.py lib/data/datasets/aicity20_ReType.py lib/data/transforms/build.py lib/modeling/baseline.py lib/data/samplers/triplet_sampler.py lib/modeling/backbones/resnet_ibn_b.py lib/engine/inference.py lib/modeling/backbones/resnest.py lib/data/__init__.py lib/modeling/backbones/densenet.py tools/aicity20/fix_track.py lib/data/transforms/augmix.py make_data_loader val_collate_fn train_collate_fn AICity20 AICity20ReCam AICity20ReColor AICity20ReOri AICity20ReType AICity20Sim AICity20Trainval BaseDataset BaseImageDataset apply_id_bias BaseVideoDataset CUHK03 ImageDataset read_image DukeMTMCreID Market1501 MSMT17 VeRi get_names init_dataset RandomIdentitySampler MPerClassSampler RandomIdentitySampler_alignedreid equalize translate_x sample_level float_parameter rotate shear_y posterize solarize translate_y int_parameter autocontrast shear_x AugMix build_transforms fftfreqnd make_low_freq_image FMixBase sample_mask sample_and_apply fmix_loss sample_lam get_spectrum FMix binarise_mask RandomErasing ColorAugmentation RandomBlur RandomPatch GaussianBlur ColorSpaceConvert inference extract_features select_topk validate AverageMeter open_all_layers do_train train frozen_feature_layers make_loss CircleLoss Arcface AMSoftmax Cosface ContrastiveLoss GeM hard_example_mining euclidean_dist CenterTripletLoss CrossEntropyLabelSmooth TripletLoss normalize Baseline_reduce Baseline weights_init_classifier build_embedding_head weights_init_kaiming build_model DenseNet _DenseLayer _DenseBlock _Transition densenet121 Conv1x1 osnet_x1_0 OSNet Conv3x3 Conv1x1Linear ConvLayer ChannelGate LightConv3x3 osnet_ibn_x1_0 OSBlock osnet_ain_x1_0 
LightConvStream Conv1x1 OSNet Conv3x3 Conv1x1Linear ConvLayer ChannelGate LightConv3x3 OSBlockINin OSBlock res2net50_v1b_26w_4s Res2Net res2net50_v1b res2net101_v1b Bottle2neck res2net101_v1b_26w_4s res2net152_v1b_26w_4s DropBlock2D ResNet Bottleneck SplAtConv2d IBN GlobalAvgPool2d resnest50 ResNet resnet50 Bottleneck conv3x3 BasicBlock resnet152_ibn_a SEBottleneck Bottleneck_IBN resnet50_ibn_a se_resnet101_ibn_a ResNet_IBN IBN resnet101_ibn_a SELayer resnet50_ibn_b ResNet Bottleneck conv3x3 resnet101_ibn_b BasicBlock resnet152_ibn_b resnext50_ibn_a ResNeXt Bottleneck IBN resnext152_ibn_a resnext101_ibn_a build_backbone load_cfg dump_cfg assert_and_infer_cfg adjust_ws_gs_comp ResStemCifar regnety_3200mf get_block_fun SE BasicTransform ResStemIN regnety_800mf ResBasicBlock quantize_float AnyNet get_stages_from_blocks ResBottleneckBlock SimpleStemIN init_weights VanillaBlock AnyHead generate_regnet get_stem_fun AnyStage RegNet BottleneckTransform regnety_1600mf make_optimizer build_lr_scheduler WarmupCosineLR CosineStepLR WarmupMultiStepLR CyclicCosineLR Ranger _check_bn SWA _reset_bn _get_momenta _check_bn_apply _set_momenta batch_attention_mask generate_attention_mask localize_from_map extract_bbox_from_mask compute_iou bbox_nms draw_bbox check_isfile read_json write_json mkdir_if_missing setup_logger orientation_penalize write_results re_ranking decode_tf2gf decode_trackIndice comput_distmat build_track_lookup pca_whiten encode_gf2tf rerank_indice_by_track average_query_expansion generate_track_idxs generate_track_distmat track_aug database_aug alpha_query_expansion evaluator eval_func concat_vis main main train main vis_actmap generate_track_results results_to_pid generate_results results_to_track eval_results main write_result_with_track write_result visualize_submit main vis_actmap BaseImageDataset apply_id_bias init_dataset gallery_orientation query DataLoader query_orientation get_imagedata_info build_transforms TEST relabel COMBINEALL TRAIN ImageDataset print_dataset_statistics longtail_data_process CUTOFF_LONGTAILED enumerate gallery test_tracks isinstance print extend NUM_WORKERS train get_id_range tensor zip zip append convert sample_level int_parameter sample_level int_parameter sample_level int_parameter sample_level float_parameter size AFFINE transform sample_level float_parameter size AFFINE transform sample_level size AFFINE int_parameter transform sample_level size AFFINE int_parameter transform Compose Normalize fftfreq expand_dims list randn ones maximum shape array fftfreqnd min get_spectrum irfftn real max rvs int reshape size min linspace binarise_mask make_low_freq_image sample_lam isinstance permutation sample_mask DEVICE evaluator time format compute getLogger eval info empty_cache to append range DEVICE eval normalize to cat validate getLogger save OUTPUT_DIR cuda initialize FP16 range state_dict DEVICE format info join remove MAX_EPOCHS open_all_layers empty_cache train step frozen_feature_layers data model zero_grad cuda FP16 LOG_PERIOD update val format synchronize size to_python_float info time backward AverageMeter loss_fn step len evaluator compute format eval info parameters eval named_children parameters train named_children METRIC_LOSS_TYPE HARD_EXAMPLE_MINING_METHOD format MARGIN print CrossEntropyLabelSmooth TripletLoss ContrastiveLoss cross_entropy expand_as t sqrt addmm_ expand data ne format exp view print softmin size min squeeze expand t multinomial eq softmax gather sum max affine bias normal_ kaiming_normal_ weight __name__ constant_ bias normal_ 
weight __name__ constant_ print Sequential format Linear NECK_FEAT print Baseline_reduce LAST_STRIDE NECK Baseline NAME PRETRAIN_CHOICE PRETRAIN_PATH OSNet OSNet OSNet load_url Res2Net load_state_dict load_url Res2Net load_state_dict load_url Res2Net load_state_dict load_url Res2Net load_state_dict load_url Res2Net load_state_dict ResNet ResNet_IBN load_url load_state_dict ResNet_IBN load_url load_state_dict ResNet_IBN ResNet_IBN load_url load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict ResNeXt ResNeXt ResNeXt OUT_DIR join CFG_DEST merge_from_file join isinstance fill_ out_channels Conv2d normal_ zero_ BatchNorm2d Linear tolist zip arange divide power round log merge_from_file merge_from_file merge_from_file FC_LR_FACTOR print WEIGHT_DECAY_BIAS SWA SGD named_parameters BASE_LR Ranger BIAS_LR_FACTOR WEIGHT_DECAY STEPS CYCLE_EPOCH WarmupCosineLR WARMUP_METHOD GAMMA float MAX_EPOCHS CosineStepLR MultiStepLR WarmupMultiStepLR CosineAnnealingLR WARMUP_FACTOR CyclicCosineLR WARMUP_ITERS _BatchNorm issubclass __class__ apply ones_like issubclass zeros_like running_mean _BatchNorm __class__ running_var _BatchNorm issubclass __class__ momentum _BatchNorm issubclass __class__ view size min sum max range min max any extract_bbox_from_mask pop sorted list filter append min max rectangle makedirs print format isfile dirname mkdir_if_missing setFormatter join getLogger addHandler StreamHandler Formatter DEBUG setLevel FileHandler t argsort addmm_ expand expand t argsort mean numpy addmm_ mean comput_distmat mean comput_distmat zeros_like float16 max exp transpose expand append sum range cat size astype mean unique addmm_ minimum print t int32 zeros numpy len enumerate format print linear comput_distmat len clone t mean expand_as append normalize enumerate add set build_track_lookup append range transform PCA fit_transform build_track_lookup mean append zeros enumerate rand build_track_lookup append range len append zeros range build_track_lookup normalize print comput_distmat shape unsqueeze tensor print shape format makedirs append append hstack enumerate invert format arange asarray print cumsum astype float32 set mean shape int32 append sum array range len resize zeros enumerate len ArgumentParser make_data_loader opts load_param OUTPUT_DIR DEVICE_ID freeze parse_args inference merge_from_file format build_model config_file setup_logger WEIGHT merge_from_list mkdir info add_argument make_optimizer load build_lr_scheduler LR_SCHEDULER build_model print PRETRAIN_CHOICE make_loss load_state_dict make_data_loader do_train load_param PRETRAIN_PATH train makedirs DEVICE join eval mkdir to OUTPUT_DIR SIZE_TEST vis_actmap extend argsort shape append range append asarray arange format cumsum print mean append sum array range len argsort shape range append extend add set append range enumerate print shape format makedirs format print tolist extend set add shape append range enumerate makedirs ROOT_DIR dirname AICity20 write_result_with_track test_tracks join basename gallery imwrite query_dir concatenate gallery_dir rectangle resize append imread range enumerate makedirs | # VOC-ReID: Vehicle Re-identification based on Vehicle-Orientation-Camera This repo includes the 2nd place solution for [AICity2020](https://www.aicitychallenge.org/) Challenge ReID track. [Our paper](http://arxiv.org/abs/2004.09164) ## Update - In ECCV VisDA 2020 Person ReID challenge, all the top3 teams adopt camera bias post-process. It brings 5%-10% increment in challenge. 
## Introduction Our work aims to eliminate the bias posed by similar background and shape. This project is mainly based on [reid-strong-baseline](https://github.com/michuanhaohao/reid-strong-baseline) and [deep-person-reid](https://github.com/KaiyangZhou/deep-person-reid) | 1,145
XiaoxiaoGuo/fashion-iq | ['image retrieval'] | ['Fashion IQ: A New Dataset Towards Retrieving Images by Natural Language Feedback'] | start_kit/train.py transformer/interactive_retrieval/UserModel.py start_kit/data_loader.py transformer/user_modeling/preprocess_fashionIQ.py transformer/attribute_prediction/attribute_loader.py transformer/user_modeling/dataset.py transformer/interactive_retrieval/models.py transformer/user_modeling/pytorchtools.py transformer/attribute_prediction/finetune.py start_kit/utils.py transformer/interactive_retrieval/eval.py transformer/interactive_retrieval/Ranker.py transformer/user_modeling/preprocess.py transformer/user_modeling/Embed.py start_kit/models.py transformer/interactive_retrieval/train.py transformer/interactive_retrieval/user_model.py transformer/user_modeling/test.py transformer/interactive_retrieval/Beam.py transformer/interactive_retrieval/data_loader.py transformer/user_modeling/train.py start_kit/eval.py transformer/interactive_retrieval/utils.py transformer/user_modeling/Beam.py transformer/user_modeling/download_image.py transformer/user_modeling/resize_image.py transformer/user_modeling/Optim.py start_kit/resize_images.py transformer/user_modeling/Models.py transformer/interactive_retrieval/Vocabulary.py start_kit/build_vocab.py transformer/user_modeling/Layers.py transformer/user_modeling/Sublayers.py transformer/interactive_retrieval/glove_embedding.py transformer/user_modeling/build_vocab.py transformer/attribute_prediction/test.py main build_vocab Vocabulary collate_fn Dataset get_loader evaluate DummyImageEncoder DummyCaptionEncoder resize_images resize_image_operator resize_image main resize_images_parallel eval_batch train create_exp_dir Ranker collate_fn Dataset get_loader create_exp_dir finetune_attributes evaluate_model compute_metric evaluate_attributes collate_fn get_loader Dataset evaluate_model greedy_search create_masks nopeak_mask init_vars k_best_outputs beam_search collate_fn Dataset eval_batch create_exp_dir eval load_test_image_features extract_glove_embedding extract_vocab_embedding MultiHeadAttention FeedForward Encoder get_clones RetrieverTransformer Norm attention PositionalEncoder EncoderLayer Ranker eval_batch create_exp_dir train load_image_features UserModel DecoderLayer Transformer create_model MultiHeadAttention Attribute_Embedding FeedForward Decoder Encoder get_clones load_trained_model Embedder Norm CNN_Embedding Joint_Encoding attention PositionalEncoder EncoderLayer get_image_batch extract_features get_attribute_batch Vocabulary init_vars k_best_outputs beam_search main build_vocab Vocabulary load_ori_token_data_new collate_fn get_loader_test collate_fn_test get_loader load_ori_token_data Dataset_fastrcnn Dataset process_url parse_url make_folder Embedder PositionalEncoder DecoderLayer EncoderLayer Transformer create_masks nopeak_mask Attribute_Embedding Decoder Encoder get_clones Joint_Encoding get_model CNN_Embedding NoamOpt get_std_opt preprocess_data main process_url parse_url preprocess_data_from_xiaoxiao process_url preprocess_data main parse_url EarlyStopping resize_images resize_image_operator resize_image main resize_images_parallel MultiHeadAttention Norm FeedForward attention main test greedy_search count_parameters get_subsequent_mask cal_performance train_epoch cal_loss main eval_epoch train eval_epoch_bleu calculate_bleu load update word_tokenize format init_vocab Vocabulary print add_word len Counter lower range enumerate open format print len data_set save build_vocab stack long enumerate 
zip DataLoader Dataset update_emb batch_size open delete_resnet load_state_dict to dump format Vocabulary Compose load_resnet data_set eval embed_size model_folder load join print data_split Ranker get_loader format print makedirs listdir enumerate len print format format print cpu_count len listdir makedirs image_dir resize_images_parallel output_dir update_emb size item cat enumerate batch_size logging zero_grad create_exp_dir save forward eval_batch str delete_resnet triplet_avg Adam strftime to range state_dict format LongTensor Vocabulary caption_encoder param_groups size Compose load_resnet data_set eval item embed_size float enumerate load join learning_rate backward Ranker get_loader step get_trainable_parameters len join format basename print copyfile mkdir cpu_count gather sum eval enumerate from_pretrained logging model data_file zero_grad create_exp_dir save bce_average Adam strftime load_state_dict append to range state_dict format param_groups Compose data_set eval item float load join requires_grad learning_rate int backward print named_parameters tqdm pretrained_model get_loader train step len tqdm from_pretrained items load format logging Compose data_set eval pretrained_model load_state_dict get_loader to Variable astype to size unsqueeze device beam_size topk decoder joint_encoding attribute_embedding1 encoder unsqueeze softmax out device cnn2 to add_attribute attribute_embedding2 cat cnn1 topk unsqueeze transpose view beam_size max join decoder init_vars k_best_outputs max_seq_len type_as softmax nonzero cuda device to out range len max decoder cumsum max_seq_len joint_encoding attribute_embedding1 encoder out device cnn2 to range add_attribute attribute_embedding2 cat cnn1 load join format print Compose data_set save isfile image_model extract_features Dataset attr2idx_file batch_size random_ num_dialog_turns floor tensor forward round init_hist append get_image_batch to range update eval encode_image get_attribute_batch int time criterion Ranker tqdm len load str join format logging print UserModel load_test_image_features trained_model to append asarray len format print data_set append range len dropout transpose matmul sqrt unsqueeze masked_fill softmax load join format print Compose data_set save isfile image_model extract_features Dataset attr2idx_file zero_grad backward train step load_image_features max UserModel parameters load Transformer cnn_name n_layers dropout attribute_vocab_size print n_heads add_attribute d_model vocab_size load_state_dict cnn_pretrained_model joint_enc_func Transformer cnn_name n_layers dropout attribute_vocab_size n_heads xavier_uniform_ add_attribute parameters d_model vocab_size cnn_pretrained_model joint_enc_func from_pretrained load format asarray print append_batch tqdm from_numpy eval floor load_state_dict to range len save_output_path stack zip print len print DataLoader Dataset len join reverse split mkdir lower Transformer n_heads xavier_uniform_ vocab_size device add_attribute cnn_name d_model load_state_dict to joint_enc_func n_layers dropout attribute_vocab_size load print pretrained_model cnn_pretrained_model get_trainable_parameters parse_url preprocess_data tsv_file_path load format open append makedirs load format open append makedirs data_prefix preprocess_data_from_xiaoxiao add_argument exit test ArgumentParser device parse_args batch_size strip get_loader_test beam_search map load_ori_token_data_new range vocab Vocabulary Compose eval compute_score zip data_test load print tqdm get_model method len cnn numpy cuda 
cal_loss ne eq item list word2idx tolist map sentence_bleu append ne view log_softmax size scatter sum cross_entropy float size transpose masked_fill ne model backward train cal_performance zero_grad map clip_grad_norm_ tqdm item to step get_trainable_parameters eval eval time epoch print EarlyStopping train_epoch early_stopping_with_saving device eval_epoch eval_epoch_bleu log early_stop save_model batch_size count_parameters get_loader_test seed load_ori_token_data_new split d_model vocab Vocabulary Compose get_std_opt data_dev_combined data_train data_dev manual_seed data_test train load join pretrained_model get_loader get_model makedirs | # fashion-iq ## About this repository Fashion IQ is a dataset we contribute to the research community to facilitate research on natural language based interactive image retrieval. We released Fashion IQ dataset at ICCV 2019 workshop on [Linguistics Meets Image and Video Retrieval](https://sites.google.com/view/lingir/fashion-iq). The images can be downloaded from [here](https://github.com/hongwang600/fashion-iq-metadata). The image attribute features can be downloaded from [here](https://ibm.box.com/s/imyukakmnrkk2zuitju2m8akln3ayoct). ## Starter code for Fashion IQ challenge To get started with the framework, install the following dependencies: | 1,146 |
Xiehuaiqi/3D-ResNets-PyTorch-master | ['action recognition'] | ['Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?'] | utils/eval_ucf101.py utils/video_jpg.py opts.py models/resnext.py train.py datasets/hmdb51.py dataset.py models/wide_resnet.py models/densenet.py utils/ucf101_json.py utils.py utils/eval_kinetics.py datasets/activitynet.py models/pre_act_resnet.py temporal_transforms.py test.py utils/kinetics_json.py datasets/ucf101.py utils/eval_hmdb51.py utils/hmdb51_json.py mean.py utils/n_frames_ucf101_hmdb51.py datasets/kinetics.py main.py target_transforms.py model.py utils/n_frames_kinetics.py utils/video_jpg_ucf101_hmdb51.py utils/fps.py validation.py spatial_transforms.py models/resnet.py utils/video_jpg_kinetics.py get_training_set get_test_set get_validation_set get_std get_mean generate_model parse_opts MultiScaleCornerCrop CenterCrop MultiScaleRandomCrop ToTensor Compose Scale Normalize RandomHorizontalFlip CornerCrop ClassLabel VideoID Compose TemporalBeginCrop LoopPadding TemporalCenterCrop TemporalRandomCrop calculate_video_results test calculate_accuracy AverageMeter Logger load_value_file modify_frame_indices get_class_labels load_annotation_data video_loader get_end_t make_dataset ActivityNet accimage_loader get_default_image_loader get_default_video_loader make_untrimmed_dataset pil_loader get_video_names_and_annotations get_class_labels load_annotation_data video_loader make_dataset accimage_loader HMDB51 get_default_image_loader get_default_video_loader pil_loader get_video_names_and_annotations get_class_labels load_annotation_data video_loader make_dataset accimage_loader Kinetics get_default_image_loader get_default_video_loader pil_loader get_video_names_and_annotations UCF101 get_class_labels load_annotation_data video_loader make_dataset accimage_loader get_default_image_loader get_default_video_loader pil_loader get_video_names_and_annotations get_fine_tuning_parameters DenseNet densenet201 densenet169 densenet264 _DenseLayer _DenseBlock _Transition densenet121 conv3x3x3 get_fine_tuning_parameters resnet50 downsample_basic_block resnet152 PreActivationBasicBlock resnet34 resnet200 PreActivationBottleneck resnet18 PreActivationResNet resnet101 conv3x3x3 get_fine_tuning_parameters ResNet downsample_basic_block resnet50 Bottleneck resnet152 resnet34 resnet200 resnet18 resnet10 BasicBlock resnet101 ResNeXtBottleneck conv3x3x3 get_fine_tuning_parameters resnet50 downsample_basic_block ResNeXt resnet152 resnet101 conv3x3x3 get_fine_tuning_parameters WideBottleneck resnet50 downsample_basic_block WideResNet convert_hmdb51_csv_to_activitynet_json get_labels convert_csv_to_dict load_labels convert_kinetics_csv_to_activitynet_json convert_csv_to_dict class_process class_process load_labels convert_ucf101_csv_to_activitynet_json convert_csv_to_dict class_process class_process video_path UCF101 ActivityNet Kinetics annotation_path HMDB51 video_path UCF101 n_val_samples ActivityNet Kinetics annotation_path HMDB51 video_path UCF101 ActivityNet Kinetics annotation_path HMDB51 get_fine_tuning_parameters in_features densenet264 DataParallel ft_begin_index resnet34 resnet152 cuda load_state_dict resnet200 resnet101 resnet18 format resnet50 resnet10 n_finetune_classes Linear load densenet169 densenet201 print pretrain_path densenet121 parse_args set_defaults add_argument ArgumentParser topk size mean stack append range update time format model print Variable cpu AverageMeter size eval softmax calculate_video_results append range enumerate len topk view size 
t eq join format image_loader append exists get_default_image_loader append enumerate append items format append join format items join format deepcopy get_class_labels list load_annotation_data print modify_frame_indices len load_value_file ceil max range append get_video_names_and_annotations sort listdir items join format deepcopy get_class_labels list load_annotation_data print modify_frame_indices len load_value_file get_end_t ceil max range append get_video_names_and_annotations int min DenseNet DenseNet DenseNet DenseNet append format range named_parameters data isinstance FloatTensor Variable zero_ avg_pool3d cuda cat PreActivationResNet PreActivationResNet PreActivationResNet PreActivationResNet PreActivationResNet PreActivationResNet ResNet ResNet ResNet ResNet ResNet ResNet ResNet ResNeXt ResNeXt ResNeXt WideResNet join read_csv append listdir range len append join listdir update get_labels convert_csv_to_dict read_csv update load_labels convert_csv_to_dict join int print sort append listdir split append range update load_labels convert_csv_to_dict format call mkdir splitext exists | # 3D ResNets for Action Recognition ## Update (2018/2/21) Our paper "Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?" is accepted to CVPR2018! We update the paper information. ## Update (2018/01/16) We uploaded some of fine-tuned models on UCF-101 and HMDB-51. * ResNeXt-101 fine-tuned on UCF-101 (split1) * ResNeXt-101 (64 frame inputs) fine-tuned on UCF-101 (split1) * ResNeXt-101 fine-tuned on HMDB-51 (split1) * ResNeXt-101 (64 frame inputs) fine-tuned on HMDB-51 (split1) | 1,147 |
XinChenNottingham/SpatiallyConstrainedDCNN | ['semantic segmentation'] | ['A Spatially Constrained Deep Convolutional Neural Network for Nerve Fiber Segmentation in Corneal Confocal Microscopic Images using Inaccurate Annotations'] | example_train.py nets_tf/reg3d.py core/data_provider.py utils/process_methods.py _custom/mymodel.py utils/data_loader.py nets_tf/unet3d.py core/trainer_tf.py core/learning_rate.py utils/eval_saver.py utils/loss_tf.py models/model.py _custom/myprocessor.py nets_tf/unet2d.py utils/util.py core/data_processor.py nets_tf/cls2d.py utils/eval_methods.py SimpleImageProcessor Processor DataProvider StepDecayLearningRate Trainer SimpleTFModel Model _DownSampling _Residual Classification2D Regression3D _DownSampling _Residual UNet2D _DownSampling _Residual _UpSampling UNet3D _DownSampling _Residual _UpSampling load_file true_positive iou false_negative accuracy auc dice_coefficient false_positive specificity precision softmax mean_squared_error recall true_negative cross_entropy sensitivity save_str save_img spatially_constrained_loss balanced_dice_coefficient balance_weight_map mse dice_coefficient balance_cross_entropy cross_entropy zero_mean one_hot min_max rgb2gray dencecrf median_mean resize2d resize3d channel_check dict_to_str combine_2d_imgs_from_tensor gray2rgb dict_concat dict_list2arr dict_append dict_add recale_array MyModel MyProcessor load strip genfromtxt open exp max mean expand_dims square false_positive true_positive false_negative true_positive false_positive true_negative sum sum concatenate format save makedirs softmax_cross_entropy_with_logits balance_weight_map cross_entropy expand_dims reduce_mean square cast float32 reduce_sum softmax sum cast float32 dice_coefficient exp extract_volume_patches ones reshape reduce_max extract_patches float32 reduce_sum shape cast softmax expand_dims argmax len list sum tile min max mean std median std list min shape zeros max range len reshape list shape dot resize zoom create_pairwise_bilateral reshape transpose float32 copy shape prod create_pairwise_gaussian DenseCRF addPairwiseEnergy inference array setUnaryEnergy len append concatenate array get mean array max min array gray2rgb concatenate reshape recale_array append | # SpatiallyConstrainedDCNN Please cite our papr if you find the code useful. N. Zhang, et al. A Spatially Constrained Deep Convolutional Neural Network for Nerve Fiber Segmentation in Corneal Confocal Microscopic Images Using Inaccurate Annotations, International Symposium on Biomedical Imaging, 2020. In press. Paper preprint is available here: https://arxiv.org/abs/2004.09443 A video presentation is available here: https://youtu.be/yOSiodu9mo8 A CCM segmentation and quantification software is available in : http://www.cs.nott.ac.uk/~pszxc/ Contact [email protected] In this code, we only included a few example CCM images for testing the code."example_train.py" is the main function to train the model. | 1,148 |
XingLiangLondon/Image-Similarity-in-Percentage | ['adversarial attack'] | ['DAmageNet: A Universal Adversarial Dataset'] | inception_v3.py vgg16.py inception_resnet_v2.py imagenet_utils.py music_tagger_crnn.py ResNet50_similarity_Xing.py audio_conv_utils.py VGG16_similarity_Xing.py vgg19.py xception.py resnet50.py mobilenet.py preprocess_input librosa_exists decode_predictions preprocess_input decode_predictions InceptionResNetV2 preprocess_input inception_resnet_block conv2d_bn InceptionV3 conv2d_bn preprocess_input preprocess_input MobileNet _depthwise_conv_block _conv_block relu6 DepthwiseConv2D MusicTaggerCRNN identity_block ResNet50 conv_block calculate_similarity_cosine get_feature_vector_fromPIL calculate_similarity_euclidean VGG16 calculate_similarity_cosine get_feature_vector_fromPIL calculate_similarity_euclidean VGG19 preprocess_input Xception __import__ load int logam hstack logamplitude librosa_exists image_dim_ordering melgram melspectrogram expand_dims append sorted zip get_file load open str conv2d_bn _obtain_input_shape get_file Input get_source_inputs warn Model conv2d_bn load_weights inception_resnet_block range _obtain_input_shape get_file concatenate get_source_inputs warn Model conv2d_bn load_weights convert_all_kernels_in_model Input range _obtain_input_shape get_file set_image_data_format _depthwise_conv_block get_source_inputs _conv_block warn Model load_weights Input int int get_file Model load_weights convert_all_kernels_in_model Input str add str add _obtain_input_shape conv_block get_file get_source_inputs warn Model load_weights convert_all_kernels_in_model get_layer convert_dense_weights_data_format identity_block Input shape reshape predict _obtain_input_shape get_file get_source_inputs warn Model load_weights convert_all_kernels_in_model get_layer convert_dense_weights_data_format Input _obtain_input_shape get_file get_source_inputs warn Model load_weights convert_all_kernels_in_model get_layer convert_dense_weights_data_format Input _obtain_input_shape str get_file set_image_data_format get_source_inputs warn add Model load_weights Input range | # Image Similarity in Percentage % ## Siamese network to compare image similarity in percentage - based on Keras deep learning model (VGG16, ResNet50) & cosine similarity, euclidean similarity ### Accuracy The cosine similarity and euclidean similarity are shown in the table. |image1 | image2| cosine similarity (VGG16) |euclidean similarity (VGG16) | cosine similarity (ResNet50) |euclidean similarity (ResNet50) | | --- | --- | --- | --- | --- | --- | |<img src="images/selfiemonkey1.jpg" width=250 >|<img src="images/selfiemonkey2.png" width=250 >|**84.51%**|0.01326|91.28%|0.05116| | --- | --- | --- | --- | --- | --- | |<img src="images/monkey1.jpg" width=250 >|<img src="images/monkey2.jpg" width=250 >|**63.95%**|0.00980|54.98%|0.02871| | --- | --- | --- | --- | --- | --- | | 1,149 |
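The Image-Similarity-in-Percentage record above describes comparing two images by extracting Keras CNN features (VGG16/ResNet50) and scoring them with cosine and Euclidean similarity. The sketch below illustrates only those two scoring steps; the helper names are hypothetical stand-ins for the repository's `calculate_similarity_cosine`/`calculate_similarity_euclidean`, and the random vectors merely take the place of real pooled CNN features.
```python
import numpy as np

def cosine_similarity(a, b):
    # Cosine of the angle between two feature vectors; values near 1.0 mean near-identical features.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def euclidean_similarity(a, b):
    # Map Euclidean distance into (0, 1]; larger distances give scores closer to 0.
    return float(1.0 / (1.0 + np.linalg.norm(a - b)))

# Stand-ins for pooled VGG16/ResNet50 feature vectors of two images.
feat_a = np.random.rand(2048)
feat_b = np.random.rand(2048)
print(f"cosine similarity: {100 * cosine_similarity(feat_a, feat_b):.2f}%")
print(f"euclidean similarity: {euclidean_similarity(feat_a, feat_b):.5f}")
```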
XiwangLi/object-tracking-SORT-Pytorch | ['multiple object tracking'] | ['Simple Online and Realtime Tracking'] | utils/utils.py utils/datasets.py utils/parse_config.py object_tracking_Sort.py models.py sort.py YOLOLayer create_modules Darknet EmptyLayer detect_image convertMillis KalmanBoxTracker iou Sort convert_bbox_to_z associate_detections_to_trackers convert_x_to_bbox parse_args ImageFolder ListDataset parse_data_config parse_model_config compute_ap build_targets bbox_iou_numpy to_categorical weights_init_normal load_classes bbox_iou non_max_suppression pop int YOLOLayer Sequential ZeroPad2d MaxPool2d add_module Conv2d ModuleList EmptyLayer Upsample append BatchNorm2d LeakyReLU sum enumerate unsqueeze_ Variable Compose min type float round int divmod minimum maximum float sqrt linear_assignment iou concatenate reshape append zeros empty enumerate add_argument ArgumentParser rstrip strip open startswith append split dict strip split open data normal_ __name__ constant_ concatenate size maximum sum range clamp min max minimum eps expand_dims maximum data sort new squeeze size shape unsqueeze cuda unique bbox_iou append max is_cuda cat enumerate int fill_ FloatTensor ones concatenate size range unsqueeze bbox_iou zeros argmax log | # Object Tracking using SORT in Pytorch ## Yolo for object detection in Videos The basic logic for object detection in videos is: 1. extract the frames from video using OpenCV: ```python cap = cv2.VideoCapture(videopath) _, frame = cap.read() frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) ``` 2. apply yolo detection on the extracted frame | 1,150 |
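The object-tracking-SORT-Pytorch record above outlines its per-frame logic: read frames with OpenCV, convert BGR to RGB, then run YOLO detection on each frame. Below is a minimal, self-contained version of that loop; `run_detector` is a placeholder rather than the repository's `detect_image`, and `video.mp4` is a hypothetical input path.
```python
import cv2

def run_detector(rgb_frame):
    # Placeholder: a real pipeline would run YOLO here and return boxes as [x1, y1, x2, y2, score, class].
    return []

cap = cv2.VideoCapture("video.mp4")  # hypothetical input video
while True:
    ok, frame = cap.read()
    if not ok:
        break  # end of stream or unreadable file
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV reads BGR; detectors usually expect RGB
    detections = run_detector(rgb)
    # detections would next be handed to the SORT tracker to associate IDs across frames
cap.release()
```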
Xking8/Nav_ml-agent-v5 | ['unity'] | ['Unity: A General Platform for Intelligent Agents'] | ml-agents/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/tests/trainers/test_trainer_controller.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/tests/envs/test_envs.py ml-agents/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/ppo/__init__.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/learn.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/policy.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents/tests/trainers/test_curriculum.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/curriculum.py ml-agents/mlagents/trainers/ppo/models.py ml-agents/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_input_pb2.py gym-unity/gym_unity/__init__.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_type_proto_pb2.py ml-agents/mlagents/envs/socket_communicator.py gym-unity/setup.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents/tests/trainers/test_ppo.py ml-agents/mlagents/envs/brain.py ml-agents/mlagents/trainers/bc/policy.py ml-agents/tests/trainers/test_bc.py ml-agents/tests/mock_communicator.py ml-agents/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents/mlagents/trainers/models.py ml-agents/mlagents/trainers/__init__.py ml-agents/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/tests/trainers/test_buffer.py ml-agents/mlagents/trainers/trainer.py ml-agents/mlagents/envs/communicator.py ml-agents/setup.py ml-agents/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents/mlagents/envs/__init__.py ml-agents/mlagents/trainers/bc/__init__.py gym-unity/tests/test_gym.py ml-agents/mlagents/envs/exception.py ml-agents/mlagents/envs/environment.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/exception.py ml-agents/tests/trainers/test_meta_curriculum.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents/mlagents/envs/communicator_objects/header_pb2.py UnityGymException UnityEnv test_gym_wrapper test_multi_agent BrainInfo BrainParameters Communicator UnityEnvironment UnityException UnityTimeOutException UnityEnvironmentException UnityActionException RpcCommunicator UnityToExternalServicerImplementation SocketCommunicator UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server BufferException Buffer Curriculum CurriculumError MetaCurriculumError TrainerError main run_training MetaCurriculum LearningModel Policy UnityPolicyException log_histogram 
UnityTrainerException Trainer TrainerController BehavioralCloningModel BCPolicy BehavioralCloningTrainer PPOModel PPOPolicy PPOTrainer get_gae discount_rewards MockCommunicator test_initialization test_reset test_close test_step test_handles_bad_filename test_dc_bc_model test_cc_bc_model test_visual_cc_bc_model test_bc_policy_evaluate dummy_config test_visual_dc_bc_model assert_array test_buffer location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_rl_functions test_ppo_model_dc_vector_curio test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_model_cc_visual_curio test_ppo_model_dc_visual_curio test_ppo_model_cc_vector_curio test_ppo_model_cc_vector test_initialization test_initialize_trainers dummy_bc_config dummy_bad_config dummy_config dummy_start test_load_config sample step MockCommunicator UnityEnv step MockCommunicator UnityEnv method_handlers_generic_handler add_generic_rpc_handlers start_learning int str TrainerController int Process getLogger print start info append randint docopt range max int flush min HistogramProto shape prod histogram add_summary append float sum array Summary size range reversed zeros_like asarray tolist discount_rewards UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator reset_default_graph close reset_default_graph reset_default_graph reset_default_graph reset_default_graph flatten list range len get_batch Buffer assert_array append_update_buffer make_mini_batch append reset_agent array range Curriculum Curriculum Curriculum MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards TrainerController | <img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) of state-of-the-art algorithms to enable game developers and hobbyists to easily train intelligent agents for 2D, 3D and VR/AR games. These trained agents can be | 1,151 |
XuRen20/deepface | ['face swapping'] | ['DeepFaceLab: Integrated, flexible and extensible face-swapping framework'] | models/Model_XSeg/Model.py mainscripts/VideoEd.py merger/MergerScreen/MergerScreen.py mainscripts/dev_misc.py merger/InteractiveMergerSubprocessor.py samplelib/SampleGeneratorImageTemporal.py core/leras/models/__init__.py samplelib/Sample.py core/joblib/__init__.py core/imagelib/estimate_sharpness.py core/joblib/MPFunc.py core/leras/layers/DenseNorm.py core/leras/nn.py samplelib/SampleProcessor.py core/leras/archis/__init__.py core/leras/layers/Conv2D.py core/stdex.py core/imagelib/text.py core/leras/models/PatchDiscriminator.py core/leras/__init__.py models/Model_XSeg/__init__.py core/imagelib/equalize_and_stack_square.py samplelib/__init__.py core/imagelib/sd/draw.py core/randomex.py core/joblib/MPClassFuncOnDemand.py core/leras/optimizers/__init__.py core/leras/initializers/__init__.py mainscripts/Extractor.py mainscripts/Sorter.py core/mathlib/__init__.py core/leras/optimizers/OptimizerBase.py core/imagelib/draw.py samplelib/SampleGeneratorBase.py core/leras/layers/BatchNorm2D.py core/leras/layers/ScaleAdd.py XSegEditor/QCursorDB.py models/__init__.py core/joblib/SubprocessGenerator.py DFLIMG/DFLIMG.py core/leras/layers/__init__.py models/ModelBase.py facelib/FaceEnhancer.py core/joblib/ThisThreadGenerator.py XSegEditor/QIconDB.py core/imagelib/blursharpen.py merger/MergeMasked.py core/cv2ex.py models/Model_Quick96/__init__.py main.py core/leras/archis/ArchiBase.py localization/localization.py facelib/LandmarksProcessor.py core/leras/layers/Dense.py models/Model_SAEHD/__init__.py mainscripts/Merger.py mainscripts/XSegUtil.py core/imagelib/filters.py XSegEditor/QStringDB.py core/leras/initializers/CA.py core/imagelib/__init__.py core/leras/models/ModelBase.py samplelib/SampleGeneratorFaceTemporal.py core/leras/layers/InstanceNorm2D.py facelib/S3FDExtractor.py samplelib/SampleLoader.py samplelib/SampleGeneratorFaceXSeg.py core/qtex/qtex.py DFLIMG/__init__.py core/leras/optimizers/RMSprop.py core/structex.py core/leras/layers/BlurPool.py facelib/__init__.py core/leras/archis/DeepFakeArchi.py core/imagelib/SegIEPolys.py mainscripts/FacesetEnhancer.py merger/FrameInfo.py models/Model_SAEHD/Model.py core/mplib/__init__.py samplelib/SampleGeneratorFacePerson.py merger/MergerScreen/__init__.py core/imagelib/common.py core/imagelib/sd/__init__.py core/leras/layers/AdaIN.py core/leras/layers/LayerBase.py facelib/FANExtractor.py core/leras/models/XSeg.py samplelib/SampleGeneratorFace.py merger/MergeAvatar.py core/pathex.py core/leras/layers/Conv2DTranspose.py core/qtex/QXMainWindow.py merger/__init__.py core/imagelib/morph.py core/joblib/SubprocessorBase.py mainscripts/Trainer.py samplelib/PackedFaceset.py core/mathlib/umeyama.py mainscripts/Util.py core/interact/__init__.py core/leras/layers/FRNorm2D.py core/leras/layers/Saveable.py core/leras/layers/TLU.py facelib/XSegNet.py core/mplib/MPSharedList.py core/leras/models/CodeDiscriminator.py facelib/FaceType.py XSegEditor/XSegEditor.py merger/MergerConfig.py core/leras/ops/__init__.py core/interact/interact.py core/leras/device.py core/imagelib/sd/calc.py core/imagelib/reduce_colors.py samplelib/SampleGeneratorImage.py localization/__init__.py DFLIMG/DFLJPG.py core/qtex/__init__.py core/qtex/QSubprocessor.py core/qtex/QXIconButton.py samplelib/SampleGeneratorFaceCelebAMaskHQ.py core/imagelib/warp.py core/osex.py core/imagelib/color_transfer.py models/Model_Quick96/Model.py process_dev_test process_merge process_videoed_cut_video 
process_train process_faceset_enhancer process_xsegeditor process_xsegapply process_xsegremove process_xsegremovelabels process_videoed_video_from_sequence process_xsegfetch process_util process_extract fixPathAction process_videoed_extract_video process_sort bad_args process_videoed_denoise_image_sequence cv2_imwrite cv2_imread set_process_dpi_aware get_screen_size set_process_lowest_prio get_image_paths move_all_files write_bytes_safe get_first_file_by_stem get_image_unique_filestem_paths get_all_dir_names get_file_paths delete_all_files scantree get_paths get_all_dir_names_startswith random_normal suppress_stdout_stderr struct_unpack LinearMotionBlur blursharpen _scale_array color_transfer color_transfer_idt color_transfer_mkl reinhard_color_transfer lab_image_stats linear_color_transfer channel_hist_match color_transfer_mix color_transfer_sot color_hist_match overlay_alpha_image cut_odd_image normalize_channels draw_polygon draw_rect equalize_and_stack_square compute _calculate_sharpness_metric marziliano_method get_block_contrast _simple_thinning estimate_sharpness is_edge_block sobel apply_random_motion_blur apply_random_rgb_levels apply_random_hsv_shift apply_random_bilinear_resize apply_random_gaussian_blur morphTriangle morph_by_points applyAffineTransform reduce_colors SegIEPolys SegIEPolyType SegIEPoly get_text_image draw_text_lines draw_text _get_pil_font get_draw_text_lines warp_by_params gen_warp_params dist_to_edges random_circle_faded circle_faded InteractBase InteractColab InteractDesktop MPClassFuncOnDemand MPFunc SubprocessGenerator Subprocessor ThisThreadGenerator Devices Device nn ArchiBase DeepFakeArchi CAInitializerSubprocessor initializers AdaIN BatchNorm2D BlurPool Conv2D Conv2DTranspose Dense DenseNorm FRNorm2D InstanceNorm2D LayerBase Saveable ScaleAdd TLU CodeDiscriminator ModelBase PatchDiscriminator XSeg dssim concat average_gv_list resize2d_bilinear flatten rgb_to_lab space_to_depth tf_gradients random_binomial style_loss gelu init_weights tf_get_value upsample2d reshape_4D batch_set_value max_pool average_tensor_list gaussian_blur depth_to_space OptimizerBase RMSprop umeyama get_power_of_two rotationMatrixToEulerAngles polygon_area ArrayFillerSubprocessor MPSharedList IndexHost Index2DHost ListHost DictHostCli DictHost QSubprocessor QDarkPalette QActionEx QSize_to_np QImage_from_np QImage_to_np QPixmap_from_np QPoint_to_np QPoint_from_np QXIconButton QXMainWindow DFLIMG DFLJPG FaceEnhancer FaceType FANExtractor blur_image_hull_mask mirror_landmarks get_face_struct_mask estimate_pitch_yaw_roll convert_98_to_68 expand_eyebrows get_rect_from_landmarks get_transform_mat draw_rect_landmarks get_cmask transform_points estimate_averaged_yaw calc_face_pitch alpha_to_color get_image_eye_mask draw_landmarks get_image_hull_mask S3FDExtractor XSegNet dev_test_68 dev_test1 dev_resave_pngs extract_vggface2_dataset extract_umd_csv dev_segmented_trash process_folder FacesetEnhancerSubprocessor extract_video video_from_sequence denoise_image_sequence cut_video remove_xseg remove_xseg_labels apply_xseg fetch_xseg FrameInfo InteractiveMergerSubprocessor MergeFaceAvatar process_frame_info MergeMasked MergeMaskedFace MergerConfigMasked MergerConfigFaceAvatar MergerConfig ScreenManager ScreenAssets Screen ModelBase import_model QModel SAEHDModel XSegModel PackedFaceset Sample SampleType SampleGeneratorBase SampleGeneratorFace SampleGeneratorFaceCelebAMaskHQ MaskType SampleGeneratorFacePerson SampleGeneratorFaceTemporal SampleGeneratorFaceXSeg SegmentedSampleFilterSubprocessor 
SampleGeneratorImage SampleGeneratorImageTemporal FaceSamplesLoaderSubprocessor SampleLoader SampleProcessor QCursorDB QIconDB QStringDB ImagePreviewSequenceBar QUIConfig QCanvasOperator LoaderQSubprocessor CanvasConfig OpMode QCanvas DragType ViewLock ColorScheme QCanvasControlsLeftBar start QCanvasControlsRightBar MainWindow PTEditMode main set_process_lowest_prio main set_process_lowest_prio unpack_faceset pack save_faceset_metadata log_info restore_faceset_metadata_folder pack_faceset save_faceset_metadata_folder restore_faceset_metadata Path input_dir unpack recover_original_aligned_filename set_process_lowest_prio add_landmarks_debug_images main set_process_lowest_prio main set_process_lowest_prio output_ext fps extract_video output_dir input_file set_process_lowest_prio audio_track_id from_time bitrate to_time cut_video input_file set_process_lowest_prio factor denoise_image_sequence set_process_lowest_prio input_dir video_from_sequence set_process_lowest_prio Path set_process_lowest_prio input_dir process_folder dev_test set_process_lowest_prio input_dir start Path set_process_lowest_prio input_dir model_dir apply_xseg Path input_dir set_process_lowest_prio Path remove_xseg set_process_lowest_prio input_dir remove_xseg_labels Path set_process_lowest_prio input_dir Path fetch_xseg set_process_lowest_prio input_dir print_help exit loader_func asarray bytearray imencode suffix nice SetPriorityClass HANDLE GetCurrentProcess SetProcessDPIAware user32 write_bytes parent name unlink rename exists is_dir scandir str list scandir any Path scantree exists append remove get_image_paths name stem set add verbose_print_func Path exists Path exists Path exists str list lower scandir Path startswith append exists str sorted list path lower scandir Path exists name Path rename get_file_paths unlink Path get_file_paths normal empty prod range calcsize warpAffine ones getRotationMatrix2D zeros sum medianBlur addWeighted ones zeros GaussianBlur max dtype reshape astype copy argsort shape bilateralFilter fill empty range eps T clip reshape eig dot shape sqrt cov mean diag T reshape min astype float32 empty_like solve dot shape histogram interp max range _scale_array uint8 astype float32 merge lab_image_stats COLOR_LAB2BGR cvtColor split T reshape transpose mean dot eigh eye cholesky split min max float64 astype shape unique interp ravel dtype astype shape channel_hist_match range uint8 astype float32 COLOR_BGR2LAB color_transfer_sot COLOR_LAB2BGR cvtColor uint8 color_transfer_idt color_transfer_mkl astype float32 reinhard_color_transfer linear_color_transfer color_transfer_sot clip shape repeat len shape shape range tuple line range len draw_polygon concatenate shape resize expand_dims max enumerate T convolve square mean sqrt array shape zeros float64 marziliano_method astype canny sobel gradient atan2 shape any zeros round range int exp slice get_block_contrast shape flipud round zeros is_edge_block rot90 range cvtColor COLOR_BGR2GRAY rand random clip array COLOR_HSV2BGR random merge COLOR_BGR2HSV randint clip cvtColor split LinearMotionBlur randint random randint GaussianBlur random int rand random shape resize INTER_LINEAR float32 getAffineTransform float32 fillConvexPoly shape boundingRect int32 applyAffineTransform zeros expand_dims array shape morphTriangle zeros simplices fromarray uint8 convert astype COLOR_RGB2BGR array cvtColor truetype asarray Draw get_default_ttf_font_name concatenate text new _get_pil_font shape clip draw_text range len draw_text_lines zeros shape T random astype copy 
float32 getRotationMatrix2D dict uniform linspace random_normal warpAffine remap norm clip einsum concatenate norm reshape empty abs clip max random randint initializer inputs append batch_set_value run gradients expand_dims __enter__ __exit__ enumerate reduce_mean __enter__ __exit__ concat pow tanh sqrt pi as_list reshape tile transpose value resize transpose reshape transpose randint float32 pad make_kernel tile depthwise_conv2d gaussian_blur dtype constant arange reshape float32 square reduce_mean reducer cast softmax tile as_list reshape transpose as_list reshape transpose constant reshape multiply matmul cast svd T ones matrix_rank mean dot eye sum diag sqrt atan2 shape Format_Grayscale8 Format_BGR888 Format_ARGB32 height reshape convertToFormat width constBits setsize range squeeze invertAffineTransform shape transform expand_dims get norm getAffineTransform polygon_area astype float32 transform_points sqrt estimate_averaged_yaw array transform_points FULL_NO_ALIGN get_transform_mat float32 array copy concatenate expand_eyebrows fillConvexPoly convexHull zeros int getStructuringElement astype fillConvexPoly MORPH_ELLIPSE convexHull dilate zeros GaussianBlur shape zeros concatenate process copy blend alpha_to_color zeros get_image_hull_mask gdf max clip int blur getStructuringElement min erode argwhere MORPH_ELLIPSE expand_dims copy draw_landmarks zeros expand_eyebrows concatenate polylines tuple shape get_image_hull_mask array circle get_transform_mat draw_rect transform_points draw_polygon draw_landmarks array array rotationMatrixToEulerAngles concatenate astype float32 pi solvePnP zeros array clip get pop get_image_paths parent log_info name stem progress_bar_generator get_all_dir_names Path mkdir run fromString split cv2_imread Path normalize_channels exists input_bool str log_info name stem append get_image_paths get_rect_from_landmarks unlink mkdir parent cv2_imwrite progress_bar_generator read_text split get str get_image_paths parent log_info name len unlink Path mkdir split log_err run range exists fromString input_bool get_image_paths progress_bar_generator get_all_dir_names Path x get_image_paths cv2_imwrite progress_bar_generator cv2_imread Path get_image_paths parent name stem rename Path mkdir append input_bool join get_image_paths log_info parent name copy unlink rmtree mkdir run update str get_image_paths parent input_str stem output get_first_file_by_stem unlink input_int mkdir Path log_err input run str suffix parent input_str stem overwrite_output input_int log_err Path input max run update str suffix parent progress_bar_generator output input_int rename log_err Path run clip enumerate suffix input_str wait input_int Path max input_bool str stem input update run_async get_image_paths close mkdir parent overwrite_output get_first_file_by_stem log_err probe load extract initialize get_image_paths log_info set_xseg_mask progress_bar_generator astype float32 get_resolution ask_choose_device shape XSegNet resize save load str get_image_paths log_info parent name has_polys progress_bar_generator copy get_seg_ie_polys mkdir load get_image_paths log_info set_xseg_mask input_str progress_bar_generator has_xseg_mask save load get_image_paths log_info input_str has_seg_ie_polys progress_bar_generator save set_seg_ie_polys warpAffine get_transform_mat astype float32 cv2_imread normalize_channels filename clip sharpen_func sharpen_mode concatenate predictor_func add_source_image process_frame_info temporal_face_count append range sharpen_amount predictor_func color_transfer_mkl 
motion_power bicubic_degrade_power motion_blur_power linear_color_transfer color_transfer_mix boundingRect resize reduce_colors max clip face_enhancer_func hist_match_threshold medianBlur super_resolution_power WARP_INVERSE_MAP ones LinearMotionBlur shape pad blur_mask_modifier image_denoise_power masked_hist_match blursharpen range color_hist_match BORDER_TRANSPARENT warpAffine sharpen_mode xseg_256_extract_func seamlessClone color_transfer_idt astype copy reinhard_color_transfer empty_like motion_deg INTER_CUBIC MORPH_ELLIPSE color_transfer_sot dilate GaussianBlur get_image_hull_mask NORMAL_CLONE uint8 int erode_mask_modifier getStructuringElement get_transform_mat float32 nan_to_num erode argwhere blursharpen_amount color_degrade_power landmarks_list concatenate astype float32 cv2_imread shape normalize_channels MergeMaskedFace filepath clip enumerate str parent cv2_imread locals __import__ globals dict setApplicationName setPalette QDarkPalette Path show str initialize log_info setWindowIcon addApplicationFont AA_EnableHighDpiScaling setStyle setFont gettempdir setAttribute QApplication path_contains app_icon MainWindow exec_ parent QFont raise_ AA_UseHighDpiPixmaps | <table align="center" border="0"><tr><td align="center" width="9999"> # DeepFaceLab <a href="https://arxiv.org/abs/2005.05535"> <img src="https://static.arxiv.org/static/browse/0.3.0/images/icons/favicon.ico" width=14></img> https://arxiv.org/abs/2005.05535</a> ### the leading software for creating deepfakes <img src="doc/DFL_welcome.png" align="center"> </td></tr> <tr><td align="center" width="9999"> <p align="center"> | 1,152 |
XuezheMax/apollo | ['stochastic optimization'] | ['Apollo: An Adaptive Parameter-wise Diagonal Quasi-Newton Method for Nonconvex Stochastic Optimization'] | classification/imagenet.py optim/__init__.py classification/cifar.py optim/adabelief.py utils/utils.py utils/__init__.py optim/lr_scheduler.py language_model/model_word_ada/adaptive.py optim/adahessian.py language_model/train_1bw.py language_model/model_word_ada/lm.py optim/apollo.py language_model/pre_word_ada/encode_data2folder.py optim/radamw.py language_model/model_word_ada/dataset.py init_dataloader setup logging ResNet eval main parse_args train get_optimizer setup init_dataloader logging single_process_main is_distributed is_master ErrorHandler eval slurm init_distributed distributed main parse_args train get_optimizer run get_optimizer evaluate logging AdaptiveLogSoftmaxWithLoss LargeDataset EvalDataset LM encode_dataset2file encode_dataset AdaBelief AdaHessian Apollo MultiStepLR _LRScheduler ExponentialScheduler InverseSquareRootScheduler RAdamW clip_grad_norm_ clip_param_grad_norm_ AverageMeter accuracy add_argument ArgumentParser print flush RAdamW format Apollo ExponentialLR AdamW AdaBelief SGD MultiStepLR AdaHessian CosineAnnealingLR logging depth dataset cuda log run seed str open set_device to CIFAR100 format ResNet CIFAR10 model_path is_available manual_seed join makedirs data_path len DataLoader data model logging zero_grad device cuda log count to update format size avg item flush enumerate time criterion backward AverageMeter write accuracy empty_cache step len data update format criterion model logging size AverageMeter accuracy avg item device empty_cache to cuda log rebound logging weight_decay_type opt_h1 weight_decay last_lr opt log get_optimizer decay_rate open run setup step lr_decay append sum CrossEntropyLoss range eps format dump milestone lr opt_h2 model_path join init_dataloader warmup_updates parameters train epochs init_lr print set_device format init_process_group is_master ImageFolder resnet34 rank resnet18 recover resnext50_32x4d DistributedSampler is_distributed is_master rank rebound logging weight_decay_type is_master opt_h1 weight_decay last_lr save opt log get_optimizer decay_rate open run setup step lr_decay rank load_state_dict append sum CrossEntropyLoss range recover eps checkpoint_name format dump milestone is_distributed lr opt_h2 model_path load join init_dataloader warmup_updates print set_epoch parameters init_distributed train epochs local_rank init_lr reduce is_master div rank world_size is_distributed rank module is_distributed pop int str format batch_size print check_output single_process_main rank environ vars local_rank pop str Process pid join batch_size add_child get_context SimpleQueue single_process_main ErrorHandler start environ vars range append parse_args distributed slurm single_process_main Adam exp logging reshape size eval lm_model device to log get_tqdm list walk startswith tqdm list map tqdm append walk split topk size t eq mul_ expand_as append sum max list norm isinstance filter mul_ append Tensor float max | [](https://opensource.org/licenses/Apache-2.0) <h1 align="center">Apollo</h1> <h5 align="center">Apollo: An Adaptive Parameter-wise Diagonal Quasi-Newton Method for Nonconvex Stochastic Optimization</h5> This is the Pytorch implementation for [Apollo: An Adaptive Parameter-wise Diagonal Quasi-Newton Method for Nonconvex Stochastic Optimization](https://arxiv.org/abs/2009.13586) ## Table of Contents - [Requirements](#requirements) - 
[Installation](#installation) - [Notes](#notes) - [Experiments](#experimental-results) - [Discussion](#discussion) | 1,153 |
YH0517/CLCI_Net | ['medical image segmentation', 'lesion segmentation', 'semantic segmentation'] | ['CLCI-Net: Cross-Level fusion and Context Inference Networks for Lesion Segmentation of Chronic Stroke'] | custom_layer.py CLCI_Net.py dice_coef CLF_ASPP CLCI_Net dice_coef_loss flatten sum conv_lstm conv_1_init CLF_ASPP Model conv_2_init Input concat_pool pow dilate_conv conv_1_init | # CLCI-Net
[CLCI-Net: Cross-Level Fusion and Context Inference Networks for Lesion Segmentation of Chronic Stroke (MICCAI 2019)](https://link.springer.com/chapter/10.1007/978-3-030-32248-9_30)
# Authors
Hao Yang, Weijian Huang, Kehan Qi, Cheng Li, Xinfeng Liu, Meiyun Wang, Hairong Zheng, and Shanshan Wang
# Project Overview
## 1. Functionality
CLCI-Net is used to perform image segmentation on the ATLAS dataset.
## 2. Performance
|Dice |Precision|Recall|VOE|RVD|
|-----|-----|-----|-----|-----|
| 1,154 |
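The CLCI_Net record above lists `dice_coef` and `dice_coef_loss` among its Keras functions. The block below is a sketch of a common Keras-backend formulation of that metric/loss pair; the `smooth` constant and exact reductions are assumptions, not necessarily the repository's own implementation.
```python
from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice over flattened masks; `smooth` keeps the ratio defined when both masks are empty.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Maximizing Dice overlap is equivalent to minimizing 1 - Dice.
    return 1.0 - dice_coef(y_true, y_pred)
```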
YafeiWu/DIEN | ['click through rate prediction'] | ['Deep Interest Network for Click-Through Rate Prediction', 'Deep Interest Evolution Network for Click-Through Rate Prediction'] | script/model.py script/split_by_user.py script/utils.py script/rnn.py script/generate_voc.py script/local_aggretor.py script/shuffle.py script/Dice.py script/process_data.py script/data_iterator.py script/fix_iteminfo.py script/train.py fopen load_dict unicode_to_utf8 DataIterator dice parametric_relu Model_DIN_V2_Gru_att_Gru Model_DIN_V2_Gru_Gru_att Model Model_DNN Model_DIN_V2_Gru_Vec_attGru Model_WideDeep Model_DIN_V2_Gru_Vec_attGru_Neg Model_DIN_V2_Gru_QA_attGru Model_PNN Model_DIN split_test process_meta manual_join process_reviews dynamic_rnn _dynamic_rnn_loop _best_effort_input_batch_size raw_rnn static_rnn _infer_state_dtype static_bidirectional_rnn bidirectional_dynamic_rnn static_state_saving_rnn _rnn_step _transpose_batch_time _reverse_seq main eval prepare_data train test prelu VecAttGRUCell din_fcn_attention QAAttGRUCell self_all_attention din_attention self_attention calc_auc din_fcn_shine attention endswith reshape square sigmoid sqrt reduce_mean abs relu get_variable print eval open print str eval open join sorted print strip len tqdm open append randint split print strip seek open get_shape concatenate transpose concat rank set_shape shape value all is_sequence get_shape _copy_some_through call_cell assert_same_structure flatten set_shape zip pack_sequence_as cond get_shape tuple merge_with unknown_shape stack set_shape reverse_sequence unstack zip append _reverse flatten tuple identity to_int32 value constant output_size _best_effort_input_batch_size tuple while_loop reduce_max _concat flatten shape set_shape zip pack_sequence_as reduce_min state_size is_sequence static_rnn state flatten pack_sequence_as state_size flatten tuple pack_sequence_as _reverse_seq remove seek print strip close readlines shuffle mkstemp realpath split TemporaryFile open astype array zip append max enumerate len calculate prepare_data tolist save zip append calc_auc str GPUOptions str GPUOptions append sorted ones_like isinstance Variable reshape concat transpose where expand_dims shape softmax random_normal equal tensordot dense ones_like isinstance print reshape concat transpose where matmul shape softmax tile expand_dims equal dense ones_like prelu isinstance reshape concat transpose where matmul shape softmax tile expand_dims equal while_loop transpose TensorArray stack expand_dims while_loop transpose TensorArray stack expand_dims dense ones_like prelu isinstance reshape concat transpose shape tile equal | # Deep Interest Evolution Network for Click-Through Rate Prediction https://arxiv.org/abs/1809.03672 ## prepare data ### method 1 You can get the data from amazon website and process it using the script ``` sh prepare_data.sh ``` ### method 2 (recommended) Because getting and processing the data is time consuming,so we had processed it and upload it for you. You can unzip it to use directly. | 1,155 |
Yale-LILY/SummEval | ['text summarization'] | ['SummEval: Re-evaluating Summarization Evaluation'] | evaluation/summ_eval/sentence_transformers/readers/NLIDataReader.py evaluation/summ_eval/mover_score_metric.py evaluation/summ_eval/s3_metric.py evaluation/summ_eval/data_stats_metric.py evaluation/summ_eval/sentence_transformers/models/__init__.py evaluation/summ_eval/sentence_transformers/models/BoW.py evaluation/summ_eval/sentence_transformers/readers/__init__.py evaluation/summ_eval/sentence_transformers/losses/BatchHardTripletLoss.py evaluation/summ_eval/sentence_transformers/losses/SoftmaxLoss.py evaluation/summ_eval/sentence_transformers/models/BERT.py evaluation/summ_eval/blanc_metric.py evaluation/tests/test_bert_score.py evaluation/tests/test_chrfpp.py evaluation/summ_eval/cider_utils.py data_processing/pair_data.py evaluation/summ_eval/sentence_transformers/datasets.py evaluation/summ_eval/sentence_transformers/evaluation/__init__.py evaluation/summ_eval/sentence_transformers/evaluation/BinaryEmbeddingSimilarityEvaluator.py evaluation/summ_eval/sentence_transformers/models/tokenizer/__init__.py evaluation/summ_eval/sentence_transformers/models/XLNet.py evaluation/summ_eval/sentence_transformers/readers/TripletReader.py evaluation/summ_eval/sentence_transformers/evaluation/SimilarityFunction.py evaluation/tests/test_mover_score.py evaluation/summ_eval/sentence_transformers/evaluation/SequentialEvaluator.py evaluation/summ_eval/cider_metric.py evaluation/summ_eval/sentence_transformers/util.py evaluation/summ_eval/rouge_metric.py evaluation/summ_eval/sentence_transformers/losses/CosineSimilarityLoss.py evaluation/summ_eval/sentence_transformers/evaluation/LabelAccuracyEvaluator.py evaluation/summ_eval/syntactic_metric.py evaluation/tests/test_meteor.py evaluation/summ_eval/sentence_transformers/LoggingHandler.py evaluation/summ_eval/data_stats_utils.py evaluation/tests/test_rouge.py evaluation/tests/test_cider.py evaluation/summ_eval/sentence_transformers/evaluation/EmbeddingSimilarityEvaluator.py evaluation/summ_eval/metric.py evaluation/tests/test_sentence_movers.py evaluation/summ_eval/sentence_movers_utils.py evaluation/summ_eval/sentence_transformers/models/tokenizer/WordTokenizer.py evaluation/summ_eval/chrfpp_metric.py evaluation/tests/test_data_stats.py evaluation/summ_eval/meteor_metric.py evaluation/summ_eval/sentence_transformers/losses/test_batch_hard_triplet_loss.py evaluation/tests/test_summaqa.py evaluation/summ_eval/syntactic_utils.py evaluation/summ_eval/sentence_transformers/models/WordWeights.py evaluation/summ_eval/sentence_transformers/evaluation/SentenceEvaluator.py evaluation/summ_eval/sentence_transformers/models/tokenizer/PhraseTokenizer.py evaluation/summ_eval/sentence_transformers/models/WordEmbeddings.py evaluation/summ_eval/sentence_transformers/losses/TripletLoss.py evaluation/summ_eval/sentence_movers_metric.py evaluation/summ_eval/sentence_transformers/__init__.py evaluation/summ_eval/sentence_transformers/losses/MultipleNegativesRankingLoss.py evaluation/tests/test_syntactic.py evaluation/summ_eval/calc_scores.py evaluation/summ_eval/sentence_transformers/models/tokenizer/WhitespaceTokenizer.py evaluation/summ_eval/sentence_transformers/models/LSTM.py evaluation/tests/test_bleu.py evaluation/summ_eval/sentence_transformers/models/CNN.py evaluation/summ_eval/supert_metric.py evaluation/summ_eval/sentence_transformers/models/AbstractModel.py evaluation/summ_eval/bleu_metric.py evaluation/summ_eval/s3_utils.py 
evaluation/summ_eval/sentence_transformers/data_samplers.py evaluation/summ_eval/rouge_we_metric.py evaluation/summ_eval/summa_qa_metric.py evaluation/summ_eval/sentence_transformers/models/Pooling.py evaluation/summ_eval/test_util.py evaluation/summ_eval/sentence_transformers/readers/InputExample.py evaluation/setup.py evaluation/summ_eval/sentence_transformers/SentenceTransformer.py evaluation/summ_eval/summa_qa_utils.py evaluation/summ_eval/bert_score_metric.py evaluation/summ_eval/sentence_transformers/models/RoBERTa.py evaluation/summ_eval/sentence_transformers/readers/LabelSentenceReader.py evaluation/tests/test_rouge_we.py evaluation/summ_eval/supert_utils.py evaluation/summ_eval/sentence_transformers/readers/STSDataReader.py evaluation/summ_eval/sentence_transformers/models/Dense.py evaluation/summ_eval/sentence_transformers/evaluation/TripletEvaluator.py evaluation/summ_eval/sentence_transformers/losses/__init__.py parse_story_file annotation_pairing output_pairing BertScoreMetric BlancMetric BleuMetric cli_main ChrfppMetric CiderMetric precook CiderScorer cook_test cook_refs DataStatsMetric find_ngrams Fragments normalize MeteorMetric enc dec Metric MoverScoreMetric RougeMetric RougeWeMetric S3Metric get_all_content_words_stem is_ngram_content load_embeddings _get_embedding S3 _counter_overlap _soft_overlap JS_Divergence compute_tf _convert_to_numpy extract_feature load_model rouge_n_we rouge_n _safe_f1 JS_eval _find_closest _safe_divide compute_average_freq _ngram_counts compute_word_freq pre_process_summary KL_Divergence normalize_word _ngrams _has_embedding get_all_content_words pre_process_summary_stem _ngram_count SentenceMoversMetric get_embeddings calc_smd tokenize_texts get_sim print_score get_weights get_sent_embedding SummaQAMetric QA_Bert normalize_answer evaluate_corpus f1_score QG_masked QA_Metric SupertMetric get_doc_simtop get_sbert_score get_other_weights graph_centrality_weight get_ref_vecs get_token_vecs normaliseList graph_weights build_pseudo_ref get_subgraph parse_refs get_idf_weights get_sim_metric kill_stopwords get_weights get_global_graph_weights get_top_sim_weights parse_documents get_top_weights parse_docs get_all_token_vecs get_human_score get_indep_cluster_weights get_global_cluster_weights get_indep_graph_weights SyntacticMetric get_stats division SentencesDataset SentenceLabelDataset LabelSampler LoggingHandler SentenceTransformer http_get fullname import_from_string batch_to_device BinaryEmbeddingSimilarityEvaluator EmbeddingSimilarityEvaluator LabelAccuracyEvaluator SentenceEvaluator SequentialEvaluator SimilarityFunction TripletEvaluator BatchHardTripletLoss CosineSimilarityLoss MultipleNegativesRankingLoss SoftmaxLoss test_batch_hard_triplet_loss test_anchor_negative_triplet_mask pairwise_distance_np test_pairwise_distances_are_positive test_triplet_mask test_batch_all_triplet_loss test_anchor_positive_triplet_mask test_simple_batch_all_triplet_loss test_pairwise_distances TripletDistanceMetric TripletLoss AbstractModel BERT BoW CNN Dense LSTM Pooling RoBERTa WordEmbeddings WordWeights XLNet PhraseTokenizer WhitespaceTokenizer WordTokenizer InputExample LabelSentenceReader NLIDataReader STSDataReader TripletReader TestScore Test2Score TestScore TestScore TestScore TestScore TestScore TestScore TestScore TestScore TestScore TestScore join filter join replace print dirname data_annotations story_files makedirs join listdir replace print tqdm model_outputs dirname aligned_data story_files makedirs RougeWeMetric MoverScoreMetric BlancMetric 
ArgumentParser SupertMetric RegexpTokenizer list defaultdict S3Metric add append parse_args SummaQAMetric BleuMetric range evaluate_batch update replace parse_config_file config_file set CiderMetric MeteorMetric SentenceMoversMetric zip load items join isinstance aggregate BertScoreMetric print add_argument SnowballStemmer SyntacticMetric dict RougeMetric DataStatsMetric ChrfppMetric len defaultdict tuple split range len _namedtuple map extend isinstance split get list dict compute_word_freq get_all_content_words len isnan items get keys set compute_average_freq isnan KL_Divergence pre_process_summary list isinstance extend map tokenize split get_all_content_words_stem deque append iteritems _safe_divide pre_process_summary_stem _ngram_counts _ngram_count len append append iteritems _get_embedding sorted iteritems _find_closest pre_process_summary_stem _ngram_counts _ngram_count len load join open sorted extract_feature load_model array append keys rouge_n_we JS_eval rouge_n get_embeddings get_weights exp WMD list isinstance append sents range len average embed_batch append get_sent_embedding get_vector sum max range enumerate mean list array pop Counter array append sum keys range len str strip write len close mean range open get_embeddings str exp ElmoEmbedder print tokenize_texts readlines close len mean WMD append print_score get_weights range open Counter split sum values len compute get_questions len zip append QG_masked QA_Metric enumerate min max format cosine_similarity mean append set reshape matmul mean dot get_idf_weights array startswith cosine_similarity sum max append encode enumerate len encode format sent_tokenize append enumerate get_top_sim_weights int arange get_top_weights get_global_cluster_weights get_indep_graph_weights shuffle get_other_weights startswith float get_indep_cluster_weights get_global_graph_weights split join print parse_docs parse_refs get_weights len range get_doc_simtop min set cosine_similarity range len append range len Graph add_edge range add_node cosine_similarity any get_subgraph len cosine_similarity graph_centrality_weight list graph_weights extend set array range len graph_weights len print cluster_centers_ len index set append sum array fit print cluster_centers_ fit index append sum len encode range len append kill_stopwords set list cosine_similarity len set mean array append max range enumerate list words extend set row_stack encode enumerate deepcopy list words extend set row_stack enumerate int sentence replace zip annotate tregrex division append to range len get update format print status_code len write close tqdm raise_for_status iter_content open __module__ rsplit import_module triu_indices norm T diagonal zeros diag astype float32 pairwise_distance_np from_numpy _pairwise_distances from_numpy _pairwise_distances astype float32 astype float32 from_numpy zeros _get_triplet_mask range astype float32 from_numpy _get_anchor_positive_triplet_mask zeros range astype _get_anchor_negative_triplet_mask float32 from_numpy zeros range batch_all_triplet_loss astype float32 batch_all_triplet_loss astype maximum float32 pairwise_distance_np from_numpy range min astype float32 pairwise_distance_np maximum from_numpy batch_hard_triplet_loss max range | # Summarization Repository Authors: [Alex Fabbri*](http://alex-fabbri.github.io/), [Wojciech Kryściński*](https://twitter.com/iam_wkr), [Bryan McCann](https://bmccann.github.io/), [Caiming Xiong](http://cmxiong.com/), [Richard Socher](https://www.socher.org/), and [Dragomir 
Radev](http://www.cs.yale.edu/homes/radev/)<br/> This project is a collaboration work between [Yale LILY Lab](https://yale-lily.github.io/) and [Salesforce Research](https://einstein.ai/). <br/><br/> <p align="center"> <img src="https://raw.githubusercontent.com/Yale-LILY/SummEval/master/assets/logo-lily.png" height="100" alt="LILY Logo" style="padding-right:160"> <img src="https://raw.githubusercontent.com/Yale-LILY/SummEval/master/assets/logo-salesforce.svg" height="100" alt="Salesforce Logo"> </p> <sub><sup>\* - Equal contributions from authors</sup></sub> ## Table of Contents | 1,156 |
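The SummEval dependency listing above includes n-gram-based scorers such as `rouge_n`, `_ngram_counts`, and `_ngram_count`. As a rough illustration of what such helpers compute, the sketch below is a plain ROUGE-N-style n-gram recall in standard-library Python; it is a simplified stand-in (no stemming, stopword handling, or multi-reference aggregation), not the package's actual metric code.

```python
from collections import Counter

def rouge_n_recall(candidate_tokens, reference_tokens, n=2):
    """Simplified ROUGE-N recall: overlapping n-grams / reference n-grams."""
    def ngrams(tokens):
        return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))
    cand, ref = ngrams(candidate_tokens), ngrams(reference_tokens)
    overlap = sum((cand & ref).values())
    return overlap / max(sum(ref.values()), 1)

# e.g. rouge_n_recall("the cat sat".split(), "the cat sat down".split(), n=2) -> 2/3
```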
YalongLiu/Refined-Segmentation-R-CNN | ['medical image segmentation', 'lesion segmentation', 'semantic segmentation'] | ['Refined-Segmentation R-CNN: A Two-stage Convolutional Neural Network for Punctate White Matter Lesion Segmentation in Preterm Infants'] | models/model_enhancedrpn_enlargeroi_segnet_crf.py mrcnn/config_gpu_environ.py mrcnn/config.py mrcnn/self_utils.py load_data/load_data_pwml.py mrcnn/seg_eval_utils.py mrcnn/crf.py mrcnn/parallel_model.py configs/pwml/config_maskrcnn_enhancedrpn_enlargeroi_segnet_crf.py main.py mrcnn/visualize.py mrcnn/utils.py ModelConfig TargetDataset fpn_classifier_graph compose_image_meta rpn_bbox_loss_graph norm_boxes_graph DefineModel compute_backbone_shapes rpn_class_loss_graph log DetectionTargetLayer trim_zeros_graph log2_graph parse_image_meta parse_image_meta_graph data_generator rpn_graph identity_block BatchNorm build_fpn_mask_graph load_image_gt build_rpn_targets resnet_graph unmold_image PyramidROIAlign apply_box_deltas_graph denorm_boxes_graph generate_random_rois detection_targets_graph build_detection_targets overlaps_graph mrcnn_bbox_loss_graph conv_block batch_pack_graph ProposalLayer smooth_l1_loss clip_boxes_graph mrcnn_class_loss_graph mrcnn_mask_loss_graph mold_image build_rpn_model DetectionLayer refine_detections_graph Config crf_from_sigmoid ParallelModel build_model OverlapMeasures specificity get_one_sample SurfaceDistanceMeasures evaluation_sample subplot_image_mask post_process show_image_pred_gt merge_image_pred_gt subplot_images crop_to_skull merge_patches enlarge_masks_save generate_features epoches_summary detect_dicoms evaluate_folder sementic2instance_mask mask_pad_off evaluate_sum otsu_mask enlarge_mask mask_pad evaluate detect_targets generate_patchs compute_ap norm_boxes compute_recall apply_box_deltas compute_overlaps compute_iou resize resize_image box_refinement_graph generate_pyramid_anchors mold_mask generate_anchors compute_ap_range compute_overlaps_masks denorm_boxes unmold_mask download_trained_weights non_max_suppression minimize_mask resize_mask extract_bboxes trim_zeros compute_matches rescale_mask batch_slice expand_mask box_refinement Dataset display_differences draw_box display_images draw_rois draw_boxes apply_mask random_colors display_instances display_table display_weight_stats plot_overlaps plot_precision_recall display_top_masks int round array str ljust print BACKBONE callable str str conv_block identity_block range stack minimum concat maximum set_shape split minimum reshape maximum tile expand_dims split concat reduce_max boolean_mask MASK_SHAPE crop_and_resize gather box_refinement_graph round trim_zeros_graph ROI_POSITIVE_RATIO multiply transpose squeeze pad cast expand_dims range USE_MINI_MASK overlaps_graph cond minimum int TRAIN_ROIS_PER_IMAGE float32 greater divide maximum int32 split minimum apply_box_deltas_graph reshape clip_boxes_graph concat gather map_fn DETECTION_MAX_INSTANCES stack gather_nd DETECTION_MIN_CONFIDENCE pad set_intersection expand_dims argmax BBOX_STD_DEV Input rpn_graph int_shape less abs cast switch constant not_equal squeeze where mean sparse_categorical_crossentropy gather_nd cast int32 equal IMAGES_PER_GPU batch_pack_graph switch constant smooth_l1_loss squeeze where mean gather_nd cast int32 sum equal reduce_sum sparse_softmax_cross_entropy_with_logits cast gather argmax switch constant reshape smooth_l1_loss mean int64 stack cast gather_nd gather switch constant reshape transpose mean shape int64 stack cast gather_nd gather binary_crossentropy 
compose_image_meta random randint resize resize_image round list load_mask shape MINI_MASK_SHAPE load_image expand_dims augment_image range minimize_mask astype to_deterministic int uint8 extract_bboxes rescale_mask maximum dstack zeros bool fliplr int ROI_POSITIVE_RATIO concatenate resize astype TRAIN_ROIS_PER_IMAGE compute_iou choice MASK_SHAPE int32 box_refinement USE_MINI_MASK zeros argmax range sum zip ones compute_overlaps choice RPN_TRAIN_ANCHORS_PER_IMAGE zeros argmax amax len int sort min hstack randint zeros max range split image_ids arange IMAGE_SHAPE compute_backbone_shapes RPN_ANCHOR_RATIOS generate_pyramid_anchors BACKBONE_STRIDES MAX_GT_INSTANCES shape expand_dims load_image_gt build_rpn_targets astype shuffle copy choice generate_random_rois build_detection_targets RPN_ANCHOR_SCALES mold_image RPN_ANCHOR_STRIDE float32 extend zeros len list array boolean_mask reduce_sum cast bool abs append range constant concat float32 cast split constant concat float32 cast split array setUnaryEnergy addPairwiseGaussian ones reshape addPairwiseBilateral ascontiguousarray shape vstack unary_from_softmax inference expand_dims round DenseCRF2D reset_default_graph Input range range logical_or sum logical_and logical_not median GetVolumeSimilarity LabelContour LabelOverlapMeasuresImageFilter GetFalseNegativeError Cast max Abs Execute GetHausdorffDistance GetArrayViewFromImage list GetJaccardCoefficient sitkFloat32 set_printoptions HausdorffDistanceImageFilter mean SignedMaurerDistanceMap items int GetSum GetFalsePositiveError StatisticsImageFilter GetImageFromArray specificity GetDiceCoefficient zeros std len str sorted uint8 hstack where shape vstack swapaxes append imread range len zeros uint8 hstack vstack shape range int list extract_bboxes sementic2instance_mask shape append randint round zeros range where len show subplot axis tight_layout imshow title figure range len show subplot imshow random_colors apply_mask figure apply_mask logical_and logical_not threshold ones logical_and THRESH_OTSU erode dilate list where shape label range zeros list zoom zeros maximum where shape detect stack resize_image range len uint8 rescale_mask shape stack resize_mask resize_image max zeros uint8 image_ids imsave print zeros squeeze len load_image maximum resize_image tqdm detect mkdir append PRED_DIR range enumerate makedirs str image_ids GEN_DIR load_mask imsave rescale_mask maximum resize_image tqdm detect mkdir save append load_image range enumerate len str uint8 image_ids print hstack where tqdm NAME evaluation_sample sleep append resize_image imread enumerate len str uint8 list sleep print hstack where tqdm shape evaluation_sample append resize_image imread exists enumerate len image_ids print hstack append read_csv len list hstack vstack read_csv print enlarge_mask mkdir imread range imsave len show uint8 merge_image_pred_gt print ones enlarge_mask hstack axis shape imshow figure imread range len crop_to_skull save resize round exists str logical_and shape pad ceil range rescale_intensity concatenate otsu_mask copy swapaxes enumerate load uint8 print divide maximum tqdm Nifti1Image read_file detect_targets zeros pixel_array len zeros array range minimum maximum zeros range compute_iou T astype float32 dot sum astype delete float32 compute_iou append astype float32 stack cast float32 log astype float32 log dtype imresize min pad max astype resize zeros bool range astype resize zeros bool range zeros bool astype resize arange concatenate reshape flatten sqrt meshgrid array append generate_anchors 
range len ones trim_zeros compute_overlaps_masks range len arange concatenate cumsum compute_matches astype float32 maximum sum range len compute_ap format print mean append compute_overlaps set argmax max len list graph_fn zip append range len print array array show subplot uint8 margins axis subplots_adjust astype imshow title figure zip len shuffle list map uint8 array range where subplots axis show set_title apply_mask imshow find_contours range set_xlim astype copy zeros uint8 Polygon print text add_patch Rectangle randint fliplr set_ylim compute_matches display_instances concatenate len subplots arange rand axis Line2D unmold_mask shape title apply_mask imshow format set_xlim astype copy enumerate add_line print text add_patch Rectangle int32 set_ylim len format arange display_images unique append sum range format subplots set_title plot set_xlim set_ylim format arange product yticks text xlabel tight_layout ylabel imshow figure xticks max range len subplots axis Line2D random_colors set_title apply_mask imshow find_contours range set_xlim astype copy zeros add_line uint8 Polygon text add_patch Rectangle int32 randint fliplr set_ylim HTML display get_trainable_layers name weights display_table append get_weights enumerate | # Refined Segmentation R-CNN **Accepted by MICCAI2019.** Liu, Yalong, Jie Li, Ying Wang, Miaomiao Wang, Xianjun Li, Zhicheng Jiao, Jian Yang, and Xingbo Gao. "Refined Segmentation R-CNN: A Two-Stage Convolutional Neural Network for Punctate White Matter Lesion Segmentation in Preterm Infants." In International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 193-201. Springer, Cham, 2019.  --A Deep Learning method to segment punctate white matter lesions (PWMLs); Brain tumor segmentation. **By:** Yalong Liu<sup>1</sup>, Jie Li<sup>1</sup>, Ying Wang<sup>1</sup>, Miaomiao Wang<sup>2</sup>, Xianjun Li<sup>2</sup>, Zhicheng Jiao<sup>3</sup>, Jian Yang<sup>2</sup>, Xingbo Gao<sup>1</sup> 1. Lab of Video and Image Processing Systems, School of Electronic Engineering, Xidian University, Xi’an 710071, China 2. Department of Radiology, The First Affiliated Hospital of Xi'an Jiaotong University, Xi’an 710061, China 3. University of North Carolina at Chapel Hill, Chapel Hill, NC 27599, USA | 1,157 |
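The Refined-Segmentation R-CNN listing above exposes a `crf_from_sigmoid` helper built on `DenseCRF2D`, `unary_from_softmax`, `addPairwiseGaussian`, `addPairwiseBilateral`, and `inference`, i.e. pydensecrf-style refinement of the network's probability maps. The sketch below shows how such a post-processing step is typically wired up; the kernel widths and compatibility weights are placeholder values, not the repository's settings.

```python
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def crf_refine(probs, rgb, n_iters=5):
    """probs: (C, H, W) class probabilities; rgb: (H, W, 3) uint8 image."""
    c, h, w = probs.shape
    d = dcrf.DenseCRF2D(w, h, c)
    d.setUnaryEnergy(unary_from_softmax(probs))      # -log p as unary potentials
    d.addPairwiseGaussian(sxy=3, compat=3)           # smoothness kernel
    d.addPairwiseBilateral(sxy=80, srgb=13,          # appearance (colour-aware) kernel
                           rgbim=np.ascontiguousarray(rgb), compat=10)
    q = np.array(d.inference(n_iters)).reshape(c, h, w)
    return q.argmax(axis=0)                          # refined label map
```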
YangLi1221/CoRA | ['relation extraction'] | ['Improving Long-Tail Relation Extraction with Collaborating Relation-Augmented Attention'] | printer.py script/evaluate.py model/model_CoRA.py model/__init__.py script/show_pr.py utilize.py script/initial_CoRA.py script/train.py Printer bn_dense_layer_v2 assert_rank dropout get_shape_list gelu selu swish act_name2fn activation_name_to_func CoRA NN main find_index sort_test_files init_test_files sort_files pos_embed assert_equal init_relation init_word init_train_files init_test_files_pn makedirs main name integer_types ndims isinstance as_list assert_rank name shape append enumerate sqrt erf tanh exp leaky_relu hasattr isinstance relu identity sigmoid elu log act_name2fn logits_path checkpoint_path test_sleep model batch_size test_step num_hier1_classes Saver save Session test_end_step open CoRA num_hier2_classes run list str num_classes restore sleep append test_start_ckpt range test_single concatenate pter readlines close test_end_ckpt test_start_step mean zip zeros enumerate load print reshape word_size Printer save_epoch pos_size data_path argsort average_precision_score mode split global_variables_initializer array hidden_size len enumerate int print ones len close open float range split int print strip close open range split str readline close write open append split str readline close write open append split int str flush print strip len min write pos_embed split append zeros range enumerate open int str flush print strip len min write pos_embed split append zeros range enumerate open strip open str append range format hstack close assert_equal tile flush enumerate int print reshape min write pos_embed split zeros array len model_dir train_step get_collection merge_all max_epoch GradientDescentOptimizer global_step format replace FileWriter shuffle take ConfigProto float flush int summary_dir learning_rate Variable graph write UPDATE_OPS restore_epoch scalar | # CoRA Codes and datasets for our paper "Improving Long-Tail Relation Extraction with Collaborating Relation-Augmented Attention" If you use the code, please cite the following [paper](https://arxiv.org/pdf/2010.03773.pdf): ``` @article{li2020improving, title={Improving Long-Tail Relation Extraction with Collaborating Relation-Augmented Attention}, author={Li, Yang and Shen, Tao and Long, Guodong and Jiang, Jing and Zhou, Tianyi and Zhang, Chengqi}, journal={arXiv preprint arXiv:2010.03773}, year={2020} } | 1,158 |
YantaoShen/kpm_rw_person_reid | ['person re identification'] | ['Deep Group-shuffling Random Walk for Person Re-identification', 'End-to-End Deep Kronecker-Product Matching for Person Re-identification'] | reid/loss/oim.py reid/models/__init__.py reid/evaluation_metrics/ranking.py reid/utils/serialization.py reid/dist_metric.py test/datasets/test_cuhk01.py reid/metric_learning/kissme.py reid/feature_extraction/database.py reid/models/kron.py reid/datasets/market1501.py reid/feature_extraction/__init__.py reid/__init__.py reid/evaluators.py reid/metric_learning/euclidean.py reid/evaluation_metrics/classification.py reid/datasets/dukemtmc.py reid/datasets/viper.py reid/datasets/cuhk03.py test/loss/test_oim.py reid/loss/triplet.py reid/utils/meters.py test/evaluation_metrics/test_cmc.py reid/loss/__init__.py reid/models/embedding.py reid/feature_extraction/cnn.py reid/utils/data/sampler.py reid/utils/data/__init__.py reid/datasets/cuhk01.py reid/utils/data/transforms.py reid/trainers.py reid/utils/data/preprocessor.py reid/utils/osutils.py examples/main.py test/models/test_inception.py reid/utils/__init__.py test/datasets/test_cuhk03.py test/datasets/test_viper.py reid/models/multi_branch.py reid/models/inception.py reid/models/resnet.py reid/evaluation_metrics/__init__.py test/utils/data/test_preprocessor.py reid/utils/data/dataset.py test/datasets/test_dukemtmc.py test/feature_extraction/test_database.py setup.py test/datasets/test_market1501.py reid/metric_learning/__init__.py reid/datasets/__init__.py reid/utils/logging.py get_data main DistanceMetric extract_embeddings evaluate_all CascadeEvaluator pairwise_distance Evaluator compute_random_walk extract_features Trainer RandomWalkGrpShufTrainer BaseTrainer CUHK01 CUHK03 DukeMTMC Market1501 VIPeR names create get_dataset accuracy mean_ap _unique_sample cmc extract_cnn_feature FeatureDatabase OIM oim OIMLoss TripletLoss Euclidean KISSME validate_cov_matrix get_metric RandomWalkEmbed EltwiseSubEmbed _make_conv inception Block InceptionNet KronMatching kron_matching random_walk_compute RandomWalkKpmNet ResNet resnet50 resnet152 resnet34 resnet18 resnet101 names create Logger AverageMeter mkdir_if_missing load_checkpoint copy_state_dict read_json save_checkpoint write_json to_numpy to_torch Dataset _pluck Preprocessor KeyValuePreprocessor RandomIdentitySampler RandomMultipleGallerySampler No_index RandomSizedEarser RectScale RandomSizedRectCrop TestCUHK01 TestCUHK03 TestDukeMTMC TestMarket1501 TestVIPeR TestCMC TestFeatureDatabase TestOIMLoss TestInception TestPreprocessor join create val list gallery Compose set query DataLoader Preprocessor Normalize workers batch_size get_data query evaluate_from save_checkpoint Logger arch dataset cuda max seed create CascadeEvaluator data_dir copy_state_dict logs_dir Adam RandomWalkGrpShufTrainer grp_num load_state_dict width RandomWalkKpmNet range val height format combine_trainval resume retrain manual_seed alpha join gallery evaluate print load_checkpoint DistanceMetric num_instances adjust_lr train epochs split data view Variable transpose contiguous clone mean softmax inverse append cuda range len update val time format Variable print AverageMeter eval avg compute_random_walk cuda range cat len update extract_cnn_feature time format val print AverageMeter OrderedDict eval avg zip enumerate len list view mm expand t cat transform sum addmm_ values len print format mean_ap warn topk size t eq mul_ expand_as append sum max zeros list items choice asarray arange defaultdict astype argsort shape 
_unique_sample int32 zip append to_numpy range zeros enumerate len asarray arange astype average_precision_score argsort shape int32 append to_numpy range to_torch remove model Variable register_forward_hook OrderedDict eval cpu cuda append T cholesky eye BatchNorm2d ReLU Conv2d conv2d size view data view Variable transpose contiguous clone softmax inverse cuda cat makedirs dirname mkdir_if_missing join copy dirname save mkdir_if_missing load format print isfile data items list replace isinstance print set copy_ add keys state_dict is_tensor list map split append enumerate | # Person-ReID with Deep Kronecker-Product Matching and Group-shuffiling Random Walk This is a Pytorch implementation of our two CVPR 2018 works' combination, * End-to-End Deep Kronecker-Product Matching for Person Re-identification (KPM) [Paper](http://openaccess.thecvf.com/content_cvpr_2018/papers/Shen_End-to-End_Deep_Kronecker-Product_CVPR_2018_paper.pdf) * Deep Group-shuffling Random Walk for Person Re-identification (GSRW) [Paper](http://openaccess.thecvf.com/content_cvpr_2018/papers/Shen_Deep_Group-Shuffling_Random_CVPR_2018_paper.pdf) The TPAMI extension version is * Person Re-identification with Deep Kronecker-Product Matching and Group-shuffling Random Walk [Paper](https://ieeexplore.ieee.org/abstract/document/8906139) Our code is mainly based on [open-reid](https://github.com/Cysu/open-reid) ## Requirements * python 2.7 (We recommend to use [Anaconda](https://www.anaconda.com/download/#linux), since many python libs like [numpy](http://www.numpy.org/) and [sklearn](http://scikit-learn.org/stable/) are needed in our code.) * [PyTorch](https://pytorch.org/previous-versions/) (we run the code under version 0.3.0, maybe versions <= 0.3.1 also work.) | 1,159 |
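The re-ID listing above includes a `pairwise_distance` helper (built from `expand`/`t`/`addmm_` calls) that feeds the CMC and mAP evaluation. Below is a hedged PyTorch sketch of that standard computation — the squared-Euclidean expansion commonly used in open-reid-derived code — which is illustrative and not necessarily identical to this repository's version.

```python
import torch

def pairwise_distance(query, gallery):
    """Squared Euclidean distances between rows of query (m, d) and gallery (n, d)."""
    m, n = query.size(0), gallery.size(0)
    x2 = query.pow(2).sum(dim=1, keepdim=True).expand(m, n)
    y2 = gallery.pow(2).sum(dim=1, keepdim=True).expand(n, m).t()
    dist = x2 + y2 - 2.0 * torch.mm(query, gallery.t())   # ||x||^2 + ||y||^2 - 2 x.y
    return dist.clamp_(min=0)                              # guard against tiny negatives
```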
Yaozhuwa/FisheyeSeg | ['semantic segmentation', 'autonomous driving'] | ['Universal Semantic Segmentation for Fisheye Urban Driving Images'] | models/BasicModule.py image_process.py test.py data/FishEyeGenerator.py models/ERFPSPNet.py config.py models/SwiftNet.py main.py DataGenerator.py loss.py data/CityScape.py DefaultConfig FESetsGenerator test crop CrossEntropyLoss2d FocalLoss2d get_default_device val run_image real_image_test all_eval final_eval val_distortion one_eval train MyTransform RandOneTransform label2color test_color test_trans test_rand_shift test_scale fish_scale CityScape FishEyeGenerator test_gray test_color BasicModule Decoder DownsamplerBlock Encoder UpsamplerBlock ERFPSPNet PSPDec non_bottleneck_1d SpatialPyramidPooling conv1x1 ResNet _Upsample _BNReluConv conv3x3 SwiftNet resnet34 MySpatialPyramidPooling resnet18 BasicBlock FESetsGenerator generate rand_ext_params set_ext_param_range zeros is_available time print eval class_num val_batch_size ceil zeros to sum time ones fish_size print eval class_num val_batch_size ceil zeros to sum range load print data_dir CityScape SwiftNet eval DataLoader val_distortion load_state_dict ckpt_path resnet18 to range len load val print data_dir CityScape val_distortion eval DataLoader load_state_dict to resnet18 SwiftNet one_eval ext_param batch_size model zero_grad rand_ext_params DataLoader FocalLoss2d set_crop train_with_ckpt ckpt_path save cuda ext_range str view set_ext_params f max_epoch Adam valid_annot_dir load_state_dict rand_ext resnet18 ceil to logdir range val SummaryWriter fish_size close CityScape SwiftNet set_bkg item model_path CosineAnnealingLR rand_f train_img_dir load time learning_rate add_image enumerate criterion backward print set_ext_param_range valid_img_dir ckpt_name train_annot_dir step MyTransform add_scalar unsqueeze_ label2color model device to numpy imread load sorted run_image waitKey SwiftNet eval imshow load_state_dict device to shape range zeros label2color waitKey imshow resize imread label2color set_ext_params FishEyeGenerator waitKey imshow transFromColor imread transFromGray print ones random shape resize imshow imread fish_scale waitKey warpAffine random waitKey imshow resize imread array time print_ext_param set_ext_params print FishEyeGenerator transFromColor FishEyeGenerator waitKey imshow resize imread transFromGray load_url ResNet load_state_dict load_url ResNet load_state_dict | # Fisheye Segmentation | 1,160 |
YapengTian/AVVP-ECCV20 | ['multiple instance learning'] | ['Unified Multisensory Perception: Weakly-Supervised Audio-Visual Video Parsing'] | scripts/extract_rgb_feat.py utils/eval_metrics.py main_avvp.py dataloader.py scripts/extract_3D_feat.py scripts/extract_frames.py scripts/utils.py scripts/download_dataset.py scripts/transforms.py nets/net_audiovisual.py scripts/extract_audio.py ToTensor LLP_dataset ids_to_multinomial main eval train HANLayer _get_clones Encoder CMTLayer MMIL_Net download extract_feats extract_frames extract_feats Pad CenterCrop center_crop hflip Resize ToFloatTensorInZeroOne pad RandomCrop Normalize RandomHorizontalFlip resize normalize crop to_normalized_float_tensor ToRange255 LoadTransformImage ToSpaceBGR LoadImage TransformImage Identity event_wise_metric F1 Recall to_vec segment_level Precision extract_event event_level zeros len format criterion model backward print dataset zero_grad clamp_ item step enumerate len format print mean array read_csv label_test DataLoader ArgumentParser save seed StepLR LLP_dataset Adam epochs load_state_dict parse_args to range state_dict label_val eval manual_seed BCELoss checkpoint load model_save_dir add_argument parameters train step gpu join remove print system exists makedirs join sorted load_img glob getcwd print eval numpy round mkdir linspace save zeros listdir range len print system load_image_fn int round float min isinstance reshape dim range len range len range len event_wise_metric extract_event append zeros sum range len append sum range len zeros range append range to_vec range len | Unified Multisensory Perception: Weakly-Supervised Audio-Visual Video Parsing (To appear in ECCV 2020) [[Paper]](https://arxiv.org/pdf/2007.10558.pdf) [Yapeng Tian](http://yapengtian.org/), [Dingzeyu Li](https://dingzeyu.li/), and [Chenliang Xu](https://www.cs.rochester.edu/~cxu22/) ### Audio-visual video parsing We define the <b>Audio-Visual Video Parsing</b> as a task to group video segments and parse a video into different temporal audio, visual, and audio-visual events associated with semantic labels.  ### LLP Dataset & Features ```bash # LLP dataset annotations | 1,161 |
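The AVVP row above lists an `ids_to_multinomial` helper, reflecting that each clip's weak event labels are encoded as a multi-hot vector over the event vocabulary before training. A minimal sketch of that encoding follows; the category names are placeholders, not the actual LLP label set.

```python
import numpy as np

EVENT_CATEGORIES = ["Speech", "Dog", "Car", "Violin"]   # placeholder subset, not the real vocabulary

def ids_to_multinomial(labels, categories=EVENT_CATEGORIES):
    """Map a list of event names to a multi-hot target vector."""
    y = np.zeros(len(categories), dtype=np.float32)
    for name in labels:
        y[categories.index(name)] = 1.0
    return y

# ids_to_multinomial(["Dog", "Speech"]) -> array([1., 1., 0., 0.], dtype=float32)
```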
Yarlak/Larry | ['unity'] | ['Unity: A General Platform for Intelligent Agents'] | ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/mlagents/trainers/components/reward_signals/curiosity/model.py ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/trainers/components/reward_signals/reward_signal.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/trainers/learn.py ml-agents-envs/mlagents/envs/communicator_objects/custom_observation_pb2.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/tests/test_barracuda_converter.py ml-agents/mlagents/trainers/ppo/models.py gym-unity/gym_unity/__init__.py utils/validate_meta_files.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/trainers/components/bc/model.py ml-agents/mlagents/trainers/tests/test_curriculum.py ml-agents-envs/mlagents/envs/communicator.py ml-agents-envs/mlagents/envs/communicator_objects/custom_reset_parameters_pb2.py ml-agents/mlagents/trainers/tests/test_ppo.py ml-agents-envs/mlagents/envs/tests/test_rpc_communicator.py ml-agents/mlagents/trainers/components/reward_signals/__init__.py ml-agents-envs/setup.py ml-agents/mlagents/trainers/tests/mock_brain.py ml-agents-envs/mlagents/envs/action_info.py ml-agents-envs/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/tests/test_bcmodule.py ml-agents/mlagents/trainers/tests/test_trainer_controller.py ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py ml-agents/setup.py ml-agents/mlagents/trainers/barracuda.py ml-agents-envs/mlagents/envs/tests/test_envs.py ml-agents-envs/mlagents/envs/env_manager.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents-envs/mlagents/envs/tests/test_timers.py ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/unity_input_pb2.py ml-agents/mlagents/trainers/tests/test_meta_curriculum.py ml-agents/mlagents/trainers/components/reward_signals/curiosity/signal.py ml-agents-envs/mlagents/envs/subprocess_env_manager.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/components/reward_signals/curiosity/__init__.py ml-agents/mlagents/trainers/curriculum.py ml-agents-envs/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/tests/test_policy.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents-envs/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/trainers/tests/test_learn.py ml-agents-envs/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/mlagents/trainers/tests/test_demo_loader.py ml-agents/mlagents/trainers/components/bc/__init__.py ml-agents/mlagents/trainers/models.py ml-agents/mlagents/trainers/__init__.py ml-agents-envs/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/mlagents/trainers/tests/test_simple_rl.py ml-agents-envs/mlagents/envs/policy.py ml-agents/mlagents/trainers/exception.py gym-unity/gym_unity/tests/test_gym.py ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/trainers/bc/online_trainer.py ml-agents-envs/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/trainers/ppo/__init__.py 
ml-agents/mlagents/trainers/tensorflow_to_barracuda.py ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents-envs/mlagents/envs/mock_communicator.py ml-agents/mlagents/trainers/tests/test_rl_trainer.py ml-agents-envs/mlagents/envs/timers.py gym-unity/setup.py ml-agents-envs/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents-envs/mlagents/envs/environment.py ml-agents-envs/mlagents/envs/communicator_objects/custom_action_pb2.py ml-agents/mlagents/trainers/bc/policy.py ml-agents-envs/mlagents/envs/simple_env_manager.py ml-agents-envs/mlagents/envs/base_unity_environment.py ml-agents/mlagents/trainers/bc/__init__.py ml-agents/mlagents/trainers/trainer_util.py ml-agents/mlagents/trainers/tests/test_trainer_util.py ml-agents-envs/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/trainers/components/reward_signals/extrinsic/signal.py ml-agents/mlagents/trainers/components/reward_signals/gail/__init__.py ml-agents-envs/mlagents/envs/sampler_class.py ml-agents-envs/mlagents/envs/exception.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/components/reward_signals/gail/model.py ml-agents-envs/mlagents/envs/communicator_objects/header_pb2.py ml-agents/mlagents/trainers/rl_trainer.py ml-agents/mlagents/trainers/tests/test_reward_signals.py ml-agents-envs/mlagents/envs/brain.py ml-agents/mlagents/trainers/components/reward_signals/gail/signal.py ml-agents/mlagents/trainers/ppo/multi_gpu_policy.py ml-agents/mlagents/trainers/tests/test_multigpu.py ml-agents-envs/mlagents/envs/communicator_objects/demonstration_meta_proto_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/trainers/demo_loader.py ml-agents-envs/mlagents/envs/__init__.py ml-agents/mlagents/trainers/components/bc/module.py ml-agents/mlagents/trainers/tests/test_trainer_metrics.py ml-agents-envs/mlagents/envs/tests/test_sampler_class.py ml-agents/mlagents/trainers/tests/test_buffer.py ml-agents-envs/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/trainer.py ml-agents-envs/mlagents/envs/socket_communicator.py ml-agents-envs/mlagents/envs/tests/test_subprocess_env_manager.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/trainers/bc/offline_trainer.py ml-agents/mlagents/trainers/tf_policy.py ml-agents/mlagents/trainers/tests/test_bc.py ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/trainers/trainer_metrics.py UnityGymException ActionFlattener UnityEnv create_mock_vector_braininfo test_gym_wrapper test_multi_agent test_branched_flatten setup_mock_unityenvironment create_mock_brainparams BarracudaWriter fuse print_known_operations compress Build sort lstm write fuse_batchnorm_weights trim mean gru Model summary Struct parse_args to_json rnn BufferException Buffer Curriculum make_demo_buffer load_demonstration demo_to_buffer CurriculumError MetaCurriculumError TrainerError create_environment_factory create_sampler_manager run_training prepare_for_docker_run try_create_meta_curriculum main load_config MetaCurriculum EncoderType LearningModel AllRewardsOutput RLTrainer get_layer_shape pool_to_HW flatten sqr_diff process_layer process_model get_layer_rank slow_but_stable_topological_sort get_attr basic_lstm ModelBuilderContext order_by get_epsilon get_tensor_dtype replace_strings_in_list debug embody by_op get_tensor_dims strided_slice 
remove_duplicates_from_list axis_to_barracuda by_name locate_actual_output_node convert strides_to_HW get_tensor_data very_slow_but_stable_topological_sort gru TFPolicy UnityPolicyException UnityTrainerException Trainer TrainerController TrainerMetrics initialize_trainers BehavioralCloningModel OfflineBCTrainer OnlineBCTrainer BCPolicy BCTrainer BCModel BCModule RewardSignal create_reward_signal CuriosityModel CuriosityRewardSignal ExtrinsicRewardSignal GAILModel GAILRewardSignal PPOModel get_devices MultiGpuPPOPolicy PPOPolicy PPOTrainer get_gae discount_rewards create_buffer simulate_rollout create_mock_3dball_brain create_mock_banana_brain setup_mock_unityenvironment create_mock_braininfo create_mock_brainparams setup_mock_env_and_brains test_barracuda_converter test_bc_trainer_step test_bc_trainer_add_proc_experiences test_cc_bc_model test_dc_bc_model test_visual_cc_bc_model test_bc_trainer_end_episode test_bc_policy_evaluate dummy_config test_visual_dc_bc_model create_bc_trainer test_bcmodule_rnn_update test_bcmodule_update test_bcmodule_dc_visual_update dummy_config create_ppo_policy_with_bc_mock test_bcmodule_defaults test_bcmodule_rnn_dc_update test_buffer_sample construct_fake_buffer assert_array fakerandint test_buffer test_buffer_truncate location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config test_load_demo test_load_demo_dir basic_options test_docker_target_path test_run_training test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_create_model dummy_config test_average_gradients test_update basic_mock_brain test_take_action_returns_action_info_when_available basic_params test_take_action_returns_nones_on_missing_values test_take_action_returns_empty_with_no_agents test_trainer_increment_step test_rl_functions test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_add_rewards_output test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_get_value_estimates test_ppo_model_cc_vector test_gail_dc_visual reward_signal_update reward_signal_eval test_extrinsic test_curiosity_cc test_gail_rnn test_gail_cc create_ppo_policy_mock test_curiosity_dc curiosity_dummy_config dummy_config test_curiosity_visual test_curiosity_rnn gail_dummy_config create_mock_all_brain_info create_rl_trainer dummy_config test_rl_trainer create_mock_brain create_mock_policy clamp test_simple_rl Simple1DEnvironment _check_environment_trains test_initialization_seed test_start_learning_trains_until_max_steps_then_saves basic_trainer_controller test_take_step_adds_experiences_to_trainer_and_trains dummy_config trainer_controller_with_take_step_mocks trainer_controller_with_start_learning_mocks test_start_learning_trains_forever_if_no_train_model TestTrainerMetrics test_initialize_online_bc_trainer test_initialize_ppo_trainer test_initialize_trainer_parameters_override_defaults dummy_offline_bc_config test_initialize_invalid_trainer_raises_exception dummy_bad_config dummy_config dummy_offline_bc_config_with_override dummy_online_bc_config ActionInfo BaseUnityEnvironment safe_concat_np_ndarray BrainInfo BrainParameters safe_concat_lists Communicator 
UnityEnvironment EnvManager StepInfo SamplerException UnityWorkerInUseException UnityException UnityCommunicationException UnityTimeOutException UnityEnvironmentException UnityActionException MockCommunicator Policy RpcCommunicator UnityToExternalServicerImplementation MultiRangeUniformSampler UniformSampler SamplerFactory SamplerManager GaussianSampler Sampler SimpleEnvManager SocketCommunicator worker EnvironmentResponse UnityEnvWorker StepResponse SubprocessEnvManager EnvironmentCommand TimerNode hierarchical_timer get_timer_root get_timer_tree reset_timers set_gauge timed GaugeNode TimerStack UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server test_initialization test_reset test_close test_step test_handles_bad_filename test_rpc_communicator_checks_port_on_create test_rpc_communicator_create_multiple_workers test_rpc_communicator_close test_empty_samplers sampler_config_1 check_value_in_intervals incorrect_uniform_sampler test_incorrect_sampler test_sampler_config_1 sampler_config_2 incorrect_sampler_config test_incorrect_uniform_sampler test_sampler_config_2 mock_env_factory SubprocessEnvManagerTest MockEnvWorker test_timers decorated_func main create_mock_vector_braininfo sample UnityEnv setup_mock_unityenvironment step create_mock_brainparams create_mock_vector_braininfo UnityEnv setup_mock_unityenvironment step create_mock_brainparams setup_mock_unityenvironment create_mock_vector_braininfo create_mock_brainparams UnityEnv Mock Mock array range join isdir print replaceFilenameExtension add_argument exit verbose source_file ArgumentParser target_file sqrt topologicalSort list hasattr layers addEdge Graph print inputs set len list hasattr layers print filter match trim_model compile data layers print tensors float16 replace layers dumps data dtype layers isinstance print name tensors inputs outputs shape zip array_without_brackets to_json globals Build array_equal pool reduce Build tanh mad tanh mul Build concat add sigmoid sub mad _ tanh mul Build concat add sigmoid mad print sorted keys Buffer reset_local_buffers number_visual_observations append_update_buffer append range enumerate make_demo_buffer load_demonstration join read suffix isdir endswith BrainParametersProto from_agent_proto DemonstrationMetaProto ParseFromString AgentInfoProto isfile append from_proto listdir _DecodeVarint32 start_learning int str format create_environment_factory create_sampler_manager initialize_trainers external_brains TrainerController put try_create_meta_curriculum reset_parameters load_config SubprocessEnvManager pop SamplerManager load_config set_all_curriculums_to_lesson_num MetaCurriculum reset_parameters keys chmod format basename isdir glob copyfile copytree prepare_for_docker_run replace int Process join docopt getLogger print run_training start Queue info append randint setLevel range endswith len print HasField hasattr get_attr isinstance get_attr tensor_shape ndarray isinstance shape int_val bool_val float_val ListFields name ndarray isinstance str tensor_content ndarray product isinstance get_tensor_dtype print get_tensor_dims unpack int_val bool_val array float_val enter append add set Build mul sub insert Build tolist append range len locate_actual_output_node name find_tensor_by_name split locate_actual_output_node name lstm find_tensor_by_name find_forget_bias split get_layer_shape id Struct tensor get_layer_rank layer_ranks hasattr name patch_data rank input_shapes out_shapes input get_attr append replace_strings_in_list tensors embody astype op inputs 
zip enumerate print float32 patch_data_fn model_tensors map_ignored_layer_to_its_input co_argcount len items hasattr get_tensors name print process_layer eval slow_but_stable_topological_sort ModelBuilderContext sort assign_ids pop range insert len layers verbose Struct process_model open print_known_operations fuse compress node GraphDef Model dims_to_barracuda_shape insert get_tensor_dims inputs MessageToJson ParseFromString cleanup_layers read memories print sort write trim summary print_supported_ops update format OfflineBCTrainer copy OnlineBCTrainer PPOTrainer get check_config rcls list_local_devices size range reversed zeros_like append discount_rewards ones Mock array range brain_name create_buffer brain sequence_length append range vector_action_space_size Buffer ones number_visual_observations append_update_buffer shape append sum range enumerate setup_mock_unityenvironment mock_env create_mock_braininfo create_mock_brainparams create_mock_brainparams create_mock_brainparams join remove _get_candidate_names convert _get_default_tempdir dirname abspath isfile next Mock BCTrainer simulate_rollout mock_env dirname abspath setup_mock_unityenvironment policy create_mock_braininfo create_mock_3dball_brain update_policy create_bc_trainer increment_step agents process_experiences step create_bc_trainer add_experiences end_episode agents process_experiences step create_bc_trainer add_experiences BCPolicy evaluate close reset MockCommunicator reset_default_graph UnityEnvironment reset_default_graph reset_default_graph reset_default_graph reset_default_graph mock_env dirname abspath PPOPolicy setup_mock_unityenvironment create_mock_braininfo create_ppo_policy_with_bc_mock close create_mock_3dball_brain update items close create_ppo_policy_with_bc_mock create_mock_3dball_brain update items close create_ppo_policy_with_bc_mock create_mock_3dball_brain update items close create_mock_banana_brain create_ppo_policy_with_bc_mock update items close create_mock_banana_brain create_ppo_policy_with_bc_mock flatten list range len append range Buffer get_batch construct_fake_buffer assert_array append_update_buffer make_mini_batch reset_agent array sample_mini_batch construct_fake_buffer append_update_buffer construct_fake_buffer truncate_update_buffer append_update_buffer Curriculum Curriculum Curriculum make_demo_buffer load_demonstration dirname abspath make_demo_buffer load_demonstration dirname abspath MagicMock basic_options MagicMock MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph MultiGpuPPOPolicy create_mock_brainparams reset_default_graph create_mock_brainparams update Mock reset_default_graph MultiGpuPPOPolicy create_mock_brainparams MagicMock TFPolicy basic_mock_brain basic_params BrainInfo get_action MagicMock TFPolicy basic_mock_brain basic_params BrainInfo get_action MagicMock TFPolicy basic_mock_brain ActionInfo basic_params BrainInfo get_action evaluate close reset MockCommunicator PPOPolicy reset_default_graph UnityEnvironment get_value_estimates items close reset MockCommunicator PPOPolicy reset_default_graph UnityEnvironment reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards Mock increment_step BrainParameters assert_called_with 
PPOTrainer AllRewardsOutput BrainParameters PPOTrainer add_rewards_outputs update PPOPolicy setup_mock_env_and_brains reset evaluate model simulate_rollout _execute_model prepare_update update_dict make_mini_batch create_ppo_policy_mock reward_signal_update reward_signal_eval reward_signal_update reward_signal_eval create_ppo_policy_mock dirname abspath create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_mock_brainparams RLTrainer dummy_config create_mock_brain Mock create_mock_all_brain_info create_rl_trainer values end_episode construct_curr_info episode_steps create_mock_braininfo create_mock_policy add_experiences Simple1DEnvironment _check_environment_trains TrainerController assert_called_with MagicMock basic_trainer_controller start_learning assert_called_once MagicMock assert_not_called trainer_controller_with_start_learning_mocks trainer_controller_with_start_learning_mocks start_learning MagicMock assert_called_once MagicMock basic_trainer_controller assert_called_once Mock MagicMock StepInfo current_all_brain_info advance outputs assert_not_called trainer_controller_with_take_step_mocks assert_called_once_with previous_all_brain_info dummy_offline_bc_config dummy_offline_bc_config_with_override BrainParametersMock BrainParametersMock dummy_online_bc_config dummy_config BrainParametersMock dummy_bad_config extend copy items value EnvironmentResponse external_brains global_done payload text get_timer_root reset_timers put reset _send_response reset_parameters StepResponse env_factory step memory action perf_counter push reset method_handlers_generic_handler add_generic_rpc_handlers UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator close RpcCommunicator close RpcCommunicator close RpcCommunicator SamplerManager sample_all sampler_config_1 sampler_config_2 SamplerManager SamplerManager sample_all incorrect_uniform_sampler incorrect_sampler_config set_gauge replace endswith add set walk | <img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) [](docs/Readme.md) [](LICENSE) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) | 1,162 |
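The ML-Agents rows above bundle a `gym_unity` wrapper (`UnityEnv`) alongside the trainer code, and their README describes driving environments through a Python API. A minimal gym-style interaction loop is sketched below; the build path is a placeholder, and the constructor keyword arguments beyond the class name are assumptions about this particular toolkit version.

```python
from gym_unity.envs import UnityEnv

# "./builds/3DBall" is a placeholder path to a compiled Unity environment binary.
env = UnityEnv("./builds/3DBall", worker_id=0, use_visual=False)
obs = env.reset()
done, total_reward = False, 0.0
while not done:
    action = env.action_space.sample()             # random policy, for illustration only
    obs, reward, done, info = env.step(action)
    total_reward += reward
env.close()
print("episode reward:", total_reward)
```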
Yarlak/LarryOriginal | ['unity'] | ['Unity: A General Platform for Intelligent Agents'] | ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/mlagents/trainers/components/reward_signals/curiosity/model.py ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/trainers/components/reward_signals/reward_signal.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/trainers/learn.py ml-agents-envs/mlagents/envs/communicator_objects/custom_observation_pb2.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/tests/test_barracuda_converter.py ml-agents/mlagents/trainers/ppo/models.py gym-unity/gym_unity/__init__.py utils/validate_meta_files.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/trainers/components/bc/model.py ml-agents/mlagents/trainers/tests/test_curriculum.py ml-agents-envs/mlagents/envs/communicator.py ml-agents-envs/mlagents/envs/communicator_objects/custom_reset_parameters_pb2.py ml-agents/mlagents/trainers/tests/test_ppo.py ml-agents-envs/mlagents/envs/tests/test_rpc_communicator.py ml-agents/mlagents/trainers/components/reward_signals/__init__.py ml-agents-envs/setup.py ml-agents/mlagents/trainers/tests/mock_brain.py ml-agents-envs/mlagents/envs/action_info.py ml-agents-envs/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/tests/test_bcmodule.py ml-agents/mlagents/trainers/tests/test_trainer_controller.py ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py ml-agents/setup.py ml-agents/mlagents/trainers/barracuda.py ml-agents-envs/mlagents/envs/tests/test_envs.py ml-agents-envs/mlagents/envs/env_manager.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents-envs/mlagents/envs/tests/test_timers.py ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/unity_input_pb2.py ml-agents/mlagents/trainers/tests/test_meta_curriculum.py ml-agents/mlagents/trainers/components/reward_signals/curiosity/signal.py ml-agents-envs/mlagents/envs/subprocess_env_manager.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/components/reward_signals/curiosity/__init__.py ml-agents/mlagents/trainers/curriculum.py ml-agents-envs/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/tests/test_policy.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents-envs/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/trainers/tests/test_learn.py ml-agents-envs/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/mlagents/trainers/tests/test_demo_loader.py ml-agents/mlagents/trainers/components/bc/__init__.py ml-agents/mlagents/trainers/models.py ml-agents/mlagents/trainers/__init__.py ml-agents-envs/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/mlagents/trainers/tests/test_simple_rl.py ml-agents-envs/mlagents/envs/policy.py ml-agents/mlagents/trainers/exception.py gym-unity/gym_unity/tests/test_gym.py ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/trainers/bc/online_trainer.py ml-agents-envs/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/trainers/ppo/__init__.py 
ml-agents/mlagents/trainers/tensorflow_to_barracuda.py ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents-envs/mlagents/envs/mock_communicator.py ml-agents/mlagents/trainers/tests/test_rl_trainer.py ml-agents-envs/mlagents/envs/timers.py gym-unity/setup.py ml-agents-envs/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents-envs/mlagents/envs/environment.py ml-agents-envs/mlagents/envs/communicator_objects/custom_action_pb2.py ml-agents/mlagents/trainers/bc/policy.py ml-agents-envs/mlagents/envs/simple_env_manager.py ml-agents-envs/mlagents/envs/base_unity_environment.py ml-agents/mlagents/trainers/bc/__init__.py ml-agents/mlagents/trainers/trainer_util.py ml-agents/mlagents/trainers/tests/test_trainer_util.py ml-agents-envs/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/trainers/components/reward_signals/extrinsic/signal.py ml-agents/mlagents/trainers/components/reward_signals/gail/__init__.py ml-agents-envs/mlagents/envs/sampler_class.py ml-agents-envs/mlagents/envs/exception.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/components/reward_signals/gail/model.py ml-agents-envs/mlagents/envs/communicator_objects/header_pb2.py ml-agents/mlagents/trainers/rl_trainer.py ml-agents/mlagents/trainers/tests/test_reward_signals.py ml-agents-envs/mlagents/envs/brain.py ml-agents/mlagents/trainers/components/reward_signals/gail/signal.py ml-agents/mlagents/trainers/ppo/multi_gpu_policy.py ml-agents/mlagents/trainers/tests/test_multigpu.py ml-agents-envs/mlagents/envs/communicator_objects/demonstration_meta_proto_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents-envs/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/trainers/demo_loader.py ml-agents-envs/mlagents/envs/__init__.py ml-agents/mlagents/trainers/components/bc/module.py ml-agents/mlagents/trainers/tests/test_trainer_metrics.py ml-agents-envs/mlagents/envs/tests/test_sampler_class.py ml-agents/mlagents/trainers/tests/test_buffer.py ml-agents-envs/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/trainer.py ml-agents-envs/mlagents/envs/socket_communicator.py ml-agents-envs/mlagents/envs/tests/test_subprocess_env_manager.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/trainers/bc/offline_trainer.py ml-agents/mlagents/trainers/tf_policy.py ml-agents/mlagents/trainers/tests/test_bc.py ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/trainers/trainer_metrics.py UnityGymException ActionFlattener UnityEnv create_mock_vector_braininfo test_gym_wrapper test_multi_agent test_branched_flatten setup_mock_unityenvironment create_mock_brainparams BarracudaWriter fuse print_known_operations compress Build sort lstm write fuse_batchnorm_weights trim mean gru Model summary Struct parse_args to_json rnn BufferException Buffer Curriculum make_demo_buffer load_demonstration demo_to_buffer CurriculumError MetaCurriculumError TrainerError create_environment_factory create_sampler_manager run_training prepare_for_docker_run try_create_meta_curriculum main load_config MetaCurriculum EncoderType LearningModel AllRewardsOutput RLTrainer get_layer_shape pool_to_HW flatten sqr_diff process_layer process_model get_layer_rank slow_but_stable_topological_sort get_attr basic_lstm ModelBuilderContext order_by get_epsilon get_tensor_dtype replace_strings_in_list debug embody by_op get_tensor_dims strided_slice 
remove_duplicates_from_list axis_to_barracuda by_name locate_actual_output_node convert strides_to_HW get_tensor_data very_slow_but_stable_topological_sort gru TFPolicy UnityPolicyException UnityTrainerException Trainer TrainerController TrainerMetrics initialize_trainers BehavioralCloningModel OfflineBCTrainer OnlineBCTrainer BCPolicy BCTrainer BCModel BCModule RewardSignal create_reward_signal CuriosityModel CuriosityRewardSignal ExtrinsicRewardSignal GAILModel GAILRewardSignal PPOModel get_devices MultiGpuPPOPolicy PPOPolicy PPOTrainer get_gae discount_rewards create_buffer simulate_rollout create_mock_3dball_brain create_mock_banana_brain setup_mock_unityenvironment create_mock_braininfo create_mock_brainparams setup_mock_env_and_brains test_barracuda_converter test_bc_trainer_step test_bc_trainer_add_proc_experiences test_cc_bc_model test_dc_bc_model test_visual_cc_bc_model test_bc_trainer_end_episode test_bc_policy_evaluate dummy_config test_visual_dc_bc_model create_bc_trainer test_bcmodule_rnn_update test_bcmodule_update test_bcmodule_dc_visual_update dummy_config create_ppo_policy_with_bc_mock test_bcmodule_defaults test_bcmodule_rnn_dc_update test_buffer_sample construct_fake_buffer assert_array fakerandint test_buffer test_buffer_truncate location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config test_load_demo test_load_demo_dir basic_options test_docker_target_path test_run_training test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_create_model dummy_config test_average_gradients test_update basic_mock_brain test_take_action_returns_action_info_when_available basic_params test_take_action_returns_nones_on_missing_values test_take_action_returns_empty_with_no_agents test_trainer_increment_step test_rl_functions test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_add_rewards_output test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_get_value_estimates test_ppo_model_cc_vector test_gail_dc_visual reward_signal_update reward_signal_eval test_extrinsic test_curiosity_cc test_gail_rnn test_gail_cc create_ppo_policy_mock test_curiosity_dc curiosity_dummy_config dummy_config test_curiosity_visual test_curiosity_rnn gail_dummy_config create_mock_all_brain_info create_rl_trainer dummy_config test_rl_trainer create_mock_brain create_mock_policy clamp test_simple_rl Simple1DEnvironment _check_environment_trains test_initialization_seed test_start_learning_trains_until_max_steps_then_saves basic_trainer_controller test_take_step_adds_experiences_to_trainer_and_trains dummy_config trainer_controller_with_take_step_mocks trainer_controller_with_start_learning_mocks test_start_learning_trains_forever_if_no_train_model TestTrainerMetrics test_initialize_online_bc_trainer test_initialize_ppo_trainer test_initialize_trainer_parameters_override_defaults dummy_offline_bc_config test_initialize_invalid_trainer_raises_exception dummy_bad_config dummy_config dummy_offline_bc_config_with_override dummy_online_bc_config ActionInfo BaseUnityEnvironment safe_concat_np_ndarray BrainInfo BrainParameters safe_concat_lists Communicator 
UnityEnvironment EnvManager StepInfo SamplerException UnityWorkerInUseException UnityException UnityCommunicationException UnityTimeOutException UnityEnvironmentException UnityActionException MockCommunicator Policy RpcCommunicator UnityToExternalServicerImplementation MultiRangeUniformSampler UniformSampler SamplerFactory SamplerManager GaussianSampler Sampler SimpleEnvManager SocketCommunicator worker EnvironmentResponse UnityEnvWorker StepResponse SubprocessEnvManager EnvironmentCommand TimerNode hierarchical_timer get_timer_root get_timer_tree reset_timers set_gauge timed GaugeNode TimerStack UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server test_initialization test_reset test_close test_step test_handles_bad_filename test_rpc_communicator_checks_port_on_create test_rpc_communicator_create_multiple_workers test_rpc_communicator_close test_empty_samplers sampler_config_1 check_value_in_intervals incorrect_uniform_sampler test_incorrect_sampler test_sampler_config_1 sampler_config_2 incorrect_sampler_config test_incorrect_uniform_sampler test_sampler_config_2 mock_env_factory SubprocessEnvManagerTest MockEnvWorker test_timers decorated_func main create_mock_vector_braininfo sample UnityEnv setup_mock_unityenvironment step create_mock_brainparams create_mock_vector_braininfo UnityEnv setup_mock_unityenvironment step create_mock_brainparams setup_mock_unityenvironment create_mock_vector_braininfo create_mock_brainparams UnityEnv Mock Mock array range join isdir print replaceFilenameExtension add_argument exit verbose source_file ArgumentParser target_file sqrt topologicalSort list hasattr layers addEdge Graph print inputs set len list hasattr layers print filter match trim_model compile data layers print tensors float16 replace layers dumps data dtype layers isinstance print name tensors inputs outputs shape zip array_without_brackets to_json globals Build array_equal pool reduce Build tanh mad tanh mul Build concat add sigmoid sub mad _ tanh mul Build concat add sigmoid mad print sorted keys Buffer reset_local_buffers number_visual_observations append_update_buffer append range enumerate make_demo_buffer load_demonstration join read suffix isdir endswith BrainParametersProto from_agent_proto DemonstrationMetaProto ParseFromString AgentInfoProto isfile append from_proto listdir _DecodeVarint32 start_learning int str format create_environment_factory create_sampler_manager initialize_trainers external_brains TrainerController put try_create_meta_curriculum reset_parameters load_config SubprocessEnvManager pop SamplerManager load_config set_all_curriculums_to_lesson_num MetaCurriculum reset_parameters keys chmod format basename isdir glob copyfile copytree prepare_for_docker_run replace int Process join docopt getLogger print run_training start Queue info append randint setLevel range endswith len print HasField hasattr get_attr isinstance get_attr tensor_shape ndarray isinstance shape int_val bool_val float_val ListFields name ndarray isinstance str tensor_content ndarray product isinstance get_tensor_dtype print get_tensor_dims unpack int_val bool_val array float_val enter append add set Build mul sub insert Build tolist append range len locate_actual_output_node name find_tensor_by_name split locate_actual_output_node name lstm find_tensor_by_name find_forget_bias split get_layer_shape id Struct tensor get_layer_rank layer_ranks hasattr name patch_data rank input_shapes out_shapes input get_attr append replace_strings_in_list tensors embody astype op inputs 
zip enumerate print float32 patch_data_fn model_tensors map_ignored_layer_to_its_input co_argcount len items hasattr get_tensors name print process_layer eval slow_but_stable_topological_sort ModelBuilderContext sort assign_ids pop range insert len layers verbose Struct process_model open print_known_operations fuse compress node GraphDef Model dims_to_barracuda_shape insert get_tensor_dims inputs MessageToJson ParseFromString cleanup_layers read memories print sort write trim summary print_supported_ops update format OfflineBCTrainer copy OnlineBCTrainer PPOTrainer get check_config rcls list_local_devices size range reversed zeros_like append discount_rewards ones Mock array range brain_name create_buffer brain sequence_length append range vector_action_space_size Buffer ones number_visual_observations append_update_buffer shape append sum range enumerate setup_mock_unityenvironment mock_env create_mock_braininfo create_mock_brainparams create_mock_brainparams create_mock_brainparams join remove _get_candidate_names convert _get_default_tempdir dirname abspath isfile next Mock BCTrainer simulate_rollout mock_env dirname abspath setup_mock_unityenvironment policy create_mock_braininfo create_mock_3dball_brain update_policy create_bc_trainer increment_step agents process_experiences step create_bc_trainer add_experiences end_episode agents process_experiences step create_bc_trainer add_experiences BCPolicy evaluate close reset MockCommunicator reset_default_graph UnityEnvironment reset_default_graph reset_default_graph reset_default_graph reset_default_graph mock_env dirname abspath PPOPolicy setup_mock_unityenvironment create_mock_braininfo create_ppo_policy_with_bc_mock close create_mock_3dball_brain update items close create_ppo_policy_with_bc_mock create_mock_3dball_brain update items close create_ppo_policy_with_bc_mock create_mock_3dball_brain update items close create_mock_banana_brain create_ppo_policy_with_bc_mock update items close create_mock_banana_brain create_ppo_policy_with_bc_mock flatten list range len append range Buffer get_batch construct_fake_buffer assert_array append_update_buffer make_mini_batch reset_agent array sample_mini_batch construct_fake_buffer append_update_buffer construct_fake_buffer truncate_update_buffer append_update_buffer Curriculum Curriculum Curriculum make_demo_buffer load_demonstration dirname abspath make_demo_buffer load_demonstration dirname abspath MagicMock basic_options MagicMock MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph MultiGpuPPOPolicy create_mock_brainparams reset_default_graph create_mock_brainparams update Mock reset_default_graph MultiGpuPPOPolicy create_mock_brainparams MagicMock TFPolicy basic_mock_brain basic_params BrainInfo get_action MagicMock TFPolicy basic_mock_brain basic_params BrainInfo get_action MagicMock TFPolicy basic_mock_brain ActionInfo basic_params BrainInfo get_action evaluate close reset MockCommunicator PPOPolicy reset_default_graph UnityEnvironment get_value_estimates items close reset MockCommunicator PPOPolicy reset_default_graph UnityEnvironment reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards Mock increment_step BrainParameters assert_called_with 
PPOTrainer AllRewardsOutput BrainParameters PPOTrainer add_rewards_outputs update PPOPolicy setup_mock_env_and_brains reset evaluate model simulate_rollout _execute_model prepare_update update_dict make_mini_batch create_ppo_policy_mock reward_signal_update reward_signal_eval reward_signal_update reward_signal_eval create_ppo_policy_mock dirname abspath create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_ppo_policy_mock reward_signal_update reward_signal_eval create_mock_brainparams RLTrainer dummy_config create_mock_brain Mock create_mock_all_brain_info create_rl_trainer values end_episode construct_curr_info episode_steps create_mock_braininfo create_mock_policy add_experiences Simple1DEnvironment _check_environment_trains TrainerController assert_called_with MagicMock basic_trainer_controller start_learning assert_called_once MagicMock assert_not_called trainer_controller_with_start_learning_mocks trainer_controller_with_start_learning_mocks start_learning MagicMock assert_called_once MagicMock basic_trainer_controller assert_called_once Mock MagicMock StepInfo current_all_brain_info advance outputs assert_not_called trainer_controller_with_take_step_mocks assert_called_once_with previous_all_brain_info dummy_offline_bc_config dummy_offline_bc_config_with_override BrainParametersMock BrainParametersMock dummy_online_bc_config dummy_config BrainParametersMock dummy_bad_config extend copy items value EnvironmentResponse external_brains global_done payload text get_timer_root reset_timers put reset _send_response reset_parameters StepResponse env_factory step memory action perf_counter push reset method_handlers_generic_handler add_generic_rpc_handlers UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator close RpcCommunicator close RpcCommunicator close RpcCommunicator SamplerManager sample_all sampler_config_1 sampler_config_2 SamplerManager SamplerManager sample_all incorrect_uniform_sampler incorrect_sampler_config set_gauge replace endswith add set walk | <img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) [](docs/Readme.md) [](LICENSE) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) | 1,163 |
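The entry above lists a Gym wrapper among its sources (`gym-unity/gym_unity/envs/__init__.py`, exposing `UnityEnv`). The sketch below shows how such a wrapper is typically driven with random actions; the constructor keyword arguments (`worker_id`, `use_visual`) and the executable path are assumptions for illustration, not taken from the excerpt.

```python
# Illustrative sketch only: drives a compiled Unity environment through the Gym
# wrapper named in the dependency list (gym_unity.envs.UnityEnv). The constructor
# arguments and the build path below are assumptions, not confirmed by the entry.
from gym_unity.envs import UnityEnv

ENV_PATH = "./builds/3DBall"  # hypothetical path to a compiled Unity environment


def run_random_episode(env_path: str = ENV_PATH, max_steps: int = 200) -> float:
    """Roll out one episode with random actions and return the total reward."""
    env = UnityEnv(env_path, worker_id=0, use_visual=False)  # args assumed
    total_reward = 0.0
    obs = env.reset()
    for _ in range(max_steps):
        action = env.action_space.sample()           # random policy
        obs, reward, done, info = env.step(action)   # classic Gym step signature
        total_reward += reward
        if done:
            break
    env.close()
    return total_reward


if __name__ == "__main__":
    print("episode reward:", run_random_episode())
```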
YashNita/Audio-Visual-Event-Localization-in-Unconstrained-Videos | ['temporal localization'] | ['Audio-Visual Event Localization in Unconstrained Videos'] | cmm_train.py dataloader.py models_weakly.py models.py attention_visualization.py weak_supervised_main.py models_fusion.py cmm_test.py supervised_main.py video_frame_sample fun normlize create_heatmap step_decay compute_precision scmm_net contrastive_loss eucl_dist_output_shape euclidean_distance compute_accuracy step_decay scmm_net contrastive_loss eucl_dist_output_shape euclidean_distance compute_accuracy AVE_weak_Dataset AVEDataset att_Net TBMRF_Net att_Net val train compute_acc test val train test append int range shape min max range print max copy_ data pow floor range len range len Model Input int str append zeros argmax range val time get_batch batch_size Variable backward print zero_grad __len__ AVEDataset net_model nb_epoch loss_function save append step cuda range get_batch Variable print __len__ AVEDataset net_model eval compute_acc numpy cuda load get_batch model Variable print __len__ AVEDataset eval compute_acc numpy cuda AVE_weak_Dataset argmax int str append zeros accuracy_score AVE_weak_Dataset range int argmax str append zeros accuracy_score AVE_weak_Dataset range | Audio-Visual Event Localization in Unconstrained Videos (To appear in ECCV 2018) [Project](https://sites.google.com/view/audiovisualresearch) [ArXiv](https://arxiv.org/abs/1803.08842) ### AVE Dataset & Features AVE dataset can be downloaded from https://drive.google.com/open?id=1FjKwe79e0u96vdjIVwfRQ1V6SoDHe7kK. [Audio feature](https://drive.google.com/file/d/1F6p4BAOY-i0fDXUOhG7xHuw_fnO5exBS/view?usp=sharing) and [visual feature](https://drive.google.com/file/d/1hQwbhutA3fQturduRnHMyfRqdrRHgmC9/view?usp=sharing) (7.7GB) are also released. Please put videos of AVE dataset into /data/AVE folder and features into /data folder before running the code. ### Requirements Python-3.6, Pytorch-0.3.0, Keras, ffmpeg. ### Visualize attention maps Run: python attention_visualization.py to generate audio-guided visual attention maps.  | 1,164 |
YeLyuUT/SSeg | ['semantic segmentation'] | ['Improving Semantic Segmentation via Video Propagation and Label Relaxation'] | optimizer.py sdcnet/main.py sdcnet/models/model_utils.py utils/misc.py datasets/kitti.py sdcnet/sdc_aug.py loss.py datasets/mapillary.py train.py network/deepv3.py network/SEresnext.py sdcnet/utility/tools.py utils/attr_dict.py utils/my_data_parallel.py network/mynn.py transforms/joint_transforms.py network/__init__.py sdcnet/datasets/dataset_utils.py sdcnet/datasets/frame_loader.py network/wider_resnet.py datasets/camvid.py sdcnet/spatialdisplconv_package/spatialdisplconv.py sdcnet/models/__init__.py sdcnet/spatialdisplconv_package/test_spatialdisplconv.py datasets/sampler.py sdcnet/datasets/__init__.py datasets/__init__.py datasets/cityscapes.py network/Resnet.py datasets/uavid.py datasets/cityscapes_labels.py demo.py sdcnet/spatialdisplconv_package/setup.py eval.py datasets/uniform.py transforms/transforms.py config.py demo_folder.py sdcnet/models/sdc_net2d.py datasets/nullloader.py assert_and_infer_cfg inference_sliding reverse_mapping resize_thread pooled_eval flip_tensor infer_args RunEval inference_pool sliding_window_cropping setup_loader main reverse_sliding_window get_net get_loss ImageBasedCrossEntropyLoss2d CrossEntropyLoss2d customsoftmax ImgWtLossSoftNLL forgiving_state_restore restore_snapshot load_weights get_optimizer main train validate make_dataset CAMVID add_items colorize_mask colorize_mask make_cv_splits make_dataset_video make_test_split make_dataset make_split_coarse add_items CityScapes CityScapesVideo CityScapesUniform assureSingleInstanceName make_dataset Mapillary colorize_mask gen_colormap NullLoader DistributedSampler make_dataset colorize_mask UAVid gen_colormap pooled_class_centroids_all class_centroids_image class_centroids_all build_epoch Point random_sampling calc_tile_locations unpooled_class_centroids_all setup_loaders _AtrousSpatialPyramidPoolingModule DeepR50V3PlusD_m1 DeepSRNX101V3PlusD_m1 DeepV3Plus DeepWV3Plus DeepSRNX50V3PlusD_m1 initialize_weights Upsample Norm2d ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 se_resnext50_32x4d SENet SEResNetBottleneck SEBottleneck SEResNeXtBottleneck initialize_pretrained_model Bottleneck se_resnext101_32x4d SEModule WiderResNet IdentityResidualBlock WiderResNetA2 bnrelu GlobalAvgPool2d get_model wrap_network_in_dataparallel get_net build_and_initialize_model_and_optimizer save_model load_model train_step evaluate write_summary calc_linf_grad_norm parse_and_set_args initialilze_distributed train_epoch set_random_seed get_train_and_valid_data_loaders main forward_only train get_learning_rate_scheduler get_data get_model multi_step_augmentation one_step_augmentation StaticRandomCrop FrameLoader conv2d deconv2d SDCNet2DRecon SDCNet2D SpatialDisplConv SpatialDisplConvFunction create_pipe AverageMeter TimerBlock module_to_dict RandomHorizontallyFlip ResizeHeight SlidingCropOld CenterCrop RandomSizedCrop FreeScale RandomRotate CenterCropPad Compose PadImage Scale RandomSizeAndCrop Resize RandomCrop ScaleMin SlidingCrop ClassUniform DeNormalize ResizeHeight adjust_saturation RandomBilateralBlur adjust_hue FreeScale RelaxedBoundaryLossToTensor RandomVerticalFlip RandomGaussianBlur FlipChannels adjust_brightness _is_pil_image MaskToTensor adjust_contrast ColorJitter AttrDict per_class_iu save_log AverageMeter fast_hist evaluate_eval make_exp_name evaluate_eval_for_inference print_evaluate_results prep_experiment MyDataParallel data_parallel _check_balance 
batch_weighting apex jointwtborder print class_uniform_pct immutable rlx_off_epoch BatchNorm2d SyncBatchNorm int append min unsqueeze ceil range sliding_overlap cat len put get join Thread astype put dict start Queue append zeros float range len get join Thread put mean start Queue append range len pooled_eval range no_flip len get join FLIP_LEFT_RIGHT Thread img_transform size transpose BILINEAR Compose no_flip start sliding_window_cropping Queue resize append empty_cache numpy range split_index batch_size split_dataset KITTI ToTensor DataLoader mode scales split_count CityScapes MaskToTensor split restore_snapshot snapshot eval info cuda int basename snapshot group search split save_log inference_mode ckpt_path arch RunEval exp_name get_net setup_loader inf infer_args info cv_split enumerate join AvgPool2d final_dump tqdm split fixed_aspp_pool single_scale makedirs jointwtborder cuda img_wt_loss softmax LambdaLR Adam sgd SGD rescale parameters adam amsgrad REDUCE_BORDER_EPOCH restore_snapshot info load forgiving_state_restore load_state_dict info update load_state_dict info state_dict setup_loaders wrap_network_in_dataparallel assert_and_infer_cfg set_num_samples build_epoch get_optimizer initialize max_epoch range apex get_loss snapshot class_uniform_pct start_epoch load_weights fp16 immutable restore_optimizer set_epoch empty_cache train step prep_experiment zero_grad world_size update detach_ apex format val size mean avg item fp16 info net enumerate SUM backward add_scalar AverageMeter all_reduce step len update apex dataset_cls info FloatTensor size AverageMeter evaluate_eval all_reduce eval item cpu numpy enumerate append len convert putpalette int sort append listdir split join format exit add_items info len join join sorted append CV_SPLITS range len sorted listdir join str make_cv_splits make_test_split make_split_coarse append join listdir sorted remove print append listdir range len append range update items int defaultdict astype copy shape append center_of_mass calc_tile_locations array range open join defaultdict partial close map extend tqdm Pool class_centroids_image defaultdict extend tqdm pooled_class_centroids_all arange shuffle append range len str int extend random_sampling info range len test_mode UAVid ignore_label DataLoader CityScapes MaskToTensor num_classes bblur KITTI DistributedSampler crop_size Mapillary color_aug gblur apex jointwtborder bs_mult_val CAMVID Compose class_uniform_pct null_loader maxSkip CityScapesUniform ade20k RelaxedBoundaryLossToTensor bs_mult coarse_boost_classes ngpu MODEL getattr layer fill_ isinstance weight modules zero_ BatchNorm2d kaiming_normal_ load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url load_state_dict initialize_pretrained_model SENet initialize_pretrained_model SENet format get_model sum DataParallel DistributedDataParallel import_module getattr net_func int join format sorted items save_root name val_file torch_home getenv eval get_default save vars parse_args local_rank log makedirs device_count set_device rank init_process_group manual_seed format DistributedSampler log DataLoader dataset dataset_class len load format resume load_state_dict max log network_class initialize format load_model DDP exit resume fp16 sum cuda log current_device optimizer_class format ExponentialLR lr_milestones LambdaLR lr_gamma MultiStepLR log mean model Tensor list filter isinstance format backward calc_linf_grad_norm 
zero_grad clip_grad_value_ parameters fp16 forward_only clip_gradients step log join format save_root move wait close empty_cache create_pipe train log max_memory_allocated add_scalar update val join train_step write_summary size AverageMeter set_epoch all_reduce reset avg item world_size step cuda log enumerate count join format save_root copy save module log state_dict save_model evaluate initial_eval print train_epoch epochs start_epoch eval range log load format SDCNet2DRecon print pretrained start_epoch load_state_dict transpose astype float32 from_numpy expand_dims imread join int uint8 imwrite model print sort target_dir makedirs exit astype copyfile get_data source_dir listdir cuda split imwrite model get_data cuda target_dir transpose exit from_numpy source_dir expand_dims imread astype listdir join int uint8 print sort float32 split print join devnull open Brightness enhance enhance Contrast Color enhance fromarray convert mode array split vars sorted format isinstance reshape join basicConfig str setFormatter print addHandler StreamHandler Formatter setLevel INFO FileHandler join str exp date_str tb_path SummaryWriter ckpt exp_path save_log write strftime make_exp_name device_count tb_exp_path makedirs format nanmean info print_evaluate_results sum diag exp_path save print_evaluate_results copyfile sum format synchronize Compose stack avg info zip enumerate add_image join remove colorize_mask make_grid add_scalar extend nanmean numpy diag makedirs format id2cat info sum diag enumerate warn_imbalance list scatter_kwargs device_count replicate parallel_apply range | # Improving Semantic Segmentation via Video Prediction and Label Relaxation ### [Project](https://nv-adlr.github.io/publication/2018-Segmentation) | [Paper](https://arxiv.org/pdf/1812.01593.pdf) | [YouTube](https://www.youtube.com/watch?v=aEbXjGZDZSQ) | [Cityscapes Score](https://www.cityscapes-dataset.com/anonymous-results/?id=555fc2b66c6e00b953c72b98b100e396c37274e0788e871a85f1b7b4f4fa130e) | [Kitti Score](http://www.cvlibs.net/datasets/kitti/eval_semseg_detail.php?benchmark=semantics2015&result=83cac7efbd41b1f2fc095f9bc1168bc548b48885) <br> PyTorch implementation of our CVPR2019 paper (oral) on achieving state-of-the-art semantic segmentation results using Deeplabv3-Plus like architecture with a WideResNet38 trunk. We present a video prediction-based methodology to scale up training sets by synthesizing new training samples and propose a novel label relaxation technique to make training objectives robust to label noise. <br> [Improving Semantic Segmentation via Video Propagation and Label Relaxation](https://nv-adlr.github.io/publication/2018-Segmentation) <br /> Yi Zhu<sup>1*</sup>, Karan Sapra<sup>2*</sup>, [Fitsum A. Reda](https://scholar.google.com/citations?user=quZ_qLYAAAAJ&hl=en)<sup>2</sup>, Kevin J. Shih<sup>2</sup>, Shawn Newsam<sup>1</sup>, Andrew Tao<sup>2</sup>, [Bryan Catanzaro](http://catanzaro.name/)<sup>2</sup> <sup>1</sup>UC Merced, <sup>2</sup>NVIDIA Corporation <br /> In CVPR 2019 (* equal contributions). [SDCNet: Video Prediction using Spatially Displaced Convolution](https://nv-adlr.github.io/publication/2018-SDCNet) [Fitsum A. Reda](https://scholar.google.com/citations?user=quZ_qLYAAAAJ&hl=en), Guilin Liu, Kevin J. Shih, Robert Kirby, Jon Barker, David Tarjan, Andrew Tao, [Bryan Catanzaro](http://catanzaro.name/)<br /> NVIDIA Corporation <br /> | 1,165 |
YeeU/InverseRenderNet | ['intrinsic image decomposition'] | ['Outdoor inverse rendering from a single image using multiview self-supervision'] | train.py model/dataloader.py model/loss_layer.py utils/render_sphere_nm.py test_demo.py model/pred_illuDecomp_layer.py test_iiw.py model/SfMNet.py model/lambSH_layer.py model/sup_illuDecomp_layer.py utils/whdr.py model/reproj_layer.py pinv pinv main _read_pk_function md_construct_inputPipeline md_preprocess_func md_read_func megaDepth_dataPipeline lambSH_layer conv2d_nosum_2ch pinv cvtLab loss_formulate conv2d_nosum pinv illuDecomp map_reproj interpImg SfMNet get_bilinear_filter pinv illuDecomp render_sphere_nm load_image srgb_to_rgb compute_whdr svd reduce_max boolean_mask diag zeros_like Saver save SfMNet megaDepth_dataPipeline run open restore get_collection placeholder ceil expand_dims range close GLOBAL_VARIABLES loss_formulate ConfigProto InteractiveSession join time constant n_batch Variable reshape print optimize_loss float32 write data_path sigmoid int32 join sorted concatenate glob output_types make_initializer from_structure argsort get_next md_construct_inputPipeline output_shapes array len float32 from_tensor_slices py_func from_tensor_slices map apply parallel_interleave repeat prefetch shuffle_and_repeat constant not_equal ones concat reduce_sum pow stack clip_by_value expand_dims l2_normalize map_reproj while_loop concat boolean_mask where TensorArray cvtLab clip_by_value abs log to_float exp transpose matrix_set_diag get_collection reduce_sum matmul conv2d pad gather_nd cast conv2d_nosum illuDecomp expand_dims sum ones_like absolute_difference relu sqrt pinv stack lambSH_layer tile equal load join constant mean_squared_error REGULARIZATION_LOSSES not_equal reshape sigmoid pow reduce_mean zeros bool acos to_float constant transpose greater matmul equal ones_like where conv2d conv2d constant while_loop TensorArray pow stack clip_by_value zeros_like to_int32 concat logical_not where to_float logical_and matmul reduce_sum interpImg meshgrid expand_dims range ones_like matrix_inverse stack tile equal constant is_nan reshape float32 scatter_nd to_float to_int32 gather_nd floor ceil expand_dims str list max_pool conv2d log2 int32 zip ceil range append meshgrid abs arange tile not_equal reduce_sum ones_like arange sqrt stack nan meshgrid range append mean max float astype power zeros_like | # InverseRenderNet: Learning single image inverse rendering ***!! Check out our new work InverseRenderNet++ [paper](https://arxiv.org/abs/2102.06591) and [code](https://github.com/YeeU/InverseRenderNet_v2), which improves the inverse rendering results and shadow handling.*** This is the implementation of the paper "InverseRenderNet: Learning single image inverse rendering". The model is implemented in tensorflow. If you use our code, please cite the following paper: @inproceedings{yu19inverserendernet, title={InverseRenderNet: Learning single image inverse rendering}, author={Yu, Ye and Smith, William AP}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year={2019} } | 1,166 |
YeongHyeon/ConAD | ['anomaly detection'] | ['Anomaly Detection With Multiple-Hypotheses Predictions', 'Consistency-based anomaly detection with adaptive multiple-hypotheses predictions'] | source/tf_process.py source/datamanager.py run.py source/neuralnet.py main Dataset ConAD gray2rgb dat2canvas boxplot discrete_cmap random_noise make_dir save_img training test histogram latent_plot training test ConAD Saver global_variables_initializer ConfigProto Dataset Session run mkdir astype float32 gray2rgb astype float32 sqrt shape ceil range subplot dat2canvas close tight_layout imshow title savefig figure range len str name linspace base get_cmap grid close tight_layout colorbar scatter savefig figure subplots set_xticklabels close tight_layout clf savefig append enumerate minimum xlabel text close ylabel hist savefig legend xlim sum max x_fake linspace save optimizer_d add_run_metadata run z_dim append fit_transform imsave range dat2canvas make_dir FileWriter latent_plot enumerate RunOptions join time print graph random_noise save_img PCA reshape RunMetadata add_summary next_train optimizer_g summaries array gray2rgb exists open run restore ones z_dim shape append fit_transform imsave mse_r asarray make_dir boxplot latent_plot join print next_test random_noise write float32 PCA histogram | Consistency-based anomaly detection (ConAD) ===== Implementation of Consistency-based anomaly detection (ConAD) from paper <a href="https://arxiv.org/abs/1810.13292">'Anomaly Detection With Multiple-Hypotheses Predictions'</a> with MNIST dataset [<a href="https://github.com/YeongHyeon/CVAE-AnomalyDetection">Related repository</a>] [<a href="https://github.com/YeongHyeon/ConAD-PyTorch">PyTorch Version</a>]. ## Architecture <div align="center"> <img src="./figures/conad.png" width="500"> <p>Simplified ConAD architecture.</p> </div> ## Graph in TensorBoard <div align="center"> | 1,167 |
YeongHyeon/ConAD-PyTorch | ['anomaly detection'] | ['Anomaly Detection With Multiple-Hypotheses Predictions', 'Consistency-based anomaly detection with adaptive multiple-hypotheses predictions'] | run.py source/neuralnet.py source/solver.py source/loss_functions.py source/datamanager.py main Dataset mean_square_error find_best_x lossfunc_g lossfunc_d Decoder Encoder Discriminator NeuralNet Flatten Hypotheses gray2rgb dat2canvas boxplot discrete_cmap torch2npy make_dir save_img save_graph training test histogram latent_plot forward test training device Dataset NeuralNet shape len mean_square_error item mean abs log mean sum log mkdir astype float32 gray2rgb astype float32 sqrt shape ceil range subplot dat2canvas close tight_layout imshow title savefig figure range len str name linspace base get_cmap grid close tight_layout colorbar scatter savefig figure subplots set_xticklabels close tight_layout clf savefig append enumerate minimum xlabel text close ylabel hist savefig legend xlim sum max asarray plot xlabel close ylabel tight_layout clf savefig save cpu numpy find_best_x decoder torch2npy add num_h hypotheses div unsqueeze discriminator device encoder to cat enumerate models torch2npy zero_grad num_h linspace save device forward str transpose z_dim append to fit_transform imsave range state_dict SummaryWriter dat2canvas make_dir save_graph lossfunc_d lossfunc_g latent_plot enumerate join time decoder backward print reshape random_noise save_img PCA next_train step array add_scalar gray2rgb torch2npy forward open mean_square_error ones transpose z_dim shape load_state_dict append fit_transform imsave asarray glob make_dir eval item boxplot latent_plot enumerate load join print sort next_test random_noise write float32 PCA histogram | [PyTorch] Consistency-based anomaly detection (ConAD) ===== Implementation of Consistency-based anomaly detection (ConAD) from paper <a href="https://arxiv.org/abs/1810.13292">'Anomaly Detection With Multiple-Hypotheses Predictions'</a> with MNIST dataset [<a href="https://github.com/YeongHyeon/ConAD">TensorFlow Version</a>]. ## Architecture <div align="center"> <img src="./figures/conad.png" width="500"> <p>Simplified ConAD architecture.</p> </div> ## Graph in TensorBoard <div align="center"> | 1,168 |
YeongHyeon/FARED_for_Anomaly_Detection | ['anomaly detection'] | ['Fast Adaptive RNN Encoder-Decoder for Anomaly Detection in SMD Assembly Machine'] | FARED_source/run.py preprocessing_source/dat2npy_mfcc.py FARED_source/source/datamanager.py FARED_source/source/neuralnet.py FARED_source/source/tf_process.py preprocessing_source/dat2npy_stft.py FARED_source/source/developer.py main DataSet print_stamp LSTM_Model_TF1_04 LSTM_Model_TF1_14 validation make_dir data2canvas training main makedir spectrums2timeaverage complex2magnitude main makedir spectrums2timeaverage validation DataSet trkey LSTM_Model_TF1_14 training Saver run append global_variables_initializer InteractiveSession print mkdir zeros reshape graph_def clf save run str write_graph ylabel savefig append imsave range asarray plot make_dir close FileWriter tight_layout logits data2canvas join time int print xlabel add_summary zeros next_batch loss clf save exists run restore ylabel scatter savefig legend append range asarray make_dir close tight_layout am_tot time key_tot print xlabel next_batch loss len print mkdir zeros transpose sum clf save subplot ylabel imshow title savefig melspectrogram makedir rot90 plot glob astype tight_layout close join read int print sort spectrums2timeaverage xlabel power sqrt imag real imresize complex2magnitude stft | Fast Adaptive RNN Encoder-Decoder for Anomaly Detection in SMD Assembly Machine ===== ## Introduction This repository provides the source code of the paper "Fast Adaptive RNN Encoder-Decoder for Anomaly Detection in SMD Assembly Machine" [<a href="https://www.mdpi.com/1424-8220/18/10/3573/pdf">pdf</a>]. <div align="center"> <img src="./figures/microphone.png" width="500"> <p>The SMD assembly machine with microphone (red box)</p> </div> ## Requirements | 1,169 |
YeongHyeon/GANomaly | ['semi supervised anomaly detection', 'anomaly detection'] | ['GANomaly: Semi-Supervised Anomaly Detection via Adversarial Training'] | source/tf_process.py run.py source/neuralnet.py source/layers.py source/datamanager.py main Dataset Layers GANomaly gray2rgb dat2canvas make_dir save_img training test boxplot test training confirm_params Dataset GANomaly mkdir astype float32 gray2rgb astype float32 sqrt shape ceil range subplot dat2canvas close tight_layout imshow title savefig figure range len subplots set_xticklabels close tight_layout clf savefig append enumerate print make_dir save_img save_parameter next_train step range load_parameter join asarray gray2rgb print next_test ones make_dir write float32 shape boxplot append step imsave open | [TensorFlow] GANomaly: Semi-Supervised Anomaly Detection via Adversarial Training ===== TensorFlow implementation of GANomaly with MNIST dataset. <a href="https://github.com/YeongHyeon/GANomaly-PyTorch">PyTorch Version</a> is also implemented. ## Summary ### GANomaly architecture <div align="center"> <img src="./figures/ganomaly.png" width="650"> <p>Simplified GANomaly architecture.</p> </div> | 1,170 |
YerevaNN/BioRelEx | ['relation extraction'] | ['BioRelEx 1.0: Biological Relation Extraction Benchmark'] | evaluate.py unordered_pair get_entity_mentions hash_sentence BootstrapEvaluation get_sentences evaluate_sentences main PRFScores PRFScoresFlatMentions get_entity_coreferences lower sub format get_entity_mentions print keys set PRFScores PRFScoresFlatMentions get_entity_coreferences add_sets print_scores match_by BootstrapEvaluation ArgumentParser values hash_sentence truth_path get_sentences append parse_args format prediction_path print_results get_scores bootstrap_count items evaluate print add_argument evaluate_sentences len | # BioRelEx: Biological Relation Extraction Benchmark BioRelEx is a dataset of 2000+ sentences from biological journals with complete annotations of proteins, genes, chemicals and other entities along with binding interactions between them. [A paper describing the dataset](https://www.aclweb.org/anthology/papers/W/W19/W19-5019/) is accepted at [ACL BioNLP Workshop 2019](https://aclweb.org/aclwiki/BioNLP_Workshop). We invite everyone to submit their relation extraction systems to [our Codalab competition](https://competitions.codalab.org/competitions/20468). ## Dataset format Training and development sets are provided as JSON files. Each version of the dataset is one [release](https://github.com/YerevaNN/BioRelEx/releases) of this repository. Each JSON file is a list of objects, one per sentence. _More details will be added soon._ ## Evaluation We propose two main metrics for evaluation, one for **entity recognition** and another one for **relation extraction**. We provide a [script](https://github.com/YerevaNN/BioRelEx/blob/master/evaluate.py) for the main evaluation metrics and several additional metrics designed for error analysis. The test set is not released. **Please submit your solution in [this Codalab competition](https://competitions.codalab.org/competitions/20468).** | 1,171 |
YiZeng623/FenceBox | ['data augmentation', 'adversarial attack'] | ['FenceBox: A Platform for Defeating Adversarial Examples with Data Augmentation Techniques'] | imagenet_labels.py watermark_defense.py eot_defense.py test.py defense.py inceptionv3.py utils.py defend_PROTAT defend_BdR defend_GB defend_JPEG dct2 cropresult_sig shifting defend_CD padresult_sig defend_FD FD_fuction_sig jpeg defend_GN defend_RDG defend_ET nearest_neighbour_scaling get_random_crop_coords random_crop irfft2 padding rfft2 defend_WebP defend_RAND defend_SHIELD defend_PD idct2 defend_MB defend_CROP cropping defend_PROTAT tf_rand_cropping tf_rand_padding defend_RDG defend_RAND defend_CROP label_to_name _preprocess model _get_model one_hot l2_distortion getabatch linf_distortion load_image optimistic_restore make_classify get_random_crop_coords random_crop defend_PROTAT padding defend_RDG defend_RAND defend_ET shifting defend_CROP cropping astype uint16 roll int zeros_like padding cos pi rescale uniform shifting sin round range cropping astype float32 remap linspace meshgrid zeros range enumerate int get_random_crop_coords randint random int int pad resize asarray concatenate reshape squeeze transpose divide float32 dct2 idct2 zeros round clip split PadIfNeeded Crop FD_fuction_sig cropresult_sig padresult_sig zeros arange reshape tile fromarray uint8 BytesIO astype float32 save int jpeg asarray arange reshape transpose astype stack tile append zeros range len uint8 astype float32 aug ElasticTransform aug MotionBlur aug GlassBlur aug JpegCompression aug ImageCompression aug CoarseDropout aug GaussNoise shape range copy to_float squeeze random_uniform crop_and_resize squeeze pad set_shape random_uniform crop_and_resize expand_dims convert_to_tensor reshape squeeze roll rotate int64 cast random_uniform resize cond ones_like constant concat shape stack gather_nd cast int32 random_uniform cond clip_by_value tile round reverse squeeze crop_and_resize squeeze set_shape random_uniform crop_and_resize expand_dims hasattr inception_v3_arg_scope inception_v3 default_image_size network_fn _preprocess default_image_size _get_model argmax optimistic_restore mean reshape abs max product reshape mean sqrt shape sum zeros restore sorted NewCheckpointReader Saver get_variable_to_shape_map | environment requirements: Numpy1.17.2 Scipy 1.3.1 Skimage 0.15.0 Opencv 3.4.2 Albumentations 0.4.5 Pillow 6.1.0 Python 3.7 | 1,172 |
YichengWu/PhaseCam3D | ['depth estimation', 'monocular depth estimation', 'autonomous driving'] | ['PhaseCam3D — Learning Phase Masks for Passive Single View Depth Estimation'] | Network.py depth_estimation_test.py depth_estimation.py gen_PSFs blurImage read2batch gen_OOFphase system parse_element add_gaussian_noise cost_rms fft2dshift cost_grad data_augment blurImage read2batch gen_OOFphase system parse_element add_gaussian_noise fft2dshift add_SDGN gen_PSFs max_pool_2x2 cnn3x3 BN conv_transpose UNet_2 cnnLayer conv2d parse_single_example reshape uint8 decode_raw concat random_uniform map_fn TFRecordDataset shuffle make_one_shot_iterator float32 convert_image_dtype get_next cast data_augment batch random_normal int value print slice concat linspace meshgrid empty range len int stack stack value int32 sqrt reduce_mean square image_gradients cost_rms repeat multiply sqrt random_normal max_pool_2x2 cnn3x3 concat cnnLayer sigmoid | # PhaseCam3D ### [Project](https://yichengwu.github.io/PhaseCam3D/) | [Video](https://www.youtube.com/watch?time_continue=751&v=CV4vEAjBv20) | [Paper](https://drive.google.com/file/d/1ISWnM1NhrcNpu5vBtejTQdS9GNuiQyqW/view?usp=sharing) This repository contains TensorFlow implementation for the ICCP2019 paper *PhaseCam3D — Learning Phase Masks for Passive Single View Depth Estimation* by [Yicheng Wu](https://yichengwu.github.io), [Vivek Boominathan](https://vivekboominathan.com/), [Huaijin Chen](http://hc25.web.rice.edu/), [Aswin Sankaranarayanan](http://imagesci.ece.cmu.edu/index.html), and [Ashok Veeraraghavan](https://computationalimaging.rice.edu/).  ## Installation Clone this repo. ```bash git clone https://github.com/YichengWu/PhaseCam3D cd PhaseCam3D/ ``` | 1,173 |
YilunZhou/optimal-active-learning | ['active learning'] | ['Towards Understanding the Behaviors of Optimal Deep Active Learning Algorithms'] | intent_classification/help_text.py named_entity_recognition/search.py object_classification/batchbald_redux/__init__.py object_classification/performance_curve.py object_classification/odmr.py object_classification/summarize.py intent_classification/relative_order_ranking.py object_classification/collect_results.py intent_classification/train_server.py intent_classification/random_baselines.py object_classification/search.py intent_classification/distribution_vis_horizontal.py object_classification/help_text.py object_classification/seed_transfer.py intent_classification/performance_curve.py object_classification/train_server.py intent_classification/collect_results.py object_classification/utils.py object_classification/batchbald_redux/batchbald.py intent_classification/summarize.py named_entity_recognition/collect_results.py object_classification/batchbald_redux/_nbdev.py named_entity_recognition/utils.py intent_classification/seed_transfer.py intent_classification/model_transfer.py object_classification/batchbald_redux/joint_entropy.py object_classification/batchbald_redux/active_learning.py intent_classification/search.py named_entity_recognition/distribution_vis.py object_classification/train_scheduler.py named_entity_recognition/trainer.py named_entity_recognition/idmr.py object_classification/distribution_vis.py object_classification/distribution_vis_bald_batchbald.py object_classification/random_baselines.py named_entity_recognition/heuristics.py named_entity_recognition/train_server.py intent_classification/heuristics.py object_classification/trainer.py intent_classification/roberta_model_store.py object_classification/idmr.py intent_classification/train_scheduler.py intent_classification/utils.py named_entity_recognition/random_baselines.py named_entity_recognition/performance_curve.py object_classification/batchbald_redux/consistent_mc_dropout.py intent_classification/distribution_vis.py object_classification/batchbald_redux/repeated_mnist.py named_entity_recognition/help_text.py named_entity_recognition/summarize.py named_entity_recognition/train_scheduler.py object_classification/heuristics.py intent_classification/trainer.py intent_classification/idmr.py intent_classification/odmr.py main main_cli plot_length_distribution get_label_cdf main_cli plot_label_distribution plot_ref_meter group_adjacent main get_length_cdf plot_length_distribution get_label_cdf main_cli plot_label_distribution plot_ref_meter group_adjacent main get_length_cdf score_bald_roberta main_cli score_maxent_roberta score_bald score_maxent active_learn active_learn_roberta main main_cli select_batch group_proportions prioritize_len_groups main idmr display_name compute_quality main main_cli main_cli select_batch prioritize_labels main odmr main abs_path main_cli plot_curves main main_cli main plot_relative_order main_cli RobertaWrapper roberta_get_model swap_kernel main_cli Runner main new_args_result_file main main_cli RobertaTrainer roberta_expand_data Trainer AOEModel BiLSTMModel get_trainer roberta_get_tokenizer CNNModel TrainScheduler TrainClient create_server get_open_port server_train load_baseline abs_path load_optimal store_baseline main main_cli main_cli get_len_proportion get_tag_proportion plot_ref_meter plot_len_proportion group_proportions plot_tag_proportion main main main_cli acquire main_cli score_model group_proportions select_batch main 
prioritize_len_groups idmr main abs_path main_cli plot_curves main main_cli swap_kernel main_cli Runner main new_args_result_file Decoder Encoder Trainer get_trainer EncoderDecoder TrainScheduler Client create_server get_open_port server_train load_baseline abs_path load_optimal store_baseline main main_cli main_cli plot_ref_meter plot_tsne plot_label_proportion get_frac main main_cli plot_ref_meter plot_tsne plot_label_proportion get_frac main main main_cli acquire main_cli sort largest_deficit main idmr main_cli select_batch prioritize_labels main odmr main abs_path main_cli plot_curves main main_cli swap_kernel main_cli Runner main new_args_result_file main main_cli CNN BayesianCNN get_trainer Trainer TrainScheduler Client create_server get_open_port server_train load_optimal store_baseline concat_data_shards load_baseline abs_path load_data get_subset_base_indices get_base_indices ActiveLearningData get_balanced_sample_indices RandomFixedLengthSampler CandidateBatch compute_conditional_entropy compute_entropy get_bald_batch compute_conditional_entropy_from_logits compute_entropy_from_logits get_batchbald_batch ConsistentMCDropout BayesianModule _ConsistentMCDropout ConsistentMCDropout2d JointEntropy ExactJointEntropy gather_expand DynamicJointEntropy batch_multi_choices SampledJointEntropy create_MNIST_dataset TransformedDataset get_targets create_repeated_MNIST_dataset custom_doc_links print mean append main parse_args add_argument ArgumentParser update T list insert Counter append sum array range len append enumerate update T list insert len Counter get_proportion dict append range enumerate plot len axis zip fill_between range enumerate get_length_cdf get_label_cdf plot axis zip fill_between range enumerate len axis add_patch Rectangle range len plot_label_distribution cumsum add_subplot GridSpec linspace xticks max yticks seed list load_optimal ylabel Counter group_adjacent title savefig sum range flat insert plot_ref_meter zip keys plot_length_distribution int xlabel figure array len print array manual_seed int list sorted best_model evaluate_f1 copy get_trainer score_bald score_maxent zip append trange train keys array int list score_bald_roberta sorted roberta_expand_data zip best_model score_maxent_roberta evaluate_f1 map copy get_trainer roberta_get_tokenizer append trange train keys store_baseline active_learn active_learn_roberta argsort Counter len copy zip append prioritize_len_groups range int list best_model evaluate_f1 copy get_trainer score_bald score_maxent select_batch zip append trange train keys values append enumerate axis plot_curves load_optimal subplot colorbar imshow gca annotate zeros enumerate add_patch Rectangle compute_quality argsort array Counter len list prioritize_labels int list best_model evaluate_f1 Counter copy keys get_trainer score_bald score_maxent select_batch zip append trange train array len load_baseline append legend plot abs_path set_xlim tight_layout min set_ylim Namespace shuffle TrainScheduler evaluate_order trange int list subplot suptitle set_xlabel set_yticks ConnectionPatch tight_layout index set add_artist imshow set_visible set_xticks figure intersection zeros len plot_relative_order from_pretrained randint RobertaWrapper manual_seed tot_acq copy sample randint range len ascii_lowercase join isfile choices Runner run tokenizer zip from_pretrained load int list roberta_get_model len AOEModel Trainer BiLSTMModel manual_seed randint maxsize array CNNModel values open socket bind close AF_INET SOCK_STREAM Popen get_open_port model 
patience batchsize max_epoch evaluate_f1 domain get_trainer model_seed train load list map set split float open join map update T list insert len Counter get_proportion dict append range enumerate plot get_len_proportion axis zip fill_between range enumerate len update T list flatten_and_map insert Counter set append sum array range values len plot get_tag_proportion axis zip fill_between range enumerate len plot_len_proportion plot_tag_proportion group_proportions list sorted keys zip max values best_model evaluate_f1 extend copy get_trainer acquire train max zip sorted score_model keys EncoderDecoder list best_model zip scatter xticks array range yticks Counter axis linspace xticks list ndarray append range flat plot insert copy get_frac zip enumerate isinstance xlabel zeros fill_between len list insert get_frac flat TSNE vstack fit_transform array_split plot_label_proportion reshape PCA plot_tsne load_data print to map to concatenate stack evaluate_acc argsort to zip Counter len TSNE KMeans vstack largest_deficit map Counter array to fit_transform range flat array_split concatenate stack fit_predict sort PCA evaluate_acc evaluate_acc to argmax CNN BayesianCNN map evaluate_acc to array concat_data_shards abs_path isfile int defaultdict randperm append range len Subset isinstance shape empty close tqdm shape empty close tqdm shape empty close tqdm shape empty close tqdm add_variables sum min compute_conditional_entropy DynamicJointEntropy tqdm shape item append compute_batch empty max range shape topk min multinomial reshape list list DEBUG_CHECKS expand MNIST ConcatDataset Compose normal_ TransformedDataset isinstance ConcatDataset Subset dataset | # Optimal Active Learning Behaviors  This is the code repository for the AISTATS 2021 paper [_Towards Understanding the Behaviors of Optimal Deep Active Learning Algorithms_](http://proceedings.mlr.press/v130/zhou21b.html) by Yilun Zhou, Adithya Renduchintala, Xian Li, Sida Wang, Yashar Mehdad and Asish Ghoshal. A brief video introduction is available [here](https://www.youtube.com/watch?v=McBC7H3BJFM). There are three tasks, `object_classification`, `intent_classification`, and `named_entity_recognition`. Specific instructions are listed in `<task>/README.md` for each task. Before proceeding, please download the preprocessed data as a zip file from [this link](http://bit.ly/optimal-al-data), and unpack the contents of `<task>/data/` into the the currently empty `<task>/data/` folder. In `<task>/README.md`, the first step is to search for the optimal order, which takes several days _per search_ on 8 V100 GPUs, using the settings in the paper. Thus, we have saved the log files for each task. You can download all of them from [this link](http://bit.ly/optimal-al-logs), and unpack the contents of `<task>/logs/` into the currently empty `<task>/logs/` folder. All the plots will be saved in `figures/<task>/` folder, which is currently populated with the those used in the paper. The code should run with reasonably recent versions of `pytorch`, `numpy`, `scipy`, `matplotlib`, `scikit-learn`, etc. However, if there are any compatibility issues, please try again with the exact versions specified in [`requirements.txt`](requirements.txt), which contains a (more than) sufficient list of packages. For any questions, please contact Yilun Zhou at [email protected]. The paper can be cited as ``` | 1,174 |
Yinghao-Li/GuiGen | ['text generation'] | ['Transformer-Based Neural Text Generation with Syntactic Guidance'] | syn_gen_data_prepare.py SynGen/Beam.py TextGen/Model.py txt_gen_train.py Core/Utils.py TextGen/Dataset.py Core/Layers.py Core/Models.py syn_gen_train.py TextGen/Generator.py Core/Dataset.py txt_gen_data_prepare.py SynGen/Model.py SynGen/Generator.py Core/Modules.py txt_gen_from_tmpl.py txt_generate.py Core/Constants.py SynGen/Dataset.py TextGen/TrainFunc.py Core/SubLayers.py SynGen/TrainFunc.py Core/Optim.py TextGen/Beam.py main parse_args main parse_args prepare_dataloaders main parse_args main parse_args main parse_args main parse_args prepare_dataloaders syn_batch_prep txt_batch_prep collate_fn read_instances_from_file Dataset MultiEncDecoderLayer SimplifiedSynTrfEncoderLayer TrfEncoderLayer TrfDecoderLayer SynTrfEncoderLayer TrfEncoder SynTrfEncoder RNNEncoder TransformerDecoder RNNDecoder MultiEncSynDecoder MultiEncDecoder ScaledDotProductAttention NoamOpt Lookahead ScheduledOptim MultiHeadAttention PositionwiseFeedForward MultiEncAttention get_tree_path_mask get_attn_key_pad_mask is_number prune_tree_by_depth cal_nll_loss set_seed_everywhere get_subsequent_mask cal_kld get_sinusoid_encoding_table build_vocab_idx part_shuffle cal_kl_weight convert_instance_to_idx_seq get_tree_path text_regularize get_non_pad_mask cal_accuracy Beam Dataset collate_fn batch_prep Generator MultiEncVAETransformer train train_epoch eval_epoch Beam Dataset collate_fn batch_prep Generator MultiEncTransformer train train_epoch eval_epoch print add_argument ArgumentParser load ori_load_dir prune_tree_by_depth print tmpl_depth data_save_dir ref_load_dir save dict_load_dir parse_args prepare_dataloaders device max cuda MultiEncVAETransformer Adam max_token_src_len ScheduledOptim d_model to n_lvl_token set_seed_everywhere lr ref_dir random_seed ori_dir n_warmup_steps dict_dir parameters filter train n_syn_token print DataLoader Dataset dict_path Dataset Generator strftime today DataLoader mkdir read_instances_from_file convert_instance_to_idx_seq round build_vocab_idx exists list dict_save_dir shuffle train_ratio zip int ori_save_dir ref_save_dir len TxtGenerator SynGenerator max_syn_token_len max_txt_token_len MultiEncTransformer n_txt_token list txt_batch_prep syn_batch_prep zip max tensor empty array max tensor array list encode_as_pieces print map tqdm SentencePieceProcessor append randint get_tree_path text_regularize array split array cos sin size eq PAD expand triu size ones expand seed manual_seed_all manual_seed sub ne view eq item PAD ne view log_softmax size scatter sum PAD cross_entropy exp pow sum items format print set len int asarray arange permutation choice round len float int print tolist astype tqdm zip append append deepcopy enumerate zeros enumerate batch_prep max tensor empty array ne cal_nll_loss model backward zero_grad map tqdm item train step PAD cal_accuracy eval time format epoch print model_save train_epoch strftime today mkdir dirname save range log eval_epoch state_dict | # Syntactically Guided Text Generation This repo is associated with the paper [Transformer-Based Neural Text Generation with Syntactic Guidance](https://arxiv.org/abs/2010.01737) ## Requirements - [Pytorch](https://pytorch.org/) - [Numpy](https://numpy.org/) - [tqdm](https://github.com/tqdm/tqdm) - [sentencepiece](https://github.com/google/sentencepiece) ## Training ### Data Preparation Download training and test data from 
[here](https://drive.google.com/drive/folders/1LanFy0BC1qC93vICXk2V2J3zcKpAio7j?usp=sharing) and copy the `train` and `test` folders into the `data` folder. | 1,175 |
YingjieHu/POI_Name | ['information retrieval'] | ['An empirical study on the names of points of interest and their changes with geographic distance'] | src/metro_countvector.py src/similarity_matrix.py src/metro_word2vec.py src/geocoder.py src/distribution_divergence.py src/multiple_bar_chart.py src/term_rank_frequency.py src/metro_local_word.py normalize JSD softmax calculate_divergence geocoding_cities vector_count_similarity_matrix_metro count_based_correlation_metro softmax learn_vector_by_count check_metro_local_word_usage grey_color_func visualize_local_words_by_metro find_local_words_by_metro train_word2vec correlation_based_on_word2vec_metro_scale plot_similarity_matrix word2vec_similarity_metro geo_distance_similarity_metro shape exp max shape norm print normalize empty range GeoNames close open join list add_doc replace rows strip close len dict TermDocumentMatrix append listdir range open log2 cosine spearmanr pearsonr show list ylabel legend append fit_transform range plot softmax miles linregress print xlabel PCA dict split array len plot_similarity_matrix cosine zeros range len strip axis sign log2 TermDocumentMatrix log open show list recolor imshow append sum range add_doc replace rows close listdir flip join generate_from_frequencies dict figure array len strip sign log2 TermDocumentMatrix log open list append sum range add_doc replace rows close listdir flip join dict array len join str replace print strip close dict split listdir open str most_similar print Word2Vec save MySentences range log2 spearmanr pearsonr show str list ylabel legend append range plot similarity kilometers linregress load print xlabel dict split array len show list subplots matshow colorbar title get_cmap xticks range yticks load similarity plot_similarity_matrix zeros range len str list len dict plot_similarity_matrix miles append zeros range split | # POI_Name_Analysis * Author: Yingjie Hu * Email: [email protected] ### Overall description This project provides the code for studying the names of Points of Interest (POI) and their changes with geographic distances. This GitHub repository is in companion with the published paper: Hu, Y. & Janowicz, K. (2018): An empirical study on the names of points of interest and their changes with geographic distance, In: Proceedings of the 10th International Conference on Geographic Information Science (GIScience 2018), Aug. 29-31, Melbourne, Australia. https://arxiv.org/pdf/1806.08040.pdf You can refer to this original paper for more details, and please feel free to re-use the code shared here for your own research projects. If you use the code, we would appreciate if you could cite our paper. Thank you! ### Dataset The POI dataset used for this study is from Yelp, and can be downloaded here: https://www.yelp.com/dataset . This study focuses on the seven metropolitan areas in the United States. The figure below provides a geographic visualization of the used POIs: <p align="center"> | 1,176 |
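The POI_Name entry lists `normalize`, `JSD`, and `calculate_divergence` among its functions, i.e., it compares POI name-term distributions across cities with Jensen-Shannon divergence. The sketch below computes JSD between two term-count vectors; the toy counts and the smoothing constant are made up for illustration.

```python
# Sketch of the Jensen-Shannon divergence computation that the function names
# above (normalize, JSD, calculate_divergence) point at: comparing POI name
# term distributions from two areas. The toy counts are made up.
import numpy as np


def normalize(counts) -> np.ndarray:
    counts = np.asarray(counts, dtype=float) + 1e-12   # avoid zero bins
    return counts / counts.sum()


def kl(p: np.ndarray, q: np.ndarray) -> float:
    return float(np.sum(p * np.log2(p / q)))


def jsd(p_counts, q_counts) -> float:
    """Jensen-Shannon divergence (base 2, so the value lies in [0, 1])."""
    p, q = normalize(p_counts), normalize(q_counts)
    m = 0.5 * (p + q)
    return 0.5 * kl(p, m) + 0.5 * kl(q, m)


if __name__ == "__main__":
    # Hypothetical term counts over the same vocabulary in two metropolitan areas.
    city_a = [120, 40, 5, 0, 30]
    city_b = [80, 60, 10, 15, 20]
    print("JSD:", round(jsd(city_a, city_b), 4))
```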
YiwenShaoStephen/NGD-SGD | ['speech recognition'] | ['Parallel training of DNNs with Natural Gradient and Parameter Averaging'] | examples/image_classification/models/cifar/densenet.py examples/image_classification/cifar.py examples/image_classification/models/cifar/resnet.py examples/image_classification/models/cifar/preresnet.py examples/image_classification/models/imagenet/resnext.py examples/image_classification/models/imagenet/__init__.py examples/image_classification/models/cifar/vgg.py examples/image_classification/models/cifar/wrn.py ngd.py examples/image_classification/models/cifar/__init__.py examples/image_classification/models/cifar/resnext.py examples/image_classification/models/cifar/alexnet.py OnlineNaturalGradient NGD AverageMeter accuracy test save_checkpoint main train AlexNet alexnet densenet Transition DenseNet Bottleneck BasicBlock preresnet PreResNet Bottleneck conv3x3 BasicBlock ResNet Bottleneck conv3x3 resnet BasicBlock ResNeXtBottleneck resnext CifarResNeXt vgg19 VGG vgg16_bn vgg19_bn vgg11_bn make_layers vgg11 vgg13 vgg13_bn vgg16 wrn BasicBlock NetworkBlock WideResNet resnext50 ResNeXt Bottleneck resnext101 resnext152 endswith SGD MultiStepLR DataLoader save_checkpoint arch dataset dataloader max exp step Adam dirname load_state_dict NGD sum CIFAR100 CrossEntropyLoss range SummaryWriter format Adagrad Compose test start_epoch resume startswith CIFAR10 gamma load ExponentialLR print parameters train epochs update time format criterion model backward print size AverageMeter zero_grad len avg item step enumerate add_scalar update time format criterion model print add_scalar size AverageMeter eval avg item enumerate len topk size t eq mul_ expand_as append sum max copyfile join save AlexNet CifarResNeXt Conv2d make_layers VGG make_layers VGG make_layers VGG make_layers VGG make_layers VGG make_layers VGG make_layers VGG make_layers VGG WideResNet ResNeXt ResNeXt ResNeXt | # NGD-SGD A Pytorch Implementation of [Natural Gradient Descent](https://arxiv.org/abs/1410.7455) ## Installation Install [PyTorch 1.1.0](https://pytorch.org/get-started/locally/) with Tensorboard support ```bash pip install tb-nightly pip install future ``` ## Run go to ```examples/image_classification/``` and run ```./run_ngd_wrn.sh``` | 1,177 |
YohannaYin/segmentlink_yh | ['scene text detection', 'curved text detection'] | ['Detecting Oriented Text in Natural Images by Linking Segments'] | seglink/seglink/unit_tests.py seglink/seglink/ops.py seglink/seglink/evaluate.py seglink/tool/convert_caffe_model/tests.py seglink/seglink/config.py seglink/tool/create_datasets.py seglink/seglink/data.py seglink/seglink/visualizations.py seglink/manage.py seglink/seglink/model_cnn.py seglink/seglink/preprocess_data.py seglink/tool/convert_caffe_model/convert_caffemodel_to_ckpt.py seglink/seglink/model.py seglink/tool/convert_caffe_model/dump_caffemodel_weights.py seglink/tool/convert_caffe_model/my_convert_caffemodel_to_ckpt.py seglink/seglink/model_cnn_ckpt.py seglink/seglink/utils.py seglink/seglink/fpn.py seglink/seglink/solver.py clear build_op run_tf_program_with_json_config upload_logs start_tb test train clean_op test_preprocess input_stream train_preprocess postprocess_and_write_results_ic13 postprocess_and_write_results_ic15 evaluate build_feature_pyramid SegLinkDetector SsdVgg16 SsdVgg16 load_oplib atrous_conv2d score_loss smooth_l1_loss max_pool conv2d conv_relu avg_pool _nn_variable ReadTxt rotate Solver test_encode_decode_synth_data test_clip_rboxes test_encode_decode_real_data test_data_loading_and_preprocess test_max_pool_on_odd_sized_maps test_decode_combine_rboxes summarize_losses print_tensor_summary rboxes_to_polygons setup_logger log_git_version summarize_activations log_flags mkdir_if_not_exist rboxes_to_bboxes visualize_nodes visualize_rboxes visualize_segments_and_links convert_image_for_visualization visualize_combined_rboxes visualize_detection_each_layer visualize_links visualize_bboxes create_icdar2015_incidental_dataset _int64_feature _bytes_list_feature read_jpeg_check DatasetCreator_Icdar2013 DatasetCreator_Scut DatasetCreator_Td500 _int64_list_feature create_merge_multiple _bytes_feature create_synthtext_dataset _float_feature DatasetCreator DatasetCreator_Icdar2015Incidental _float_list_feature convert_caffemodel_to_ckpt dump_caffemodel_weights convert_caffemodel_to_ckpt test_classify_image join chdir print system mkdir print join system join remove format print glob input len pop join items format isinstance print chdir system abspath split len run_tf_program_with_json_config run_tf_program_with_json_config system greater_equal info build_model multiply reshape OFFSET_VARIANCE shape node_threshold link_threshold softmax cast int32 decode_segments_links SegLinkDetector combine_segments ConfigProto append enumerate minimum decode join format test_batch_size info rboxes_to_polygons astype maximum system int32 result_suffix float range minimum str join astype maximum save_image_and_lexicon bbox_scale int32 float range rboxes_to_bboxes join format load_op_library print copyfile realpath dirname isinstance xavier_initializer_conv2d sqrt constant_initializer add_to_collection get_variable xavier_initializer truncated_normal_initializer get get softmax_cross_entropy_with_logits int64 cast one_hot int radians warpAffine imwrite fabs cos pi getRotationMatrix2D sqrt dot array sin acos str list glob print readlines write close map rotate open imread range append len encode_groundtruth print decode_prediction _generate_random_gt randint range len pack constant encode_groundtruth build_model train_preprocess train_record_path FctdDetector input_stream mkdir_if_not_exist batch enumerate constant clip_rboxes float32 _generate_random_rboxes shuffle_batch train_preprocess add_subplot figure input_stream mkdir_if_not_exist print 
astype float32 set_trace decode_combine_rboxes stdout setFormatter getLogger addHandler StreamHandler Formatter info DEBUG setLevel FileHandler items join format info append str format check_output strip info name zero_fraction sub histogram info scalar ExponentialMovingAverage apply reduce_max zero_fraction reduce_mean shape reduce_min Print print makedirs hstack hstack min max _rboxes_to_polygons uint8 asarray astype float32 IMAGE_BGR_MEAN join isinstance transpose copy add_patch imshow clf savefig Rectangle info range enumerate set_transform print add_patch rotate_around Rectangle transData expand_dims range Circle add_artist shape xrange float plot min shape xrange float range clear visualize_rboxes print add_subplot convert_image_for_visualization imshow savefig Rectangle legend append range enumerate clear iteritems str join format visualize_rboxes print add_subplot shuffle convert_image_for_visualization det_layers imshow savefig visualize_links xrange append range len clear join format visualize_rboxes print add_subplot convert_image_for_visualization imshow savefig range join str permutation arange print TFRecordWriter transpose min write extend SerializeToString tqdm Example expand_dims loadmat range split join format basename print glob TFRecordWriter write shuffle SerializeToString Example exists enumerate format _create_next_sample n_samples concatenate print TFRecordWriter write shuffle SerializeToString _read_list append full range enumerate load build_model Vgg16Model float32 placeholder caffe_weights_path model_scope dump layers prototxt_path Net caffe_weights_path caffemodel_path range TEST len SsdVgg16 Vgg16Model float32 placeholder load_image_and_preprocess vgg16 | # segmentlink_yh use seglink to detect text # SegLink Detecting Oriented Text in Natural Images by Linking Segments (https://arxiv.org/abs/1703.06520). ## Prerequisites The project is written in Python3 and C++ and relies on TensorFlow v1.3. We have only tested it on Ubuntu 14.04. If you are using other Linux versions, we suggest using Docker. CMake (version >= 2.8) is required to compile the C++ code. Install TensorFlow (GPU-enabled) by following the instructions on https://www.tensorflow.org/install/. The project requires no other Python packages. On Ubuntu 14.04, install the required packages by ``` sudo apt-get install cmake sudo pip install --upgrade tensorflow-gpu | 1,178 |
Yongbinkang/ExpFinder | ['information retrieval'] | ['ExpFinder: An Ensemble Expert Finding Model Integrating $N$-gram Vector Space Model and $μ$CO-HITS'] | src/lib/weight.py src/lib/extractor.py src/controller/generator.py src/lib/tokenization.py src/algo/expfinder.py src/lib/vectorizer.py scripts/examples/extract_np.py src/controller/trainer.py main EF generate_pr_matrix generate_ecg generate_dtop_matrix generate_topic_vector generate_ed_vector generate_dp_matrix generate_ed_matrix generate_tf run_expfinder tokenise_doc Tokenizer PhraseVectorizer calc_pr_weight calc_ngram_tfidf tokenise_doc format print split columns unique values fit_transform CountVectorizer calc_ngram_tfidf join sorted columns transform list items multiply divide dict array append sum PhraseVectorizer values split columns calc_pr_weight nodes index append zeros array values len add_edges_from values DiGraph sum columns DataFrame reshape nodes index append sum values transform EF DataFrame append transform append Tokenizer count_nonzero todense get_feature_names len astype float32 reduce divide pow append sum array log enumerate split columns matmul index dot DataFrame values | # ExpFinder: An Ensemble Model for Expert Finding from Text-based Expertise Data ## Introduction <p align="justify"> Finding an expert plays a crucial role in driving successful collaborations and speeding up high-quality research development and innovations. However, the rapid growth of scientific publications and digital expertise data makes identifying the right experts a challenging problem. Existing approaches for finding experts given a topic can be categorised into information retrieval techniques based on vector space models, document language models, and graph-based models. In this paper, we propose <i>ExpFinder</i>, a new ensemble model for expert finding, that integrates a novel <i>N</i>-gram vector space model, denoted as <i>n</i>VSM, and a graph-based model, denoted as <i>μCO-HITS</i>, that is a proposed variation of the CO-HITS algorithm. The key of <i>n</i>VSM is to exploit recent inverse document frequency weighting method for <i>N</i>-gram words, and <i>ExpFinder</i> incorporates <i>n</i>VSM into <i>μCO-HITS</i> to achieve expert finding. </p> ## Setup steps 1. Clone the repository ``` git clone https://github.com/Yongbinkang/ExpFinder.git ``` | 1,179 |
YonghaoHe/A-Light-and-Fast-Face-Detector-for-Edge-Devices | ['face detection'] | ['LFFD: A Light and Fast Face Detector for Edge Devices'] | ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/cross_entropy_with_focal_loss_for_one_class_detection.py head_detection/accuracy_evaluation/evaluation_on_brainwash.py license_plate_detection/data_iterator_farm/multithread_dataiter_for_cross_entropy_v1_small.py pedestrian_detection/symbol_farm/symbol_30_320_20L_4scales_v1.py ChasingTrainFramework_GeneralOneClassDetection/image_augmentation/augmentor.py license_plate_detection/accuracy_evaluation/evaluation_on_CCPD.py face_detection/config_farm/configuration_10_560_25L_8scales_v1.py face_detection/data_provider_farm/reformat_WIDERFACE.py license_plate_detection/symbol_farm/symbol_64_512_16L_3scales_v1.py face_detection/symbol_farm/symbol_10_560_25L_8scales_v1.py face_detection/data_provider_farm/text_list_adapter.py head_detection/data_provider_farm/text_list_adapter.py head_detection/data_iterator_farm/multithread_dataiter_for_cross_entropy_v1.py license_plate_detection/inference_speed_evaluation/inference_speed_eval.py ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/cross_entropy_with_hnm_for_one_class_detection.py license_plate_detection/data_provider_farm/text_list_adapter.py head_detection/config_farm/configuration_10_160_17L_4scales_v1.py license_plate_detection/symbol_farm/symbol_64_512_16L_3scales_v1_small.py pedestrian_detection/config_farm/configuration_30_320_20L_4scales_v1.py license_plate_detection/data_iterator_farm/multithread_dataiter_for_cross_entropy_v1.py license_plate_detection/config_farm/configuration_64_512_16L_3scales_v1_small.py face_detection/inference_speed_evaluation/inference_speed_eval.py license_plate_detection/data_provider_farm/reformat_CCPD.py head_detection/inference_speed_evaluation/inference_speed_eval.py pedestrian_detection/metric_farm/metric_default.py face_detection/accuracy_evaluation/predict.py head_detection/symbol_farm/symbol_10_160_17L_4scales_v1.py ChasingTrainFramework_GeneralOneClassDetection/data_provider_base/base_provider.py ChasingTrainFramework_GeneralOneClassDetection/data_provider_base/base_data_adapter.py ChasingTrainFramework_GeneralOneClassDetection/logging_GOCD.py face_detection/caffemodel/predict_caffemodel_v2.py face_detection/deploy_tensorrt/predict_tensorrt.py face_detection/symbol_farm/symbol_10_320_20L_5scales_v2.py license_plate_detection/data_provider_farm/pickle_provider.py head_detection/data_provider_farm/reformat_brainwash.py pedestrian_detection/data_iterator_farm/multithread_dataiter_for_cross_entropy_v1.py pedestrian_detection/data_provider_farm/pickle_provider.py ChasingTrainFramework_GeneralOneClassDetection/data_iterator_base/data_batch.py face_detection/data_iterator_farm/multithread_dataiter_for_cross_entropy_v1.py ChasingTrainFramework_GeneralOneClassDetection/data_provider_base/text_list_adapter.py face_detection/data_iterator_farm/multithread_dataiter_for_cross_entropy_v2.py ChasingTrainFramework_GeneralOneClassDetection/data_provider_base/pickle_provider.py license_plate_detection/metric_farm/metric_default.py pedestrian_detection/accuracy_evaluation/predict.py ChasingTrainFramework_GeneralOneClassDetection/inference_speed_eval/inference_speed_eval_with_tensorrt_cudnn.py ChasingTrainFramework_GeneralOneClassDetection/train_GOCD.py face_detection/config_farm/__init__.py face_detection/demo/demo.py 
ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/mean_squared_error_with_hnm_for_one_class_detection.py face_detection/symbol_farm/__init__.py head_detection/data_provider_farm/pickle_provider.py head_detection/metric_farm/metric_default.py face_detection/metric_farm/metric_default.py face_detection/accuracy_evaluation/evaluation_on_fddb.py ChasingTrainFramework_GeneralOneClassDetection/solver_GOCD.py license_plate_detection/accuracy_evaluation/predict.py pedestrian_detection/data_provider_farm/text_list_adapter.py ChasingTrainFramework_GeneralOneClassDetection/loss_layer_farm/mean_squared_error_with_ohem_for_one_class_detection.py pedestrian_detection/inference_speed_evaluation/inference_speed_eval.py license_plate_detection/config_farm/configuration_64_512_16L_3scales_v1.py face_detection/data_provider_farm/pickle_provider.py ChasingTrainFramework_GeneralOneClassDetection/inference_speed_eval/inference_speed_eval_with_mxnet_cudnn.py face_detection/caffemodel/predict_caffemodel.py head_detection/accuracy_evaluation/predict.py pedestrian_detection/data_provider_farm/reformat_caltech.py face_detection/config_farm/configuration_10_320_20L_5scales_v2.py face_detection/deploy_tensorrt/to_onnx.py face_detection/accuracy_evaluation/evaluation_on_widerface.py temp_test init_logging Solver start_train DataBatch DataAdapterBaseclass ProviderBaseclass read_file write_file PickleProvider TextListAdapter Augmentor InferenceSpeedEval InferenceSpeedEval HostDeviceMem focal_loss_for_twoclass focal_loss_for_twoclass_Prop cross_entropy_with_hnm_for_one_class_detection_Prop cross_entropy_with_hnm_for_one_class_detection mean_squared_error_with_hnm_for_one_class_detection mean_squared_error_with_hnm_for_one_class_detection_Prop mean_squared_error_with_ohem_for_one_class_detection_Prop mean_squared_error_with_ohem_for_one_class_detection DataBatch run_prediction_folder Predict NMS DataBatch run_prediction_folder PredictCaffe NMS DataBatch run_prediction_folder PredictCaffe NMS run run Multithread_DataIter_for_CrossEntropy Multithread_DataIter_for_CrossEntropy read_file write_file PickleProvider merge_list generate_data_list generate_neg_image check_txt TextListAdapter main parse_args run_prediction_folder Inference_TensorRT HostDeviceMem NMS generate_onnx_file Metric run_get_net_symbol_for_train loss_branch get_net_symbol run_get_net_symbol_for_train loss_branch get_net_symbol generate_gt_files generate_predicted_files DataBatch Predict run_prediction_pickle NMS run Multithread_DataIter_for_CrossEntropy read_file write_file PickleProvider dataset_statistics generate_data_list show_image TextListAdapter Metric run_get_net_symbol_for_train loss_branch get_net_symbol generate_gt_files generate_predicted_files Predict run_prediction_pickle DataBatch NMS run_prediction_folder run run Multithread_DataIter_for_CrossEntropy Multithread_DataIter_for_CrossEntropy read_file write_file PickleProvider annotation_from_name dataset_statistics generate_data_list show_image TextListAdapter Metric run_get_net_symbol_for_train loss_branch get_net_symbol run_get_net_symbol_for_train loss_branch get_net_symbol Predict run_prediction_pickle DataBatch NMS run_prediction_folder run Multithread_DataIter_for_CrossEntropy read_file write_file PickleProvider dataset_statistics generate_data_list show_image TextListAdapter Metric run_get_net_symbol_for_train loss_branch get_net_symbol setFormatter addHandler print makedirs exit StreamHandler Formatter dirname setLevel FileHandler init_logging str list items __version__ info 
Solver fit write PickleProvider TextListAdapter PickleProvider ndarray isinstance print read_by_index positive_index shuffle waitKey imshow rectangle negative_index range enumerate minimum concatenate astype float32 maximum delete argsort append len join Predict waitKey imshow rectangle resize append imread max predict PredictCaffe PickleProvider start_train get_net_symbol Xavier DataIter Metric init_logging info append join readline int endswith print strip write close split append range open int join str imwrite print makedirs min write close split zeros imread max range open close write open int readlines close shuffle waitKey imshow rectangle open imread range enumerate split add_argument ArgumentParser data VideoCapture imwrite FONT_HERSHEY_SIMPLEX VideoWriter VideoWriter_fourcc release destroyAllWindows waitKey shape imshow parse_args imread predict use_gpu replace astype join read Predict time uint8 print putText write rectangle cpu gpu do_inference Inference_TensorRT load update basicConfig list items load_model graph export_model float32 dict check_graph split Convolution Custom slice_axis softmax LinearRegressionOutput Activation Variable Convolution Group loss_branch Activation list_outputs list_arguments print get_net_symbol list_auxiliary_states infer_shape print_summary int join str replace print strip makedirs write close split findall float append open join Predict replace print strip makedirs write close IMREAD_COLOR imread predict open Predict PickleProvider print read_by_index positive_index waitKey imshow rectangle negative_index predict str makedirs len dirname findall float int readlines close shuffle waitKey imshow rectangle open imread range append split int sorted print readlines close min open max range split basename range split shuffle imwrite int split annotation_from_name shuffle str imwrite floor ceil walk enumerate | # A Light and Fast Face Detector for Edge Devices ## **Big News**: LFD, which is a big update of LFFD, now is released (2021.03.09). It is strongly recommended to use LFD instead !!! Visit LFD Repo [here](https://github.com/YonghaoHe/LFD-A-Light-and-Fast-Detector). This repo will not be maintained from now on. ## Recent Update * `2019.07.25` This repos is first online. Face detection code and trained models are released. * `2019.08.15` This repos is formally released. Any advice and error reports are sincerely welcome. * `2019.08.22` face_detection: latency evaluation on TX2 is added. * `2019.08.25` face_detection: RetinaFace-MobileNet-0.25 is added for comparison (both accuracy and latency). * `2019.09.09` LFFD is ported to NCNN ([link](https://github.com/SyGoing/LFFD-with-ncnn)) and MNN ([link](https://github.com/SyGoing/LFFD-MNN)) by [SyGoing](https://github.com/SyGoing), great thanks to SyGoing. * `2019.09.10` face_detection: **important bug fix:** vibration offset should be subtracted by shift in data iterator. This bug may result in lower accuracy, inaccurate bbox prediction and bbox vibration in test phase. We will upgrade v1 and v2 as soon as possible (should have higher accuracy and more stable). | 1,180 |
YosukeSugiura/SEGAN | ['speech enhancement'] | ['SEGAN: Speech Enhancement Generative Adversarial Network'] | settings.py segan.py display.py data.py display figout pesq_score Loss_dis Loss_gen Generator test Discriminator train settings read format array_split print pypesq average append array range concatenate mean SquaredError_Scalor mean absolute_error SquaredError_Scalor learning_rate_gen ImageExporter create_batch batch_size save_models updating data_loader model_save_path learning_rate_dis export forward display waveform Generator Adam shape load_models figout Discriminator clear_parameters append next get_unlinked_variable range absolute_error epoch_from epoch format d processEvents mean load Loss_dis join Loss_gen Variable print upd scene disp histogram zeros loss batch_num len Variable Generator print wav_write data_loader noisy shape flatten zeros forward create_batch_test | # SEGAN (NNabla) Implementation of Speech Enhancement GAN (SEGAN) by [NNabla](https://nnabla.readthedocs.io/en/latest/#) Japanese README (**Japanese version available here**) -> [Link](https://github.com/YosukeSugiura/SEGAN/blob/master/README_ja.md) **Original Paper** SEGAN: Speech Enhancement Generative Adversarial Network https://arxiv.org/abs/1703.09452 ## Requirements ### Python - Python 3.6 - CUDA 10.0 & CuDNN 7.6 | 1,181
Young-in/ANM-Assignment2-loglizer | ['anomaly detection'] | ['Anomaly Detection using Autoencoders in High Performance Computing Systems'] | demo/InvariantsMiner_demo_without_labels.py loglizer/models/SVM.py loglizer/models/LR.py loglizer/dataloader.py demo/InvariantsMiner_demo.py demo/SVM_demo.py demo/PCA_demo.py loglizer/models/DecisionTree.py loglizer/preprocessing.py demo/PCA_demo_without_labels.py loglizer/models/__init__.py benchmarks/HDFS_bechmark.py demo/LogClustering_demo.py demo/LR_demo.py demo/DecisionTree_demo.py loglizer/models/PCA.py loglizer/models/IsolationForest.py loglizer/models/LogClustering.py loglizer/utils.py demo/IsolationForest_demo.py _split_data load_BGL load_HDFS bgl_preprocess_data FeatureExtractor metrics DecisionTree InvariantsMiner IsolationForest LogClustering LR PCA SVM int hstack arange shuffle endswith DataFrame values iterrows list apply OrderedDict _split_data append sum format set load items set_index print to_csv findall to_dict read_csv str list print tuple set savetxt mkdir append zeros sum range values len precision_recall_fscore_support | # ANM-Assignment2-loglizer loglizer part of ANM assignment #2 <p align="center"> <a href="https://github.com/logpai"> <img src="https://github.com/logpai/logpai.github.io/blob/master/img/logpai_logo.jpg" width="425"></a></p> # loglizer **Loglizer is a machine learning-based log analysis toolkit for automated anomaly detection**. > Loglizer is an AI-based log analytics tool for large-scale log data, applicable to scenarios such as automated anomaly detection and intelligent fault diagnosis. Logs are imperative in the development and maintenance process of many software systems. They record detailed runtime information during system operation that allows developers and support engineers to monitor their systems and track abnormal behaviors and errors. Loglizer provides a toolkit that implements a number of machine-learning based log analysis techniques for automated anomaly detection. :telescope: If you use loglizer in your research for publication, please kindly cite the following paper. | 1,182
YoungGod/DFR | ['anomaly detection'] | ['Unsupervised anomaly segmentation via deep feature reconstruction', 'DFR: Deep Feature Reconstruction for Unsupervised Anomaly Segmentation'] | DFR-source/vgg.py DFR-source/MVTec.py DFR-source/anoseg_dfr.py DFR-source/utils.py DFR-source/feature.py DFR-source/feat_cae.py DFR-source/main.py DFR-source/vgg19.py AnoSegDFR Extractor AvgFeatAGG2d channel_shuffle ChannelShuffle FeatSCAE FeatCAE config ValTestDataset AbnormalDataset TestDataset build_dataset_from_featmap get_image_files TrainTestDataset get_mask_files MaskDataset NormalDataset visulization_score visulization spec_sensi_acc_riou_auc rescale spec_sensi_acc_iou_auc normalize auc_roc vgg19 download_url_to_file VGG vgg16_bn _vgg _get_torch_home vgg19_bn vgg11_bn vgg13 vgg11 make_layers vgg13_bn vgg16 load_state_dict_from_url VGG19 size view contiguous parse_args add_argument ArgumentParser print join walk append print join walk append view float32 unfold mean interpolate permute to min max RETR_TREE join drawContours format CHAIN_APPROX_SIMPLE findContours where IMREAD_COLOR IMREAD_GRAYSCALE resize imread imsave split RETR_TREE join drawContours format imwrite CHAIN_APPROX_SIMPLE applyColorMap findContours copy IMREAD_COLOR IMREAD_GRAYSCALE resize COLORMAP_JET imread imsave split logical_and ravel logical_or sum roc_auc_score roc_curve logical_and ravel logical_or sum roc_auc_score roc_curve ravel roc_auc_score print Conv2d load_state_dict_from_url make_layers VGG load_state_dict expanduser join getenv int hasattr move getheaders name close NamedTemporaryFile hexdigest urlopen dirname info expanduser get_all sha256 join basename format download_url_to_file write warn _get_torch_home getenv path is_zipfile urlparse makedirs | # DFR Project: Unsupervised Anomaly Detection and Segmentation Paper: Unsupervised anomaly segmentation via deep feature reconstruction | **[Neurocomputing]**[`pdf`](https://www.sciencedirect.com/science/article/pii/S0925231220317951)[`code`](https://github.com/YoungGod/DFR) | **arXiv preprint**[`pdf`](https://arxiv.org/abs/2012.07122) Introduction: Automatically detecting anomalous regions in images of objects or textures without priors of the anomalies is challenging, especially when the anomalies appear in very small areas of the images, making difficult-to-detect visual variations, such as defects on manufacturing products. This paper proposes an effective unsupervised anomaly segmentation approach that can detect and segment out the anomalies in small and confined regions of images. Concretely, we develop a multi-scale regional feature generator which can generate multiple spatial context-aware representations from pre-trained deep convolutional networks for every subregion of an image. The regional representations not only describe the local characteristics of corresponding regions but also encode their multiple spatial context information, making them discriminative and very beneficial for anomaly detection. Leveraging these descriptive regional features, we then design a deep yet efficient convolutional autoencoder and detect anomalous regions within images via fast feature reconstruction. Our method is simple yet effective and efficient. It advances the state-of-the-art performances on several benchmark datasets and shows great potential for real applications. # Qualitative results | 1,183
YoungHector/one-shot-mann | ['one shot learning'] | ['One-shot Learning with Memory-Augmented Neural Networks'] | mann/utils/generators.py mann/utils/tf_utils.py mann/utils/similarities.py mann/model.py mann/utils/images.py omniglot.py omniglot build_argparser mann OmniglotGenerator transform_image get_sampled_data cosine_similarity variable_float32 variable_one_hot add_argument ArgumentParser OmniglotGenerator RMSPropOptimizer argmax Session run show softmax_cross_entropy_with_logits subplot _nb_samples ylabel _batch_size build_argparser placeholder title cast parse_args _input_height _input_width mann one_hot plot _nb_reads _learning_rate _iterations equal minimize xlabel _nb_classes reduce_mean _controller_size figure global_variables_initializer compute_output sample minimum asarray imresize shift maximum rotate imread max transpose matmul zeros | # One-shot Learning with Memory-Augmented Neural Networks Tensorflow implementation of the paper *One-shot Learning with Memory-Augmented Neural Networks*, by A. Santoro et al. This is an offshoot of a [larger project](https://github.com/adityagilra/archibrain), which aims to synthesize bio-plausible neural networks that solve cognitive tasks. This implementation is much simpler than a lot of others out there, thanks to TensorFlow's API and ease of use. The model as described in the paper has been followed as closely as possible. The code is inspired by [tristandeleu](https://github.com/tristandeleu)'s excellent Theano implementation. ## Requirements This implementation requires * numpy >= 1.12.1 * scipy >= 0.17.0 * tensorflow >= 1.0 | 1,184 |
YuanGongND/ReMASC | ['voice anti spoofing'] | ['ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems'] | fix_sox_warn.py | # ReMASC: Realistic Replay Attack Corpus for Voice Controlled Systems We introduce a new database of voice recordings with the goal of supporting research on vulnerabilities and protection of voice-controlled systems. In contrast to prior efforts, the proposed database contains genuine and replayed recordings of voice commands obtained in realistic usage scenarios and using state-of-the-art voice assistant development kits. Specifically, the database contains recordings from four systems (each with a different microphone array) in a variety of environmental conditions with different forms of background noise and relative positions between speaker and device. To the best of our knowledge, this is the first database that has been specifically designed for the protection of voice controlled systems (**VCS**) against various forms of replay attacks. ## Updates June 30, 2020: - We now release the complete set. - We now host the data on **[IEEE DataPort](https://ieee-dataport.org/open-access/remasc-realistic-replay-attack-corpus-voice-controlled-systems)**, which offers a high-speed download (you will need an [IEEE account](https://ieee-dataport.org/faq/how-do-i-access-dataset-ieee-dataport) to download, which is also free). - The ReMASC corpus remains **free for academic and commercial use**, and we no longer require a verification process; you can download the data immediately. Nevertheless, it is highly recommended to sign up for our **[mailing list](https://forms.gle/CAQeo9JsuoD4d1AS9)** to get the latest news about the dataset. ## Downloads **Sample Set** (~10MB) A mini set consisting of 16 samples for initial evaluation. You can download it **\[[here](https://drive.google.com/open?id=1RYHaaHnwuTb7Mx-jlVynQBaIJ-qQ80my)]**. | 1,185
YuanGongND/realtime-adversarial-attack | ['adversarial attack'] | ['Real-Time Adversarial Attacks'] | generate_expert_demo.py speech_model_train/generate_streaming_test_wav.py speech_model_train/models_test.py speech_model_train/wav_to_features.py speech_model_train/train.py speech_model_train/input_data_test.py speech_model_train/label_wav_test.py speech_model_train/label_wav.py train_attack.py speech_model_train/input_data.py differential_evolution.py rnn_model.py speech_model_train/generate_streaming_test_wav_test.py speech_model_train/freeze_batch.py speech_model_train/wav_to_features_test.py speech_model_train/label_wav_dir.py inject_attack.py speech_model_train/label_wav_batch.py speech_model_train/freeze.py speech_model_train/trained_models/differential_evolution.py speech_model_train/models.py speech_model_train/freeze_test.py DifferentialEvolutionSolver differential_evolution write_wavfile attack_fix_scale perturb_data predict_classes load_graph predict_classes_all generate_perturbation_fix_scale check_create_folder load_data load_labels run_graph inject_attack_5_fixed_scale load_graph predict_classes_all check_create_folder run_graph load_data iter_loadtxt customLoss lstm_model9_mfcc_fixed_scale process_missing_value iter_loadtxt load_data train load_data_complete create_inference_graph main create_inference_graph main FreezeTest main mix_in_audio_sample GenerateStreamingTestWavTest save_wav_file AudioProcessor which_set load_wav_file prepare_words_list get_features_range InputDataTest load_graph label_wav load_labels main run_graph load_graph label_wav load_labels main run_graph load_graph label_wav load_labels main run_graph LabelWavTest prepare_model_settings create_model create_single_fc_model create_low_latency_conv_model create_tiny_conv_model load_variables_from_checkpoint _next_power_of_two create_low_latency_svdf_model create_conv_model ModelsTest main main wav_to_features WavToFeaturesTest DifferentialEvolutionSolver differential_evolution DifferentialEvolutionSolver read int min astype copy tile range len str perturb_data print transpose zeros str perturb_data print transpose zeros differential_evolution max copy len write write_wavfile time attack_fix_scale perturb_data print predict_classes_all savetxt load_data zeros argmax array mkdir int reshape min copy zeros max range fromiter iter_func reshape zeros convert_to_tensor int print reshape multiply transpose shape range isnan StandardScaler fit_transform isinf print shape iter_loadtxt print shape iter_loadtxt zeros mkdir pool prepare_model_settings create_model decode_wav reshape audio identity placeholder audio_spectrogram string softmax split mfcc expand_dims prepare_words_list len start_checkpoint quantize load_variables_from_checkpoint graph_def feature_bin_count window_stride_ms write_graph basename clip_duration_ms create_eval_graph model_architecture wanted_words dirname output_file window_size_ms preprocess info InteractiveSession convert_variables_to_constants sample_rate create_inference_graph clip_stride_ms concat float32 range min range prepare_model_settings get_unprocessed_data arange silence_percentage floor word_gap_ms background_data data_dir len uniform test_duration_seconds output_labels_file ceil append UNKNOWN_WORD_LABEL range AudioProcessor shuffle testing_percentage background_volume zeros mix_in_audio_sample int validation_percentage save_wav_file randint output_audio_file prepare_words_list split int basename sub hexdigest load_labels load_graph fatal run_graph graph wav label_wav labels 
how_many_labels input_name output_name read print reshape zeros wav_dir ceil int floor _next_power_of_two restore Saver global_variables matmul float32 placeholder get_variable get_shape int dropout relu reshape float32 placeholder max_pool matmul conv2d get_variable int dropout relu reshape float32 placeholder matmul conv2d floor get_variable concat assign bias_add get_variable count_nonzero get_shape transpose placeholder matmul reduce_sum conv1d expand_dims dropout relu equal cond int reshape float32 get_shape int dropout relu reshape float32 placeholder matmul conv2d get_variable batch_size data_url get_data unknown_percentage assign time_shift_ms set_verbosity Saver background_frequency set_size xrange save argmax get_features_range run list global_variables merge_all map placeholder int64 cast check_nans sum train_dir create_model get_or_create_global_step fake_quant_with_min_max_args FileWriter eval equal INFO create_training_graph add_check_numerics_ops join min float32 confusion_matrix reduce_mean summaries_dir add_summary prepare_model_settings AudioProcessor get_features_for_wav InteractiveSession input_wav output_c_file wav_to_features | # Real-Time Adversarial Attacks In recent years, many efforts have demonstrated that modern machine learning algorithms are vulnerable to adversarial attacks, where small, but carefully crafted, perturbations on the input can make them fail. While these attack methods are very effective, they only focus on scenarios where the target model takes static input, i.e., an attacker can observe the entire original sample and then add a perturbation at any point of the sample. These attack approaches are not applicable to situations where the target model takes streaming input, i.e., an attacker is only able to observe past data points and add perturbations to the remaining (unobserved) data points of the input. In this work, we propose a real-time adversarial attack scheme for machine learning models with streaming inputs. ## Cite us: If you feel this repository is helpful, please cite the following paper: Yuan Gong, Boyang Li, Christian Poellabauer, and Yiyu Shi, **["Real-time Adversarial Attacks"](https://www.ijcai.org/Proceedings/2019/649)**, Proceedings of the 28th International Joint Conference on Artificial Intelligence (IJCAI), Macao, China, August 2019. ## Dependencies Tensorflow 1.0 ## Dataset In the experiments of this work, we use the [Speech Commands dataset](https://storage.cloud.google.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz) (2.3GB), which is publically accessible. You don't need to download it if you want to train the target model from scratch because the training code will automatically download it. ## How to run the code? | 1,186 |
YuanXinCherry/Person-reID-Evaluation | ['image retrieval'] | ['Re-identification = Retrieval + Verification: Back to Essence and Forward with a New Metric'] | GOM.py test.py evaluate_closed evaluate_open draw ProgressBar norm_distance print_GOM min max arange move cumsum where intersect1d log str shape sleep append sum range astype ProgressBar copy sqrt tile T print float32 argsort argwhere int32 zeros arange move where intersect1d log str shape sleep sum range astype ProgressBar tile T print argsort argwhere int32 zeros arange print trapz array argmax max range len show subplot join arange plot set_yticks set_xlim set_xlabel subplots_adjust set_ylabel savefig figure legend tick_params range len | # Genuine Open-set re-ID Metric (GOM) The *official* repository for [Re-identification = Retrieval + Verification: Back to Essence and Forward with a New Metric](https://arxiv.org/abs/2011.11506). IEEE Transactions on Image Processing (Under Review). **The corresponding references of the supplementary material are available at** https://github.com/YuanXinCherry/Person-reID-Evaluation/tree/master/figs/Reference_for_Supplementary.png GOM evaluates the state-of-the-art methods on both **supervised learning** tasks and **unsupervised learning** tasks for object re-ID, including person re-ID and vehicle re-ID.  **Illustration of existing retrieval and verification procedures.** 1) Closed-world retrieval: the method returns a ranking list to users and cannot tell which images are the true targets. The user needs to judge targets according to their experience and feeling. 2) Closed-world verification: given a decision boundary, images whose distances are below the boundary are considered as the targets. Nevertheless, the method cannot distinguish ground truth (GT) and non-GT within the boundary. 3) Open-set: the GT of the probe does not always exist in the gallery, thus the re-ID procedure should include this kind of scenario. ## Comparison with existing metrics  ## A family of metrics of GOM <img src="figs/Metrics.png" width="700" height="350" alt="metrics"/><br/> | 1,187
YueWuHKUST/FutureVideoSynthesis | ['motion prediction'] | ['Future Video Synthesis with Object Motion Prediction'] | back/util/util.py back/models/models.py fore/data/kitti_dataset_test.py video_inpainting/Deep-Flow-Guided-Video-Inpainting/tools/video_inpaint_city.py fore/train_city.py video_inpainting/Deep-Flow-Guided-Video-Inpainting/dataset/data_list_city.py back/data/base_dataset.py dynamic/util/html.py evaluation/demo_kitti.py back/options/base_options.py fore/data/temporal_dataset_test_my_back_next.py fore/data/custom_dataset_data_loader.py fore/options/base_options.py dynamic/test.py fore/util/visualizer.py dynamic/options/base_options.py back/util/image_pool.py process_scripts/test_cityscapes.py dynamic/util/visualizer.py back/data/temporal_dataset.py back/data/custom_dataset_data_loader.py back/options/train_options.py fore/data/data_loader.py fore/util/html.py dynamic/data/custom_dataset_data_loader.py fore/test_myback.py process_scripts/test_kitti.py fore/models/models.py back/data/kitti_dataset_test.py fore/data/base_dataset.py background_inpainting/generative_inpainting/test_kitti.py fore/data/test_dataset.py dynamic/train.py back/data/base_data_loader.py fore/options/test_options.py fore/train_kitti.py fore/data/temporal_dataset_test.py dynamic/options/train_options.py dynamic/data/temporal_dataset.py dynamic/util/util.py dynamic/models/networks.py dynamic/util/image_pool.py back/train.py dynamic/models/models.py dynamic/data/base_dataset.py back/data/temporal_dataset_test.py back/models/backpred_model_D.py back/models/networks.py back/models/pwcnet.py fore/models/forepred_model_D.py process_scripts/gen_nonrigid_small/small.py back/data/kitti_dataset.py back/models/backpred_model_G.py back/util/html.py fore/util/util.py back/options/test_options.py fore/data/kitti_dataset.py process_scripts/occ.py process_scripts/gen_nonrigid_small/nonrigid.py video_inpainting/Deep-Flow-Guided-Video-Inpainting/tools/video_inpaint_kitti.py fore/models/forepred_model_G.py fore/models/pwcnet.py fore/options/train_options.py fore/models/networks.py dynamic/options/test_options.py dynamic/data/data_loader.py fore/data/kitti_dataset_test_myback.py background_inpainting/generative_inpainting/test_cityscapes.py evaluation/demo_cityscape.py dynamic/models/dynamic_detect.py fore/models/base_model.py fore/demo.py fore/data/temporal_dataset_test_my_back.py fore/data/temporal_dataset_test_my_back_all_fore.py back/test.py back/models/base_model.py dynamic/data/kitti_dataset.py fore/data/base_data_loader.py dynamic/models/base_model.py fore/util/image_pool.py back/data/data_loader.py dynamic/models/pwcnet.py back/util/visualizer.py fore/test_myback_next.py fore/data/temporal_dataset.py process_conf compute_flow test lcm reshape compute_flow process_conf train BaseDataset __flip get_transform __crop make_power_2 __scale_image get_img_params BaseDataLoader CustomDatasetDataLoader CreateDataset CreateDataLoader KittiDataset TestKittiDataset TemporalDataset TemporalDataset get_skipped_frames BackPredModelD BackPredModelG BaseModel create_model MultiscaleDiscriminator BaseNetwork resample MaskOneL1Loss get_norm_layer GANLoss ResnetBlock define_D SmoothLoss weights_init define_G grid_sample VGGLoss Consistency NLayerDiscriminator MultiscaleL1Loss get_grid MaskTwoL1Loss Vgg19 print_network ResNetGenerator PWCNet BaseOptions TestOptions TrainOptions HTML ImagePool colormap tensor2graysemantic Colorize print_numpy uint82bin tensor2occ labelcolormap tensor2flow mkdirs tensor2label mkdir save_image tensor2im 
tensor2graylabel tensor2mask Visualizer S2mask mask_remove_shadow load_all_image_paths mask_remove_shadow clip_mask load_all_image_paths compute_flow_pair grid_sample inst_and_dynamic resample gen_pre_mask get_grid compute_flow test lcm reshape train compute_flow BaseDataset __flip get_transform __crop concat_frame make_power_2 __scale_image get_img_params toTensor_normalize CustomDatasetDataLoader CreateDataLoader KittiDataset TemporalDataset BaseModel DynamicDetect create_model conv_pwcnet resize_like get_norm_layer grid_sample resample BaseNetwork get_grid downconv deconv predict_flow ResnetBlock conv weights_init print_network ResNetGenerator define_G PWCNet BaseOptions TestOptions TrainOptions HTML ImagePool colormap Colorize print_numpy uint82bin tensor2occ labelcolormap tensor2flow numpy2mask mkdirs tensor2label mkdir save_image tensor2im tensor2graylabel tensor2mask Visualizer check_valid_car_mask load_all_image_paths_track_val load_tracked_dict_val load_tracked_dict read_image lcm train compute_flow prepare_input BaseDataset __flip get_transform make_power_2 __scale_image get_img_params BaseDataLoader CustomDatasetDataLoader CreateDataset CreateDataLoader KittiDataset compute_bbox TestKittiDataset compute_bbox TestKittiDataset compute_bbox TemporalDataset compute_bbox TestTemporalDataset compute_bbox TestTemporalDataset compute_bbox TestTemporalDataset compute_bbox TestTemporalDataset transfrom_single_image compute_bbox TestDataset BaseModel ForePredModelD ForePredModelG create_model MaskOneL1Loss MultiscaleDiscriminator get_norm_layer GANLoss STN ResnetBlock define_D Vgg19 VGGLoss weights_init print_network NLayerDiscriminator CrossEntropyLoss define_G MultiscaleL1Loss PWCNet BaseOptions TestOptions TrainOptions HTML ImagePool save_pred_all uint82bin clip_image save_image colormap save_cnt_result_and_class_next tensor2edge mkdirs tensor2im tensor2mask Colorize tensor2flow tensor2label save_cnt_result_and_class mkdir save_cnt_result clip_mask print_numpy labelcolormap Visualizer process_fore process_dynamic class_mapping get_mask loadinclass preprocess_bike_person load_tracked_dict combine_bike_rider interaction_mask read_image IOU_mask process_dynamic class_mapping get_mask loadinclass preprocess_bike_person load_tracked_dict combine_bike_rider interaction_mask read_image IOU_mask generate_mask add_masks clip_mask clip_mask generate_mask whether_move add_masks mask2bbox gen_flow_refine_test_mask_list gen_flow_initial_test_mask_list flow_guided_propagation parse_argse extract_flow flow_completion main load_all_image_paths flow_guided_propagation parse_argse extract_flow flow_completion main load_all_image_paths flowNet process_conf unsqueeze cuda range cat checkpoints_dir CreateDataLoader image_nc dataset cuda view name compute_flow len OrderedDict append inference range cat parse create_model flow_nc size save_test_images semantic_nc enumerate join Visualizer print load_data makedirs vis_print checkpoints_dir batchSize optimizer_G CreateDataLoader lcm zero_grad optimizer_D save niter_decay cuda modelD loss_names list sorted view name compute_flow call loss_names_T OrderedDict savetxt update_learning_rate append range display_current_results continue_train parse create_model debug print_freq size optimizer_D_T zip enumerate join time items Visualizer get_all_skipped_frames print loadtxt backward print_current_errors_mean modelG dict get_losses niter load_data zeros step len size isinstance int maximum randint loadSize append Lambda isTrain size TestKittiDataset initialize 
TemporalDataset name print KittiDataset CustomDatasetDataLoader name print initialize int size repeat range cat initialize isTrain PWCNet BackPredModelD DataParallel BackPredModelG normal_ __name__ fill_ BatchNorm2d partial InstanceNorm2d n_blocks_local get_norm_layer apply ResNetGenerator cuda MultiscaleDiscriminator get_norm_layer apply print_network cuda parameters isinstance view half expand linspace cat grid_sample size get_grid get_device cuda cat data clip isinstance Variable transpose append numpy range len data clip isinstance Variable astype mean int32 append numpy tensor2im range len data clip isinstance Variable transpose append numpy tensor2im range len data isinstance Variable transpose float numpy data normalize COLOR_HSV2RGB isinstance Variable NORM_MINMAX transpose pi zeros numpy cartToPolar cvtColor data isinstance Variable transpose float numpy data isinstance Variable transpose float numpy fromarray save print float64 flatten astype mkdir makedirs uint8 uint82bin zeros array range uint8 arange astype zeros array sort append listdir range len shape max range uint8 print ones shape tile dilate zeros expand_dims range flowNet zeros_like astype int32 unique float sum range len compute_flow_pair resample append cuda range gen_pre_mask results_dir inst_and_dynamic numpy image_nc loadSize print_current_errors flow_nc plot_current_errors semantic_nc BCELoss int criterionmask tIn minimum randn data_augment cat DynamicDetect print transpose clip NEAREST array resize check_valid_car_mask glob sort append listdir range len glob sort append listdir range len listdir SemanticRoot TrackInstanceRoot sort DepthMapRoot range SemanticGTRoot load_tracked_dict_val BackRoot append InstanceGTRoot Instance_maskrcnn ImagesRoot len append cuda gen_seq gpu_ids update_fixed_params n_scales_temporal TestTemporalDataset zeros where range zeros_like len ForePredModelG n_gpus_gen ForePredModelD STN transpose clip clip clip_mask len close write save clip_image range exists open clip_mask len close write save clip_image range exists open clip_mask len close write save clip_image range exists open clip_mask save clip_image range len data isinstance Variable transpose numpy ones uint8 dilate erode index int class_mapping readlines split open range append len uint8 ones astype float32 dilate bool get_mask astype int32 append range len get_mask combine_bike_rider append zeros interaction_mask range len open append array range len NEAREST process_dynamic loadinclass astype preprocess_bike_person open int32 resize array exists resize print range clip_mask print zeros range add_masks len astype float32 dilate min max where uint8 clip_mask ones astype float32 dilate abs range mask2bbox whether_move str write close open range clip len join str min write close open append range clip len parse_args add_argument ArgumentParser join read_flow min infer write_flow join MS gen_flow_refine_test_mask_list print IMAGE_SHAPE PRETRAINED_MODEL_3 PRETRAINED_MODEL_1 test_initial_stage dataset_root PRETRAINED_MODEL_2 test_refine_stage output_root gen_flow_initial_test_mask_list makedirs propagation DeepFillv1 parse_argse dataset_root save exists flow_guided_propagation copyfile flow_completion dirname range load_all_image_paths extract_flow Propagation img_size frame_dir DFC join print convert FlowNet2 zeros array len | # Official implementation of Paper "Future Video Synthesis With Object Motion Prediction"(CVPR 2020) # Enviroment Python 3 Pytorch1.0.0 # Components There are exists several components in our framework. 
This repo only contains modified files for [Generative Inpainting](https://github.com/JiahuiYu/generative_inpainting/tree/v1.0.0) and [Deep-Flow-Guided-Video-Inpainting](https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting). We use [PWCNet](https://github.com/NVlabs/PWC-Net/tree/master/PyTorch) to compute optical flow. Please put the code under the './*/models/' directory - Dynamic Object Detection - Background Prediction - Background Inpainting following [Generative Inpainting](https://github.com/JiahuiYu/generative_inpainting/tree/v1.0.0) | 1,188
Yujun-Yan/Neural-Execution-Engines | ['learning to execute'] | ['Neural Execution Engines: Learning to Execute Subroutines'] | data/mul_data.py data/merge_data.py data/sel_sort_data.py model/layers.py model/model.py run_exp/run_sel_sort.py data/addition_data.py data/add_holdout_data.py run_exp/run_addition.py data/min_graph.py run_exp/run_addition_holdout.py run_exp/run_mul.py run_exp/run_prim.py utils.py run_exp/run_min_graph.py run_exp/run_merge_sort.py run_exp/run_shortest_path.py run_exp/run_experiment.py Graph_paths CustomSchedule back2int loss_pos loss_function binary_encoding Graph_mst add_data add_holdout_data join merge_data_gen get_traces get_end_exmp get_weighted_graph get_random_graph min_graph mul_data sel_sort_data_gen point_wise_feed_forward_network DecoderLayer generate_similarity_score get_angles Decoder Encoder positional_encoding add_pos Attention scaled_general_attention EncoderLayer Transformer mask_transform run_add run_add_hold run_merge run_min_graph run_mul run_prim run_sel run_dist constant left_shift greater bitwise_and float32 reverse cast expand_dims range constant left_shift matmul loss_object BinaryCrossentropy loss_obj CategoricalCrossentropy load list format from_tensor_slices concatenate cumsum ones print choice from_ind_to_seq save zeros sum range enumerate load list format setdiff1d from_tensor_slices ones print apply_along_axis shuffle choice int64 save create_examples range concatenate int64 floor append zeros len load join format arange permutation zeros_like concatenate cumsum print from_tensor_slices dict from_ind_to_seq int64 save create_seq zeros range list tril_indices multiply astype choice uniform range Graph_paths Graph_mst argsort sort range zeros list todense asarray fill_diagonal barabasi_albert_graph min random_regular_graph choice uniform get_weighted_graph erdos_renyi_graph newman_watts_strogatz_graph range barabasi_albert_graph get_end_exmp save erdos_renyi_graph list todense random_regular_graph uniform get_weighted_graph append range format asarray fill_diagonal from_tensor_slices concatenate astype choice get_traces stack binomial load min zeros newman_watts_strogatz_graph load list format arange from_tensor_slices concatenate cumsum ones print choice from_ind_to_seq int64 save zeros prod range enumerate load list format from_tensor_slices concatenate print sort apply_along_axis shuffle choice argsort tile save append zeros sum array range concat NN tile generate_similarity_score softmax matmul power float32 concatenate get_angles cos sin cumsum zeros squeeze Transformer concat save CheckpointManager restore list add_data map Adam reset_states apply_along_axis array AUTOTUNE prefetch iter append next range ones_like CustomSchedule concatenate latest_checkpoint cache astype shuffle choice tile Mean batch emb enumerate join time evaluate print Checkpoint eye Accuracy zeros create_file_writer Transformer concat save CheckpointManager restore list add_holdout_data map Adam reset_states apply_along_axis array AUTOTUNE prefetch iter append next range ones_like CustomSchedule concatenate latest_checkpoint cache astype shuffle choice tile Mean batch emb enumerate join time evaluate print Checkpoint eye Accuracy zeros create_file_writer Transformer mask_transform zeros_like floor merge_sort_test_exmp CheckpointManager restore list merge_data_gen less_equal map Adam reset_states apply_along_axis ylabel int64 savefig AUTOTUNE prefetch iter append next range from_tensor_slices format CustomSchedule plot concatenate latest_checkpoint cache 
shuffle choice eval_val tile Mean batch enumerate join time evaluate merge_sort print xlabel sort Checkpoint figure Accuracy create_file_writer len Transformer mask_transform save min_graph CheckpointManager restore map Adam reset_states AUTOTUNE prefetch iter next range CustomSchedule latest_checkpoint cache Mean batch emb time print Checkpoint eye Accuracy create_file_writer Transformer concat floor save CheckpointManager mul_data restore list ones map Adam reset_states apply_along_axis int64 array AUTOTUNE prefetch iter append next range CustomSchedule concatenate latest_checkpoint cache shuffle choice sqrt tile Mean batch emb enumerate join time evaluate print Checkpoint eye Accuracy zeros create_file_writer Transformer mask_transform Graph_mst CheckpointManager restore squeeze Adam reset_states reduce_sum result int64 cast append primmst range ones_like format CustomSchedule latest_checkpoint stack get_random_graph zeros update_state print mst Checkpoint Accuracy randint array len Transformer eval_val_2 mask_transform concat save CheckpointManager restore list squeeze less_equal map Adam reset_states apply_along_axis ylabel int64 imshow savefig AUTOTUNE prefetch iter append next range from_tensor_slices format CustomSchedule plot concatenate latest_checkpoint cache shuffle choice tile Mean batch sel_sort_data_gen emb time evaluate print sort xlabel Checkpoint figure eye Accuracy create_file_writer len Transformer mask_transform dijkstra gather CheckpointManager restore Adam reset_states result int64 cast append range Graph_paths ones_like format CustomSchedule latest_checkpoint shortest_path stack get_random_graph zeros update_state print Checkpoint Accuracy randint array len | # Code for NeurIPS 2020 paper: Neural Execution Engines: Learning to Execute Subroutines Authors: Yujun Yan, Kevin Swersky, Danai Koutra, Parthasarathy Ranganathan, Milad Hashemi Link to the paper: https://arxiv.org/abs/2006.08084 This folder only contains code to run NEE. For the baseline transformer model, it can be obtained from https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/transformer.ipynb # Software requirement: This code was tested with Python 3.7.4, numpy 1.17.1 and tensorflow 2.0.0-rc0 # Usage: Please run the following commands in the folder run_exp ## To see different options: python run_experiment.py -h | 1,189 |
YukangWang/TextField | ['scene text detection'] | ['TextField: Learning A Deep Direction Field for Irregular Scene Text Detection'] | examples/TextField/pylayerUtils.py python/caffe/io.py python/caffe/test/test_nccl.py examples/TextField/evaluation/total/polygon_wrapper.py python/train.py python/caffe/test/test_python_layer.py scripts/download_model_binary.py python/caffe/net_spec.py examples/TextField/evaluation/ic15/script.py python/caffe/coord_map.py examples/TextField/evaluation/total/Deteval.py examples/TextField/evaluation/ic15/rrc_evaluation_funcs.py python/caffe/test/test_net.py tools/extra/resize_and_crop_images.py python/draw_net.py examples/TextField/evaluation/ic15/seg2bbox.py python/caffe/test/test_draw.py python/caffe/test/test_net_spec.py src/caffe/test/test_data/generate_sample_data.py python/caffe/draw.py python/caffe/pycaffe.py tools/extra/extract_seconds.py scripts/cpp_lint.py python/classify.py python/caffe/test/test_solver.py python/caffe/classifier.py python/caffe/test/test_io.py python/caffe/test/test_python_layer_with_param_str.py tools/extra/parse_log.py scripts/split_caffe_proto.py python/caffe/__init__.py python/caffe/test/test_layer_type_list.py scripts/copy_notebook.py python/caffe/detector.py python/detect.py examples/TextField/datasetUtils.py examples/TextField/evaluation/total/seg2bbox.py python/caffe/test/test_coord_map.py tools/extra/summarize.py examples/TextField/train.py ReadGt_synth ReadGt_ic15 GetPts_synth ReadGt_total ReadGt_ctw ReadGt_td GetPts_td GetPts_ic15 GetPts_ctw GetPts_total random_crop EuclideanLossLayerWithOHEM DataLayer evaluation_imports evaluate_method default_evaluation_params validate_data approx_area_of_intersection iou area area_of_intersection iod main main main parse_args train solve time Classifier Detector get_edge_label draw_net get_layer_label get_pydot_graph choose_color_by_layertype get_pooling_types_dict draw_net_to_file Transformer blobproto_to_array datum_to_array array_to_blobproto array_to_datum resize_image arraylist_to_blobprotovector_str blobprotovector_str_to_arraylist load_image oversample Layers Function Parameters Top NetSpec assign_proto param_name_dict to_proto _Net_blobs _Net_forward_all _Net_set_input_arrays _Net_backward _Net_params _Net_forward _Net_outputs _Net_forward_backward_all _Net_blob_loss_weights _Net_batch _Net_get_id_name _Net_inputs _Net_layer_dict TestCoordMap coord_net_spec getFilenames TestDraw TestBlobProtoToArray TestArrayToDatum TestLayerTypeList TestNCCL TestLevels TestStages simple_net_file TestNet TestAllInOne lenet TestNetSpec silent_net anon_lenet exception_net_file parameter_net_file SimpleLayer phase_net_file TestPythonLayer ParameterLayer PhaseLayer python_net_file ExceptionLayer SimpleParamLayer TestLayerWithParam python_param_net_file TestSolver ParseNolintSuppressions CheckVlogArguments CheckSectionSpacing FindNextMultiLineCommentEnd ReplaceAll CheckForFunctionLengths _SetOutputFormat _IsTestFilename _VerboseLevel CheckBraces RemoveMultiLineComments ResetNolintSuppressions CheckForNonStandardConstructs _SetVerboseLevel PrintUsage _NestingState CheckIncludeLine CheckAccess _CppLintState Search CheckInvalidIncrement RemoveMultiLineCommentsFromRange CleansedLines CheckForBadCharacters UpdateIncludeState FindPreviousMatchingAngleBracket CheckEmptyBlockBody FindNextMultiLineCommentStart Match _NamespaceInfo CheckMakePairUsesDeduction CheckCheck IsBlankLine _SetFilters ProcessLine _FunctionState CheckPosixThreading GetLineWidth GetHeaderGuardCPPVariable IsCppString _IncludeState 
CheckSpacing _ClassInfo CheckForCopyright IsErrorSuppressedByNolint ProcessFileData CheckForMultilineCommentsAndStrings CloseExpression _PreprocessorInfo _OutputFormat CheckForIncludeWhatYouUse CheckSpacingForFunctionCall FindEndOfExpressionInLine FindNextMatchingAngleBracket _SetCountingStyle ProcessFile _IncludeError CleanseRawStrings CheckAltTokens CheckForNewlineAtEOF ParseArguments CheckForNonConstReference PrintCategories _Filters main FilesBelongToSameModule CheckCStyleCast FileInfo _BlockInfo CheckForHeaderGuard CheckCaffeDataLayerSetUp ReverseCloseExpression CleanseComments _DropCommonSuffixes _ClassifyInclude CheckStyle CheckCaffeAlternatives FindStartOfExpressionInLine _ShouldPrintError CheckComment Error _GetTextInside CheckLanguage CheckCaffeRandom GetPreviousNonBlankLine reporthook parse_readme_frontmatter model_checks_out valid_dirname get_start_time extract_seconds extract_datetime_from_line get_log_created_year zeros float round range split zeros range int split append int array range int array split int cos sin float array split int min sqrt uniform randint round range load_zip_file validate_lines_in_file compute_ap area iteritems decode_utf8 append polygon_from_points range import_module get_intersection_over_union float load_zip_file empty get_tl_line_values_from_file_contents namedtuple int8 rectangle_to_polygon Rectangle get_intersection zeros len polygon zeros sum max minimum min max maximum zeros_like maximum where polygon zeros sum max zeros_like maximum where polygon zeros sum max sum zeros_like maximum where polygon zeros round max model_def endswith ArgumentParser save mean_file channel_swap output_file dirname expanduser parse_args input_file predict Classifier set_mode_cpu load time isdir print add_argument set_mode_gpu pretrained_model gpu len DataFrame Detector format to_hdf detect_selective_search mean set_index to_csv detect_windows read_csv add_argument ArgumentParser read NetParameter output_image_file rankdir Merge TRAIN draw_net_to_file TEST Process str join init_log start append new_uid range log len before_backward layers display add_callback after_backward after_forward Timer append before_forward range len max_iter restore time set_solver_count set_solver_rank add_callback set_device set_multiprocess SGDSolver after_backward set_mode_gpu layer_wise_reduce step bcast NCCL len items DESCRIPTOR batch_size str num_output get_pooling_types_dict add_edge get_edge_label Dot exclude get_layer_label add_node values choose_color_by_layertype Edge Node bottom append type layer include top data array diff shape BlobProto extend flat extend BlobProtoVector ParseFromString BlobProtoVector extend tostring shape Datum flat data len astype float32 tile zoom tuple resize fill empty array concatenate shape tile empty array LayerParameter NetParameter _to_proto extend Counter OrderedDict values iteritems hasattr isinstance extend add getattr setattr OrderedDict _blobs _blob_names zip OrderedDict _blob_loss_weights _blob_names zip OrderedDict layers _layer_names zip OrderedDict list keys list keys iteritems layers index set outputs _forward len iteritems _backward layers inputs index set len iteritems asarray extend copy next _batch itervalues forward len iteritems izip_longest asarray backward extend copy next _batch itervalues zip forward len ascontiguousarray concatenate itervalues zeros next range len data Pooling pool Convolution NetSpec Deconvolution conv Input join walk dirname abspath NamedTemporaryFile str close write data Pooling pool1 conv2 pool2 ip1 relu1 
SoftmaxWithLoss Convolution NetSpec DummyData ip2 ReLU InnerProduct label conv1 Pooling SoftmaxWithLoss Convolution DummyData ReLU InnerProduct data NetSpec DummyData Silence data2 error search add group clear compile compile compile SetOutputFormat SetCountingStyle SetFilters _Filters startswith IsErrorSuppressedByNolint _ShouldPrintError write IncrementErrorCount replace append Match group find startswith endswith range error FindNextMultiLineCommentEnd RemoveMultiLineCommentsFromRange FindNextMultiLineCommentStart rstrip find xrange len FindEndOfExpressionInLine xrange len FindStartOfExpressionInLine error min search I xrange len FileInfo RepositoryName sep sub ParseNolintSuppressions error startswith split GetHeaderGuardCPPVariable enumerate error enumerate error len error replace count error find error find error find error find error Search error match InnermostClass replace error escape Match Search error group Search Check error lines Count End group Begin xrange NumLines Match raw_lines Search error match group error Match group pop group append Search pop group append Search elided replace CheckSpacingForFunctionCall rfind error len group min CloseExpression NumLines sub xrange find CheckComment Match Search lines_without_raw_strings error group starting_linenum Match range Search error rfind len group ReverseCloseExpression Search Match CloseExpression find error Match CloseExpression find elided error strip group FindEndOfExpressionInLine xrange find Match CloseExpression len error Match finditer normalize isinstance PY2 GetLineWidth int InnermostClass CheckCheck error CheckAltTokens CheckBraces CheckSpacing CheckSectionSpacing CheckEmptyBlockBody CheckAccess GetHeaderGuardCPPVariable lines_without_raw_strings _DropCommonSuffixes RepositoryName match split CheckNextIncludeOrder CanonicalizeAlphabeticalOrder FileInfo error search group SetLastHeader match _ClassifyInclude Match pop end search set itervalues append M rstrip replace CheckCStyleCast error _GetTextInside CheckIncludeLine search group lstrip startswith Match ResetSection Search split rfind error group ReverseCloseExpression lstrip xrange findall Match Search ReplaceAll error Match Search endswith replace setdefault group search CleanseComments open FilesBelongToSameModule error search copy sub xrange NumLines FullName keys error search CheckPosixThreading ParseNolintSuppressions CheckVlogArguments CheckMakePairUsesDeduction CheckCaffeDataLayerSetUp CheckLanguage CheckInvalidIncrement CheckCaffeRandom CheckForNonConstReference check_fn Update CheckForNonStandardConstructs CheckStyle raw_lines CheckForMultilineCommentsAndStrings CheckCaffeAlternatives CheckForFunctionLengths CleansedLines _NestingState CheckForBadCharacters CheckForNewlineAtEOF _IncludeState NumLines RemoveMultiLineComments CheckForCopyright ResetNolintSuppressions CheckForHeaderGuard xrange CheckCompletedBlocks CheckForIncludeWhatYouUse ProcessLine _FunctionState Error rstrip endswith len write ProcessFileData _SetVerboseLevel range split write exit join write exit _VerboseLevel int getopt _SetOutputFormat set _SetVerboseLevel PrintCategories _SetFilters _OutputFormat PrintUsage _SetCountingStyle split getreader ParseArguments ResetErrorCounts stderr exit verbose_level PrintErrorCounts StreamReaderWriter ProcessFile getwriter PY2 int time write flush load join index int rfind datetime split getctime year strip extract_datetime_from_line get_start_time total_seconds strip write get_log_created_year close extract_datetime_from_line open | # TextField: 
Learning A Deep Direction Field for Irregular Scene Text Detection ## Introduction The code and trained models of TextField: Learning A Deep Direction Field for Irregular Scene Text Detection, TIP 2019 [[Paper]](https://arxiv.org/abs/1812.01393) ## Citation If this work helps your research, please cite the related works in your publications: ``` @article{xu2018textfield, title={TextField: Learning A Deep Direction Field for Irregular Scene Text Detection}, author={Xu, Yongchao and Wang, Yukang and Zhou, Wei and Wang, Yongpan and Yang, Zhibo and Bai, Xiang}, | 1,190 |
Yuliang-Liu/Box_Discretization_Network | ['scene text detection'] | ['Exploring the Capacity of an Orderless Box Discretization Network for Multi-orientation Scene Text Detection', 'Omnidirectional Scene Text Detection with Sequential-free Box Discretization'] | demo/test_single_image.py maskrcnn_benchmark/modeling/make_layers.py maskrcnn_benchmark/modeling/rpn/loss.py maskrcnn_benchmark/layers/roi_align.py maskrcnn_benchmark/utils/model_zoo.py maskrcnn_benchmark/solver/__init__.py maskrcnn_benchmark/layers/nms.py tests/test_feature_extractors.py tests/test_detectors.py tests/test_box_coder.py maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py maskrcnn_benchmark/layers/scale.py maskrcnn_benchmark/utils/imports.py tests/test_rpn_heads.py maskrcnn_benchmark/data/samplers/distributed.py maskrcnn_benchmark/modeling/backbone/fbnet.py maskrcnn_benchmark/layers/deform_conv_v2.py maskrcnn_benchmark/structures/mty.py maskrcnn_benchmark/utils/env.py tests/checkpoint.py maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py maskrcnn_benchmark/layers/iou_loss.py maskrcnn_benchmark/layers/_utils.py maskrcnn_benchmark/modeling/detector/__init__.py maskrcnn_benchmark/utils/metric_logger.py maskrcnn_benchmark/modeling/rpn/utils.py maskrcnn_benchmark/modeling/backbone/__init__.py maskrcnn_benchmark/modeling/rpn/__init__.py maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py maskrcnn_benchmark/structures/bounding_box.py setup.py maskrcnn_benchmark/data/datasets/evaluation/word/word_eval.py maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py maskrcnn_benchmark/modeling/rpn/retinanet/inference.py maskrcnn_benchmark/data/datasets/evaluation/voc/__init__.py maskrcnn_benchmark/modeling/backbone/fbnet_builder.py maskrcnn_benchmark/layers/__init__.py maskrcnn_benchmark/utils/comm.py maskrcnn_benchmark/modeling/roi_heads/ke_head/ke_head.py maskrcnn_benchmark/structures/segmentation_mask.py maskrcnn_benchmark/modeling/rpn/fcos/loss.py maskrcnn_benchmark/data/datasets/coco.py maskrcnn_benchmark/modeling/roi_heads/keypoint_head/loss.py maskrcnn_benchmark/data/datasets/evaluation/__init__.py maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py maskrcnn_benchmark/structures/keypoint.py maskrcnn_benchmark/modeling/poolers.py maskrcnn_benchmark/layers/misc.py maskrcnn_benchmark/data/datasets/evaluation/coco/__init__.py maskrcnn_benchmark/modeling/rpn/fcos/inference.py ic15_TIoU_metric/rrc_evaluation_funcs.py maskrcnn_benchmark/modeling/backbone/pan.py maskrcnn_benchmark/data/transforms/__init__.py maskrcnn_benchmark/modeling/rpn/rpn.py maskrcnn_benchmark/utils/cv2_util.py maskrcnn_benchmark/utils/timer.py maskrcnn_benchmark/layers/smooth_l1_loss.py maskrcnn_benchmark/data/datasets/word_dataset.py maskrcnn_benchmark/utils/miscellaneous.py maskrcnn_benchmark/modeling/rpn/retinanet/loss.py maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py tests/test_backbones.py maskrcnn_benchmark/structures/ke.py tools/train_net.py maskrcnn_benchmark/data/build.py maskrcnn_benchmark/modeling/roi_heads/keypoint_head/keypoint_head.py tests/test_metric_logger.py maskrcnn_benchmark/modeling/backbone/resnet.py maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py tests/test_predictors.py maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py maskrcnn_benchmark/utils/collect_env.py maskrcnn_benchmark/utils/logger.py maskrcnn_benchmark/layers/batch_norm.py 
maskrcnn_benchmark/data/datasets/concat_dataset.py maskrcnn_benchmark/utils/checkpoint.py tests/test_segmentation_mask.py maskrcnn_benchmark/structures/image_list.py maskrcnn_benchmark/data/datasets/evaluation/word/__init__.py maskrcnn_benchmark/data/collate_batch.py maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py maskrcnn_benchmark/modeling/roi_heads/keypoint_head/roi_keypoint_predictors.py maskrcnn_benchmark/modeling/utils.py maskrcnn_benchmark/modeling/roi_heads/roi_heads.py maskrcnn_benchmark/engine/__init__.py maskrcnn_benchmark/modeling/registry.py maskrcnn_benchmark/modeling/detector/generalized_rcnn.py maskrcnn_benchmark/modeling/roi_heads/keypoint_head/roi_keypoint_feature_extractors.py maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py maskrcnn_benchmark/data/datasets/voc.py maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py ic15_TIoU_metric/script.py maskrcnn_benchmark/data/samplers/__init__.py maskrcnn_benchmark/config/paths_catalog.py ic15_TIoU_metric/to_eval.py maskrcnn_benchmark/modeling/box_coder.py tests/env_tests/env.py demo/predictor.py tests/test_fbnet.py maskrcnn_benchmark/data/transforms/transforms.py tools/test_net.py maskrcnn_benchmark/modeling/roi_heads/ke_head/roi_ke_predictors.py maskrcnn_benchmark/modeling/backbone/backbone.py maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py maskrcnn_benchmark/structures/boxlist_ops.py tests/test_nms.py maskrcnn_benchmark/__init__.py tests/utils.py maskrcnn_benchmark/modeling/roi_heads/ke_head/loss.py maskrcnn_benchmark/config/defaults.py maskrcnn_benchmark/layers/sigmoid_focal_loss.py maskrcnn_benchmark/utils/registry.py tests/test_data_samplers.py maskrcnn_benchmark/modeling/rpn/anchor_generator.py maskrcnn_benchmark/modeling/backbone/msr.py maskrcnn_benchmark/data/datasets/__init__.py maskrcnn_benchmark/modeling/roi_heads/ke_head/inference.py maskrcnn_benchmark/data/__init__.py maskrcnn_benchmark/solver/lr_scheduler.py maskrcnn_benchmark/modeling/roi_heads/ke_head/roi_ke_feature_extractors.py maskrcnn_benchmark/utils/model_serialization.py tests/test_configs.py maskrcnn_benchmark/modeling/backbone/fpn.py maskrcnn_benchmark/modeling/rpn/fcos/fcos.py maskrcnn_benchmark/utils/c2_model_loading.py maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py maskrcnn_benchmark/config/__init__.py maskrcnn_benchmark/modeling/rpn/inference.py maskrcnn_benchmark/solver/build.py maskrcnn_benchmark/modeling/matcher.py maskrcnn_benchmark/modeling/detector/detectors.py maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py maskrcnn_benchmark/layers/roi_pool.py maskrcnn_benchmark/engine/inference.py maskrcnn_benchmark/data/datasets/list_dataset.py maskrcnn_benchmark/engine/trainer.py maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py maskrcnn_benchmark/data/transforms/build.py get_extensions paraToQuad_v2 paraToQuad_v3 vis_keypoints COCODemo main evaluation_imports evaluate_method default_evaluation_params validate_data DatasetCatalog ModelCatalog make_data_sampler _quantize make_data_loader make_batch_data_sampler build_dataset _compute_aspect_ratios BatchCollator COCODataset _has_only_empty_bbox has_valid_annotation _count_visible_keypoints ConcatDataset ListDataset PascalVOCDataset WordDataset evaluate COCOResults check_expected_results prepare_for_coco_segmentation evaluate_predictions_on_coco evaluate_box_proposals do_coco_evaluation prepare_for_coco_keypoint 
prepare_for_coco_detection coco_evaluation calc_detection_voc_ap do_voc_evaluation calc_detection_voc_prec_rec eval_detection_voc voc_evaluation py_cpu_pnms COCOResults check_expected_results prepare_for_coco_segmentation prepare_for_kes evaluate_predictions_on_coco contour_to_xys esd_pnms paraToQuad_v3 do_coco_evaluation mask_to_roRect ke_to_quad evaluate_box_proposals prepare_for_coco_detection word_evaluation DistributedSampler GroupedBatchSampler IterationBasedBatchSampler build_transforms RandomRotation Compose ToTensor Resize RandomCrop Normalize RandomHorizontalFlip compute_on_dataset inference _accumulate_predictions_from_multiple_gpus do_train reduce_loss_dict FrozenBatchNorm2d _DCNv2Pooling DCNPooling _DCNv2 DCNv2 DCN DCNv2Pooling IOULoss _NewEmptyTensorOp Conv2d interpolate BatchNorm2d ConvTranspose2d ROIAlign _ROIAlign _ROIPool ROIPool Scale SigmoidFocalLoss _SigmoidFocalLoss sigmoid_focal_loss_cpu smooth_l1_loss _load_C_extensions BalancedPositiveNegativeSampler BoxCoder conv_with_kaiming_uniform make_conv3x3 get_group_gn make_fc group_norm Matcher make_pooler LevelMapper Pooler cat build_resnet_fpn_p3p7_backbone build_backbone build_resnet_fpn_backbone build_resnet_backbone add_rpn_head add_roi_head_mask FBNetROIHead _get_rpn_stage FBNetRPNHead FBNetTrunk add_roi_head _get_head_stage _get_trunk_cfg create_builder add_conv_body add_roi_head_keypoints _get_divisible_by ConvBNRelu _expand_block_cfg FBNetBuilder CascadeConv3x3 get_blocks SEModule _add_to_arch IRFBlock Shift expand_stages_cfg expand_stage_cfg ShiftBlock5x5 _py2_round get_num_stages unify_arch_def _get_upsample_op Upsample Identity _block_cfgs_to_list ChannelShuffle add_archs LastLevelMaxPool FPN LastLevelP6P7 MSR ConcatUpConv GAU PAN FPA StemWithGN ResNetHead _make_stage DeformableConvWithFixedBatchNorm DeformableConvWithGN ResNet BottleneckWithGN Bottleneck StemWithFixedBatchNorm BottleneckWithFixedBatchNorm BaseStem build_detection_model GeneralizedRCNN CombinedROIHeads build_roi_heads build_roi_box_head ROIBoxHead PostProcessor make_roi_box_post_processor make_roi_box_loss_evaluator FastRCNNLossComputation make_roi_box_feature_extractor FPNXconv1fcFeatureExtractor FPN2MLPFeatureExtractor ResNet50Conv5ROIFeatureExtractor FPNPredictor make_roi_box_predictor FastRCNNPredictor heatmaps_to_keypoints Keypointer make_roi_keypoint_post_processor KeypointPostProcessor ROIKeypointHead build_roi_keypoint_head make_roi_keypoint_loss_evaluator project_keypoints_to_heatmap KeypointRCNNLossComputation _within_box cat_boxlist_with_keypoints KeypointRCNNFeatureExtractor make_roi_keypoint_feature_extractor KeypointRCNNPredictor make_roi_keypoint_predictor heatmaps_to_kes scores_to_probs kePostProcessor KEer kes_decode make_roi_ke_post_processor build_roi_ke_head ROIKEHead keep_only_positive_boxes project_kes_to_heatmap KERCNNLossComputation _within_box make_roi_ke_loss_evaluator KERCNNFPNFeatureExtractor make_roi_ke_feature_extractor make_roi_ke_predictor KERCNNC4Predictor paste_mask_in_image expand_boxes Masker make_roi_mask_post_processor MaskPostProcessorCOCOFormat expand_masks MaskPostProcessor make_roi_mask_loss_evaluator MaskRCNNLossComputation project_masks_on_boxes keep_only_positive_boxes ROIMaskHead build_roi_mask_head MaskRCNNFPNFeatureExtractor make_roi_mask_feature_extractor MaskRCNNC4Predictor MaskRCNNConv1x1Predictor make_roi_mask_predictor AnchorGenerator generate_anchors _scale_enum _whctrs make_anchor_generator _ratio_enum make_anchor_generator_retinanet _generate_anchors BufferList _mkanchors 
make_rpn_postprocessor RPNPostProcessor RPNLossComputation generate_rpn_labels make_rpn_loss_evaluator build_rpn RPNHeadFeatureSingleConv RPNModule RPNHead RPNHeadConvRegressor concat_box_prediction_layers permute_and_flatten FCOSHead FCOSModule build_fcos FCOSPostProcessor make_fcos_postprocessor make_fcos_loss_evaluator FCOSLossComputation make_retinanet_postprocessor RetinaNetPostProcessor make_retinanet_loss_evaluator generate_retinanet_labels RetinaNetLossComputation build_retinanet RetinaNetHead RetinaNetModule make_optimizer make_lr_scheduler WarmupMultiStepLR BoxList cat_boxlist boxlist_iou boxlist_nms remove_small_boxes _cat ImageList to_image_list KES textKES _create_flip_indices kes_to_heat_map PersonKeypoints kp_connections _create_flip_indices Keypoints keypoints_to_heat_map MTY SegmentationMask PolygonList PolygonInstance BinaryMaskList _rename_basic_resnet_weights load_resnet_c2_format load_c2_format _rename_weights_for_resnet _load_c2_pickled_weights _rename_fpn_weights DetectronCheckpointer Checkpointer collect_env_info get_pil_version synchronize get_world_size reduce_dict all_gather get_rank is_main_process findContours setup_environment setup_custom_environment import_file setup_logger SmoothedValue MetricLogger mkdir strip_prefix_if_present load_state_dict align_and_update_state_dicts cache_url _register_generic Registry get_time_str Timer TestCheckpointer TestBackbones TestBoxCoder TestConfigs SubsetSampler TestGroupedBatchSampler TestIterationBasedBatchSampler create_random_input create_model get_config_files _test_build_detectors _test_run_selected_detectors TestDetectors _test_primitive TestFBNetBuilder TestFeatureExtractors _test_feature_extractors TestMetricLogger TestNMS TestPredictors _test_predictors TestRPNHeads TestSegmentationMask load_config_from_file load_config get_config_root_path get_config_root_path main main train run_test get_extensions paraToQuad_v2 paraToQuad_v3 vis_keypoints COCODemo main evaluation_imports evaluate_method default_evaluation_params validate_data DatasetCatalog ModelCatalog make_data_sampler _quantize make_data_loader make_batch_data_sampler build_dataset _compute_aspect_ratios BatchCollator COCODataset _has_only_empty_bbox has_valid_annotation _count_visible_keypoints ConcatDataset ListDataset PascalVOCDataset WordDataset evaluate COCOResults check_expected_results prepare_for_coco_segmentation evaluate_predictions_on_coco evaluate_box_proposals do_coco_evaluation prepare_for_coco_keypoint prepare_for_coco_detection coco_evaluation calc_detection_voc_ap do_voc_evaluation calc_detection_voc_prec_rec eval_detection_voc voc_evaluation py_cpu_pnms COCOResults check_expected_results prepare_for_coco_segmentation prepare_for_kes evaluate_predictions_on_coco contour_to_xys esd_pnms paraToQuad_v3 do_coco_evaluation mask_to_roRect ke_to_quad evaluate_box_proposals prepare_for_coco_detection word_evaluation DistributedSampler GroupedBatchSampler IterationBasedBatchSampler build_transforms RandomRotation Compose ToTensor Resize RandomCrop Normalize RandomHorizontalFlip compute_on_dataset inference _accumulate_predictions_from_multiple_gpus do_train reduce_loss_dict FrozenBatchNorm2d _DCNv2Pooling DCNPooling _DCNv2 DCNv2 DCN DCNv2Pooling IOULoss _NewEmptyTensorOp Conv2d interpolate BatchNorm2d ConvTranspose2d ROIAlign _ROIAlign _ROIPool ROIPool Scale SigmoidFocalLoss _SigmoidFocalLoss sigmoid_focal_loss_cpu smooth_l1_loss _load_C_extensions BalancedPositiveNegativeSampler BoxCoder conv_with_kaiming_uniform make_conv3x3 get_group_gn make_fc 
group_norm Matcher make_pooler LevelMapper Pooler cat build_resnet_fpn_p3p7_backbone build_backbone build_resnet_fpn_backbone build_resnet_backbone add_rpn_head add_roi_head_mask FBNetROIHead _get_rpn_stage FBNetRPNHead FBNetTrunk add_roi_head _get_head_stage _get_trunk_cfg create_builder add_conv_body add_roi_head_keypoints _get_divisible_by ConvBNRelu _expand_block_cfg FBNetBuilder CascadeConv3x3 get_blocks SEModule _add_to_arch IRFBlock Shift expand_stages_cfg expand_stage_cfg ShiftBlock5x5 _py2_round get_num_stages unify_arch_def _get_upsample_op Upsample Identity _block_cfgs_to_list ChannelShuffle add_archs LastLevelMaxPool FPN LastLevelP6P7 MSR ConcatUpConv GAU PAN FPA StemWithGN ResNetHead _make_stage DeformableConvWithFixedBatchNorm DeformableConvWithGN ResNet BottleneckWithGN Bottleneck StemWithFixedBatchNorm BottleneckWithFixedBatchNorm BaseStem build_detection_model GeneralizedRCNN CombinedROIHeads build_roi_heads build_roi_box_head ROIBoxHead PostProcessor make_roi_box_post_processor make_roi_box_loss_evaluator FastRCNNLossComputation make_roi_box_feature_extractor FPNXconv1fcFeatureExtractor FPN2MLPFeatureExtractor ResNet50Conv5ROIFeatureExtractor FPNPredictor make_roi_box_predictor FastRCNNPredictor heatmaps_to_keypoints Keypointer make_roi_keypoint_post_processor KeypointPostProcessor ROIKeypointHead build_roi_keypoint_head make_roi_keypoint_loss_evaluator project_keypoints_to_heatmap KeypointRCNNLossComputation _within_box cat_boxlist_with_keypoints KeypointRCNNFeatureExtractor make_roi_keypoint_feature_extractor KeypointRCNNPredictor make_roi_keypoint_predictor heatmaps_to_kes scores_to_probs kePostProcessor KEer kes_decode make_roi_ke_post_processor build_roi_ke_head ROIKEHead keep_only_positive_boxes project_kes_to_heatmap KERCNNLossComputation _within_box make_roi_ke_loss_evaluator KERCNNFPNFeatureExtractor make_roi_ke_feature_extractor make_roi_ke_predictor KERCNNC4Predictor paste_mask_in_image expand_boxes Masker make_roi_mask_post_processor MaskPostProcessorCOCOFormat expand_masks MaskPostProcessor make_roi_mask_loss_evaluator MaskRCNNLossComputation project_masks_on_boxes keep_only_positive_boxes ROIMaskHead build_roi_mask_head MaskRCNNFPNFeatureExtractor make_roi_mask_feature_extractor MaskRCNNC4Predictor MaskRCNNConv1x1Predictor make_roi_mask_predictor AnchorGenerator generate_anchors _scale_enum _whctrs make_anchor_generator _ratio_enum make_anchor_generator_retinanet _generate_anchors BufferList _mkanchors make_rpn_postprocessor RPNPostProcessor RPNLossComputation generate_rpn_labels make_rpn_loss_evaluator build_rpn RPNHeadFeatureSingleConv RPNModule RPNHead RPNHeadConvRegressor concat_box_prediction_layers permute_and_flatten FCOSHead FCOSModule build_fcos FCOSPostProcessor make_fcos_postprocessor make_fcos_loss_evaluator FCOSLossComputation make_retinanet_postprocessor RetinaNetPostProcessor make_retinanet_loss_evaluator generate_retinanet_labels RetinaNetLossComputation build_retinanet RetinaNetHead RetinaNetModule make_optimizer make_lr_scheduler WarmupMultiStepLR BoxList cat_boxlist boxlist_iou boxlist_nms remove_small_boxes _cat ImageList to_image_list KES textKES _create_flip_indices kes_to_heat_map PersonKeypoints kp_connections _create_flip_indices Keypoints keypoints_to_heat_map MTY SegmentationMask PolygonList PolygonInstance BinaryMaskList _rename_basic_resnet_weights load_resnet_c2_format load_c2_format _rename_weights_for_resnet _load_c2_pickled_weights _rename_fpn_weights DetectronCheckpointer Checkpointer collect_env_info get_pil_version 
synchronize get_world_size reduce_dict all_gather get_rank is_main_process findContours setup_environment setup_custom_environment import_file setup_logger SmoothedValue MetricLogger mkdir strip_prefix_if_present load_state_dict align_and_update_state_dicts cache_url _register_generic Registry get_time_str Timer TestCheckpointer TestBackbones TestBoxCoder TestConfigs SubsetSampler TestGroupedBatchSampler TestIterationBasedBatchSampler create_random_input create_model get_config_files _test_build_detectors _test_run_selected_detectors TestDetectors _test_primitive TestFBNetBuilder TestFeatureExtractors _test_feature_extractors TestMetricLogger TestNMS TestPredictors _test_predictors TestRPNHeads TestSegmentationMask load_config_from_file load_config get_config_root_path get_config_root_path main main train run_test glob join dirname abspath permutations is_ccw Polygon LinearRing Point is_ccw Polygon LinearRing area Point xrange append abs array range enumerate minimum line NAMES tuple CONNECTIONS copy index get_cmap range circle len run_on_opencv_image imwrite ArgumentParser resize output_dir opts basename waitKey imshow freeze parse_args imread merge_from_file format img glob config_file shuffle merge_from_list mkdir enumerate join time isdir print add_argument extend COCODemo load_zip_file validate_lines_in_file compute_ap area round iteritems decode_utf8 append get_text_intersection_over_union_precision polygon_from_points range get_text_intersection_over_union_recall import_module get_intersection_over_union load_zip_file empty get_tl_line_values_from_file_contents float namedtuple print int8 rectangle_to_polygon Rectangle get_intersection zeros len get ConcatDataset getattr append factory SequentialSampler RandomSampler list sorted map copy get_img_info append float range len BatchSampler IterationBasedBatchSampler GroupedBatchSampler _quantize _compute_aspect_ratios format import_file make_data_sampler getLogger IMS_PER_BATCH PATHS_CATALOG MAX_ITER get_world_size NUM_WORKERS BatchCollator DataLoader warning make_batch_data_sampler SIZE_DIVISIBILITY build_transforms build_dataset DatasetCatalog append _has_only_empty_bbox PascalVOCDataset isinstance COCODataset dict WordDataset __name__ items join format COCOResults check_expected_results getLogger prepare_for_coco_segmentation prepare_for_coco_keypoint item info save evaluate_box_proposals prepare_for_coco_detection get_img_info convert tolist extend resize enumerate get_img_info decode Masker tolist extend masker expand tqdm resize get_field enumerate convert tolist extend resize get_field enumerate arange zeros_like resize max boxlist_iou append loadAnns sum range cat getAnnIds mean float enumerate get_img_info reshape sort convert min zeros as_tensor len accumulate summarize evaluate COCOeval error format info getLogger get_img_info format info get_groundtruth eval_detection_voc resize append enumerate calc_detection_voc_ap calc_detection_voc_prec_rec defaultdict cumsum astype extend copy keys numpy array unique zip append zeros argmax max arange concatenate empty nan sum max range len warning info getLogger prepare_for_kes get_valid_y reshape boxPoints get_valid_x int0 minAreaRect enumerate CHAIN_APPROX_NONE findContours copy contour_to_xys numpy RETR_CCOMP append argmax paraToQuad_v3 Polygon area shape intersection append zeros range len append py_cpu_pnms array NMS_THRESH PNMS tolist extend tqdm esd_pnms KEer resize get_field ke_to_quad numpy append enumerate print TO_BGR255 ROTATE_DEGREE MIN_SIZE_TEST Compose MIN_SIZE_TRAIN 
Normalize ROTATE_PROB_TRAIN MAX_SIZE_TRAIN MAX_SIZE_TEST CROP_PROB_TRAIN range update tqdm eval device to enumerate update list sorted getLogger warning all_gather keys getLogger save Timer device dataset get_time_str _accumulate_predictions_from_multiple_gpus tic format synchronize total_time get_world_size info load join toc dict isfile compute_on_dataset len get_world_size getLogger model zero_grad save str MetricLogger to sum update format timedelta info reduce_loss_dict enumerate time isinstance backward global_avg train step len _output_size tuple dtype sigmoid unsqueeze device log abs where join glob extend dirname abspath EPSILON DIM_PER_GP NUM_GROUPS group_norm Conv2d bias normal_ kaiming_normal_ ReLU append weight constant_ kaiming_uniform_ bias weight constant_ Linear POOLER_RESOLUTION POOLER_SCALES POOLER_SAMPLING_RATIO Pooler OrderedDict ResNet Sequential BACKBONE_OUT_CHANNELS FPN MSR ResNet Sequential OrderedDict RES2_OUT_CHANNELS BACKBONE_OUT_CHANNELS MSR_ON PAN FPN ResNet Sequential OrderedDict RES2_OUT_CHANNELS BACKBONE_OUT_CHANNELS get format FBNetBuilder SCALE_FACTOR WIDTH_DIVISOR DW_CONV_SKIP_BN unify_arch_def DW_CONV_SKIP_RELU loads ARCH_DEF BN_TYPE info ARCH get_num_stages get_blocks get range FBNetTrunk Sequential OrderedDict create_builder last_depth get format warn get_blocks range len create_builder FBNetRPNHead RPNHeadConvRegressor out_channels get get_blocks create_builder create_builder create_builder int Upsample append deepcopy range append expand_stage_cfg append expand_stage_cfg enumerate enumerate update deepcopy _block_cfgs_to_list _add_to_arch max append deepcopy append transformation_module range MASK_ON CombinedROIHeads RETINANET_ON KE_ON KEYPOINT_ON append CLS_AGNOSTIC_BBOX_REG BoxCoder DETECTIONS_PER_IMG PostProcessor BBOX_REG_WEIGHTS USE_FPN NMS SCORE_THRESH POSITIVE_FRACTION FG_IOU_THRESHOLD CLS_AGNOSTIC_BBOX_REG BATCH_SIZE_PER_IMAGE BoxCoder BalancedPositiveNegativeSampler BBOX_REG_WEIGHTS BG_IOU_THRESHOLD Matcher FastRCNNLossComputation int transpose maximum resize ceil zeros argmax range len Keypointer KeypointPostProcessor convert cat_boxlist add_field get_fields cat POSITIVE_FRACTION FG_IOU_THRESHOLD BATCH_SIZE_PER_IMAGE BalancedPositiveNegativeSampler KeypointRCNNLossComputation BG_IOU_THRESHOLD Matcher RESOLUTION sum exp max range enumerate arange RESCORING NUM_KES resize argmax max transpose set_printoptions shape ceil append RESCORING_GAMA sum kes_decode range copy int scores_to_probs min maximum zeros RESOLUTION len kePostProcessor POSTPROCESS_KES KEer get_field squeeze append kes_to_heat_map mty convert kes_x kes_y bbox POSITIVE_FRACTION FG_IOU_THRESHOLD BATCH_SIZE_PER_IMAGE BalancedPositiveNegativeSampler BG_IOU_THRESHOLD Matcher RESOLUTION KERCNNLossComputation zeros_like float new_zeros int uint8 expand_masks min float32 expand interpolate zeros to max POSTPROCESS_MASKS POSTPROCESS_MASKS_THRESHOLD Masker MaskPostProcessor zip convert device resize append to crop get_mask_tensor FG_IOU_THRESHOLD MaskRCNNLossComputation BG_IOU_THRESHOLD Matcher RESOLUTION AnchorGenerator STRADDLE_THRESH ANCHOR_SIZES ANCHOR_STRIDE USE_FPN ASPECT_RATIOS AnchorGenerator STRADDLE_THRESH OCTAVE ANCHOR_SIZES tuple SCALES_PER_OCTAVE append float ASPECT_RATIOS ANCHOR_STRIDES range vstack _ratio_enum array hstack sqrt _whctrs round _mkanchors _whctrs _mkanchors NMS_THRESH FPN_POST_NMS_TOP_N_TRAIN POST_NMS_TOP_N_TRAIN RPNPostProcessor POST_NMS_TOP_N_TEST MIN_SIZE PRE_NMS_TOP_N_TRAIN FPN_POST_NMS_TOP_N_TEST PRE_NMS_TOP_N_TEST get_field POSITIVE_FRACTION 
FG_IOU_THRESHOLD RPNLossComputation BATCH_SIZE_PER_IMAGE BalancedPositiveNegativeSampler BG_IOU_THRESHOLD Matcher RETINANET_ON FCOS_ON reshape permute view permute_and_flatten reshape shape zip append FPN_POST_NMS_TOP_N_TRAIN NMS_TH DETECTIONS_PER_IMG FCOSPostProcessor INFERENCE_TH PRE_NMS_TOP_N_TRAIN PRE_NMS_TOP_N FCOSLossComputation NMS_TH DETECTIONS_PER_IMG INFERENCE_TH PRE_NMS_TOP_N RetinaNetPostProcessor get_field FG_IOU_THRESHOLD RetinaNetLossComputation SigmoidFocalLoss LOSS_GAMMA BG_IOU_THRESHOLD Matcher LOSS_ALPHA WEIGHT_DECAY_BIAS SGD named_parameters BASE_LR BIAS_LR_FACTOR WEIGHT_DECAY convert _box_nms get_field bbox mode squeeze unbind bbox clamp min area max len add_field size set BoxList _cat fields mode int list isinstance tuple copy_ zero_ zip ceil Tensor update copy long long enumerate max _rename_basic_resnet_weights sorted format getLogger OrderedDict from_numpy info keys _rename_fpn_weights CONV_BODY _load_c2_pickled_weights replace _rename_weights_for_resnet get_pretty_env_info barrier get_world_size from_buffer dumps get_world_size loads zip append to max cat get_world_size startswith get setup_custom_environment setup_environment import_file spec_from_file_location exec_module module_from_spec setFormatter join getLogger addHandler StreamHandler Formatter DEBUG setLevel FileHandler makedirs max list sorted format view getLogger tuple tolist shape info keys enumerate len OrderedDict items sorted keys strip_prefix_if_present align_and_update_state_dicts state_dict join basename format replace synchronize write search group getenv path _download_url_to_file expanduser urlparse makedirs str timedelta glob join get_config_root_path deepcopy to freeze build_detection_model int SIZE_DIVISIBILITY rand MIN_SIZE_TRAIN to_image_list append to assertGreater get_config_files len assertGreater len format Size assertEqual op shape to get deepcopy items assertGreater format print fe assertEqual rand Size BoxList getattr builder assertIsNotNone load_config len get deepcopy items assertGreater format print fe rand builder load_config len join get_config_root_path merge_from_file deepcopy realpath join dirname abspath make_data_loader OUTPUT_DIR collect_env_info set_device MASK_ON get_rank to inference KEYPOINT_ON TEST DEVICE build_detection_model init_process_group synchronize setup_logger WEIGHT info zip KE_ON load DetectronCheckpointer local_rank len DEVICE make_optimizer load update build_detection_model CHECKPOINT_PERIOD make_data_loader WEIGHT DistributedDataParallel DetectronCheckpointer do_train device to OUTPUT_DIR make_lr_scheduler join zip TEST synchronize MASK_ON inference mkdir make_data_loader empty_cache OUTPUT_DIR module KEYPOINT_ON enumerate len distributed run_test train | # Box_Discretization_Network This repository is built on the **pytorch [[maskrcnn_benchmark]](https://github.com/facebookresearch/maskrcnn-benchmark)**. The method is the foundation of our ReCTs-competition method [[link]](https://rrc.cvc.uab.es/?ch=12), which won the **championship**. 
PPT link [[Google Drive]](https://drive.google.com/file/d/1xgVx04RNbCbe6f1vCi-ccSqt0Ig07VaK/view?usp=sharing)[[Baidu Cloud]](https://pan.baidu.com/s/1g_uzMaag1w2LOm1Q_Cgk0g) Generate your own **JSON**: [[Google Drive]](https://drive.google.com/file/d/1Rfv1pxDpoFGSqvzxJZCmGfpKhUe8P3Bk/view?usp=sharing)[[Baidu Cloud]](https://pan.baidu.com/s/1eEcczeNP9z5HzmxylkxCmw) Brief introduction (in Chinese): [[Google Drive]](https://drive.google.com/file/d/1ED4TCXUFqV0fzBX4Cj29B6ndy1dqm1y-/view?usp=sharing)[[Baidu Cloud]](https://pan.baidu.com/s/1i0qngj3L3_Ezygp5KEXDCA&shfl=sharepset) # Competition related Competition model and config files (these need a lot of video memory): * Paper [[Link]](https://arxiv.org/pdf/1912.09629.pdf) (Exploring the Capacity of Sequential-free Box Discretization Network for Omnidirectional Scene Text Detection) * Config file [[BaiduYun Link]](https://pan.baidu.com/s/1yKiyFGIQlooc_jGkMECREA). All models below use this config file; only the directory differs. Results below are the multi-scale ensemble results. Full details are described in our updated paper. * MLT 2017 Model [[BaiduYun Link]](https://pan.baidu.com/s/10p6ka_fYdACAdnlOHZcUSw). | 1,191 |
Yuliang-Liu/Curve-Text-Detector | ['scene text detection', 'curved text detection'] | ['Detecting Curve Text in the Wild: New Dataset and New Solution'] | lib/datasets/text.py caffe/python/caffe/classifier.py lib/rpn/generate_anchors.py caffe/python/caffe/test/test_net.py lib/datasets/imdb.py caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py tools/ctw1500_evaluation/voc_eval_polygon.py caffe/tools/extra/resize_and_crop_images.py caffe/examples/pycaffe/caffenet.py caffe/src/caffe/test/test_data/generate_sample_data.py lib/setup.py lib/datasets/__init__.py lib/fast_rcnn/test.py caffe/python/caffe/coord_map.py lib/fast_rcnn/bbox_transform.py lib/datasets/factory.py tools/demo.py caffe/python/detect.py lib/datasets/voc_eval.py lib/rpn/__init__.py tools/ctw1500_evaluation/sortdetection.py caffe/tools/extra/summarize.py lib/fast_rcnn/train.py caffe/python/caffe/detector.py caffe/python/draw_net.py lib/utils/__init__.py caffe/examples/finetune_flickr_style/assemble_data.py lib/nms/py_cpu_nms.py lib/rpn/generate.py caffe/tools/extra/extract_seconds.py lib/roi_data_layer/roidb.py lib/rpn/proposal_layer.py caffe/python/caffe/io.py caffe/python/caffe/test/test_layer_type_list.py lib/roi_data_layer/__init__.py lib/rpn/anchor_target_layer.py caffe/python/caffe/__init__.py caffe/examples/pycaffe/layers/pyloss.py caffe/examples/web_demo/app.py lib/utils/timer.py tools/train_net.py lib/fast_rcnn/__init__.py caffe/python/classify.py tools/ctw1500_evaluation/test_ctw1500_eval.py caffe/python/caffe/draw.py caffe/examples/pycaffe/tools.py lib/fast_rcnn/nms_wrapper.py lib/datasets/ds_utils.py tools/test_net.py tools/_init_paths.py caffe/scripts/download_model_binary.py caffe/python/caffe/test/test_python_layer_with_param_str.py lib/datasets/tools/mcg_munge.py caffe/tools/extra/parse_log.py lib/utils/blob.py caffe/python/caffe/net_spec.py caffe/examples/web_demo/exifutil.py caffe/python/caffe/test/test_python_layer.py caffe/python/caffe/test/test_solver.py lib/roi_data_layer/layer.py caffe/scripts/cpp_lint.py lib/rpn/proposal_fg_disHW_target_layer.py caffe/scripts/copy_notebook.py caffe/python/caffe/test/test_io.py lib/roi_data_layer/minibatch.py caffe/python/caffe/pycaffe.py lib/fast_rcnn/config.py caffe/python/caffe/test/test_coord_map.py caffe/python/caffe/test/test_net_spec.py download_image make_net max_pool caffenet conv_relu fc_relu CaffeSolver SimpleTransformer EuclideanLossLayer start_tornado start_from_terminal embed_image_html classify_upload index allowed_file ImagenetClassifier classify_url open_oriented_im apply_orientation main main main parse_args Classifier coord_map UndefinedMapException conv_params coord_map_from_to AxisMismatchException inverse crop_params compose crop Detector get_edge_label draw_net get_layer_label get_pydot_graph choose_color_by_layertype get_pooling_types_dict draw_net_to_file Transformer blobproto_to_array datum_to_array array_to_blobproto arraylist_to_blobprotovecor_str array_to_datum resize_image blobprotovector_str_to_arraylist load_image oversample Layers Function Parameters Top NetSpec assign_proto param_name_dict to_proto _Net_blobs _Net_forward_all _Net_set_input_arrays _Net_backward _Net_params _Net_forward _Net_IdNameWrapper _Net_outputs _Net_forward_backward_all _Net_blob_loss_weights _Net_batch _Net_inputs TestCoordMap coord_net_spec TestBlobProtoToArray TestArrayToDatum TestLayerTypeList simple_net_file TestNet lenet TestNetSpec silent_net anon_lenet exception_net_file parameter_net_file SimpleLayer phase_net_file TestPythonLayer ParameterLayer 
PhaseLayer python_net_file ExceptionLayer SimpleParamLayer TestLayerWithParam python_param_net_file TestSolver ParseNolintSuppressions CheckVlogArguments CheckSectionSpacing FindNextMultiLineCommentEnd ReplaceAll CheckForFunctionLengths _SetOutputFormat _IsTestFilename _VerboseLevel CheckBraces RemoveMultiLineComments ResetNolintSuppressions CheckForNonStandardConstructs _SetVerboseLevel PrintUsage _NestingState CheckIncludeLine CheckAccess _CppLintState Search CheckInvalidIncrement RemoveMultiLineCommentsFromRange CleansedLines CheckForBadCharacters UpdateIncludeState FindPreviousMatchingAngleBracket CheckEmptyBlockBody FindNextMultiLineCommentStart Match _NamespaceInfo CheckMakePairUsesDeduction CheckCheck IsBlankLine _SetFilters ProcessLine _FunctionState CheckPosixThreading GetLineWidth GetHeaderGuardCPPVariable IsCppString _IncludeState CheckSpacing _ClassInfo CheckForCopyright IsErrorSuppressedByNolint ProcessFileData CheckForMultilineCommentsAndStrings CloseExpression _PreprocessorInfo _OutputFormat CheckForIncludeWhatYouUse CheckSpacingForFunctionCall FindEndOfExpressionInLine FindNextMatchingAngleBracket _SetCountingStyle ProcessFile _IncludeError CleanseRawStrings CheckAltTokens CheckForNewlineAtEOF ParseArguments CheckForNonConstReference PrintCategories _Filters main FilesBelongToSameModule CheckCStyleCast FileInfo _BlockInfo CheckForHeaderGuard CheckCaffeDataLayerSetUp ReverseCloseExpression CleanseComments _DropCommonSuffixes _ClassifyInclude CheckStyle CheckCaffeAlternatives FindStartOfExpressionInLine _ShouldPrintError CheckComment Error _GetTextInside CheckLanguage CheckCaffeRandom GetPreviousNonBlankLine reporthook parse_readme_frontmatter model_checks_out valid_dirname get_start_time extract_seconds extract_datetime_from_line get_log_created_year find_in_path customize_compiler_for_nvcc custom_build_ext locate_cuda unique_boxes xywh_to_xyxy validate_boxes xyxy_to_xywh filter_small_boxes get_imdb list_imdbs clip_boxes bbox_transform bbox_transform_inv info_syn_transform_inv_w info_syn_transform_inv_h info_syn_transform_hw cfg_from_file cfg_from_list get_output_dir _merge_a_into_b nms pnms py_cpu_pnms py_cpu_nms _project_im_rois _sample_rois _get_image_blob get_minibatch _get_bbox_regression_labels add_bbox_regression_targets prepare_roidb _compute_targets im_list_to_blob prep_im_for_blob Timer _get_blobs vis _get_image_blob nps im_detect parse_args add_path add_path imread urlretrieve Convolution InnerProduct Data SoftmaxWithLoss LRN Accuracy max_pool InnerProduct conv_relu fc_relu Dropout get read info load_image classify_image StringIO join replace info secure_filename save filename open_oriented_im classify_image fromarray replace astype save resize StringIO iteritems listen HTTPServer format print start WSGIContainer update start_tornado add_option OptionParser debug port parse_args ImagenetClassifier forward run hasattr _getexif astype float32 tile apply_orientation open transpose model_def endswith ArgumentParser save mean_file channel_swap output_file dirname expanduser parse_args input_file predict Classifier set_mode_cpu load time isdir print add_argument set_mode_gpu pretrained_model gpu len DataFrame Detector format to_hdf detect_selective_search mean set_index to_csv detect_windows read_csv add_argument ArgumentParser read NetParameter output_image_file rankdir Merge draw_net_to_file get params array get params array crop_params conv_params pop collect_bottoms add fn coord_map compose coord_map_from_to items DESCRIPTOR batch_size str num_output 
get_pooling_types_dict add_edge get_edge_label Dot get_layer_label values choose_color_by_layertype Edge Node bottom append type layer add_node top data array diff shape BlobProto extend flat extend BlobProtoVector ParseFromString BlobProtoVector extend tostring shape Datum flat data len astype float32 tile zoom tuple resize fill empty array concatenate shape tile empty array LayerParameter NetParameter _to_proto extend Counter OrderedDict values iteritems hasattr isinstance extend add getattr setattr OrderedDict _blobs _blob_names zip OrderedDict _blob_loss_weights _blob_names zip OrderedDict list keys list keys iteritems layers index set outputs _forward len iteritems _backward layers inputs index set len iteritems asarray extend copy next _batch itervalues forward len iteritems izip_longest asarray backward extend copy next _batch itervalues zip forward len ascontiguousarray concatenate itervalues zeros next range len data Pooling pool Convolution NetSpec Deconvolution conv Input NamedTemporaryFile str close write data Pooling pool1 conv2 pool2 ip1 relu1 SoftmaxWithLoss Convolution NetSpec DummyData ip2 ReLU InnerProduct label conv1 Pooling SoftmaxWithLoss Convolution DummyData ReLU InnerProduct data NetSpec DummyData Silence data2 error search add group clear compile compile compile SetOutputFormat SetCountingStyle SetFilters _Filters startswith IsErrorSuppressedByNolint _ShouldPrintError write IncrementErrorCount replace append Match group find startswith endswith range error FindNextMultiLineCommentEnd RemoveMultiLineCommentsFromRange FindNextMultiLineCommentStart rstrip find xrange len FindEndOfExpressionInLine xrange len FindStartOfExpressionInLine error min search I xrange len FileInfo RepositoryName sep sub ParseNolintSuppressions error startswith split GetHeaderGuardCPPVariable enumerate error enumerate error len error replace count error find error find error find error find error Search error match InnermostClass replace error escape Match Search error group Search Check error lines Count End group Begin xrange NumLines Match raw_lines Search error match group error Match group pop group append Search pop group append Search elided replace CheckSpacingForFunctionCall rfind error len group min CloseExpression NumLines sub xrange find CheckComment Match Search lines_without_raw_strings error group starting_linenum Match range Search error rfind len group ReverseCloseExpression Search Match CloseExpression find error Match CloseExpression find elided error strip group FindEndOfExpressionInLine xrange find Match CloseExpression len error Match finditer normalize isinstance GetLineWidth int InnermostClass CheckCheck error CheckAltTokens CheckBraces CheckSpacing CheckSectionSpacing CheckEmptyBlockBody CheckAccess GetHeaderGuardCPPVariable lines_without_raw_strings _DropCommonSuffixes RepositoryName match split CheckNextIncludeOrder CanonicalizeAlphabeticalOrder FileInfo error search group SetLastHeader match _ClassifyInclude Match pop end search set itervalues append M rstrip replace CheckCStyleCast error _GetTextInside CheckIncludeLine search group lstrip startswith Match ResetSection Search split rfind error group ReverseCloseExpression lstrip xrange findall Match Search ReplaceAll error Match Search endswith replace setdefault group search CleanseComments open FilesBelongToSameModule error search copy sub xrange NumLines FullName keys error search CheckPosixThreading ParseNolintSuppressions CheckVlogArguments CheckMakePairUsesDeduction CheckCaffeDataLayerSetUp CheckLanguage 
CheckInvalidIncrement CheckCaffeRandom CheckForNonConstReference check_fn Update CheckForNonStandardConstructs CheckStyle raw_lines CheckForMultilineCommentsAndStrings CheckCaffeAlternatives CheckForFunctionLengths CleansedLines _NestingState CheckForBadCharacters CheckForNewlineAtEOF _IncludeState NumLines RemoveMultiLineComments CheckForCopyright ResetNolintSuppressions CheckForHeaderGuard xrange CheckCompletedBlocks CheckForIncludeWhatYouUse ProcessLine _FunctionState Error rstrip endswith len write ProcessFileData _SetVerboseLevel range split write exit join write exit _VerboseLevel int getopt _SetOutputFormat set _SetVerboseLevel PrintCategories _SetFilters _OutputFormat PrintUsage _SetCountingStyle split getreader ParseArguments ResetErrorCounts stderr exit verbose_level PrintErrorCounts StreamReaderWriter ProcessFile getwriter int time write flush load join index int rfind datetime split getctime year strip extract_datetime_from_line get_start_time total_seconds strip write get_log_created_year close extract_datetime_from_line open pathsep pjoin exists split find_in_path iteritems pjoin pathsep dirname sep append _compile compiler_so dot array unique ctw1500 transpose log dtype exp astype shape zeros transpose zeros_like dtype astype zeros dtype astype zeros minimum maximum join EXP_DIR name abspath ROOT_DIR makedirs iteritems ndarray isinstance type array _merge_a_into_b literal_eval zip split append maximum minimum Polygon area shape xrange intersection append zeros len FG_FRACTION HAS_RPN empty _get_image_blob randint round array len minimum size choice append _get_bbox_regression_labels MAX_SIZE prep_im_for_blob PIXEL_MEANS xrange append im_list_to_blob imread len zeros BBOX_INSIDE_WEIGHTS AGNOSTIC shape roidb _image_index xrange image_path_at len EPS BBOX_NORMALIZE_STDS BBOX_NORMALIZE_MEANS BBOX_NORMALIZE_TARGETS_PRECOMPUTED sqrt xrange tile zeros array len bbox_transform ascontiguousarray zeros argmax bbox_overlaps transpose xrange zeros max len min astype float32 shape resize float max min astype float32 SCALES shape resize float max _get_image_blob clip_boxes reshape astype float32 _get_blobs copy bbox_transform_inv shape info_syn_transform_inv_h info_syn_transform_inv_w forward array minimum line waitKey imshow xrange resize Polygon print delete xrange append exit print_help insert | # SCUT-CTW1500 Datasets **We have updated annotations for both train and test set.** Train: 1000 images [[images]](https://universityofadelaide.box.com/shared/static/py5uwlfyyytbb2pxzq9czvu6fuqbjdh8.zip)[[annos]](https://universityofadelaide.box.com/shared/static/jikuazluzyj4lq6umzei7m2ppmt3afyw.zip) Additional point annotation for each character is included. Example can be referred to [here](https://github.com/Yuliang-Liu/Curve-Text-Detector/tree/master/data). ``` wget -O train_images.zip https://universityofadelaide.box.com/shared/static/py5uwlfyyytbb2pxzq9czvu6fuqbjdh8.zip wget -O train_labels.zip https://universityofadelaide.box.com/shared/static/jikuazluzyj4lq6umzei7m2ppmt3afyw.zip ``` Test: 500 images [[images]](https://universityofadelaide.box.com/shared/static/t4w48ofnqkdw7jyc4t11nsukoeqk9c3d.zip)[[annos]](https://cloudstor.aarnet.edu.au/plus/s/uoeFl0pCN9BOCN5) ``` | 1,192 |
Yuliang-Liu/bezier_curve_text_spotting | ['scene text detection', 'text spotting'] | ['ABCNet: Real-time Scene Text Spotting with Adaptive Bezier-Curve Network'] | maskrcnn_benchmark/layers/balanced_l1_loss.py maskrcnn_benchmark/modeling/make_layers.py maskrcnn_benchmark/modeling/rpn/loss.py maskrcnn_benchmark/layers/roi_align.py maskrcnn_benchmark/engine/searcher.py maskrcnn_benchmark/utils/model_zoo.py maskrcnn_benchmark/solver/__init__.py maskrcnn_benchmark/layers/nms.py tests/test_feature_extractors.py tests/test_detectors.py tests/test_box_coder.py maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py maskrcnn_benchmark/layers/scale.py maskrcnn_benchmark/utils/imports.py tests/test_rpn_heads.py maskrcnn_benchmark/data/samplers/distributed.py maskrcnn_benchmark/modeling/backbone/fbnet.py maskrcnn_benchmark/modeling/one_stage_head/align/align.py maskrcnn_benchmark/utils/env.py tests/checkpoint.py maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py maskrcnn_benchmark/layers/iou_loss.py maskrcnn_benchmark/layers/_utils.py maskrcnn_benchmark/layers/dcn/__init__.py maskrcnn_benchmark/layers/dcn/deform_conv_func.py maskrcnn_benchmark/modeling/detector/__init__.py maskrcnn_benchmark/utils/metric_logger.py maskrcnn_benchmark/modeling/rpn/utils.py maskrcnn_benchmark/modeling/backbone/__init__.py maskrcnn_benchmark/modeling/rpn/__init__.py maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_feature_extractors.py maskrcnn_benchmark/structures/bounding_box.py setup.py maskrcnn_benchmark/data/datasets/evaluation/word/word_eval.py maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py maskrcnn_benchmark/modeling/rpn/retinanet/inference.py maskrcnn_benchmark/modeling/backbone/fbnet_builder.py maskrcnn_benchmark/utils/comm.py maskrcnn_benchmark/layers/__init__.py maskrcnn_benchmark/layers/context_block.py maskrcnn_benchmark/layers/dcn/deform_pool_func.py maskrcnn_benchmark/structures/segmentation_mask.py maskrcnn_benchmark/modeling/rpn/fcos/loss.py maskrcnn_benchmark/data/datasets/coco.py maskrcnn_benchmark/data/datasets/evaluation/__init__.py maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py maskrcnn_benchmark/modeling/poolers.py maskrcnn_benchmark/layers/misc.py maskrcnn_benchmark/modeling/rpn/fcos/inference.py maskrcnn_benchmark/layers/bezier_align.py maskrcnn_benchmark/modeling/backbone/pan.py maskrcnn_benchmark/data/transforms/__init__.py maskrcnn_benchmark/modeling/rpn/rpn.py maskrcnn_benchmark/utils/cv2_util.py maskrcnn_benchmark/utils/timer.py maskrcnn_benchmark/layers/smooth_l1_loss.py maskrcnn_benchmark/data/datasets/word_dataset.py maskrcnn_benchmark/utils/miscellaneous.py maskrcnn_benchmark/modeling/one_stage_head/__init__.py maskrcnn_benchmark/modeling/backbone/resnet_layers.py maskrcnn_benchmark/modeling/rpn/retinanet/loss.py tools/tests/single_demo_bezier.py tests/test_backbones.py tools/train_net.py maskrcnn_benchmark/layers/dcn/deform_conv_module.py maskrcnn_benchmark/data/build.py maskrcnn_benchmark/modeling/backbone/hnasnet.py tests/test_metric_logger.py maskrcnn_benchmark/modeling/backbone/resnet.py maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py tests/test_predictors.py maskrcnn_benchmark/modeling/roi_heads/mask_head/loss.py maskrcnn_benchmark/utils/collect_env.py maskrcnn_benchmark/utils/logger.py maskrcnn_benchmark/layers/batch_norm.py maskrcnn_benchmark/modeling/backbone/necks.py maskrcnn_benchmark/data/datasets/concat_dataset.py maskrcnn_benchmark/utils/checkpoint.py 
maskrcnn_benchmark/structures/image_list.py tests/test_segmentation_mask.py maskrcnn_benchmark/data/datasets/evaluation/word/__init__.py maskrcnn_benchmark/data/collate_batch.py maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py maskrcnn_benchmark/modeling/utils.py maskrcnn_benchmark/modeling/roi_heads/roi_heads.py maskrcnn_benchmark/engine/__init__.py maskrcnn_benchmark/modeling/registry.py maskrcnn_benchmark/modeling/detector/generalized_rcnn.py maskrcnn_benchmark/modeling/rpn/fcos/predictors.py maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py maskrcnn_benchmark/layers/non_local.py maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py maskrcnn_benchmark/layers/seg_loss.py maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py maskrcnn_benchmark/data/samplers/__init__.py maskrcnn_benchmark/config/paths_catalog.py maskrcnn_benchmark/utils/measure.py maskrcnn_benchmark/engine/bbox_aug.py maskrcnn_benchmark/modeling/box_coder.py maskrcnn_benchmark/modeling/detector/one_stage.py demo/predictor.py tests/test_fbnet.py maskrcnn_benchmark/data/transforms/transforms.py tests/env_tests/env.py tools/test_net.py maskrcnn_benchmark/modeling/backbone/backbone.py maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py demo/vis_bezier.py maskrcnn_benchmark/modeling/backbone/resnet_bn.py maskrcnn_benchmark/structures/boxlist_ops.py tests/test_nms.py maskrcnn_benchmark/__init__.py maskrcnn_benchmark/modeling/one_stage_head/one_stage_head.py tests/utils.py maskrcnn_benchmark/data/datasets/rec.py maskrcnn_benchmark/config/defaults.py maskrcnn_benchmark/layers/sigmoid_focal_loss.py maskrcnn_benchmark/utils/registry.py tests/test_data_samplers.py maskrcnn_benchmark/modeling/rpn/anchor_generator.py maskrcnn_benchmark/modeling/backbone/msr.py maskrcnn_benchmark/data/datasets/__init__.py maskrcnn_benchmark/data/__init__.py maskrcnn_benchmark/data/datasets/bezier.py maskrcnn_benchmark/solver/lr_scheduler.py maskrcnn_benchmark/utils/model_serialization.py tests/test_configs.py maskrcnn_benchmark/modeling/backbone/fpn.py maskrcnn_benchmark/modeling/rpn/fcos/fcos.py maskrcnn_benchmark/utils/c2_model_loading.py maskrcnn_benchmark/modeling/roi_heads/box_head/inference.py maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py maskrcnn_benchmark/config/__init__.py maskrcnn_benchmark/modeling/backbone/mobilenet.py maskrcnn_benchmark/modeling/rpn/inference.py maskrcnn_benchmark/solver/build.py maskrcnn_benchmark/modeling/matcher.py maskrcnn_benchmark/modeling/detector/detectors.py maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py maskrcnn_benchmark/layers/roi_pool.py maskrcnn_benchmark/engine/inference.py maskrcnn_benchmark/data/datasets/list_dataset.py maskrcnn_benchmark/engine/trainer.py maskrcnn_benchmark/layers/dcn/deform_pool_module.py maskrcnn_benchmark/data/transforms/build.py get_extensions Resize vis_offsets COCODemo main DatasetCatalog ModelCatalog make_data_sampler _quantize make_data_loader make_batch_data_sampler build_dataset _compute_aspect_ratios BatchCollator BBoxAugCollator BEZIER COCODataset _has_only_empty_bbox has_valid_annotation _count_visible_keypoints ConcatDataset ListDataset REC WordDataset evaluate py_cpu_pnms prepare_for_bezier COCOResults check_expected_results prepare_for_coco_segmentation evaluate_predictions_on_coco bezier_to_poly contour_to_xys esd_pnms do_coco_evaluation mask_to_roRect ke_to_quad evaluate_box_proposals prepare_for_coco_detection word_evaluation DistributedSampler GroupedBatchSampler IterationBasedBatchSampler 
build_transforms Compose ToTensor RandomVerticalFlip Resize RandomCrop Normalize RandomHorizontalFlip RandomCropExpand ColorJitter im_detect_bbox_aug im_detect_bbox_scale im_detect_bbox_hflip im_detect_bbox compute_on_dataset inference _accumulate_predictions_from_multiple_gpus do_train reduce_loss_dict do_train reduce_loss_dict BalancedL1Loss weighted_balanced_l1_loss balanced_l1_loss FrozenBatchNorm2d _BezierAlign BezierAlign last_zero_init ContextBlock kaiming_init constant_init IOULoss _NewEmptyTensorOp Conv2d DFConv2d interpolate BatchNorm2d ConvTranspose2d NonLocal2D ROIAlign _ROIAlign _ROIPool ROIPool Scale SegLoss SigmoidFocalLoss _SigmoidFocalLoss sigmoid_focal_loss_cpu smooth_l1_loss _load_C_extensions DeformConvFunction ModulatedDeformConvFunction ModulatedDeformConv DeformConv ModulatedDeformConvPack DeformRoIPoolingFunction DeformRoIPoolingPack ModulatedDeformRoIPoolingPack DeformRoIPooling BalancedPositiveNegativeSampler BoxCoder conv_with_kaiming_uniform make_conv3x3 get_group_gn make_fc group_norm Matcher make_pooler LevelMapper Pooler cat build_resnet_fpn_backbone build_resnet_50_antialiased_backbone build_resnet_50_bn_backbone build_resnet_pan_backbone build_resnet_backbone build_resnet_fpn_p3p7_backbone build_backbone build_hnasnet_backbone build_detnasnet_backbone build_mnv2_fpn_backbone build_detnasnet_fpn_backbone add_rpn_head add_roi_head_mask FBNetROIHead _get_rpn_stage FBNetRPNHead FBNetTrunk add_roi_head _get_head_stage _get_trunk_cfg create_builder add_conv_body add_roi_head_keypoints _get_divisible_by ConvBNRelu _expand_block_cfg FBNetBuilder CascadeConv3x3 get_blocks SEModule _add_to_arch IRFBlock Shift expand_stages_cfg expand_stage_cfg Identity ShiftBlock5x5 _py2_round get_num_stages unify_arch_def _get_upsample_op Upsample ShuffleV2Block _block_cfgs_to_list ChannelShuffle add_archs LastLevelMaxPool FPN Scaler LastLevelP6P7 LastLevelP6 DeepLabScaler HNASNet Scaler conv_1x1_bn InvertedResidual conv_bn MobileNetV2 MSR ConcatUpConv BFP build_empty_neck build_retina_neck build_neck build_libra_neck GAU PAN FPA StemWithGN ResNetHead _make_stage ResNet BottleneckWithGN Bottleneck StemWithFixedBatchNorm BottleneckWithFixedBatchNorm BaseStem ResNet resnet50 resnet152 resnet34 make_list resnet18 resnet101 conv1x1 Bottleneck conv3x3 batchnorm BasicBlock build_detection_model GeneralizedRCNN OneStage build_one_stage_head BidirectionalLSTM AlignHead ATTPredictor AlignModule build_align_head Attention CTCPredictor CRNN CombinedROIHeads build_roi_heads build_roi_box_head ROIBoxHead PostProcessor make_roi_box_post_processor make_roi_box_loss_evaluator FastRCNNLossComputation make_roi_box_feature_extractor FPNXconv1fcFeatureExtractor FPN2MLPFeatureExtractor ResNet50Conv5ROIFeatureExtractor FPNPredictor make_roi_box_predictor FastRCNNPredictor paste_mask_in_image expand_boxes Masker make_roi_mask_post_processor MaskPostProcessorCOCOFormat expand_masks MaskPostProcessor make_roi_mask_loss_evaluator MaskRCNNLossComputation project_masks_on_boxes keep_only_positive_boxes ROIMaskHead build_roi_mask_head MaskRCNNFPNFeatureExtractor make_roi_mask_feature_extractor MaskRCNNC4Predictor MaskRCNNConv1x1Predictor make_roi_mask_predictor AnchorGenerator generate_anchors _scale_enum _whctrs make_anchor_generator _ratio_enum make_anchor_generator_retinanet _generate_anchors BufferList _mkanchors make_rpn_postprocessor RPNPostProcessor RPNLossComputation generate_rpn_labels make_rpn_loss_evaluator build_rpn RPNHeadFeatureSingleConv RPNModule RPNHead RPNHeadConvRegressor 
concat_box_prediction_layers permute_and_flatten FCOSHead FCOSModule snv2_block build_fcos FCOSPostProcessor make_fcos_postprocessor make_fcos_loss_evaluator FCOSLossComputation make_offset_predictor SequentialPredictor PolarPredictor make_retinanet_postprocessor RetinaNetPostProcessor make_retinanet_loss_evaluator generate_retinanet_labels RetinaNetLossComputation build_retinanet RetinaNetHead RetinaNetModule make_optimizer make_search_lr_scheduler make_lr_scheduler OptimizerDict WarmupMultiStepLR WarmupPolynormialLR PolyCosineAnnealingLR BoxList cat_boxlist boxlist_iou boxlist_nms remove_small_boxes _cat ImageList to_image_list SegmentationMask PolygonList PolygonInstance BinaryMaskList _rename_basic_resnet_weights _rename_conv_weights_for_deformable_conv_layers load_resnet_c2_format load_c2_format _rename_weights_for_resnet _load_c2_pickled_weights _rename_fpn_weights DetectronCheckpointer Checkpointer collect_env_info get_pil_version synchronize get_world_size reduce_dict reduce_sum all_gather get_rank is_main_process findContours setup_environment setup_custom_environment import_file setup_logger get_layer_info is_pruned is_leaf get_layer_param measure_model measure_layer get_num_gen SmoothedValue MetricLogger mkdir save_labels save_config strip_prefix_if_present load_state_dict align_and_update_state_dicts cache_url _register_generic Registry get_time_str Timer TestCheckpointer TestBackbones TestBoxCoder TestConfigs SubsetSampler TestGroupedBatchSampler TestIterationBasedBatchSampler create_random_input create_model get_config_files _test_build_detectors _test_run_selected_detectors TestDetectors _test_primitive TestFBNetBuilder TestFeatureExtractors _test_feature_extractors TestMetricLogger TestNMS TestPredictors _test_predictors TestRPNHeads TestSegmentationMask load_config_from_file load_config get_config_root_path get_config_root_path main main train run_test Model get_size test glob join dirname abspath line size copy zip get_cmap enumerate PIXEL_MEAN run_on_opencv_image ArgumentParser make_data_loader opts tensor imshow permute freeze parse_args merge_from_file config_file ascontiguousarray merge_from_list enumerate uint8 add_argument COCODemo tqdm numpy get ConcatDataset getattr append factory SequentialSampler RandomSampler list sorted map copy get_img_info append float range len BatchSampler IterationBasedBatchSampler GroupedBatchSampler _quantize _compute_aspect_ratios import_file make_data_sampler save_labels getLogger IMS_PER_BATCH PATHS_CATALOG MAX_ITER get_world_size NUM_WORKERS DataLoader warning append make_batch_data_sampler build_dataset OUTPUT_DIR DatasetCatalog _has_only_empty_bbox dict __name__ isinstance WordDataset items join format COCOResults check_expected_results getLogger prepare_for_coco_segmentation prepare_for_kes item info save prepare_for_bezier evaluate_box_proposals prepare_for_coco_detection convert tolist extend resize enumerate get_valid_y reshape boxPoints get_valid_x int0 minAreaRect enumerate CHAIN_APPROX_NONE findContours copy contour_to_xys numpy RETR_CCOMP Masker tolist extend masker expand tqdm resize get_field append enumerate argmax paraToQuad_v3 int tuple tolist map extend dot reverse linspace int32 Polygon area shape intersection append zeros range len append py_cpu_pnms array NMS_THRESH append zip BEZIER size tolist PNMS extend tqdm esd_pnms resize get_field enumerate bbox bezier_to_poly arange zeros_like resize max boxlist_iou append sum loadAnns range cat getAnnIds mean float enumerate reshape sort convert min zeros as_tensor len 
print error format info getLogger BRIGHTNESS TO_BGR255 SATURATION VERTICAL_FLIP_PROB_TRAIN CONTRAST MIN_SIZE_TEST HUE Compose MIN_SIZE_TRAIN Normalize FLIP_PROB_TRAIN MAX_SIZE_TRAIN MAX_SIZE_TEST CROP_PROB_TRAIN range ColorJitter add_preds_t MIN_SIZE_TEST SCALE_H_FLIP filter_results MAX_SIZE_TEST len im_detect_bbox_scale append range cat add_field MAX_SIZE size im_detect_bbox_hflip im_detect_bbox make_roi_box_post_processor enumerate H_FLIP NUM_CLASSES BoxList SCALES mode SIZE_DIVISIBILITY Compose to_image_list model Compose to_image_list SIZE_DIVISIBILITY to im_detect_bbox im_detect_bbox_hflip update tqdm eval device enumerate update list sorted getLogger warning all_gather keys getLogger save Timer device dataset get_time_str _accumulate_predictions_from_multiple_gpus tic format synchronize total_time get_world_size info load join toc dict isfile compute_on_dataset len get_world_size getLogger model clip_grad_norm_ zero_grad save str MetricLogger to sum update format timedelta info reduce_loss_dict enumerate time genotype isinstance backward parameters global_avg train step len error any e where abs log get_enum balanced_l1_loss item weight constant_ bias kaiming_uniform_ bias weight kaiming_normal_ constant_ Sequential isinstance constant_init _output_size tuple dtype sigmoid unsqueeze device log abs where join glob extend dirname abspath EPSILON DIM_PER_GP NUM_GROUPS group_norm Conv2d bias normal_ kaiming_normal_ ReLU append weight constant_ kaiming_uniform_ bias weight constant_ Linear POOLER_RESOLUTION POOLER_SCALES POOLER_SAMPLING_RATIO Pooler ResNet BACKBONE_OUT_CHANNELS resnet50 BACKBONE_OUT_CHANNELS resnet50 BACKBONE_OUT_CHANNELS FPN Sequential OrderedDict FILTER_MULTIPLIER BACKBONE_OUT_CHANNELS NUM_BLOCKS HNASNet DetNASNet Scaler Sequential OrderedDict FILTER_MULTIPLIER BACKBONE_OUT_CHANNELS DetNASNet Scaler Sequential OrderedDict FILTER_MULTIPLIER BACKBONE_OUT_CHANNELS FPN MSR ResNet Sequential OrderedDict RES2_OUT_CHANNELS BACKBONE_OUT_CHANNELS MSR_ON FPN MSR ResNet Sequential OrderedDict RES2_OUT_CHANNELS BACKBONE_OUT_CHANNELS MSR_ON PAN FPN ResNet Sequential OrderedDict RES2_OUT_CHANNELS BACKBONE_OUT_CHANNELS return_features_num_channels FPN Sequential OrderedDict BACKBONE_OUT_CHANNELS MobileNetV2 get format FBNetBuilder SCALE_FACTOR WIDTH_DIVISOR DW_CONV_SKIP_BN unify_arch_def DW_CONV_SKIP_RELU loads ARCH_DEF BN_TYPE info ARCH get_num_stages get_blocks get range FBNetTrunk Sequential OrderedDict create_builder last_depth get format warn get_blocks range len create_builder FBNetRPNHead RPNHeadConvRegressor out_channels get get_blocks create_builder create_builder create_builder int Upsample append deepcopy range append expand_stage_cfg append expand_stage_cfg enumerate enumerate update deepcopy _block_cfgs_to_list _add_to_arch max append deepcopy NUM_LEVELS USE_DEFORMABLE IN_CHANNELS REFINE_TYPE REFINE_LEVEL USE_GN LastLevelP6P7 LAST_STRIDE RES2_OUT_CHANNELS LastLevelP6 BACKBONE_OUT_CHANNELS NUM_LEVELS get transformation_module range append isinstance ResNet cache_url load_state_dict ResNet cache_url load_state_dict ResNet cache_url load_state_dict ResNet cache_url load_state_dict ResNet cache_url load_state_dict MASK_ON CombinedROIHeads INST_ON RETINANET_ON KEYPOINT_ON append CLS_AGNOSTIC_BBOX_REG BoxCoder DETECTIONS_PER_IMG ENABLED PostProcessor BBOX_REG_WEIGHTS USE_FPN NMS SCORE_THRESH POSITIVE_FRACTION FG_IOU_THRESHOLD CLS_AGNOSTIC_BBOX_REG BATCH_SIZE_PER_IMAGE BoxCoder BalancedPositiveNegativeSampler BBOX_REG_WEIGHTS BG_IOU_THRESHOLD Matcher FastRCNNLossComputation 
zeros_like float new_zeros int uint8 float expand_masks min float32 expand interpolate zeros to max POSTPROCESS_MASKS POSTPROCESS_MASKS_THRESHOLD Masker MaskPostProcessor zip convert device resize append to crop get_mask_tensor FG_IOU_THRESHOLD MaskRCNNLossComputation BG_IOU_THRESHOLD Matcher RESOLUTION get_field squeeze append AnchorGenerator STRADDLE_THRESH ANCHOR_SIZES ANCHOR_STRIDE USE_FPN ASPECT_RATIOS AnchorGenerator STRADDLE_THRESH OCTAVE ANCHOR_SIZES tuple SCALES_PER_OCTAVE append float ASPECT_RATIOS ANCHOR_STRIDES range vstack _ratio_enum array hstack sqrt _whctrs round _mkanchors _whctrs _mkanchors NMS_THRESH FPN_POST_NMS_TOP_N_TRAIN POST_NMS_TOP_N_TRAIN RPNPostProcessor FPN_POST_NMS_PER_BATCH POST_NMS_TOP_N_TEST MIN_SIZE PRE_NMS_TOP_N_TRAIN FPN_POST_NMS_TOP_N_TEST PRE_NMS_TOP_N_TEST get_field POSITIVE_FRACTION FG_IOU_THRESHOLD RPNLossComputation BATCH_SIZE_PER_IMAGE BalancedPositiveNegativeSampler BG_IOU_THRESHOLD Matcher RETINANET_ON FCOS_ON reshape permute view permute_and_flatten reshape shape zip append FPN_POST_NMS_TOP_N_TRAIN NMS_TH DETECTIONS_PER_IMG FCOSPostProcessor INFERENCE_TH PRE_NMS_TOP_N_TRAIN PRE_NMS_TOP_N FCOSLossComputation PREDICTOR NMS_TH DETECTIONS_PER_IMG INFERENCE_TH PRE_NMS_TOP_N RetinaNetPostProcessor get_field FG_IOU_THRESHOLD RetinaNetLossComputation SigmoidFocalLoss LOSS_GAMMA BG_IOU_THRESHOLD Matcher LOSS_ALPHA format print WEIGHT_DECAY_BIAS Adam SGD named_parameters BASE_LR startswith DARTS_ON append BIAS_LR_FACTOR WEIGHT_DECAY convert _box_nms get_field bbox mode squeeze unbind bbox clamp min convert area max len add_field size set BoxList _cat fields mode int list isinstance tuple copy_ zero_ zip ceil Tensor enumerate max _rename_basic_resnet_weights sorted format getLogger OrderedDict from_numpy info keys _rename_fpn_weights sorted format replace getLogger match STAGE_WITH_DCN info keys enumerate CONV_BODY _rename_conv_weights_for_deformable_conv_layers replace _rename_weights_for_resnet _load_c2_pickled_weights get_pretty_env_info barrier get_world_size from_buffer dumps get_world_size loads zip append to max cat get_world_size all_reduce clone get_world_size startswith get setup_custom_environment setup_environment import_file spec_from_file_location exec_module module_from_spec setFormatter join getLogger addHandler StreamHandler Formatter DEBUG setLevel FileHandler mask str strip mul kernel_size reduce get_layer_param branch_2 conv out_channels numel groups relu padding size stride in_channels int norm get_layer_info condense_factor branch_1 modify_forward print restore_forward image_sizes forward makedirs update join categories format hasattr getLogger warning info __name__ is_main_process is_main_process max list sorted format view getLogger tuple tolist shape info keys enumerate len OrderedDict items sorted keys strip_prefix_if_present align_and_update_state_dicts state_dict join basename format replace synchronize write search group getenv path _download_url_to_file expanduser urlparse makedirs str timedelta glob join get_config_root_path deepcopy to freeze build_detection_model int SIZE_DIVISIBILITY rand MIN_SIZE_TRAIN to_image_list append to assertGreater get_config_files len assertGreater len format Size assertEqual op shape to get deepcopy items assertGreater format print fe assertEqual rand Size BoxList getattr builder assertIsNotNone load_config len get deepcopy items assertGreater format print fe rand builder load_config len join get_config_root_path merge_from_file deepcopy realpath join dirname abspath OUTPUT_DIR collect_env_info 
PREDICTOR set_device get_rank to inference KEYPOINT_ON TEST DEVICE format build_detection_model init_process_group synchronize setup_logger mkdir init info zip KE_ON load join DetectronCheckpointer local_rank len DEVICE make_optimizer initialize load build_detection_model update CHECKPOINT_PERIOD make_data_loader WEIGHT SYNCBN convert_sync_batchnorm DistributedDataParallel DetectronCheckpointer do_train device to OUTPUT_DIR make_lr_scheduler join zip TEST synchronize MASK_ON inference mkdir make_data_loader empty_cache OUTPUT_DIR module KEYPOINT_ON enumerate len save_config distributed run_test train max save resize cuda open fromarray get_size expand append size astype mean stack float enumerate uint8 ANTIALIAS backward print m array | # ABCNet **The full code has been released in the [Adet](https://github.com/aim-uofa/AdelaiDet/blob/master/configs/BAText/README.md), including the models of CTW1500 and Total-text, all the training data we used, evaluation scripts, results of detection, etc. More updates will also be conducted on [Adet](https://github.com/aim-uofa/AdelaiDet/blob/master/configs/BAText/README.md) as well. This repo will not be maintained anymore.** [Paper Link](https://arxiv.org/abs/2002.10200). # Run Demo Check [Installation](https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/INSTALL.md) for installation instructions. ``` bash vis_demo.sh ``` We assume that your symlinked `datasets/totaltext` directory has the following structure: ``` | 1,193 |
YumaKoizumi/ToyADMOS-dataset | ['anomaly detection'] | ['ToyADMOS: A Dataset of Miniature-Machine Operating Sounds for Anomalous Sound Detection'] | C01_create_small_INT_dataset/make_dataset_for_car_and_conveyor.py E01_simple_AE_test/Modules/Config.py E01_simple_AE_test/Modules/my_gpu_funcs.py E01_simple_AE_test/Modules/model_definition.py E01_simple_AE_test/Modules/my_modules.py E01_simple_AE_test/01_train.py C01_create_small_INT_dataset/make_dataset_for_train.py E01_simple_AE_test/02_test.py wavwrite make_dir wav_read_all load_and_cut_noise wavread wavwrite make_dir wav_read_all load_and_cut_noise wavread debug_draw evaluate_wav exe_one_set evaluate_wav load_config frame_concat FCN_AE chainer_fft_spectrogram exe_fft return_time_mat_xp concat_2_wavs feature_extraction loss_MMSE hz2mel wavwrite list_to_gpu_select_device wav_read_test wavread melFilterBank wav_read_trn list_to_gpu mel2hz print mkdir read astype float32 write around array glob print resample tqdm wavread append range len randint rand int len replace loss_MMSE exe_fft feature_extraction dnn_model update permutation cleargrads backward evaluate_wav concat_2_wavs zeros range unchain_backward len data subplot T show to_cpu plot imshow flipud xlim shape concat range zeros sqrt log matmul mean int arange reshape floor tile float len chainer_fft_spectrogram transpose astype float32 shape tile ifft fft concat transpose shape fliplr hstack append to_gpu range len append to_gpu range len permutation glob print tqdm wavread append range len glob print tqdm wavread append range len hz2mel arange hstack mel2hz astype float32 zeros sum range len | # ToyADMOS dataset ToyADMOS dataset is a machine operating sounds dataset of approximately 540 hours of normal machine operating sounds and over 12,000 samples of anomalous sounds collected with four microphones at a 48kHz sampling rate, prepared by Yuma Koizumi and members in NTT Media Intelligence Laboratories. The ToyADMOS dataset is designed for anomaly detection in machine operating sounds (ADMOS) research. We have collected normal and anomalous operating sounds of miniature machines by deliberately damaging their components. It is designed for three tasks of ADMOS: product inspection (toy car), fault diagnosis for fixed machine (toy conveyor), and fault diagnosis for moving machine (toy train). For more information, refer to the paper [1]. If you use the ToyADMOS dataset in your work, please cite this paper where it was introduced. >[1] Yuma Koizumi, Shoichiro Saito, Noboru Harada, Hisashi Uematsu and Keisuke Imoto, "ToyADMOS: A Dataset of Miniature-Machine Operating Sounds for Anomalous Sound Detection," in Proc of Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA), 2019. > Paper URL: https://arxiv.org/abs/1908.03299 ## Download: The dataset can be downloaded at https://zenodo.org/record/3351307#.XT-JZ-j7QdU. Since the total size of the ToyADMOS dataset is over 440GB, each sub-dataset is split into 7-9 files by 7-zip (7z-format). The total size of the compressed dataset is approximately 180GB, and that of each sub-dataset is approximately 60GB. Download the zip files corresponding to sub-datasets of interest and use your favorite compression tool to unzip these split zip files. ## Detailed description of dataset See the file named DETAIL.pdf ## Usage examples | 1,194 |
Yunbo426/predrnn-pp | ['video prediction'] | ['PredRNN++: Towards A Resolution of the Deep-in-Time Dilemma in Spatiotemporal Predictive Learning'] | train.py nets/models_factory.py layers/GradientHighwayUnit.py data_provider/datasets_factory.py data_provider/mnist.py utils/preprocess.py layers/CausalLSTMCell.py nets/predrnn_pp.py utils/metrics.py layers/TensorLayerNorm.py data_provider InputHandle CausalLSTMCell GHU tensor_layer_norm construct_model rnn batch_mae_frame_float batch_psnr reshape_patch reshape_patch_back begin InputHandle split get_shape batch_normalization ndims moments get_variable as_list str bool transpose ghu stack append cslstm range l2_loss sum float32 absolute size log10 int32 float sum reshape transpose reshape transpose | # PredRNN++
This is a TensorFlow implementation of [PredRNN++](https://arxiv.org/abs/1804.06300), a recurrent model for video prediction as described in the following paper:
**PredRNN++: Towards A Resolution of the Deep-in-Time Dilemma in Spatiotemporal Predictive Learning**, by Yunbo Wang, Zhifeng Gao, Mingsheng Long, Jianmin Wang and Philip S. Yu.
## Setup
Required python libraries: tensorflow (>=1.0) + opencv + numpy.\
Tested in ubuntu/centOS + nvidia titan X (Pascal) with cuda (>=8.0) and cudnn (>=5.0).
## Datasets
| 1,195 |
YunjiKim/Unsupervised-Keypoint-Learning-for-Guiding-Class-conditional-Video-Prediction | ['video prediction'] | ['Unsupervised Keypoint Learning for Guiding Class-Conditional Video Prediction'] | models/keypoint_model.py train.py data/__init__.py models/networks/__init__.py models/__init__.py models/motion_generator_model.py evaluate.py models/base_model.py models/networks/vgg.py models/networks/layers.py utils/__init__.py data/keypoint_dataloader.py data/image_pair_dataloader.py data/base_dataloader.py utils/model.py utils/data.py models/detector_translator_model.py utils/training.py models/final_model.py data/sequence_dataloader.py make_pseudo_labels.py _checkpoint_exist main _save_img_sequence _save_img main _save_output main _get_dataloader_by_mode _get_model_by_mode BaseDataLoader ImagePairDataLoader KeypointDataLoader SequenceDataLoader BaseModel DetectorTranslatorModel FinalModel KeypointModel MotionGeneratorModel to_coord conv lstm_model batch_norm Vgg19 translator image_encoder seq_discr pose_encoder vae_decoder img_discr encoder vae_encoder rotate_keypoints center_crop create_one_hot_label apply_random_filter generate_new_color color_distance get_n_colors get_random_color get_gaussian_maps colorize_point_maps get_coord get_n_iterations touch_dir load_config config add_argument ArgumentParser parse_args ConfigProto load_config fromarray uint8 squeeze astype save join touch_dir range _save_img join touch_dir join format save MultiRNNCell randint float size radians cos sin zeros min get_random_color range append generate_new_color range to_float exp reshape transpose square linspace to_float reshape reduce_sum reduce_mean softmax linspace mkdir | # Unsupervised Keypoint Learning <br/> for Guiding Class-Conditional Video Prediction An official implementation of the paper "Unsupervised Keypoint Learning for Guiding Class-Conditional Video Prediction", NeurIPS, 2019. [[paper](https://arxiv.org/abs/1910.02027)] [[supp](https://papers.nips.cc/paper/8637-unsupervised-keypoint-learning-for-guiding-class-conditional-video-prediction-supplemental.zip)] <p align="left"> <img src='img/model_overview.png' width="860" title="Overview"> </p> ## I. Requirements - Linux - NVIDIA GeForce GTX 1080Ti - Tensorflow 1.12.0 - Python3 (>= 3.5.2) | 1,196 |
YunseokJANG/l2l-da | ['adversarial defense'] | ['Adversarial Defense via Learning to Generate Diverse Attacks'] | mister_ed/imagenet/examples/imagenet_logits.py mister_ed/imagenet/pretrainedmodels/utils.py mister_ed/utils/pytorch_ssim.py adv_defence/data_loader.py mister_ed/imagenet/pretrainedmodels/models/senet.py adv_defence/attacks.py mister_ed/adversarial_perturbations.py mister_ed/imagenet/pretrainedmodels/__init__.py mister_ed/imagenet/pretrainedmodels/datasets/utils.py evaluation_tinyimagenet.py mister_ed/imagenet/examples/voc2007_extract.py mister_ed/imagenet/pretrainedmodels/models/nasnet_mobile.py adv_defence/sync_batchnorm/unittest.py adv_defence/sync_batchnorm/replicate.py mister_ed/imagenet/pretrainedmodels/models/bninception.py mister_ed/cifar10/cifar_loader.py mister_ed/imagenet/pretrainedmodels/models/resnext_features/resnext101_64x4d_features.py mister_ed/main_sandbox.py mister_ed/imagenet/pretrainedmodels/models/inceptionresnetv2.py adv_defence/models.py mister_ed/adversarial_attacks.py evaluation_mnist.py adv_defence/trainer.py mister_ed/imagenet/pretrainedmodels/models/fbresnet/resnet152_load.py mister_ed/mnist/mnist_loader.py mister_ed/utils/image_utils.py mister_ed/scripts/advtrain.py mister_ed/utils/discretization.py mister_ed/imagenet/pretrainedmodels/models/cafferesnet.py mister_ed/imagenet/pretrainedmodels/models/resnext_features/__init__.py mister_ed/cifar10/wide_resnets.py mister_ed/st_sandbox.py main.py mister_ed/scripts/setup_cifar.py mister_ed/imagenet/pretrainedmodels/models/dpn.py mister_ed/imagenet/pretrainedmodels/models/fbresnet.py mister_ed/evaluation_mnist.py mister_ed/loss_functions.py mister_ed/cifar10/cifar_resnets.py mister_ed/utils/experiment_utils.py adv_defence/sync_batchnorm/batchnorm_reimpl.py mister_ed/bin-codes/bin_codes.py adv_defence/classifier_tester.py mister_ed/utils/pytorch_utils.py mister_ed/evaluation.py mister_ed/imagenet/pretrainedmodels/models/torchvision_models.py mister_ed/imagenet/pretrainedmodels/datasets/__init__.py mister_ed/imagenet/pretrainedmodels/datasets/voc.py mister_ed/imagenet/examples/visu_arch.py mister_ed/imagenet/pretrainedmodels/models/utils.py mister_ed/imagenet/examples/imagenet_eval.py adv_defence/config.py evaluation_cifar.py mister_ed/imagenet/pretrainedmodels/models/resnext_features/resnext101_32x4d_features.py mister_ed/adversarial_evaluation.py mister_ed/prebuilt_attacks.py mister_ed/utils/checkpoints.py mister_ed/prebuilt_loss_functions.py mister_ed/scripts/lpips_attack.py mister_ed/spatial_transformers.py adv_defence/utils.py mister_ed/config.py mister_ed/imagenet/imagenet_loader.py mister_ed/imagenet/pretrainedmodels/models/xception.py mister_ed/custom_lpips/custom_dist_model.py mister_ed/imagenet/pretrainedmodels/models/resnext.py mister_ed/custom_lpips/base_model.py adv_defence/helper.py adv_defence/sync_batchnorm/batchnorm.py adv_defence/sync_batchnorm/__init__.py tiny_imagenet_val_format.py mister_ed/imagenet/pretrainedmodels/models/inceptionv4.py mister_ed/imagenet/pretrainedmodels/models/wideresnet.py adv_defence/sync_batchnorm/comm.py mister_ed/imagenet/pretrainedmodels/models/nasnet.py mister_ed/adversarial_training.py mister_ed/imagenet/pretrainedmodels/version.py mister_ed/imagenet/pretrainedmodels/models/__init__.py mister_ed/imagenet/pretrainedmodels/models/vggm.py main main main main get_fgsm get_cw run_fgsm run_pgd _get_settings get_pgd run_cw ClassifierTester add_argument_group get_config str2bool get_loader Normalizer resnet20 ResNet LambdaLayer LeNet test load_classifier resnet56 
_weights_init BasicBlock NoiseGenerator Classifier weights_init_normal UNetUp UNetDown Trainer model_name_generator get_time prepare_dirs_and_logger save_config _sum_ft convert_model patch_sync_batchnorm SynchronizedBatchNorm2d _unsqueeze_ft _SynchronizedBatchNorm SynchronizedBatchNorm1d SynchronizedBatchNorm3d BatchNorm2dReimpl SyncMaster FutureResult SlavePipe execute_replication_callbacks CallbackContext DataParallelWithCallback patch_replication_callback TorchTestCase FGSM PGD AdversarialAttack CarliniWagner IdentityEvaluation AdversarialEvaluation EvaluationResult PerturbationParameters DeltaAddition ParameterizedXformAdv ThreatModel AdversarialPerturbation SequentialPerturbation initialized AdversarialAttackParameters AdversarialTraining path_resolver main main SSIMRegularization PartialXentropy ReferenceRegularizer LpipsRegularization PerturbationNormLoss RegularizedLoss CombinedTransformerLoss L2Regularization SoftLInfRegularization RelaxedTransformerLoss PartialLoss IncorrectIndicator FullSpatialLpLoss CWLossF6 main_evaluation_script main_attack_script main_defense_script build_delta_pgd build_rot_trans_pgd build_delta_stadv_pgd build_stadv_pgd build_stadv_rot_trans_pgd build_delta_rot_trans_pgd build_delta_stadv_rot_trans_pgd build_delta_fgsm CWTransformerLoss CWL2Loss CWLInfLoss CWLpipsLoss PerceptualXentropy VanillaXentropy CWRelaxedTransformerLoss FullSpatial TranslationTransform RotationTransform AffineTransform ParameterizedTransformation PointScaleTransform SequentialSelector load_pretrained_cifar_resnet load_cifar_data load_pretrained_cifar_wide_resnet resnet110 resnet20 ResNet LambdaLayer resnet44 test resnet1202 resnet56 resnet32 _weights_init BasicBlock conv_init conv3x3 wide_basic Wide_ResNet BaseModel PNetLin normalize_tensor NetLinLayer DistModel alexnet normalizer_from_imagenet_model load_pretrained_imagenet load_imagenet_data main print_info save_activation ToRange255 LoadTransformImage ToSpaceBGR LoadImage TransformImage Identity Warp download_url AveragePrecisionMeter load_imagenet_classes find_images_classification read_object_labels_csv read_image_label write_object_labels_csv Voc2007Classification download_voc2007 read_object_labels bninception BNInception ResNet Bottleneck conv3x3 cafferesnet101 BasicBlock dpn68b pooling_factor AdaptiveAvgMaxPool2d DPN DualPathBlock InputBlock dpn68 dpn98 CatBnAct BnActConv2d dpn131 dpn107 dpn92 adaptive_avgmax_pool2d FBResNet fbresnet18 Bottleneck fbresnet101 conv3x3 fbresnet152 fbresnet34 fbresnet50 BasicBlock Block17 Block8 Mixed_6a Mixed_5b BasicConv2d InceptionResNetV2 inceptionresnetv2 Block35 Mixed_7a Mixed_4a Mixed_5a Reduction_B Inception_B BasicConv2d Inception_A Reduction_A Mixed_3a Inception_C inceptionv4 InceptionV4 NormalCell BranchSeparablesStem AvgPoolPad ReductionCell1 ReductionCell0 MaxPoolPad NASNetALarge SeparableConv2d BranchSeparables CellStem0 nasnetalarge FirstCell BranchSeparablesReduction CellStem1 NormalCell BranchSeparablesStem AvgPoolPad ReductionCell1 ReductionCell0 MaxPoolPad nasnetamobile SeparableConv2d BranchSeparables CellStem0 FirstCell BranchSeparablesReduction CellStem1 NASNetAMobile ResNeXt101_32x4d resnext101_32x4d ResNeXt101_64x4d resnext101_64x4d se_resnext50_32x4d senet154 SENet SEResNetBottleneck SEBottleneck SEResNeXtBottleneck initialize_pretrained_model Bottleneck se_resnet152 se_resnet50 se_resnext101_32x4d SEModule se_resnet101 vgg19 inceptionv3 load_pretrained resnet34 vgg11_bn resnet152 squeezenet1_1 vgg11 modify_alexnet modify_densenets vgg16 densenet161 modify_squeezenets 
vgg19_bn resnet101 resnet18 vgg13_bn resnet50 vgg16_bn vgg13 modify_vggs densenet169 densenet201 modify_resnets alexnet densenet121 squeezenet1_0 VGGM Lambda LambdaBase SpatialCrossMapLRN vggm wideresnet50 WideResNet define_model Block Xception xception SeparableConv2d ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 Lambda LambdaBase LambdaReduce LambdaMap Lambda LambdaBase LambdaReduce LambdaMap load_mnist_data validate_architecture build_pgd_linf_stadv_att build_attack_params build_fgsm_attack build_stadv_linf_attack build_pgd_linf_attack main build_full_attack validate_filenaming build_rotation_translation_attack build_attack_loss file_hash check_cifar_data_loaded load_cifar_classifiers load_state_dict_from_filename CustomDataSaver list_saved_epochs clear_experiment load_state_dict params_to_filename save_state_dict CustomDataLoader discretized_adversarial discretize_image flip_greedy_pixel flip_random_pixel level_sets_r2 l2_dist equidistant_points get_midpoint display_adversarial_notebook nchw_l2 show_images nhwc255_xform display_adversarial_2row create_window gaussian _ssim SSIM ssim fold_mask cuda_assert random_from_lp_ball tuple_setter torch_arctanh IdentityNormalize clamp_ref summed_lp_norm get_gpu_memory_map DifferentiableNormalize unset_global_gpu tanh_transform accuracy_int torch_argmin safe_var tanh_rescale random_linf_pertubation use_gpu tuple_getter checkpoint_incremental_array set_global_gpu batchwise_lp_project rough_gpu_estimate TrainingLogger batchwise_norm sizeof_fmt clamp_0_1_delta clip_0_1 AverageMeter accuracy safe_tensor torch_argmax DeltaAddition evaluate_ensemble IdentityNormalize CWLossF6 FGSM load_state_dict Classifier ThreatModel SoftLInfRegularization VanillaXentropy startswith keys load join items AdversarialAttackParameters AdversarialEvaluation EvaluationResult path PGD CarliniWagner seed get_sample_pdf_of_checkpoint manual_seed save_config Trainer test_classifier_with_l2lda_att need_samples ClassifierTester get_loader random_seed train prepare_dirs_and_logger PerturbationParameters _get_settings DeltaAddition ThreatModel VanillaXentropy attack normalizer PerturbationParameters _get_settings DeltaAddition ThreatModel VanillaXentropy attack normalizer PerturbationParameters _get_settings DeltaAddition ThreatModel attack append parse_known_args MNIST join print RandomRotation ToTensor Compose __len__ RandomCrop ImageFolder DataLoader RandomHorizontalFlip CIFAR10 append weight kaiming_normal_ __name__ list print parameters filter len load Normalizer resnet20 Sequential LeNet in_features Conv2d eval load_state_dict startswith cuda resnet18 AdaptiveAvgPool2d keys Linear data xavier_uniform_ normal_ zero_ weight __name__ f_pretrain f_classifier_name use_cross_entropy_for_g lr_gamma dataset g_z_dim g_method f_update_style train_g_iter append g_lr single_batch_size format g_mini_update_style g_ministep_size get_time f_lr dsgan_lambda g_use_grad g_normalize_grad model_name_generator setFormatter join basename log_dir f_classifier_name getLogger handlers addHandler data_dir StreamHandler Formatter removeHandler model_name dataset load_path makedirs print join model_name model_dir eps num_features affine isinstance named_children momentum running_mean add_module DataParallel DataParallelWithCallback zip running_var module sync_module detach list hasattr __data_parallel_replicate__ modules enumerate len replicate startswith flavor_blackbox pretty_printer architecture defence cuda open load_pretrained_cifar_wide_resnet 
load_pretrained_cifar_resnet epoch format load_cifar_data close write blackbox load_mnist_data LeNet DeltaAddition L2Regularization CWLossF6 FGSM display_adversarial_2row attack iter DifferentiableNormalize next load_pretrained_cifar_resnet load_cifar_data ThreatModel VanillaXentropy eval adversarial_tensors BIM print PGD CarliniWagner FGSM AdversarialAttackParameters AdversarialTraining load_cifar_data VanillaXentropy eval DifferentiableNormalize train load_pretrained_cifar_resnet CrossEntropyLoss FGSM load_cifar_data AdversarialAttackParameters BIM evaluate AdversarialEvaluation VanillaXentropy DifferentiableNormalize load_pretrained_cifar_resnet PerturbationParameters FGSM DeltaAddition AdversarialAttackParameters RegularizedLoss ThreatModel EvaluationResult VanillaXentropy CWLossF6 PerturbationParameters update DeltaAddition AdversarialAttackParameters RegularizedLoss Adam ThreatModel EvaluationResult VanillaXentropy PGD CWLossF6 PerturbationParameters update PartialXentropy ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss Adam ThreatModel EvaluationResult PGD CWLossF6 PerturbationParameters update ParameterizedXformAdv AdversarialAttackParameters RegularizedLoss Adam ThreatModel EvaluationResult VanillaXentropy SequentialPerturbation PGD CWLossF6 PerturbationParameters update DeltaAddition ParameterizedXformAdv AdversarialAttackParameters RegularizedLoss Adam ThreatModel EvaluationResult VanillaXentropy SequentialPerturbation PGD CWLossF6 PerturbationParameters update PartialXentropy DeltaAddition ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss Adam ThreatModel EvaluationResult SequentialPerturbation PGD CWLossF6 PerturbationParameters update PartialXentropy DeltaAddition ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss Adam ThreatModel EvaluationResult SequentialPerturbation PGD CWLossF6 PerturbationParameters update PartialXentropy ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss Adam ThreatModel EvaluationResult SequentialPerturbation PGD CWLossF6 load use_gpu join load_state_dict DifferentiableNormalize load join Wide_ResNet load_state_dict DifferentiableNormalize update use_gpu ToTensor Compose extend Normalize append constant xavier_uniform bias weight __name__ sqrt size sum view normalizer_from_imagenet_model use_gpu eval cuda mean hasattr std join use_gpu Compose ImageFolder Normalize append load_img model Variable print eval LoadImage TransformImage unsqueeze path_img tf_img arch parse_args max print size __name__ join format print save_image range __name__ append range len urlretrieve print dict join items read_image_label dict zeros range len print close print join join basename format print getcwd chdir extractall download_url close path open urlparse makedirs load_url load_state_dict BNInception load_url ResNet load_state_dict load_url DPN load_state_dict load_url DPN load_state_dict load_url DPN load_state_dict load_url DPN load_state_dict load_url DPN load_state_dict load_url DPN load_state_dict print max_pool2d avg_pool2d cat FBResNet FBResNet FBResNet FBResNet load_url FBResNet load_state_dict load_url InceptionResNetV2 load_state_dict Linear load_url load_state_dict Linear InceptionV4 in_features NASNetALarge load_url load_state_dict Linear load_url NASNetAMobile load_state_dict load_url ResNeXt101_32x4d load_state_dict load_url load_state_dict ResNeXt101_64x4d load_url load_state_dict initialize_pretrained_model 
SENet initialize_pretrained_model SENet initialize_pretrained_model SENet initialize_pretrained_model SENet initialize_pretrained_model SENet initialize_pretrained_model SENet load_url load_state_dict features setattr __class__ modify_alexnet load_pretrained __class__ setattr classifier modify_densenets load_pretrained load_pretrained modify_densenets load_pretrained modify_densenets modify_densenets load_pretrained fc load_pretrained __class__ setattr inception_v3 fc setattr __class__ modify_resnets load_pretrained modify_resnets load_pretrained modify_resnets load_pretrained modify_resnets load_pretrained modify_resnets load_pretrained setattr __class__ load_pretrained modify_squeezenets modify_squeezenets load_pretrained features setattr __class__ modify_vggs load_pretrained load_pretrained modify_vggs modify_vggs load_pretrained modify_vggs load_pretrained modify_vggs load_pretrained modify_vggs load_pretrained modify_vggs load_pretrained modify_vggs load_pretrained load_url VGGM load_state_dict load join sorted format items define_model print Variable system WideResNet from_numpy shape isfile expanduser load_url fc Xception load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict load_url ResNet load_state_dict update use_gpu Compose basename sub PerturbationParameters FGSM DeltaAddition AdversarialAttackParameters ThreatModel VanillaXentropy PerturbationParameters deepcopy DeltaAddition AdversarialAttackParameters ThreatModel VanillaXentropy PGD PerturbationParameters deepcopy DeltaAddition ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss ThreatModel SequentialPerturbation PGD CWLossF6 PerturbationParameters deepcopy ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss ThreatModel PGD CWLossF6 PerturbationParameters deepcopy DeltaAddition ParameterizedXformAdv AdversarialAttackParameters ThreatModel VanillaXentropy SequentialPerturbation PGD PerturbationParameters deepcopy DeltaAddition ParameterizedXformAdv AdversarialAttackParameters PerturbationNormLoss RegularizedLoss ThreatModel SequentialPerturbation PGD CWLossF6 int train_fxn validate_architecture build_attack_params sub train_from_checkpoint AdversarialTraining validate_filenaming CrossEntropyLoss print DEFAULT_DATASETS_DIR CIFAR10 sha256 join read MODEL_PATH print close add set urlopen makedirs join params_to_filename params_to_filename join basename isinstance glob select_epoch append valid_epoch join state_dict save params_to_filename len load join right_dict load_state_dict params_to_filename view shape stack unsqueeze zip append data tuple_setter sign next_pixel_to_flip unsqueeze forward add append safe_var range ne tuple_getter set float enumerate pop Variable clone safe_tensor discretize_image len print extend recursive_midpoint oracle append range len tuple l2_dist append sum range shape show list toimage transpose squeeze shape imshow figure save append zeros show list print transpose min sample imshow eval selector type append zeros forward max range cat enumerate pow sum dim range Tensor Variable contiguous unsqueeze pow conv2d create_window size type_as get_device cuda is_cuda is_available str unsetenv _TensorBase isinstance warn _TensorBase ndarray isinstance warn max numel view min numel view save concatenate ones type rand isinstance sum max transpose pow abs dim range unsqueeze renorm min expand type unsqueeze abs batchwise_norm isinstance zeros_like 
Variable add_ make_broadcastable type safe_tensor check_output get listprod size get_objects get_device t topk eq expand_as topk size t eq mul_ expand_as append sum max | YunseokJANG/l2l-da | 1,197 |
YunzhuLi/InfoGAIL | ['imitation learning'] | ['InfoGAIL: Interpretable Imitation Learning from Visual Demonstrations'] | wgail_info_0/utils.py wgail_info_0/wgail_info.py wgail_info_0/gym_torcs.py wgail_info_1/wgail_info.py wgail_info_1/utils.py wgail_info_0/drive.py wgail_info_1/snakeoil_gym.py wgail_info_1/gym_torcs.py wgail_info_1/models.py wgail_info_1/drive.py wgail_info_1/preprocess.py wgail_info_0/models.py wgail_info_1/behavior_clone.py wgail_info_0/snakeoil_gym.py wgail_info_0/behavior_clone.py wgail_info_0/preprocess.py TorcsEnv playGame TorcsEnv playGame seed load set_session learn TRPOAgent print load_weights TorcsEnv ConfigProto Session | # [InfoGAIL: Interpretable Imitation Learning from Visual Demonstrations](https://arxiv.org/abs/1703.08840) By Yunzhu Li, Jiaming Song, Stefano Ermon ### Introduction Modified codebase of TORCS, with the ability to extract dashboard views. InfoGAIL implementation, attached with two examples: *pass* & *turn*. ### Citing InfoGAIL If you find this codebase useful in your research, please consider citing: @article{li2017inferring, title={InfoGAIL: Interpretable Imitation Learning from Visual Demonstrations}, author={Li, Yunzhu and Song, Jiaming and Ermon, Stefano}, | 1,198 |
Yushi-Hu/Multilingual-AWE | ['word embeddings', 'dynamic time warping'] | ['Multilingual Jointly Trained Acoustic and Written Word Embeddings'] | multiview-babel-feature/code/optim/__init__.py multiview-babel-phone/code/layers/cnn.py multiview-babel-feature/code/layers/linear.py multiview-babel-feature/code/evaluate.py multiview-babel-phone/code/utils/stateful_dataset.py multiview-babel-phone/code/utils/config.py multiview-babel-feature/code/utils/saver.py multiview-babel-phone/code/layers/linear.py multiview-babel-feature/code/vocab.py multiview-babel-feature/code/optim/sgd.py multiview-babel-feature/code/utils/speech_utils.py multiview-babel-feature/code/sched/__init__.py multiview-babel-phone/code/sched/__init__.py multiview-babel-phone/code/utils/cache.py multiview-babel-phone/code/data.py multiview-babel-feature/code/sched/multistep_lr.py multiview-babel-phone/code/evaluate.py multiview-babel-phone/code/sched/revert_on_plateau.py multiview-babel-phone/code/layers/__init__.py multiview-babel-phone/code/optim/__init__.py multiview-babel-phone/code/sched/exponential_lr.py multiview-babel-phone/code/utils/speech_utils.py multiview-babel-feature/code/layers/cnn.py multiview-babel-phone/code/optim/adam.py multiview-babel-feature/code/metric.py multiview-babel-phone/code/sched/multistep_lr.py multiview-babel-phone/code/optim/sgd.py multiview-babel-feature/code/utils/stateful_dataset.py multiview-babel-phone/code/train.py multiview-babel-feature/code/layers/__init__.py multiview-babel-feature/code/layers/rnn.py multiview-babel-feature/code/net.py multiview-babel-phone/code/net.py multiview-babel-phone/code/sched/reduce_lr_on_plateau.py multiview-babel-feature/code/data.py multiview-babel-feature/code/sched/reduce_lr_on_plateau.py multiview-babel-phone/code/vocab.py multiview-babel-phone/code/loss.py multiview-babel-feature/code/sched/exponential_lr.py multiview-babel-feature/code/sched/revert_on_plateau.py multiview-babel-feature/code/train.py multiview-babel-phone/code/metric.py multiview-babel-feature/code/optim/adam.py multiview-babel-phone/code/layers/rnn.py multiview-babel-phone/code/utils/saver.py multiview-babel-feature/code/loss.py multiview-babel-feature/code/utils/config.py multiview-babel-feature/code/utils/cache.py MultilangDataset get_subwords_to_vectors DevMultilangDataset DevDataset combine_subwords_to_ids CountDict id_to_label get_xsampa_to_ipa Obj02 compute_ap compute_precision compute_recall compute_prb crossview_ap acoustic_ap MultiViewRNN_Phonetic MultiViewRNN Trainer Vocab CNN_1D Linear RNN_default Adam SGD ExponentialLR MultiStepLR ReduceLROnPlateau RevertOnPlateau Cache partial_fun NetSaver Saver TrainerSaver stack add_deltas MultilangStatefulBatchSampler StatefulDataset StatefulBatchSampler MultilangDataset DevMultilangDataset DevDataset combine_subwords_to_ids CountDict id_to_label get_xsampa_to_ipa Obj02 compute_ap compute_precision compute_recall compute_prb crossview_ap acoustic_ap MultiViewRNN Trainer Vocab CNN_1D Linear RNN_default Adam SGD ExponentialLR MultiStepLR ReduceLROnPlateau RevertOnPlateau Cache partial_fun NetSaver Saver TrainerSaver stack add_deltas MultilangStatefulBatchSampler StatefulDataset StatefulBatchSampler items list sorted append zeros keys len insert items CountDict items sum arange compute_precision compute_recall argmin abs max range len astype float32 float16 argsort pdist zeros range len cdist astype float32 float16 argsort zeros range rsplit import_module getattr pad shape range zeros roll | # Multilingual Acoustic Word Embeddings This 
is the code base for multilingual acoustic word embeddings (AWEs) and acoustically grounded word embeddings (AGWEs) in [Multilingual Jointly Trained Acoustic and Written Word Embeddings](https://arxiv.org/pdf/2006.14007.pdf) in INTERSPEECH 2020. ``` @inproceedings{hu2020multilingual, title={Multilingual Jointly Trained Acoustic and Written Word Embeddings}, author={Yushi Hu and Shane Settle and Karen Livescu}, year={2020}, booktitle=interspeech } ``` | 1,199 |