repo
stringlengths
8
116
tasks
stringlengths
8
117
titles
stringlengths
17
302
dependencies
stringlengths
5
372k
readme
stringlengths
5
4.26k
__index_level_0__
int64
0
4.36k
HDI-Project/BTB
['automl']
['The Machine Learning Bazaar: Harnessing the ML Ecosystem for Effective System Development']
tests/selection/test_selector.py tests/selection/test_uniform.py btb/selection/custom_selector.py btb/tuning/__init__.py tests/selection/test_custom_selector.py tests/selection/test_recent.py tests/integration/test_tuning.py benchmark/btb_benchmark/tuning_functions/hyperopt.py btb/tuning/hyperparams/base.py btb/tuning/tuners/base.py tests/tuning/acquisition/test_expected_improvement.py btb/tuning/hyperparams/categorical.py btb/tuning/tuners/__init__.py benchmark/btb_benchmark/kubernetes.py benchmark/btb_benchmark/challenges/randomforest.py benchmark/btb_benchmark/__init__.py btb/selection/selector.py tests/tuning/metamodels/test_gaussian_process.py btb/tuning/metamodels/base.py benchmark/btb_benchmark/tuning_functions/__init__.py tests/tuning/hyperparams/test_boolean.py btb/tuning/tuners/gaussian_process.py tests/selection/test_best.py docs/conf.py benchmark/btb_benchmark/challenges/mlchallenge.py btb/tuning/acquisition/predicted_score.py benchmark/btb_benchmark/challenges/bohachevsky.py tests/selection/test_pure.py btb/selection/__init__.py benchmark/btb_benchmark/challenges/__init__.py benchmark/btb_benchmark/tuning_functions/smac.py tests/tuning/acquisition/test_predicted_score.py btb/__init__.py tests/tuning/hyperparams/test_numerical.py btb/tuning/acquisition/__init__.py benchmark/btb_benchmark/results.py benchmark/btb_benchmark/challenges/xgboost.py benchmark/btb_benchmark/challenges/datasets.py benchmark/btb_benchmark/challenges/branin.py btb/selection/pure.py btb/tuning/hyperparams/numerical.py btb/tuning/metamodels/__init__.py tests/tuning/hyperparams/test_base.py benchmark/setup.py benchmark/btb_benchmark/__main__.py btb/selection/ucb1.py tests/integration/test_session.py tests/selection/test_hierarchical.py btb/tuning/hyperparams/boolean.py btb/selection/uniform.py tests/test_session.py benchmark/btb_benchmark/main.py btb/selection/hierarchical.py btb/session.py tests/tuning/tuners/test_uniform.py benchmark/btb_benchmark/challenges/sgd.py 
btb/tuning/acquisition/expected_improvement.py tests/tuning/tuners/test_base.py tests/tuning/metamodels/test_base.py btb/selection/best.py benchmark/btb_benchmark/challenges/challenge.py btb/selection/recent.py tests/tuning/hyperparams/test_categorical.py benchmark/btb_benchmark/tuning_functions/ax.py btb/tuning/hyperparams/__init__.py benchmark/btb_benchmark/tuning_functions/skopt.py btb/tuning/metamodels/gaussian_process.py btb/tuning/tuners/uniform.py setup.py benchmark/btb_benchmark/challenges/rosenbrock.py tests/tuning/test_tunable.py benchmark/btb_benchmark/tuning_functions/btb.py tests/selection/test_ucb1.py tests/tuning/tuners/test_gaussian_process.py btb/tuning/acquisition/base.py btb/tuning/tunable.py github_dependency _get_extra_setup _upload_to_s3 _df_to_csv_str run_dask_function _import_function _generate_cluster_spec main _get_parser run_on_kubernetes _evaluate_tuners_on_challenge get_math_challenge_instance run_benchmark _as_list _challenges_as_list _get_tuners_dict summarize_results benchmark _evaluate_tuner_on_challenge _get_challenges_list progress LogProgressBar _get_all_challenge_names get_z_scores write_results load_results get_exclusive_wins get_wins add_sheet get_summary main _run _summary _get_parser Bohachevsky Branin Challenge get_dataset_names MLChallenge RandomForestChallenge Rosenbrock SGDChallenge XGBoostChallenge convert_hyperparameters adapt_scoring_function ax_optimize gcptuner make_btb_tuning_function gpeituner uniformtuner gcpeituner _tuning_function gptuner _search_space_from_dict _make_minimize_function _hyperopt_tuning_function hyperopt_tpe skopt_gp_hedge skopt_PI skopt_EI _make_minimize_function _skopt_tuning_function skopt_LCB _dimension_space_from_dict smac_smac4hpo_ei smac_smac4hpo_pi smac_smac4hpo_lcb _adapt_scoring_function _get_optimizer_params smac_hb4ac _smac_tuning_function _parse_params _create_config_space get_all_tuning_functions BTBSession BestKReward BestKVelocity CustomSelector HierarchicalByAlgorithm 
PureBestKVelocity RecentKVelocity RecentKReward Selector UCB1 Uniform Tunable BaseAcquisition ExpectedImprovementAcquisition PredictedScoreAcquisition BaseHyperParam BooleanHyperParam CategoricalHyperParam IntHyperParam FloatHyperParam NumericalHyperParam BaseMetaModel GaussianProcessMetaModel GaussianCopulaProcessMetaModel BaseTuner StopTuning BaseMetaModelTuner GPTuner GCPEiTuner GPEiTuner GCPTuner UniformTuner TestBTBSession BTBSessionTest test_tuning_minimize test_tuning TestBestKVelocity TestBestKReward TestCustomSelector TestHierarchicalByAlgorithm TestPureBestKVelocity TestRecentKVelocity TestRecentKReward TestSelector TestUCB1 TestUniform TestTunable assert_called_with_np_array TestExpectedImprovementAcquisition assert_called_with_np_array TestPredictedScoreAcquisition TestBaseHyperParam TestBooleanHyperParam assert_called_with_np_array TestCategoricalHyperParam TestIntHyperParam TestFloatHyperParam TestBaseMetaModel TestGaussianCopulaProcessMetaModel TestGaussianProcessMetaModel TestBaseMetaModelTuner TestBaseTuner TestGaussianProcessTuner TestGaussianCopulaProcessTuner TestGaussianCopulaProcessExpectedImprovementTuner TestGaussianProcessExpectedImprovementTuner TestUniformTuner join import_module split get join format append get _get_extra_setup join format dumps sub put_object client get from_dict isinstance adapt makedirs _upload_to_s3 to_csv Client get_versions dirname scale _import_function _generate_cluster_spec run CoreV1Api print create_namespaced_pod load_kube_config Configuration _generate_cluster_spec set_default add_argument ArgumentParser basicConfig print exit namespace create_pod verbose print_help run_dask_function _get_parser parse_args tabulate run_on_kubernetes tuner hasattr evaluate utcnow info get_tunable_hyperparameters append items _evaluate_tuner_on_challenge info getLogger LogProgressBar futures_of compute _evaluate_tuners_on_challenge from_records persist extend rename progress pivot get make_btb_tuning_function 
get_all_tuning_functions _as_list info callable any sample append challenge_class isinstance to_csv _get_tuners_dict benchmark info _get_challenges_list write_results load_results dict round replace sum mean std DataFrame items summary_function sort_values items columns reset_index set_column write to_excel append max enumerate len columns add_format ExcelWriter add_sheet get_summary save basicConfig tabulate detailed_output print ERROR CRITICAL run_benchmark output_path challenge_types iterations verbose challenges sample setLevel max_rows tuners input summarize_results output add_parser set_defaults add_subparsers filterwarnings action int items min append float max convert_hyperparameters adapt_scoring_function from_dict record tuner_class scoring_function propose max range items uniformint min choice uniform max _search_space_from_dict _make_minimize_function fmin Trials items list Real min Integer Categorical append max _make_minimize_function _dimension_space_from_dict gp_minimize get items bool CategoricalHyperparameter min ConfigurationSpace UniformFloatHyperparameter add_hyperparameter max UniformIntegerHyperparameter dict items update _create_config_space Scenario _adapt_scoring_function object inf Tunable record GPTuner random propose range Tunable record GPTuner random propose range assert_array_equal zip
<p align="left"> <img width="15%" src="https://dai.lids.mit.edu/wp-content/uploads/2018/06/Logo_DAI_highres.png" alt="BTB" /> <i>An open source project from Data to AI Lab at MIT.</i> </p> ![](https://raw.githubusercontent.com/MLBazaar/BTB/master/docs/images/BTB-Icon-small.png) A simple, extensible backend for developing auto-tuning systems. [![Development Status](https://img.shields.io/badge/Development%20Status-2%20--%20Pre--Alpha-yellow)](https://pypi.org/search/?c=Development+Status+%3A%3A+2+-+Pre-Alpha) [![PyPi Shield](https://img.shields.io/pypi/v/baytune.svg)](https://pypi.python.org/pypi/baytune) [![Travis CI Shield](https://travis-ci.com/MLBazaar/BTB.svg?branch=master)](https://travis-ci.com/MLBazaar/BTB) [![Coverage Status](https://codecov.io/gh/MLBazaar/BTB/branch/master/graph/badge.svg)](https://codecov.io/gh/MLBazaar/BTB)
400
HDI-Project/MLBlocks
['automl']
['The Machine Learning Bazaar: Harnessing the ML Ecosystem for Effective System Development']
docs/conf.py mlblocks/mlblock.py tests/features/test_fit_predicr_args.py tests/test_mlblock.py mlblocks/mlpipeline.py mlblocks/__init__.py mlblocks/discovery.py setup.py tests/features/test_pipeline_loading.py tests/test_mlpipeline.py tests/features/test_partial_outputs.py tests/test_discovery.py _load_json _find_annotations _search_annotations _load _load_entry_points find_pipelines add_primitives_path find_primitives get_primitives_paths get_pipelines_paths load_primitive _add_lookup_path load_pipeline _match add_pipelines_path MLBlock import_object MLPipeline test__load_primitive_success test__add_lookup_path_do_nothing test__load_primitive_value_error test__load_json_path test_add_pipelines_path test__match_no_match test_add_primitives_path test__find_annotations test__match_list test_find_primitives test__add_lookup_path test__load_entry_points_entry_points test__load_pipeline_value_error test__search_annotations test__load_success test__match_sublevel test_get_pipelines_paths test__match_multiple_keys test_get_primitives_paths test_find_pipelines test__match_dict test__add_lookup_path_exception test__load_entry_points_no_entry_points test__load_pipeline_success test__load_value_error test__match_root test__match_list_no_match dummy_function TestImportObject TestMLBlock DummyClass TestMLPipline get_mlblock_mock test_fit_predict_args_in_init TestPartialOutputs almost_equal TestMLPipeline insert abspath _add_lookup_path debug _add_lookup_path debug load iter_entry_points list isinstance extend append _load_entry_points join isfile split range len _load get_primitives_paths _load get_pipelines_paths update join isdir dict abspath listdir exists compile isinstance split update items list sorted _search_annotations dict loader values rsplit import_module isinstance _add_lookup_path uuid4 str _add_lookup_path abspath abspath add_primitives_path abspath add_pipelines_path _load_entry_points _load_entry_points EntryPoint get_primitives_paths assert_called_once_with 
get_pipelines_paths _load assert_called_once_with assert_called_once_with load_primitive assert_called_once_with assert_called_once_with load_pipeline _search_annotations join abspath _match _match _match _match _match _match _match assert_called_once_with Mock _find_annotations return_value find_primitives dict load_primitive assert_called_once_with assert_called_once_with find_pipelines load_pipeline return_value predict MLPipeline items assert_almost_equal isinstance
<p align="left"> <a href="https://dai.lids.mit.edu"> <img width=15% src="https://dai.lids.mit.edu/wp-content/uploads/2018/06/Logo_DAI_highres.png" alt="DAI-Lab" /> </a> <i>An Open Source Project from the <a href="https://dai.lids.mit.edu">Data to AI Lab, at MIT</a></i> </p> <p align="left"> <img width=20% src="https://dai.lids.mit.edu/wp-content/uploads/2018/06/mlblocks-icon.png" alt=“MLBlocks” /> </p> <p align="left">
401
HKUST-Aerial-Robotics/DenseSurfelMapping
['superpixels']
['Real-time Scalable Dense Surfel Mapping']
kitti_publisher/scripts/publisher.py
# DenseSurfelMapping **News: You can find the paper [here](https://www.dropbox.com/s/h9bais2wnw1g9f0/root.pdf?dl=0). If the project helps you in your paper, PLEASE cite it.** **News: we have updated VINS-Supported branch. The code is not fully checked after refactoring. If you encounted any problems, please let us know.** ## A depth map fusion method This is a depth map fusion method following the ICRA 2019 submission **Real-time Scalable Dense Surfel Mapping**, Kaixuan Wang, Fei Gao, and Shaojie Shen. Given a sequence of depth images, intensity images, and camera poses, the proposed methods can fuse them into a globally consistent model using surfel representation. The fusion method supports both [ORB-SLAM2](https://github.com/raulmur/ORB_SLAM2) and [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono) (a little modification is required) so that you can use it in RGB-D, stereo, or visual-inertial cases according to your setups. We develop the method based on the motivation that the fusion method: (1) can support loop closure (so that it can be consistent with other state-of-the-art SLAM methods), (2) do not require much CPU/memory resources to reconstruct a fine model in real-time, (3) can be scaled to large environments. These requirements are of vital importance in robot navigation tasks that the robot can safly navigate in the environment with odometry-consistent dense maps. An example to show the usage of the surfel mapping is shown below. <p align="center"> <img src="fig/example.png" alt="mapping example" width = "623" height = "300"> </p>
402
HKUST-Aerial-Robotics/MVDepthNet
['depth estimation', 'data augmentation']
['MVDepthNet: Real-time Multiview Depth Estimation Neural Network']
depthNet_model.py example.py example2.py visualize.py depthNet down_conv_layer depth_layer get_trainable_number up_conv_layer conv_layer refine_layer np2Depth np2Img shape list uint8 min astype moveaxis max COLORMAP_RAINBOW applyColorMap uint8 astype
# MVDepthNet ## A Real-time Multiview Depth Estimation Network This is an open source implementation for 3DV 2018 submission "MVDepthNet: real-time multiview depth estimation neural network" by Kaixuan Wang and Shaojie Shen. [arXiv link](https://arxiv.org/abs/1807.08563). If you find the project useful for your research, please cite: ``` @InProceedings{mvdepthnet, author = "K. Wang and S. Shen", title = "MVDepthNet: real-time multiview depth estimation neural network", booktitle = "International Conference on 3D Vision (3DV)",
403
HUJI-Deep/FlowKet
['variational monte carlo']
['Deep autoregressive models for the efficient variational simulation of many-body quantum systems']
src/flowket/layers/complex/initializers.py src/flowket/callbacks/__init__.py examples/debug_fast_sampling.py src/flowket/layers/transition_invariants.py src/flowket/machines/conv_net_autoregressive_2D.py examples/heisenberg_2d_horvod_multy_gpu_fast_sampling.py src/flowket/machines/__init__.py src/flowket/callbacks/exact/runtime_stats.py src/flowket/layers/complex/casting.py src/flowket/operators/j1j2.py examples/evaluate_with_2d_obc_invariant.py src/flowket/samplers/exact_sampler.py src/flowket/machines/ensemble.py src/flowket/machines/abstract_machine.py examples/basic_autoregressive_exact_gradient.py tests/test_variational.py src/flowket/observables/monte_carlo/sigma_z.py src/flowket/deepar/graph_analysis/layer_topology.py examples/benchmark_sampling.py src/flowket/deepar/ordering/__init__.py examples/rbm_heisenberg_1d_sr.py src/flowket/callbacks/monte_carlo/tensorboard_with_generator_validation_data.py src/flowket/deepar/graph_analysis/__init__.py src/flowket/machines/complex_values_simple_conv_net_autoregressive_1D.py src/flowket/layers/complex/histograms.py src/flowket/callbacks/monte_carlo/mcmc_stats.py examples/heisenberg_2d_keras_multy_gpu_fast_sampling.py setup.py src/flowket/optimizers/stochastic_reconfiguration/linear_equations.py src/flowket/evaluation/evaluate.py src/flowket/deepar/samplers/__init__.py src/flowket/layers/complex/dense.py examples/benchmark.py src/flowket/deepar/layers/padding.py src/flowket/optimization/__init__.py src/flowket/deepar/layers/one_hot.py src/flowket/layers/complex/base_layer.py src/flowket/machines/simple_conv_net_autoregressive_1D.py examples/basic_autoregressive_exact_gradient_multy_gpu.py src/flowket/callbacks/exact/machine_updated.py src/flowket/deepar/graph_analysis/reshape_topology.py src/flowket/utils/v1_to_v2.py src/flowket/operators/netket_operator.py tests/test_gradients_aggregation.py src/flowket/deepar/samplers/autoregressive.py src/flowket/operators/__init__.py src/flowket/optimization/mini_batch_generator.py 
src/flowket/deepar/graph_analysis/padding_topology.py src/flowket/optimization/exact_variational.py src/flowket/callbacks/tensorboard.py src/flowket/__init__.py experiments/train.py src/flowket/samplers/metropolis_hastings.py tests/test_graph_analysis.py src/flowket/callbacks/monte_carlo/__init__.py experiments/heisenberg_runner.py src/flowket/deepar/ordering/moves.py src/flowket/optimization/variational_monte_carlo.py tests/simple_models.py src/flowket/optimization/loss.py src/flowket/deepar/layers/wrappers.py tests/test_complex_values_optimizer.py src/flowket/deepar/ordering/zigzag.py src/flowket/callbacks/monte_carlo/observable.py src/flowket/exact/utils.py src/flowket/layers/complex/tensorflow_ops.py tests/test_samplers.py examples/j1j2_2d_monte_carlo_4.py src/flowket/deepar/ordering/flat.py src/flowket/optimizers/accumulate_gradient_optimizer.py src/flowket/optimizers/utils.py src/flowket/samplers/fast_autoregressive/__init__.py src/flowket/layers/dihedral_4_invariants.py src/flowket/deepar/graph_analysis/masking_topology.py src/flowket/operators/operator.py src/flowket/deepar/ordering/raster.py src/flowket/deepar/graph_analysis/gathering_topology.py src/flowket/deepar/graph_analysis/one_to_one_topology.py src/flowket/layers/spins_invariants.py src/flowket/deepar/utils/singleton.py src/flowket/main.py src/flowket/machines/rbm.py experiments/ising_runner.py src/flowket/deepar/graph_analysis/data_structures.py src/flowket/callbacks/monte_carlo/runtime_stats.py src/flowket/deepar/samplers/fast_autoregressive.py src/flowket/samplers/__init__.py src/flowket/deepar/layers/casting.py src/flowket/callbacks/checkpoint.py src/flowket/callbacks/exact/__init__.py src/flowket/deepar/layers/layer_normalization.py src/flowket/optimizers/stochastic_reconfiguration/optimizer.py examples/complex_ops_autoregressive_heisenberg_1d.py src/flowket/observables/monte_carlo/operator.py src/flowket/callbacks/exact/sigma_z.py src/flowket/utils/jacobian.py 
src/flowket/observables/monte_carlo/observable.py src/flowket/optimizers/complex_values_optimizer.py src/flowket/deepar/samplers/base_sampler.py src/flowket/operators/ising.py examples/basic_autoregressive_2d.py src/flowket/deepar/layers/autoregressive.py src/flowket/callbacks/monte_carlo/bad_eigen_state_stopping.py examples/j1j2_2d_exact_4.py tests/test_ensemble.py src/flowket/observables/monte_carlo/__init__.py tests/test_autoregressive.py src/flowket/operators/heisenberg.py examples/custom_keras_model.py src/flowket/optimization/horovod_variational_monte_carlo.py tests/test_gradient_per_example.py examples/basic_autoregressive_monte_carlo_gradient.py examples/ising.py src/flowket/deepar/graph_analysis/sampling_topology.py src/flowket/callbacks/monte_carlo/generator_iterator.py src/flowket/deepar/graph_analysis/concatenate_topology.py examples/custom_keras_model_with_validation_data.py src/flowket/deepar/utils/__init__.py src/flowket/callbacks/monte_carlo/local_energy_stats.py src/flowket/callbacks/exact/local_energy.py src/flowket/deepar/samplers/ensemble.py src/flowket/evaluation/__init__.py src/flowket/deepar/graph_analysis/convolutional_topology.py src/flowket/deepar/graph_analysis/dependency_graph.py src/flowket/deepar/layers/masking.py src/flowket/deepar/layers/gathering.py tests/conftest.py tests/test_tensorflow_complex_numbers_ops.py experiments/run_evaluation.py src/flowket/machines/simple_custom_autoregressive_ordering.py src/flowket/deepar/layers/lambda_with_one_to_one_topology.py src/flowket/callbacks/exact/observable.py src/flowket/optimizers/__init__.py src/flowket/layers/__init__.py src/flowket/layers/complex/conv.py tests/test_stochastic_reconfiguration.py src/flowket/deepar/graph_analysis/topology_manager.py src/flowket/deepar/layers/__init__.py define_args_parser run_pyket run_netket run sample build_model depth_to_max_mini_batch restore_run_state get_params run total_spin_netket_operator main main create_evaluation_config_parser run 
compile_model load_weights_if_exist build_model save_config create_training_config_parser init_horovod train to_valid_stages_config CheckpointByTime load_optimizer_weights save_optimizer_weights TensorBoard ExactLocalEnergy MachineUpdated ExactObservableCallback RuntimeStats ExactSigmaZ default_wave_function_callbacks_factory BadEigenStateStopping GeneratorIterator LocalEnergyStats MCMCStats ObservableStats RuntimeStats TensorBoardWithGeneratorValidationData default_wave_function_stats_callbacks_factory ConcatenateTopology ConvolutionalTopology DependencyGraph assert_valid_probabilistic_model visit_layer_predecessors GatherTopology LayerTopology RightShiftTopology DownShiftTopology OneHotTopologyWithIdentity OneToOneTopologyWithIdentity OneToOneTopology PeriodicPaddingTopology PaddingTopology from_flat_index_to_spatial_location to_flat_spatial_location ReshapeTopology CategorialSamplingTopology PlusMinusOneSamplingTopology TopologyManager CombineAutoregressiveConditionals normalize_in_log_space NormalizeInLogSpace combine_autoregressive_conditionals CastingLayer GatherLayer LambdaWithOneToOneTopology LayerNormalization shift RightShiftLayer DownShiftLayer plus_minus_one_to_one_hot PlusMinusOneToOneHot ToOneHot to_one_hot ExpandInputDim PeriodicPadding CopyNormaInitializer WeightNormalization to_flat_ordering to_flat_inverse_ordering up right down_right down up_right left up_left down_left raster zigzag AutoregressiveSampler Sampler Ensemble FastAutoregressiveSampler Singleton mean_logs evaluate exact_evaluate to_log_wave_function_vector decimal_to_binary decimal_array_to_binary_array complex_norm_log_fsum_exp vector_to_machine binary_array_to_decimal_array fsum binary_to_decimal netket_vector_to_exact_variational_vector log_fsum_exp FlipLeftRight Rot90 EqualUpDownSpins FlipSpins equal_up_down_spins_function Roll ComplexLayer VectorToComplexNumber _ComplexConv ComplexConv2D ComplexConv3D normalize_padding ComplexConv1D normalize_tuple ComplexDense 
TranslationInvariantComplexDense LogSpaceComplexNumberHistograms get _RealPartInitializer random_rayleigh NegateDecorator FromRealValueInitializers StandartComplexValueInitializer ConjugateDecorator ComplexValueInitializer to_int_shape _ImagPartInitializer crelu angle keras_conv_to_complex_conv float_norm lncosh extract_complex_image_patches conv2d_complex complex_log keras_conditional_wave_functions_to_wave_function AutoregressiveWrapper AutoNormalizedAutoregressiveMachine Machine AutoregressiveMachine ComplexValuesSimpleConvNetAutoregressive1D causal_conv_1d ConvNetAutoregressive2D make_pbc_invariants build_ensemble make_2d_obc_invariants average_ensemble_op make_up_down_invariant probabilistic_ensemble_op build_symmetrization_ensemble RBMBase RBM RBMSym SimpleConvNetAutoregressive1D causal_conv_1d SimpleCustomOrderingAutoregressive BaseObservable LambdaObservable Observable get_flat_local_connections_log_values sigma_z abs_sigma_z Heisenberg HeisenbergFindConn Ising j1j2_two_dim_operator j1j2_two_dim_netket_operator NetketOperatorWrapper Operator cube_shape OperatorOnGrid ExactVariational ExactObservable HorovodVariationalMonteCarlo loss_for_energy_minimization MiniBatchGenerator VariationalMonteCarlo convert_to_accumulate_gradient_optimizer ComplexValuesOptimizer get_model_real_weights get_model_imag_weights to_complex_tensors get_model_weights_for_complex_value_params_gradient forward_mode_gradients tensors_to_matrix tensors_to_column column_to_tensors conjugate_gradient ComplexValuesStochasticReconfiguration ExactSampler WaveFunctionSampler MetropolisHastingsSampler MetropolisHastingsExchange MetropolisHastingsUniform sum_correlations MetropolisHastingsLocal MetropolisHastingsSymmetricProposal MetropolisHastingsHamiltonian MetropolisHastingsGlobal _build_autoregressive_sampler ConvJacobian gradient_per_example LayerJacobian complex_values_jacobians_to_real_parts to_4d_shape JacobianManager predictions_jacobian DenseJacobian fix_tensorflow_v1_names 
clear_session_after_test netket complex_values_1d_model real_values_1d_model complex_values_linear_1d_model LinearDepthTwo real_values_2d_model complex_values_2d_model Linear test_autoregressive_have_normalize_distribution test_apply_complex_gradient test_get_predictions_jacobian test_get_complex_value_gradients transform_sample roll_sample test_make_2d_obc_invariants test_make_pbc_invariants test_build_symmetrization_ensemble test_update_ema_just_when_need test_changing_the_update_frequency test_reset_after_update test_update_just_when_need get_simple_linear_model test_ema test_equal_to_builtin_jacobian get_layer_output_with_topology_manager test_apply_layer_for_single_spatial_location test_sampler_by_l1 IdentityOperator sampler_factory test_stochastic_reconfiguration_matrix_vector_product_via_jvp pinv test_compute_wave_function_gradient_covariance_inverse_multiplication test_lncosh_gradient test_lncosh reduce_variance test_monte_carlo_update_unbalanced_local_energy test_monte_carlo_and_netket_agree test_exact_and_monte_carlo_agree to_generator time fast_jacobian batch_size SGD fit_generator MetropolisHastingsHamiltonian Model use_stochastic_reconfiguration ComplexValuesStochasticReconfiguration VariationalMonteCarlo summary depth Heisenberg Input range compile Hypercube Vmc time init_random_parameters run Sgd depth Heisenberg MetropolisHamiltonian Spin FFNN add_argument ArgumentParser run_pyket run_netket pyket_on_cpu trange next ConvNetAutoregressive2D Adam Model summary FastAutoregressiveSampler Input convert_to_accumulate_gradient_optimizer compile items min load_weights load_optimizer_weights exists set_update_params_frequency save_weights depth Input depth_to_max_mini_batch make_2d_obc_invariants Ising width copy_with_new_batch_size build_model fit_generator restore_run_state VariationalMonteCarlo update_params_frequency zip enumerate to_generator learning_rate evaluate print min TensorBoardWithGeneratorValidationData make_up_down_invariant 
default_wave_function_stats_callbacks_factory add_argument ArgumentParser CustomGraph kron append range prod Spin create_evaluation_config_parser add_argument Heisenberg add_parser create_training_config_parser func ArgumentParser pbc parse_args set_defaults add_subparsers Ising mini_batch_size load_weights weights_path set_defaults add_argument ArgumentParser str set_session init ConfigProto local_rank Session convert_to_accumulate_gradient_optimizer Adam compile print load_weights load_optimizer_weights exists num_epoch len compile_model batch_size num_epoch set_update_params_frequency save_weights Input use_horovod load_weights_if_exist set_value make_2d_obc_invariants ceil copy_with_new_batch_size vmc_cls to_valid_stages_config build_model save_config mini_batch_size size default_wave_function_stats_callbacks_factory fit_generator update_params_frequency lr zip enumerate to_generator join int learning_rate evaluate print min TensorBoardWithGeneratorValidationData make_up_down_invariant init_horovod output_path vars hasattr output_path makedirs set_defaults add_argument ArgumentParser batch_get_value getattr optimizer _make_train_function set_weights inbound_nodes tensor_indices add set zip visitor inbound_layers graph topological_sort zip append reduce_logsumexp is_complex list reduce_sum shape range len slice shape int len zip to_flat_ordering enumerate len append prod move mean keys machine_updated on_batch_end on_epoch_end num_of_batch_until_full_cycle append range zeros range zeros range zeros range prod reshape max real max reshape num_of_states state_to_number hilbert HilbertIndex range zeros dtype one_hot cumsum reshape float32 shape cast int32 fill prod tuple int isinstance lower random_normal isinstance imag real assert_valid_probabilistic_model complex exp reduce_logsumexp zeros_like angle imag reduce_mean real log predictions_model build_ensemble make_2d_obc_invariants Input build_symmetrization_ensemble build_symmetrization_ensemble Conv1D 
conv_layer WeightNormalization reshape tuple shape prod range len tuple shape prod range len isinstance CustomGraph kron append range Spin prod len get_updates print get_gradients variable __get__ type gradients as_list isinstance reshape tuple shape append prod isinstance isinstance namedtuple range append imag real conj isinstance gradients jacobian complex_values_jacobians_to_real_parts real zip reduce_logsumexp abs multiply reduce_max reduce_sum floormod roll sqrt reduce_mean real reduce_min imag clear_session Input Linear first_conv_layer second_dense_layer first_dense_layer Dense second_conv_layer Conv2D Input ComplexConv2D second_dense_layer ComplexDense first_dense_layer second_conv_layer first_conv_layer Input Input range second_dense_layer first_dense_layer Dense second_conv_layer first_conv_layer Conv1D Input rot90 flip Model Input convert_to_accumulate_gradient_optimizer compile get_sgd_iteration SGD get_simple_linear_model get_w fit get_sgd_iteration SGD set_update_params_frequency get_simple_linear_model get_w fit get_w get_simple_linear_model SGD fit get_sgd_iteration SGD get_simple_linear_model get_w set_weights_ema fit SGD get_simple_linear_model get_w set_weights_ema fit get_spatial_dependency product get_layer_topology spatial_location slice apply_layer_for_single_spatial_location concat reshape stack append zeros len list issubclass partial isinstance product Model func svd dtype concat reduce_max boolean_mask cast diag constant float_norm cosh lncosh reset_default_graph log tanh constant gradients float_norm lncosh real reset_default_graph conj to_generator fit_generator Adam compile Hypercube get_weights Vmc complex_values_linear_1d_model ExactSampler NetketOperatorWrapper advance range flatten mean sqrt exact_evaluate Sgd zeros Heisenberg ExactVariational Spin FFNN
# FlowKet - A Python framework for variational Monte-Carlo simulations on top of Tensorflow FlowKet is our framework for running variational Monte-Carlo simulations of quantum many-body systems. It supports any Keras model for representing a parameterized unnormalized wave-function, e.g., Restricted Boltzman Machines and ConvNets, with real or complex-valued parameters. We have implemented a standard Markov-Chain Monte-Carlo (MCMC) energy gradient estimator for this general case, which can be used to approximate the ground state of a quantum system according to a given Hamiltonian. The neural-network-based approach for representing wave-fucntions was shown to be a promising method for solving the many-body problem, often matching or even surpassing the precision of other competing methods. In addition to an MCMC energy gradient estimator, we have also implemented our novel Neural Autoregressive Quantum State wave-function representation that supports efficient and exact sampling. By overcoming the reliance on MCMC, our models can converge much faster for models of same size, which allows us to scale them to millions of parameters, as opposed to just a few thousands for prior approaches. This leads to better precison and ability to invesitgate larger and more intricated systems. Please [read our paper](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.124.020503) ([arXiv version](https://arxiv.org/abs/1902.04057)), cited below, for further details on this approach. We hope that users of our library will be able to take our method and apply to a variety of problems. If you use this codebase or apply our method, we would appreciate if you cite us as follows: ```bibtex @article{PhysRevLett.124.020503, title = {Deep Autoregressive Models for the Efficient Variational Simulation of Many-Body Quantum Systems}, author = {Sharir, Or and Levine, Yoav and Wies, Noam and Carleo, Giuseppe and Shashua, Amnon}, journal = {Phys. Rev. Lett.}, volume = {124}, issue = {2},
404
HUJI-Deep/PyKet
['variational monte carlo']
['Deep autoregressive models for the efficient variational simulation of many-body quantum systems']
src/flowket/layers/complex/initializers.py src/flowket/callbacks/__init__.py examples/debug_fast_sampling.py src/flowket/layers/transition_invariants.py src/flowket/machines/conv_net_autoregressive_2D.py examples/heisenberg_2d_horvod_multy_gpu_fast_sampling.py src/flowket/machines/__init__.py src/flowket/callbacks/exact/runtime_stats.py src/flowket/layers/complex/casting.py src/flowket/operators/j1j2.py examples/evaluate_with_2d_obc_invariant.py src/flowket/samplers/exact_sampler.py src/flowket/machines/ensemble.py src/flowket/machines/abstract_machine.py examples/basic_autoregressive_exact_gradient.py tests/test_variational.py src/flowket/observables/monte_carlo/sigma_z.py src/flowket/deepar/graph_analysis/layer_topology.py examples/benchmark_sampling.py src/flowket/deepar/ordering/__init__.py examples/rbm_heisenberg_1d_sr.py src/flowket/callbacks/monte_carlo/tensorboard_with_generator_validation_data.py src/flowket/deepar/graph_analysis/__init__.py src/flowket/machines/complex_values_simple_conv_net_autoregressive_1D.py src/flowket/layers/complex/histograms.py src/flowket/callbacks/monte_carlo/mcmc_stats.py examples/heisenberg_2d_keras_multy_gpu_fast_sampling.py setup.py src/flowket/optimizers/stochastic_reconfiguration/linear_equations.py src/flowket/evaluation/evaluate.py src/flowket/deepar/samplers/__init__.py src/flowket/layers/complex/dense.py examples/benchmark.py src/flowket/deepar/layers/padding.py src/flowket/optimization/__init__.py src/flowket/deepar/layers/one_hot.py src/flowket/layers/complex/base_layer.py src/flowket/machines/simple_conv_net_autoregressive_1D.py examples/basic_autoregressive_exact_gradient_multy_gpu.py src/flowket/callbacks/exact/machine_updated.py src/flowket/deepar/graph_analysis/reshape_topology.py src/flowket/utils/v1_to_v2.py src/flowket/operators/netket_operator.py tests/test_gradients_aggregation.py src/flowket/deepar/samplers/autoregressive.py src/flowket/operators/__init__.py src/flowket/optimization/mini_batch_generator.py 
src/flowket/deepar/graph_analysis/padding_topology.py src/flowket/optimization/exact_variational.py src/flowket/callbacks/tensorboard.py src/flowket/__init__.py experiments/train.py src/flowket/samplers/metropolis_hastings.py tests/test_graph_analysis.py src/flowket/callbacks/monte_carlo/__init__.py experiments/heisenberg_runner.py src/flowket/deepar/ordering/moves.py src/flowket/optimization/variational_monte_carlo.py tests/simple_models.py src/flowket/optimization/loss.py src/flowket/deepar/layers/wrappers.py tests/test_complex_values_optimizer.py src/flowket/deepar/ordering/zigzag.py src/flowket/callbacks/monte_carlo/observable.py src/flowket/exact/utils.py src/flowket/layers/complex/tensorflow_ops.py tests/test_samplers.py examples/j1j2_2d_monte_carlo_4.py src/flowket/deepar/ordering/flat.py src/flowket/optimizers/accumulate_gradient_optimizer.py src/flowket/optimizers/utils.py src/flowket/samplers/fast_autoregressive/__init__.py src/flowket/layers/dihedral_4_invariants.py src/flowket/deepar/graph_analysis/masking_topology.py src/flowket/operators/operator.py src/flowket/deepar/ordering/raster.py src/flowket/deepar/graph_analysis/gathering_topology.py src/flowket/deepar/graph_analysis/one_to_one_topology.py src/flowket/layers/spins_invariants.py src/flowket/deepar/utils/singleton.py src/flowket/main.py src/flowket/machines/rbm.py experiments/ising_runner.py src/flowket/deepar/graph_analysis/data_structures.py src/flowket/callbacks/monte_carlo/runtime_stats.py src/flowket/deepar/samplers/fast_autoregressive.py src/flowket/samplers/__init__.py src/flowket/deepar/layers/casting.py src/flowket/callbacks/checkpoint.py src/flowket/callbacks/exact/__init__.py src/flowket/deepar/layers/layer_normalization.py src/flowket/optimizers/stochastic_reconfiguration/optimizer.py examples/complex_ops_autoregressive_heisenberg_1d.py src/flowket/observables/monte_carlo/operator.py src/flowket/callbacks/exact/sigma_z.py src/flowket/utils/jacobian.py 
src/flowket/observables/monte_carlo/observable.py src/flowket/optimizers/complex_values_optimizer.py src/flowket/deepar/samplers/base_sampler.py src/flowket/operators/ising.py examples/basic_autoregressive_2d.py src/flowket/deepar/layers/autoregressive.py src/flowket/callbacks/monte_carlo/bad_eigen_state_stopping.py examples/j1j2_2d_exact_4.py tests/test_ensemble.py src/flowket/observables/monte_carlo/__init__.py tests/test_autoregressive.py src/flowket/operators/heisenberg.py examples/custom_keras_model.py src/flowket/optimization/horovod_variational_monte_carlo.py tests/test_gradient_per_example.py examples/basic_autoregressive_monte_carlo_gradient.py examples/ising.py src/flowket/deepar/graph_analysis/sampling_topology.py src/flowket/callbacks/monte_carlo/generator_iterator.py src/flowket/deepar/graph_analysis/concatenate_topology.py examples/custom_keras_model_with_validation_data.py src/flowket/deepar/utils/__init__.py src/flowket/callbacks/monte_carlo/local_energy_stats.py src/flowket/callbacks/exact/local_energy.py src/flowket/deepar/samplers/ensemble.py src/flowket/evaluation/__init__.py src/flowket/deepar/graph_analysis/convolutional_topology.py src/flowket/deepar/graph_analysis/dependency_graph.py src/flowket/deepar/layers/masking.py src/flowket/deepar/layers/gathering.py tests/conftest.py tests/test_tensorflow_complex_numbers_ops.py experiments/run_evaluation.py src/flowket/machines/simple_custom_autoregressive_ordering.py src/flowket/deepar/layers/lambda_with_one_to_one_topology.py src/flowket/callbacks/exact/observable.py src/flowket/optimizers/__init__.py src/flowket/layers/__init__.py src/flowket/layers/complex/conv.py tests/test_stochastic_reconfiguration.py src/flowket/deepar/graph_analysis/topology_manager.py src/flowket/deepar/layers/__init__.py define_args_parser run_pyket run_netket run sample build_model depth_to_max_mini_batch restore_run_state get_params run total_spin_netket_operator main main create_evaluation_config_parser run 
compile_model load_weights_if_exist build_model save_config create_training_config_parser init_horovod train to_valid_stages_config CheckpointByTime load_optimizer_weights save_optimizer_weights TensorBoard ExactLocalEnergy MachineUpdated ExactObservableCallback RuntimeStats ExactSigmaZ default_wave_function_callbacks_factory BadEigenStateStopping GeneratorIterator LocalEnergyStats MCMCStats ObservableStats RuntimeStats TensorBoardWithGeneratorValidationData default_wave_function_stats_callbacks_factory ConcatenateTopology ConvolutionalTopology DependencyGraph assert_valid_probabilistic_model visit_layer_predecessors GatherTopology LayerTopology RightShiftTopology DownShiftTopology OneHotTopologyWithIdentity OneToOneTopologyWithIdentity OneToOneTopology PeriodicPaddingTopology PaddingTopology from_flat_index_to_spatial_location to_flat_spatial_location ReshapeTopology CategorialSamplingTopology PlusMinusOneSamplingTopology TopologyManager CombineAutoregressiveConditionals normalize_in_log_space NormalizeInLogSpace combine_autoregressive_conditionals CastingLayer GatherLayer LambdaWithOneToOneTopology LayerNormalization shift RightShiftLayer DownShiftLayer plus_minus_one_to_one_hot PlusMinusOneToOneHot ToOneHot to_one_hot ExpandInputDim PeriodicPadding CopyNormaInitializer WeightNormalization to_flat_ordering to_flat_inverse_ordering up right down_right down up_right left up_left down_left raster zigzag AutoregressiveSampler Sampler Ensemble FastAutoregressiveSampler Singleton mean_logs evaluate exact_evaluate to_log_wave_function_vector decimal_to_binary decimal_array_to_binary_array complex_norm_log_fsum_exp vector_to_machine binary_array_to_decimal_array fsum binary_to_decimal netket_vector_to_exact_variational_vector log_fsum_exp FlipLeftRight Rot90 EqualUpDownSpins FlipSpins equal_up_down_spins_function Roll ComplexLayer VectorToComplexNumber _ComplexConv ComplexConv2D ComplexConv3D normalize_padding ComplexConv1D normalize_tuple ComplexDense 
TranslationInvariantComplexDense LogSpaceComplexNumberHistograms get _RealPartInitializer random_rayleigh NegateDecorator FromRealValueInitializers StandartComplexValueInitializer ConjugateDecorator ComplexValueInitializer to_int_shape _ImagPartInitializer crelu angle keras_conv_to_complex_conv float_norm lncosh extract_complex_image_patches conv2d_complex complex_log keras_conditional_wave_functions_to_wave_function AutoregressiveWrapper AutoNormalizedAutoregressiveMachine Machine AutoregressiveMachine ComplexValuesSimpleConvNetAutoregressive1D causal_conv_1d ConvNetAutoregressive2D make_pbc_invariants build_ensemble make_2d_obc_invariants average_ensemble_op make_up_down_invariant probabilistic_ensemble_op build_symmetrization_ensemble RBMBase RBM RBMSym SimpleConvNetAutoregressive1D causal_conv_1d SimpleCustomOrderingAutoregressive BaseObservable LambdaObservable Observable get_flat_local_connections_log_values sigma_z abs_sigma_z Heisenberg HeisenbergFindConn Ising j1j2_two_dim_operator j1j2_two_dim_netket_operator NetketOperatorWrapper Operator cube_shape OperatorOnGrid ExactVariational ExactObservable HorovodVariationalMonteCarlo loss_for_energy_minimization MiniBatchGenerator VariationalMonteCarlo convert_to_accumulate_gradient_optimizer ComplexValuesOptimizer get_model_real_weights get_model_imag_weights to_complex_tensors get_model_weights_for_complex_value_params_gradient forward_mode_gradients tensors_to_matrix tensors_to_column column_to_tensors conjugate_gradient ComplexValuesStochasticReconfiguration ExactSampler WaveFunctionSampler MetropolisHastingsSampler MetropolisHastingsExchange MetropolisHastingsUniform sum_correlations MetropolisHastingsLocal MetropolisHastingsSymmetricProposal MetropolisHastingsHamiltonian MetropolisHastingsGlobal _build_autoregressive_sampler ConvJacobian gradient_per_example LayerJacobian complex_values_jacobians_to_real_parts to_4d_shape JacobianManager predictions_jacobian DenseJacobian fix_tensorflow_v1_names 
clear_session_after_test netket complex_values_1d_model real_values_1d_model complex_values_linear_1d_model LinearDepthTwo real_values_2d_model complex_values_2d_model Linear test_autoregressive_have_normalize_distribution test_apply_complex_gradient test_get_predictions_jacobian test_get_complex_value_gradients transform_sample roll_sample test_make_2d_obc_invariants test_make_pbc_invariants test_build_symmetrization_ensemble test_update_ema_just_when_need test_changing_the_update_frequency test_reset_after_update test_update_just_when_need get_simple_linear_model test_ema test_equal_to_builtin_jacobian get_layer_output_with_topology_manager test_apply_layer_for_single_spatial_location test_sampler_by_l1 IdentityOperator sampler_factory test_stochastic_reconfiguration_matrix_vector_product_via_jvp pinv test_compute_wave_function_gradient_covariance_inverse_multiplication test_lncosh_gradient test_lncosh reduce_variance test_monte_carlo_update_unbalanced_local_energy test_monte_carlo_and_netket_agree test_exact_and_monte_carlo_agree to_generator time fast_jacobian batch_size SGD fit_generator MetropolisHastingsHamiltonian Model use_stochastic_reconfiguration ComplexValuesStochasticReconfiguration VariationalMonteCarlo summary depth Heisenberg Input range compile Hypercube Vmc time init_random_parameters run Sgd depth Heisenberg MetropolisHamiltonian Spin FFNN add_argument ArgumentParser run_pyket run_netket pyket_on_cpu trange next ConvNetAutoregressive2D Adam Model summary FastAutoregressiveSampler Input convert_to_accumulate_gradient_optimizer compile items min load_weights load_optimizer_weights exists set_update_params_frequency save_weights depth Input depth_to_max_mini_batch make_2d_obc_invariants Ising width copy_with_new_batch_size build_model fit_generator restore_run_state VariationalMonteCarlo update_params_frequency zip enumerate to_generator learning_rate evaluate print min TensorBoardWithGeneratorValidationData make_up_down_invariant 
default_wave_function_stats_callbacks_factory add_argument ArgumentParser CustomGraph kron append range prod Spin create_evaluation_config_parser add_argument Heisenberg add_parser create_training_config_parser func ArgumentParser pbc parse_args set_defaults add_subparsers Ising mini_batch_size load_weights weights_path set_defaults add_argument ArgumentParser str set_session init ConfigProto local_rank Session convert_to_accumulate_gradient_optimizer Adam compile print load_weights load_optimizer_weights exists num_epoch len compile_model batch_size num_epoch set_update_params_frequency save_weights Input use_horovod load_weights_if_exist set_value make_2d_obc_invariants ceil copy_with_new_batch_size vmc_cls to_valid_stages_config build_model save_config mini_batch_size size default_wave_function_stats_callbacks_factory fit_generator update_params_frequency lr zip enumerate to_generator join int learning_rate evaluate print min TensorBoardWithGeneratorValidationData make_up_down_invariant init_horovod output_path vars hasattr output_path makedirs set_defaults add_argument ArgumentParser batch_get_value getattr optimizer _make_train_function set_weights inbound_nodes tensor_indices add set zip visitor inbound_layers graph topological_sort zip append reduce_logsumexp is_complex list reduce_sum shape range len slice shape int len zip to_flat_ordering enumerate len append prod move mean keys machine_updated on_batch_end on_epoch_end num_of_batch_until_full_cycle append range zeros range zeros range zeros range prod reshape max real max reshape num_of_states state_to_number hilbert HilbertIndex range zeros dtype one_hot cumsum reshape float32 shape cast int32 fill prod tuple int isinstance lower random_normal isinstance imag real assert_valid_probabilistic_model complex exp reduce_logsumexp zeros_like angle imag reduce_mean real log predictions_model build_ensemble make_2d_obc_invariants Input build_symmetrization_ensemble build_symmetrization_ensemble Conv1D 
conv_layer WeightNormalization reshape tuple shape prod range len tuple shape prod range len isinstance CustomGraph kron append range Spin prod len get_updates print get_gradients variable __get__ type gradients as_list isinstance reshape tuple shape append prod isinstance isinstance namedtuple range append imag real conj isinstance gradients jacobian complex_values_jacobians_to_real_parts real zip reduce_logsumexp abs multiply reduce_max reduce_sum floormod roll sqrt reduce_mean real reduce_min imag clear_session Input Linear first_conv_layer second_dense_layer first_dense_layer Dense second_conv_layer Conv2D Input ComplexConv2D second_dense_layer ComplexDense first_dense_layer second_conv_layer first_conv_layer Input Input range second_dense_layer first_dense_layer Dense second_conv_layer first_conv_layer Conv1D Input rot90 flip Model Input convert_to_accumulate_gradient_optimizer compile get_sgd_iteration SGD get_simple_linear_model get_w fit get_sgd_iteration SGD set_update_params_frequency get_simple_linear_model get_w fit get_w get_simple_linear_model SGD fit get_sgd_iteration SGD get_simple_linear_model get_w set_weights_ema fit SGD get_simple_linear_model get_w set_weights_ema fit get_spatial_dependency product get_layer_topology spatial_location slice apply_layer_for_single_spatial_location concat reshape stack append zeros len list issubclass partial isinstance product Model func svd dtype concat reduce_max boolean_mask cast diag constant float_norm cosh lncosh reset_default_graph log tanh constant gradients float_norm lncosh real reset_default_graph conj to_generator fit_generator Adam compile Hypercube get_weights Vmc complex_values_linear_1d_model ExactSampler NetketOperatorWrapper advance range flatten mean sqrt exact_evaluate Sgd zeros Heisenberg ExactVariational Spin FFNN
# FlowKet - A Python framework for variational Monte-Carlo simulations on top of Tensorflow FlowKet is our framework for running variational Monte-Carlo simulations of quantum many-body systems. It supports any Keras model for representing a parameterized unnormalized wave-function, e.g., Restricted Boltzman Machines and ConvNets, with real or complex-valued parameters. We have implemented a standard Markov-Chain Monte-Carlo (MCMC) energy gradient estimator for this general case, which can be used to approximate the ground state of a quantum system according to a given Hamiltonian. The neural-network-based approach for representing wave-fucntions was shown to be a promising method for solving the many-body problem, often matching or even surpassing the precision of other competing methods. In addition to an MCMC energy gradient estimator, we have also implemented our novel Neural Autoregressive Quantum State wave-function representation that supports efficient and exact sampling. By overcoming the reliance on MCMC, our models can converge much faster for models of same size, which allows us to scale them to millions of parameters, as opposed to just a few thousands for prior approaches. This leads to better precison and ability to invesitgate larger and more intricated systems. Please [read our paper](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.124.020503) ([arXiv version](https://arxiv.org/abs/1902.04057)), cited below, for further details on this approach. We hope that users of our library will be able to take our method and apply to a variety of problems. If you use this codebase or apply our method, we would appreciate if you cite us as follows: ```bibtex @article{PhysRevLett.124.020503, title = {Deep Autoregressive Models for the Efficient Variational Simulation of Many-Body Quantum Systems}, author = {Sharir, Or and Levine, Yoav and Wies, Noam and Carleo, Giuseppe and Shashua, Amnon}, journal = {Phys. Rev. Lett.}, volume = {124}, issue = {2},
405
HYOJINPARK/HP-SPS
['semantic segmentation']
['Superpixel-based Semantic Segmentation Trained by Statistical Process Control']
Pascal_context/code/Make_Randsp.py Pascal_context/code/Reshape_layer.py Pascal_context/code/config.py Pascal_context/code/solve_test.py Pascal_context/code/solve.py Pascal_context/code/check_diff.py Pascal_context/code/sampling_feature.py Pascal_context/code/PascalContext_layers.py Pascal_context/code/score.py Pascal_context/code/save_feature.py Pascal_context/code/Make_Randsp_Test.py Pascal_context/code/solve_check.py RandSPSamplingLayer RandSPTestSamplingLayer PascalContextDataLayer ReshapeDataLayer save_feature compute_feature save_feature compute_feature delete vstack forward argmax str list transpose shape sum range astype choice unique batch print reshape float32 zeros array print compute_feature savemat share_with net savemat
# Superpixel-based Semantic Segmentation Trained by Statistical Process Control This is a caffe implementation of the paper [Superpixel-based Semantic Segmentation Trained by Statistical Process Control](https://arxiv.org/abs/1706.10071) ![model_Arch](img/model_arch.PNG) ## Additional Datasets Please download superpixel result of Pascal Context or use Matlab download [here](https://drive.google.com/file/d/1P2qUPGpwe6iCDAJxV7ltU3glh16u_Q_v/view?usp=sharing) ## Run Model install caffe from [here](https://drive.google.com/file/d/1-FcQjq3TqP6Gtie5w36NPkpOobO98xtT/view?usp=sharing) and run solve.py ## result ![ex1_img](img/ex1_img.PNG) ![ex1_gt](img/ex1_gt.jpg) ![ex1_est](img/ex1_result.PNG)
406
HaberGroup/SemiImplicitDNNs
['semantic segmentation']
['IMEXnet: A Forward Stable Deep Neural Network']
datasets/SynthSegDataset.py gen_val_plots.py datasets/nyu_depth.py gen_loss_plots.py networks/UNet.py networks/network_utils.py synth.py datasets/mytransforms.py utils.py networks/IMEXnet.py iou_pytorch plot_probs_ind gen_color_map getAccuracy receptive_field validate plot_probs getAccuracy plot_preds gen_color_map iou dataset_normalization_stats weighted_sample target_normalization_stats getAccuracy bcolors fast_iou plottable network_geometry dataset_stats Unsqueeze ToLabel OneHot CenterCropTensor ResizeTensor ToFloat WhiteNoise Relabel Squeeze GradientNoise NYUDepthV2 SynthSegGenerator ToLabel SynthSegDataset IMEXnet plottable convSame projectTorchTensor conv1x1 convSameT smooth conv1x1T conv3x3 diagImpConvFFT convDiagT misfit conv3x3T convDiag outconv up double_conv UNet down inconv numel item zeros_like colors join axis imshow plottable savefig ceil sum show subplot imshow title plottable savefig subplot imshow title plottable savefig join AverageValueMeter plot_probs print add mean item cuda range enumerate add_scalar squeeze view union1d intersect1d zeros range len data iou view max sum append T OneHot view print DataLoader zeros enumerate len print len DataLoader zeros enumerate print len DataLoader zeros enumerate view squeeze mean shape cpu tensor enumerate len rfft shape zeros abs range irfft shape int shape int mean shape range view shape shape tensor zeros convDiag array range t conv1x1
# SemiImplicitDNNs Pytorch implementation of [IMEXnet - A Forward Stable Deep Neural Network](https://arxiv.org/abs/1903.02639) demonstrated on the synthetic Qtips dataset. ## Use To run IMEXnet `python synth.py` To run the ResNet baseline `python synth.py --net_type resnet` After running both models, you can compare the results with `gen_val_plots.py` and `gen_loss_plots.py`.
407
Hadisalman/smoothing-adversarial
['adversarial defense', 'adversarial attack']
['Provably Robust Deep Learning via Adversarially Trained Smoothed Classifiers']
code/analyze.py code/train_pgd.py code/core.py code/predict.py code/certify.py code/zipdata.py code/architectures.py code/train_utils.py code/visualize.py code/datasets.py code/generate_github_result.py code/train.py code/archs/cifar_resnet.py code/attacks.py get_architecture Attacker DDN PGD_L2 Smooth get_normalize_layer _imagenet _imagenet32 TiTop50KDataset get_num_classes get_input_center_layer _imagenet_on_philly MultiDatasetsDataLoader get_dataset _cifar10 ImageNetDS NormalizeLayer InputCenterLayer main train test init_logfile AverageMeter accuracy requires_grad_ copy_code log ZipData ResNet Bottleneck conv3x3 resnet BasicBlock get_normalize_layer cuda join join join Compose join init_logfile outdir SGD DataLoader save arch dataset get_architecture cuda log StepLR get_dataset epochs noise_sd range format test join time parameters train step gpu makedirs update time format criterion model backward print size AverageMeter randn_like accuracy zero_grad item step cuda enumerate len eval time AverageMeter close write open close write open parameters join format print set mkdir copy2 walk
# Provably Robust Deep Learning via Adversarially Trained Smoothed Classifiers This repository contains the code and models necessary to replicate the results of our recent paper: **Provably Robust Deep Learning via Adversarially Trained Smoothed Classifiers** <br> *Hadi Salman, Greg Yang, Jerry Li, Huan Zhang, Pengchuan Zhang, Ilya Razenshteyn, Sebastien Bubeck* <br> Paper: https://arxiv.org/abs/1906.04584 <br> Blog post: https://decentdescent.org/smoothadv.html Our paper outperforms all existing provably L2-robust classifiers by a significant margin on ImageNet and CIFAR-10, *establishing the state-of-the-art for provable L2-defenses.*
408
Haghrah/PyIT2FLS
['time series']
['PyIT2FLS: A New Python Toolkit for Interval Type 2 Fuzzy Logic Systems']
pyit2fls/fuzzymatrix.py typereduction/examples/ex_1.py examples/ex_10.py examples/ex_3.py examples/ex_3_0.7.0.py examples/ex_12.py examples/ex_9_0.7.0.py examples/ddeintlib.py examples/ex_8.py examples/ex_15.py examples/ex_1.py examples/ex_5.py examples/ex_11_0.7.0.py examples/ex_9.py typereduction/typereduction/typereduction.py examples/ex_2.py examples/ex_6.py examples/ex_4_0.7.0.py examples/ex_11.py pyit2fls/__init__.py examples/ex_10_0.7.0.py examples/ex_7.py examples/ex_8_0.7.0.py examples/ex_4.py examples/ex_18.py examples/PyPSO.py examples/ex_14.py examples/ex_13.py examples/ex_16.py examples/ex_17.py setup.py pyit2fls/pyit2fls.py examples/ex_5_0.7.0.py typereduction/setup.py typereduction/typereduction/__init__.py ddeint dde ddeVar ddeVars error generateRuleBase error generateRuleBase y31 y12 y11 y21 y32 y41 y22 y42 error parametersGenerator Classifier velocityGenerator calculate solution_generator mackey_glass velocity_generator cost_func calculate solution_generator mackey_glass velocity_generator cost_func u model_fuzzy ts cl_sys eval_IT2FPID_BMM ITAE eval_IT2FPID_WM raw_sys u_dot eval_IT2FPID_EIASC os eval_IT2FPID_KM eval_IT2FPID_NT u model_fuzzy ts cl_sys eval_IT2FPID_BMM ITAE eval_IT2FPID_WM fuzzySystem raw_sys u_dot eval_IT2FPID_EIASC os eval_IT2FPID_KM eval_IT2FPID_NT Particle PyPSO T1FMatrix_Complement T1FMatrix T1FSoftMatrix T1FSoftMatrix_Product Minmax T1FMatrix_Intersection T1FMatrix_isUniversal T1FMatrix_isNull T1FMatrix_Union Maxmin ModiHe drastic_s_norm probabilistic_sum_s_norm TR_plot CoSet gauss_uncert_mean_lmf NT_algorithm LBMM_algorithm crisp_list Centroid zero_mf ltri_mf gauss_uncert_mean_umf gaussian_mf T1_Emphasize T1FS min_t_norm einstein_sum_s_norm IT2Mamdani T1FS_OR singleton_mf IT2FS_Elliptic IT2FS_RGaussian_UncertStd CoSum T1TSK IT2FLS WM_algorithm tri_mf trapezoid_mf gauss_uncert_std_umf drastic_t_norm nilpotent_maximum_s_norm elliptic_mf semi_elliptic_mf T1FS_plot IT2FS_plot EKM_algorithm IT2FS_Gaussian_UncertMean 
EIASC_algorithm BMM_algorithm Height IT2FS_Semi_Elliptic lgauss_uncert_std_lmf const_mf hamacher_product_t_norm WEKM_algorithm IT2FS_LGaussian_UncertStd product_t_norm lukasiewicz_t_norm rtri_mf rgauss_uncert_std_lmf TWEKM_algorithm gbell_mf crisp IT2_Emphasize join max_s_norm bounded_sum_s_norm nilpotent_minimum_t_norm gauss_uncert_std_lmf IT2FS_Gaussian_UncertStd meet rgauss_uncert_std_umf trim T1FS_AND KM_algorithm T1Mamdani IT2FS lgauss_uncert_std_umf IT2TSK WM_algorithm KM_algorithm EKM_algorithm EIASC_algorithm dde ddeVars extend set_f_params set_initial_value append range range evaluate add_rule Classifier append range append IT2FS_Gaussian_UncertStd evaluate append IT2FS_Gaussian_UncertStd evaluate evaluate evaluate evaluate evaluate evaluate u eval_func min u_dot max add_rule add_output_variable add_input_variable IT2Mamdani min maximum shape zeros range minimum shape zeros max range norm range len append f array logical_not logical_not domain params T1FS show plot xlabel grid ylabel domain params title savefig figure legend mf IT2FS domain lmf_params umf_params show plot xlabel grid ylabel upper domain lower title savefig figure legend fill_between show plot xlabel grid ylabel title savefig figure legend xlim append crisp keys IT2FS IT2FS where arange trim npsum isclose range len min trim sign npsum round max range len min sign trim round max range len append range len npsum len trim npsum min trim trim trim trim append Centroid range len lower zeros_like add upper argmax append upper argmax append zeros array zeros array zeros array zeros array len
PyIT2FLS ======== <p align="center"><img src="https://raw.githubusercontent.com/Haghrah/PyIT2FLS/master/PyIT2FLS_icon.png" width="256"/></p> NumPy and SciPy based toolkit for Type 1 and Interval Type 2 Fuzzy Logic Systems. ## Licence PyIT2FLS is published under MIT license. If you are using the developed toolkit, please cite preprint of our paper [PyIT2FLS: A New Python Toolkit for Interval Type 2 Fuzzy Logic Systems](https://arxiv.org/abs/1909.10051). BibTeX: @misc{haghrah2019pyit2fls, title={PyIT2FLS: A New Python Toolkit for Interval Type 2 Fuzzy Logic Systems}, author={Amir Arslan Haghrah and Sehraneh Ghaemi},
409
HaipengXiong/weighted-hausdorff-loss
['object localization']
['Locating Objects Without Bounding Boxes']
object-locator/losses.py object-locator/metrics.py object-locator/data.py object-locator/__main__.py object-locator/locate.py object-locator/argparser.py object-locator/train.py object-locator/models/unet_pix2pix.py object-locator/models/unet_model.py object-locator/logger.py setup.py object-locator/get_image_size.py object-locator/models/unet_parts.py parse_command_args CSVDataset ScaleImageAndLabel RandomVerticalFlipImageAndLabel hflip XMLDataset _is_pil_image vflip RandomHorizontalFlipImageAndLabel csv_collator UnknownImageFormat Image get_image_size get_image_metadata Test_get_image_size main Logger cdist WeightedHausdorffDistance AveragedHausdorffLoss _assert_no_grad averaged_hausdorff_distance Judge UNet outconv up double_conv down inconv UnetGenerator UnetSkipConnectionBlock pop parse add_argument_group print add_argument exit imgsize resume ArgumentParser abspath append parse_args save append stack get_image_metadata getsize run_tests getLogger json_indent output_func verbose DEBUG to_str_row_verbose basicConfig add_option quiet parse_args to_str_row partial OptionParser json debug ERROR get_image_metadata pformat INFO to_str_json print error print_help len sqrt unsqueeze average min array pairwise_distances
# A loss function (Weighted Hausdorff Distance) <br>for object localization This repository contains the PyTorch implementation of the Weighted Hausdorff Loss described in this paper: [Weighted Hausdorff Distance: A Loss Function For Object Localization](https://arxiv.org/abs/1806.07564) ![Some object centers](https://raw.githubusercontent.com/javiribera/weighted-hausdorff-loss/master/fig/dots.png) ## Abstract Recent advances in Convolutional Neural Networks (CNN) have achieved remarkable results in localizing objects in images. In these networks, the training procedure usually requires providing bounding boxes or the maximum number of expected objects. In this paper, we address the task of estimating object locations without annotated bounding boxes, which are typically hand-drawn and time consuming to label. We propose a loss function that can be used in any Fully Convolutional Network (FCN) to estimate object locations. This loss function is a modification of the Average Hausdorff Distance between two unordered sets of points. The proposed method does not require one to "guess" the maximum number of objects in the image, and has no notion of bounding boxes, region proposals, or sliding windows. We evaluate our method with three datasets designed to locate people's heads, pupil centers and plant centers. We report an average precision and recall of 94% for the three datasets, and an average location error of 6 pixels in 256x256 images. ## Citation J. Ribera, D. G&uuml;era, Y. Chen, E. Delp, "Weighted Hausdorff Distance: A Loss Function For Object Localization", arXiv preprint [arXiv:1806.07564](https://arxiv.org/abs/1806.07564), June 2018
410
HangZhouShuChengKeJi/text-detection-ctpn
['scene text detection']
['Detecting Text in Natural Image with Connectionist Text Proposal Network']
utils/rpn_msr/anchor_target_layer.py utils/rpn_msr/config.py utils/rpn_msr/proposal_layer.py utils/text_connector/text_connect_cfg.py utils/text_connector/text_proposal_connector.py nets/vgg.py utils/bbox/setup.py utils/dataset/data_util.py main/web.py utils/text_connector/text_proposal_graph_builder.py utils/dataset/data_provider.py utils/prepare/utils.py utils/text_connector/text_proposal_connector_oriented.py demo/demo.py main/train.py utils/bbox/bbox_transform.py main/ctpn.py utils/text_connector/detectors.py utils/text_connector/other.py nets/model_train.py utils/rpn_msr/generate_anchors.py utils/prepare/split_label.py CTPN main anchor_target_layer model Bilstm lstm_fc smooth_l1_dist mean_image_subtraction make_var loss vgg_16 vgg_arg_scope clip_boxes bbox_transform bbox_transform_inv generator get_training_data load_annoataion get_batch GeneratorEnqueuer pickTopLeft shrink_poly orderConvex _unmap _compute_targets anchor_target_layer Config generate_anchors generate_basic_anchors scale_anchor _filter_boxes proposal_layer _filter_irregular_boxes TextDetector clip_boxes threshold Graph Config TextProposalConnector TextProposalConnector TextProposalGraphBuilder trainable_variables checkpoint_path pretrained_model_path moving_average_decay Saver get_variable global_variables merge_all strftime placeholder apply apply_gradients get_default_graph FileWriter logs_path get_trainable_variables assign_from_checkpoint_fn ConfigProto int learning_rate Variable now float32 AdamOptimizer ExponentialMovingAverage global_variables_initializer gpu scalar makedirs range split Bilstm reshape lstm_fc conv2d shape mean_image_subtraction softmax anchor_target_layer REGULARIZATION_LOSSES not_equal reshape get_collection float32 where sparse_softmax_cross_entropy_with_logits reduce_sum shape smooth_l1_dist reduce_mean cast add_n gather equal scalar transpose log dtype exp astype shape zeros minimum maximum join format endswith print append walk len append map split load_annoataion 
arange subplots show shape imshow get_training_data imread format close shuffle tight_layout splitext join print reshape set_yticks rectangle set_xticks array split generator get is_running start sleep GeneratorEnqueuer argsort reshape pickTopLeft convex_hull int min append max range arange RPN_BBOX_INSIDE_WEIGHTS _unmap argmax max RPN_FG_FRACTION generate_anchors ones transpose shape array meshgrid sum RPN_BATCHSIZE format hstack ascontiguousarray choice sqrt fill empty RPN_POSITIVE_WEIGHT int EPS print reshape RPN_CLOBBER_POSITIVES _compute_targets zeros bbox_overlaps len fill empty zeros array int32 scale_anchor copy append nms generate_anchors RPN_POST_NMS_TOP_N format arange print reshape meshgrid transpose clip_boxes bbox_transform_inv _filter_boxes hstack shape RPN_NMS_THRESH RPN_PRE_NMS_TOP_N RPN_MIN_SIZE threshold
# text-detection-ctpn 基于 CTPN 的文本检测。在 [text-detection-ctpn/](https://github.com/eragonruan/text-detection-ctpn/) 项目基础上做了些优化工作。 # 编译配置 ## 环境配置 ```sh # 创建环境 conda create --name python3 python=3.7 # 切换到新创建的环境 conda activate python3 # 升级 pip 版本
411
Hanjun-Dai/graph_adversarial_attack
['adversarial attack']
['Adversarial Attack on Graph Structured Data']
code/graph_attack/rl_common.py code/common/dnn.py code/common/modules/custom_mod.py code/node_classification/node_utils.py code/data_generator/gen_er_components.py code/node_attack/node_genetic.py code/graph_attack/genetic_algorithm.py code/common/functions/custom_func.py code/graph_attack/grad_attack.py code/graph_classification/er_components.py code/graph_attack/q_net.py code/node_attack/exhaust_attack.py code/graph_attack/nstep_replay_mem.py code/node_attack/q_net_node.py code/node_attack/node_grad_attack.py code/node_classification/gcn.py code/node_attack/node_rand_attack.py code/graph_attack/dqn.py code/common/graph_embedding.py code/data_generator/data_util.py code/graph_attack/collect_rl_results.py code/graph_attack/plot_dqn.py code/node_attack/node_dqn.py code/common/build.py code/common/cmd_args.py code/node_attack/node_attack_common.py code/graph_classification/graph_common.py code/graph_attack/er_trivial_attack.py code/node_attack/plot_node_grad_attack.py code/node_classification/gcn_modules.py code/common/test.py save_args build_kwargs MLPRegression MLPClassifier GraphClassifier EmbedMeanField gnn_spmm MySpMM S2VGraph EmbedLoopyBP argmax cpu_test gpu_test JaggedMax GraphDegreeNorm GraphLaplacianNorm JaggedArgmax JaggedLogSoftmax JaggedMaxModule JaggedArgmaxModule JaggedLogSoftmaxModule load_pkl g2txt get_component Agent propose_attack GeneticAgent propose_attack NstepReplaySubMemCell hash_state_action NstepReplayMem NstepReplayMemCell Agent NStepQNet QNet greedy_actions load_base_model attackable get_supervision load_graphs test_graphs GraphEdgeEnv load_er_data loop_dataset recur_gen_edges gen_modified check_attack_rate gen_khop_edges ModifiedGraph load_base_model init_setup NodeAttakEnv Agent NodeGeneticAgent propose_del propose_add propose_del propose_add NStepQNetNode node_greedy_actions QNetNode adj_generator GraphConvolution GCNModule S2VNodeClassifier preprocess_features load_txt_data load_raw_graph load_binary_data sparse_to_tuple GraphNormTool 
chebyshev_polynomials parse_index_file StaticGraph run_test mod backward Variable log_softmax print rand abs grad from_numpy mean JaggedLogSoftmaxModule sum array range mod backward Variable log_softmax print grad JaggedLogSoftmaxModule cpu sum cuda range JaggedArgmaxModule mod print Variable manual_seed cuda graph_laplacian_norm_cuda ones _values _indices graph_laplacian_norm mm cuda is_cuda graph_degree_norm ones _values _indices graph_degree_norm_cuda mm cuda is_cuda neighbors write range len add_edge nodes shuffle choice erdos_renyi_graph randint range len add_edge add_edges_from model to_networkx nodes copy choice append label S2VGraph range len zero_grad flatten cuda mlp s2v num_nodes PrepareFeatureLabel backward argsort numpy label_map directed_edges JaggedMaxModule Variable jmax min clone append numpy range len int print n_graphs load_pkl min_c range max_c len list print range loop_dataset len load base_model_dump GraphClassifier test_graphs load_state_dict cuda add_edge print to_networkx nodes copy classifier edges append label S2VGraph range len zeros range attackable len backward step zero_grad tqdm set_description classifier append float sum array range len int print n_graphs load_pkl min_c range max_c len sum get_extra_adj ones Variable directed_edges float write tqdm normed_adj set_description open gcn norm_extra range append len ModifiedGraph range len ModifiedGraph range recur_gen_edges len mod saved_model eval run_test load_base_model load_txt_data LongTensor load_raw_graph Variable normed_adj dataset argmax cuda data_folder add_edge get_extra_adj graph ModifiedGraph Variable base_model argsort numpy norm_extra len add_edge get_extra_adj graph ModifiedGraph Variable base_model argsort numpy norm_extra len LongTensor Variable append max range len int LongTensor FloatTensor ones hstack shuffle get_gsize cuda edges del_rate norm_extra array range len append int strip open format isfile format lil_matrix from_dict_of_lists tolil tuple sort min tolist 
parse_index_file vstack zeros max range len list load_raw_graph from_dict_of_lists csr_matrix loadtxt to_tuple range isinstance len diags FloatTensor Size sparse_to_tuple contiguous astype float32 flatten dot cuda Tensor sum array list normalize_adj format chebyshev_recurrence print eye append range eigsh format gcn print eval float sum len
# graph_adversarial_attack Adversarial Attack on Graph Structured Data (https://arxiv.org/abs/1806.02371, to appear in ICML 2018). This repo contains the code, data and results reported in the paper. ### 1. download repo and data First clone the repo recursively, since it depends on another repo (https://github.com/Hanjun-Dai/pytorch_structure2vec): git clone [email protected]:Hanjun-Dai/graph_adversarial_attack --recursive (BTW if you have trouble downloading it because of permission issues, please see [this issue](https://github.com/Hanjun-Dai/graph_adversarial_attack/issues/2) ) Then download the data using the following dropbox link: https://www.dropbox.com/sh/mu8odkd36x54rl3/AABg8ABiMqwcMEM5qKIY97nla?dl=0 Put everything under the 'dropbox' folder, or create a symbolic link with name 'dropbox':
412
HansBambel/SmaAt-UNet
['weather forecasting']
['SmaAt-UNet: Precipitation Nowcasting using a Small Attention-UNet Architecture']
test_precip_lightning.py create_datasets.py train_precip_lightning.py metric/__init__.py models/layers.py models/unet_parts_depthwise_separable.py metric/confusionmatrix.py metric/iou.py models/regression_lightning.py models/SmaAt_UNet.py utils/data_loader_precip.py models/unet_precip_regression_lightning.py metric/metric.py train_SmaAtUNet.py utils/dataset_VOC.py utils/dataset_precip.py models/unet_parts.py create_dataset get_model_class get_model_loss get_model_losses print_persistent_metrics plot_losses get_persistence_metrics get_batch_size train_regression get_lr fit ConfusionMatrix IoU Metric SpatialAttention ChannelAttention DepthwiseSeparableConv DoubleDSConv Flatten DepthToSpace CBAM DoubleDense SpaceToDepth Precip_regression_base UNet_base SmaAt_UNet Up OutConv DoubleConv Down DoubleConvDS DownDS UpDS OutConv UNet_Attention UNetDS_Attention UNet UNetDS UNetDS_Attention_4CBAMs precipitation_maps_oversampled_h5 precipitation_maps_h5 precipitation_maps_classification_h5 VOCSegmentation decode_segmap get_pascal_labels get_train_valid_loader get_test_loader UNet_Attention UNetDS_Attention BackbonedUNet UNet UNetDS UNetDS_Attention_4CBAMs to eval mse_loss l1_loss view squeeze l1_loss tqdm bincount mse_loss print precipitation_maps_oversampled_h5 DataLoader get_persistence_metrics precipitation_maps_oversampled_h5 get_model_class get_model_loss tqdm dict print_persistent_metrics DataLoader load_from_checkpoint show list xlabel ylabel bar title figure xticks keys UNet_Attention print UNetDS_Attention scale_batch_size Trainer UNet UNetDS UNet_Attention UNetDS_Attention EarlyStopping UNet Trainer summary ModelCheckpoint LearningRateLogger TensorBoardLogger UNetDS fit param_groups model zero_grad loss_func save to get_lr range SummaryWriter eval train enumerate time backward print add_scalar tqdm IoU step makedirs show copy imshow get_pascal_labels zeros range seed int list Compose SubsetRandomSampler shuffle precipitation_maps_h5 
precipitation_maps_classification_h5 DataLoader floor range len DataLoader precipitation_maps_h5 precipitation_maps_classification_h5
# SmaAt-UNet Code for the Paper "SmaAt-UNet: Precipitation Nowcasting using a Small Attention-UNet Architecture" [Arxiv-link](https://arxiv.org/abs/2007.04417), [Elsevier-link](https://www.sciencedirect.com/science/article/pii/S0167865521000556?via%3Dihub) ![SmaAt-UNet](SmaAt-UNet.png) The proposed SmaAt-UNet can be found in the model-folder under [SmaAt_UNet](models/SmaAt_UNet.py). --- For the paper we used the [Pytorch-Lightning](https://github.com/PyTorchLightning/pytorch-lightning) -module (PL) which simplifies the training process and allows easy additions of loggers and checkpoint creations. In order to use PL we created the model [UNetDS_Attention](models/unet_precip_regression_lightning.py) whose parent inherits from the pl.LightningModule. This model is the same as the pure PyTorch SmaAt-UNet implementation with the added PL functions. ### Training An example [training script](train_SmaAtUNet.py) is given for a classification task (PascalVOC). For training on the precipitation task we used the [train_precip_lightning.py](train_precip_lightning.py) file.
413
HaoZhongkai/AS_Molecule
['molecular property prediction', 'active learning']
['ASGN: An Active Semi-supervised Graph Neural Network for Molecular Property Prediction']
geo_al/dist_test.py base_model/__init__.py qbc_learn/qbc.py utils/pre/op_savedata.py base_model/layers.py utils/data_clustering/smilarity.py baseline/active-learning/sampling_methods/informative_diverse.py baseline/active-learning/utils/small_cnn.py pre_training/cls_ptr.py utils/data_clustering/ot_clustering.py qbc_learn/test_procon.py single_model_al/sampler.py bayes_al/bayes_learn.py baseline/active-learning/sampling_methods/utils/__init__.py pre_training/w_ptr_r.py config.py pre_training/sch_embeddings.py pre_training/non_ptrain_cls.py pre_training/w_ptr.py baseline/active-learning/sampling_methods/hierarchical_clustering_AL.py baseline/active-learning/sampling_methods/margin_AL.py pre_training/test_xlsx.py pre_training/node_ptr.py single_model_al/wsl_al.py baseline/active-learning/utils/kernel_block_solver.py utils/funcs.py utils/pre/sparse_molecular_dataset.py base_model/sch.py utils/pre/opv_savedata.py pre_training/w_ptr_part.py utils/pre/opvpre_data.py utils/data_utils.py baseline/active-learning/sampling_methods/represent_cluster_centers.py pre_training/train_part_cls.py pre_training/train_part.py rd_learn/rd_al.py baseline/active-learning/sampling_methods/bandit_discrete.py baseline/active-learning/sampling_methods/graph_density.py pre_training/train_part_msg.py geo_al/geo_learn.py baseline/active-learning/sampling_methods/kcenter_greedy.py bayes_al/bald.py bayes_al/mm_sch.py baseline/active-learning/__init__.py utils/pre/xyz2mol.py geo_al/k_center.py single_model_al/run_al.py utils/pre/pre_qm.py base_model/train_base.py bayes_al/mc_sch.py utils/pre/generate_pkls.py baseline/active-learning/utils/utils.py baseline/active-learning/sampling_methods/utils/tree.py pre_training/train_part_transfer.py baseline/active-learning/sampling_methods/constants.py pre_training/graph_ae.py qbc_learn/test_manager.py baseline/active-learning/utils/create_data.py utils/pre/test_data.py baseline/active-learning/sampling_methods/uniform_sampling.py 
baseline/active-learning/run_experiment.py utils/data_clustering/__init__.py bayes_al/msk_sch.py exp/ac_info_exp/plt_data.py baseline/active-learning/sampling_methods/sampling_def.py utils/data_clustering/smilarity_sch.py utils/pre/time_pre.py __init__.py pre_training/semi_supervised_learn.py utils/pre/oppre_data.py baseline/active-learning/sampling_methods/utils/tree_test.py geo_al/k_center_cifar10.py baseline/active-learning/sampling_methods/__init__.py baseline/active-learning/sampling_methods/wrapper_sampler_def.py pre_training/ot_pretrain.py baseline/active-learning/utils/allconv.py baseline/active-learning/sampling_methods/simulate_batch.py utils/pre/qm9_predata.py pre_training/wsch.py baseline/active-learning/utils/__init__.py base_model/schmodel.py qbc_learn/model.py baseline/active-learning/sampling_methods/mixture_of_samplers.py utils/pre/parallelload.py geo_al/embedding_model.py pre_training/ot_unsupervised.py baseline/active-learning/utils/chart_data.py utils/prepare_data.py Global_Config make_args main generate_one_curve BanditDiscreteSampler get_mixture_of_samplers get_all_possible_arms get_AL_sampler get_base_AL_mapping get_wrapper_AL_mapping GraphDensitySampler HierarchicalClusterAL InformativeClusterDiverseSampler kCenterGreedy MarginAL MixtureOfSamplers RepresentativeClusterMeanSampling SamplingMethod SimulateBatchSampler UniformSampling WrapperSamplingMethod Node Tree TreeTest AllConv get_normalize get_scoring_method get_sampling_method plot_results combine_results get_between main get_standardize get_cifar10 get_csv_data get_mldata get_wikipedia_talk_data get_keras_data main Dataset BlockKernelSolver SmallCNN filter_data calculate_entropy get_mldata get_train_val_test_splits flip_label create_checker_unbalanced Logger get_model flatten_X get_class_counts VEConv Interaction EdgeEmbedding CFConv RBFLayer ShiftSoftplus AtomEmbedding MultiLevelInteraction SchNetModel Interaction SchInteraction EdgeEmbedding SchNet CFConv RBFLayer ShiftSoftplus 
SchNetModel AtomEmbedding train test Bayes_sampler get_preds bald_learn finetune test EdgeEmbedding RBFLayer ShiftSoftplus MC_CFConv MC_SchNetModel AtomEmbedding MC_Interaction MM_Interaction EdgeEmbedding CFConv RBFLayer ShiftSoftplus AtomEmbedding MM_SchNetModel MM_Interaction EdgeEmbedding Msk_SchNetModel CFConv RBFLayer ShiftSoftplus AtomEmbedding Interaction EdgeEmbedding CFConv RBFLayer ShiftSoftplus AtomEmbedding SchEmbedding get_preds k_center_learn finetune test K_center finetune CNN_Cifar get_preds test k_center_learn get_preds train get_preds train test train test get_preds train test get_preds train get_preds train test Interaction EdgeEmbedding CFConv RBFLayer ShiftSoftplus AtomEmbedding SchEmbedding get_preds train get_pesudo_labels test train test train test train test get_preds train test Semi_Schnet WSchnet_N WSchnet_G WSchnet MM_WSchnet_R WSchnet_R get_preds train get_preds train get_preds train test Interaction EdgeEmbedding CFConv RBFLayer ShiftSoftplus SchNetModel AtomEmbedding qbc_test finetune default_collate_override get_preds Commitee qbc_active_learning tar TestClass consumer producer random_data_sampler rd_active_learning train test active_learning check_point_test Trainer Weakly_Supervised_Trainer AL_sampler save_cpt_xlsx get_preds_w Inferencer active_learning AlchemyBatcher batcher TencentAlchemyDataset batcher_n Molecule_MD k_means batcher_g MoleDataset mol2nx FMolDataSet load_dataset SelfMolDataSet k_center RefDataParallel get_mol Cifar batcher nx2mol Molecule k_medoids_pp AccMeter get_atom_ref chunks k_medoid pairwise_L2 get_dataset_from_files get_centers smi2vec get_preds smi2vec preprocess_all dist save_elements process_data save_all get_elem preprocess_all dist save_elements process_data save_all get_elem get_M get_mol M get_M get_mol M get_M get_mol M valences_not_too_large dist xyz2AC get_proto_mol chiral_stereo_check AC2mol BO_is_OK get_UA_pairs get_atom get_atomic_charge get_atomicNumList get_BO get_data_elem AC2BO 
read_xyz_file get_bonds xyz2mol BO2mol getUA set_atomic_radicals set_atomic_charges clean_charges SparseMolecularDataset preprocess_all dist save_elements process_data save_all get_elem get_mols bd_mol Molecule get_mol valences_not_too_large dist xyz2AC get_proto_mol chiral_stereo_check AC2mol BO_is_OK get_UA_pairs get_atom get_atomic_charge get_atomicNumList get_BO get_data_elem AC2BO read_xyz_file get_bonds xyz2mol BO2mol getUA set_atomic_radicals set_atomic_charges clean_charges parse_args add_argument ArgumentParser get_train_val_test_splits score max seed str list len sampler ceil normalize append range unique int print min extend select_batch transform fit batch_size MkDir trials Logger dataset save_dir flush_file seed str sampling_method get_mldata GFile data_dir strftime train_horizon range dump generate_one_curve Glob warmstart_size gmtime select_method score_method join get_AL_sampler get_model to_dict partial print load pop list isinstance FastGFile tuple vstack append list sorted plot keys dict mean sqrt legend zip fill_between std range len rfind len find use print len PdfPages combine_results plot_results standardize title savefig source_dir normalize close split replace GFile Dataset strip append array split read_table Dataset apply TfidfTransformer download_file array CountVectorizer fit_transform concatenate reshape transpose flatten load_data Dataset load read BytesIO seek concatenate Dataset write close download_file getnames array StringIO open data fetch_rcv1 load_iris MkDir target save_dir GFile fetch_mldata transpose fetch_20newsgroups_vectorized fit_transform dump get_wikipedia_talk_data get_keras_data TfidfTransformer get_cifar10 join get_csv_data load_breast_cancer datasets int T concatenate ones uniform vstack zeros range shape reshape load flatten create_checker_unbalanced append sort unique list shuffle copy delete unique append array range GridSearchCV AllConv SmallCNN SVC LinearSVC LogisticRegression BlockKernelSolver int entropy len 
ceil range append get_class_counts seed int arange min shuffle copy flip_label len L1Loss MAE_fn zero_grad DataLoader set_mean_std use_tb multi_gpu squeeze len MSELoss add epochs append to range format param_groups test mean lr item batch enumerate int AverageValueMeter backward print reset loss_fn step std add_scalar L1Loss eval AverageValueMeter MSELoss time format std print train mean DataLoader to numpy set_mean_std L1Loss MAE_fn zero_grad DataLoader set_mean_std use_tb squeeze MSELoss bald_ft_epochs add append to range format mean item batch enumerate AverageValueMeter backward print add_scalar reset loss_fn train step std len std mean DataLoader to set_mean_std save_model init_data_num query save dataset use_tb MoleDataset list finetune Bayes_sampler len append range format get_preds test zip sample save_model_path int print extend batch_data_num dict add_scalar cat k_center_ft_epochs save_model init_data_num query save dataset use_tb MoleDataset K_center list finetune len append range format get_preds test zip sample save_model_path int print extend batch_data_num dict add_scalar argmax sum CrossEntropyLoss ConfusionMeter ConfusionMeter CrossEntropyLoss init_model CNN_Cifar Cifar parameters random_query optimizer_ number_of_nodes model argmax k_means re_init_head sum CrossEntropyLoss value get_preds ConfusionMeter AccMeter cpu randint node_classifier prop pi abs log loss_r loss_mae exp ones edges sinkhorn copy sqrt number_of_edges requires_grad_ tile softmax long time edge_classifier prop float get_pesudo_labels time format std print mean DataLoader to set_mean_std cat Linear erf get concatenate set_device device get qbc_ft_epochs parameters optimizer_ L1Loss MSELoss mean DataLoader zeros qbc_test save_model id put prop_name save dataset use_tb Process list query_ids dir FMolDataSet append range format set Manager start stack Commitee Queue save_model_path int time join add_scalar print extend batch_data_num query_dataset len print getpid format put get 
getpid format print int list datas build mols sample range MoleDataset len parameters optimizer_ deepcopy list format save_model add_scalar print random_data_sampler dataset test dict save zip append train save_model_path range use_tb len save_model init_data_num Trainer query save dataset get_label_ids Inferencer MoleDataset run use_tb list finetune append range format test AL_sampler zip sample save_model_path check_point_test int add_scalar print batch_data_num dict save_cpt_xlsx len time format print get_level DataLoader to cat L1Loss MAE_fn zero_grad DataLoader set_mean_std squeeze Adam MSELoss add append to range format param_groups mean item loss_fn batch enumerate AverageValueMeter backward print parameters reset SchNetModel train step std len ExcelWriter concat min to_excel save append DataFrame range len generate_p_labels prop_name get_preds_w SelfMolDataSet Weakly_Supervised_Trainer long k_medoid get_unlabeled_ids cpu join BuildFeatureFactory RDDataDir zeros unsqueeze append int range len load concatenate open load print Molecule stack append open list stack zip list cat zip list cat zip t expand list format print argmin index_add_ sample zeros float pairwise_L2 range len argmax int time list format ones print choice unsqueeze append sum range list format print float choice index_add_ device zeros to max range list format print argmin get_centers sample k_center pairwise_L2 range time format print argmin index_add_ pairwise_L2 zeros float k_center range len GetIdx add_edge Graph GetBeginAtomIdx GetEndAtomIdx GetAtoms GetBonds add_node AddAtom get_edge_attributes SetFormalCharge SetIsAromatic SetChiralTag nodes SetNumExplicitHs SetHybridization Atom AddBond edges RWMol get_node_attributes MolFromSmiles GetMorganFingerprintAsBitVect int list asarray dict zip append array range split zeros norm range process_data dist dump preprocess_all print PATH open PATH lower append zip enumerate list sum copy getUA sum zip get_atomic_charge list append sum enumerate 
count HasSubstructMatch CombineMols MolFromSmarts ReactionFromSmarts SanitizeMol RunReactants GetMolFrags enumerate get int list GetMol SINGLE round AddBond set_atomic_radicals set_atomic_charges sum RWMol range len get_atomic_charge int SetFormalCharge GetAtomWithIdx clean_charges enumerate count get_atomic_charge int GetAtomWithIdx SetNumRadicalElectrons abs enumerate append tuple sorted enumerate combinations add_edges_from list int Graph get_bonds set append len list defaultdict product copy get_BO getUA append sum BO_is_OK get_UA_pairs asarray BO2mol AC2BO append range AddAtom str GetMol MolFromSmarts Atom RWMol range len append get_atom GetAtomicNum GetAtomWithIdx get_proto_mol Conformer AddConformer Get3DDistanceMatrix astype GetRcovalent GetPeriodicTable SetAtomPosition GetNumAtoms range len DetectBondStereochemistry AssignStereochemistry SanitizeMol AssignAtomChiralTagsFromStructure AC2mol chiral_stereo_check xyz2AC list get_atomicNumList dist read_xyz_file dict zip xyz2mol array deepcopy extend _build_ful
<p align="center"> <img src="pic.jpg" width="1000"> <br /> <br /> </p> # ASGN The official implementation of the ASGN model. Orginal paper: ASGN: An Active Semi-supervised Graph Neural Network for Molecular Property Prediction. KDD'2020 Accepted. # Project Structure + `base_model`: Containing SchNet and training code for QM9 and OPV datasets.
414
HaodiJiang/SolarUnet
['semantic segmentation']
['Identifying and Tracking Solar Magnetic Flux Elements with Deep Learning']
solarunet.py statistics_analysis.py magnetic_tracking.py calculate_element_minimum_distance_old RoI put_num_on_element check_same_pos_neg forward find_the_radius element_region find_nearest_n_neighbor_contour element_pos_neg create_elements_flux_size_dict event_tracking_backward draw_contour_line_event size_filter_v1 check_with_threshold event_tracking_forward find_contour_line check_with_threshold_2 draw_contour_line_event_half calculate_element_average_distance find_nearest_n_neighbor create_elements_flux_dict find_and_draw_contour_line_new magnetic_tracking element_moveing_distance two_way calculate_element_minimum_distance size_filter backward pre_calulate_flux_and_contour read_data_files create_elements_contour_dict adjust_data pre_processing model_predicting test_generator plot_mask train_generator validation_generator solarUnet save_result model_training post_processing conv2_block plot_tracking_results analysis flux_to_MX pixel_to_Mm statistics_analysis_lifetime statistics_analysis_area_flux read_feature_lifetime_list_file data len bitwise_not flipud imread listdir open size_filter ones create_elements_flux_dict label create_elements_contour_dict range range range len len set map intersection len len append shape tolist asarray range len str LINE_AA putText FONT_HERSHEY_SIMPLEX mean round euclidean ceil shape euclidean ceil shape euclidean find_contour_line range len range len range len euclidean KDTree min query append array dict calculate_element_minimum_distance list keys dict calculate_element_minimum_distance list keys combinations list time find_nearest_n_neighbor_contour draw_contour_line_event RoI mean put_num_on_element round check_same_pos_neg abs range append len combinations list time find_nearest_n_neighbor_contour draw_contour_line_event_half draw_contour_line_event RoI mean put_num_on_element round check_same_pos_neg abs range append len element_region abs check_with_threshold_2 event_tracking_forward RoI mean put_num_on_element 
check_same_pos_neg check_with_threshold find_and_draw_contour_line_new round range append len element_region abs check_with_threshold_2 event_tracking_backward RoI mean put_num_on_element check_same_pos_neg check_with_threshold find_and_draw_contour_line_new round keys range append len check_same_pos_neg time element_region abs check_with_threshold_2 event_tracking_forward event_tracking_backward RoI mean put_num_on_element append check_with_threshold find_and_draw_contour_line_new round keys range len time format imwrite backward print pre_calulate_flux_and_contour read_data_files append forward range two_way concatenate Input Model load_weights conv2_block adjust_data dict flow_from_directory zip ImageDataGenerator adjust_data dict flow_from_directory zip ImageDataGenerator join reshape shape resize imread listdir join format imwrite round enumerate data listdir format imwrite print shape flipud nan fill empty range open data format imwrite close shape flipud empty nan fill imread listdir range open show subplots set_title set_xlabel imshow set_ylabel savefig imread show set_label subplots set_title axes set_xlabel subplots_adjust colorbar imshow set_ylabel savefig Normalize append tick_params imread listdir train_generator solarUnet fit_generator ModelCheckpoint compile test_generator print predict_generator save_result solarUnet data subplots tick_params abs values open show sorted list ones savefig legend imread readsav create_elements_flux_dict flipud label size_filter xlabel text hist show sorted readsav xlabel yticks ylabel hist savefig figure legend xticks read_feature_lifetime_list_file range len statistics_analysis_area_flux statistics_analysis_lifetime
# Identifying and Tracking Solar Magnetic Flux Elements with Deep Learning Haodi Jiang, Jiasheng Wang, Chang Liu, Ju Jing, Hao Liu, Jason T. L. Wang and Haimin Wang Institute for Space Weather Sciences, New Jersey Institute of Technology ## Abstract Deep learning has drawn significant interest in recent years due to its effectiveness in processing big and complex observational data gathered from diverse instruments. Here we propose a new deep learning method, called SolarUnet, to identify and track solar magnetic flux elements or features in observed vector magnetograms based on the Southwest Automatic Magnetic Identification Suite (SWAMIS). Our method consists of a data pre-processing component that prepares
415
HaohanWang/HFC
['adversarial attack']
['High Frequency Component Helps Explain the Generalization of Convolutional Neural Networks']
utility/attackHelper.py scripts/__init__.py utility/__init__.py scripts/resnet.py utility/frequencyHelper.py utility/dataLoader.py utility/pgd_attack.py predictAdvDataModelComparisions conv_bn_relu_layer create_variables oneHotRepresentation bias_variable DataSubset getSaveName AugmentedCIFAR10Data AugmentedDataSubset fft max_pool_2x2 output_layer conv2d batch_normalization_layer fftshift ResNet CIFAR10Data trainMadry main activation_summary train bn_relu_conv_layer attackModel residual_block weight_variable loadDataCifar10 oneHotRepresentation loadDataCifar10AdvFast LinfPGDAttack predictAdvDataModelComparisions conv_bn_relu_layer create_variables oneHotRepresentation bias_variable DataSubset getSaveName AugmentedCIFAR10Data AugmentedDataSubset fft max_pool_2x2 output_layer conv2d batch_normalization_layer fftshift ResNet CIFAR10Data trainMadry main activation_summary train bn_relu_conv_layer attackModel residual_block weight_variable loadDataCifar10 loadDataCifar10AdvFast LinfPGDAttack append zeros range truncated_normal_initializer constant_initializer name zero_fraction histogram scalar l2_regularizer get_variable create_variables matmul batch_normalization_layer conv2d create_variables relu batch_normalization_layer conv2d create_variables relu pad avg_pool load str batch_size minimize ResNet CIFAR10Data float32 placeholder get_collection TRAINABLE_VARIABLES AdamOptimizer range shuffle append frequency max loss load str epsilon batch_size minimize ResNet CIFAR10Data float32 placeholder get_collection TRAINABLE_VARIABLES AdamOptimizer LinfPGDAttack append loss loadDataCifar10 ResNet float32 placeholder getSaveName print loadDataCifar10AdvFast ResNet float32 placeholder loadDataCifar10 save distance3 zeros range print dumps trainMadry vars train load astype load
# High Frequency Component Helps Explain the Generalization of Convolutional Neural Networks **[H. Wang, X. Wu, Z. Huang, and E. P. Xing. "High frequency component helps explain the generalization of convolutional neural networks." CVPR 2020 (Oral).](https://arxiv.org/abs/1905.13545)** **[\[Slides\]](https://drive.google.com/file/d/1fetghgWA91seJHvBmeVQI5AaB-yyfbwN/view?usp=sharing)** and **[\[Poster\]](https://github.com/HaohanWang/HFC/blob/master/poster.png)** ## Highlights |Fig. 1: The central hypothesis of our paper: within a data collection, there are correlations between the highfrequency components and the “semantic” component of the images. As a result, the model will perceive both high-frequency components as well as the “semantic” ones, leading to generalization behaviors counterintuitive to human (e.g., adversarial examples).|<img src="main.png" alt="main hypothesis of the paper" width="1600" height="whatever"> |:--|---| <img src="intro.gif" alt="HFC helps explain CNN generaliation" width="1000" height="whatever"> **Fig. 2: Eight testing samples selected from CIFAR10 that help explain that CNN can capture the high-frequency image: the model (ResNet18) correctly predicts the original image (1st column in each panel) and the high frequency reconstructed image (3rd column in each panel), but incorrectly predict the low-frequency reconstructed image (2nd column in each panel). The prediction confidences are also shown. Details are in the paper.** <details>
416
HaojiHu/TIFUKNN
['session based recommendations']
['Modeling Personalized Item Frequency Information for Next-basket Recommendation']
RNN_vector_addition_experiment/RNN_addition_new_batch.py TIFUKNN.py add_history temporal_decay_sum_history predict_with_elements_in_input most_frequent_elements get_precision_recall_Fscore partition_the_data weighted_aggragate_outputs get_NDCG1 get_F_score softmax merge_history generate_dictionary_BA KNN main get_DCG KNN_history_record2 group_history_list temporal_decay_add_history merge_history_and_neighbors_future evaluate KNN_history_record1 read_claim2vector_embedding_file_no_vector get_HT partition_the_data_validate embedding_layer neural_adding asMinutes timeSince trainIters one_hot_vec neural_adding_linear train custom_MultiLabelLoss_torch representation_decoder zeros zeros power range len append kneighbors keys fit softmax append zeros range len list print len append zeros kneighbors keys range fit list print len append zeros kneighbors keys range fit int print ceil floor append zeros round range len str print power group_history_list append zeros range len append print len append str print len append zeros range append range str print field_size_limit maxsize keys print zeros append generate_dictionary_BA range len range len str print mean append range len int sum range exp max zeros range len zeros range len temporal_decay_sum_history predict_with_elements_in_input get_NDCG1 mean get_precision_recall_Fscore merge_history append zeros KNN range len int list str evaluate print read_claim2vector_embedding_file_no_vector float flush partition_the_data_validate one_hot Variable to cuda float long criterion backward cumsum initHidden transpose zero_grad shape unsqueeze encoder float step cuda range floor time int time str permutation Adadelta print Adam RMSprop SGD mean parameters append train custom_MultiLabelLoss_torch std range flush len
# TIFUKNN This is our implementation for the paper: Haoji Hu, Xiangnan He, Jinyang Gao, Zhi-Li Zhang (2020). Modeling Personalized Item Frequency Information for Next-basket Recommendation.[Paper in ACM DL](https://dl.acm.org/doi/pdf/10.1145/3397271.3401066) or [Paper in arXiv](https://arxiv.org/pdf/2006.00556.pdf). In the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. **Please cite our paper if you use our codes and datasets. Thanks!** ``` @inproceedings{hu2020modeling, title={Modeling personalized item frequency information for next-basket recommendation}, author={Hu, Haoji and He, Xiangnan and Gao, Jinyang and Zhang, Zhi-Li}, booktitle={Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval}, pages={1071--1080},
417
HaoranREN/EnvNet_v1_v2_TensorFlow_Keras
['data augmentation']
['Learning from Between-class Examples for Deep Sound Recognition']
EnvNet_v2.py EnvNet_v1.py EnvNet_v1_data_utils.py EnvNet_v2_data_utils.py lr_schedule build_model train_augment random_window split_dataset label_categorical sliding_windows_for_testing val_augment Val_Sequence Train_Sequence lr_schedule build_model train_augment random_window split_dataset label_categorical sliding_windows_for_testing val_augment Val_Sequence Train_Sequence Reshape Sequential MaxPooling1D add Dense MaxPooling2D Conv2D Conv1D InputLayer Activation BatchNormalization Flatten Dropout print zeros range append split shuffle label_categorical arange min choice max len reshape reshape reshape min delete shape max
# EnvNet_v1_v2_TensorFlow_Keras An implementation of [EnvNet_v1](https://ieeexplore.ieee.org/document/7952651) and [EnvNet_v2](https://arxiv.org/abs/1711.10282) in Python with TensorFlow Keras. Train an example with [ESC-50](https://github.com/karolpiczak/ESC-50) dataset. ## Requirements - Numpy - Scipy - librosa (0.7+) - TensorFlow (1.14+)
418
Happy-Virus-IkBeom/LTH_Tensorflow
['network pruning']
['The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks']
foundations/union.py mnist_fc/runners/train.py foundations/trainer.py mnist_fc/constants.py datasets/dataset_mnist.py mnist_fc/reinitialize.py foundations/pruning.py mnist_fc/locations.py mnist_fc/argfiles/lottery_experiment_argfile.py foundations/paths.py mnist_fc/train.py foundations/model_base.py mnist_fc/lottery_experiment.py mnist_fc/argfiles/reinitialize_argfile.py mnist_fc/runners/reinitialize.py foundations/model_fc.py mnist_fc/download_data.py mnist_fc/runners/lottery_experiment.py foundations/dataset_base.py setup.py foundations/save_restore.py foundations/pruning_test.py argfile_runner.py foundations/experiment.py main run DatasetMnist DatasetSplit DatasetBase experiment ModelBase ModelFc initial masks final trial summaries log run prune_by_percent PruningTest standardize save_network write_log restore_network read_log train union intersect graph trial initialization run main download train train train main main main main main call check_output split Fire items list ones prune_masks shape train_once range items list prune_by_percent_once items list Exists DeleteRecursively MakeDirs ListDirectory string_types isinstance array log run initial GFile get_train_handle final get global_variables_initializer close FileWriter save_network get_current_weights minimize get_test_handle masks get_validate_handle training_loop summaries loss iteritems iteritems save_network load_data ModelFc partial experiment prune_by_percent PRUNE_PERCENTS HYPERPARAMETERS items list maybe_restore where choice shape standardize DatasetMnist placeholders union Session OPTIMIZER_FN print format range initial experiment masks run train
# The Lottery Ticket Hypothesis ## Authors This codebase was developed by Jonathan Frankle and David Bieber at Google during the summer of 2018. ## Background This library reimplements and extends the work of Frankle and Carbin in "The Lottery Ticket Hypothesis: Finding Small, Trainable Neural Networks" (https://arxiv.org/abs/1803.03635). Their paper aims to explore why we find large, overparameterized networks easier to train than the smaller networks we can find by pruning or distilling. Their answer is the lottery ticket
419
HareeshBahuleyan/probabilistic_nlg
['text generation']
['Stochastic Wasserstein Autoencoder for Probabilistic Sentence Generation']
dialog/pathsetup.py dialog/ved/ved.py snli/w2v_generator.py snli/wae-det/predict.py snli/wae-stochastic/predict.py dialog/wed-det/model_config.py snli/vae/pathsetup.py snli/wae-stochastic/gl.py dialog/wed-det/pathsetup.py dialog/wed-stochastic/model_config.py evaluate_latent_space.py snli/wae-det/det_wae.py snli/wae-stochastic/model_config.py snli/wae-stochastic/pathsetup.py dialog/wed-stochastic/predict.py snli/decoder/basic_decoder.py dialog/ved/train.py snli/vae/predict.py snli/wae-det/train.py snli/wae-stochastic/stochastic_wae.py dialog/ved/gl.py snli/pathsetup.py utils.py snli/wae-det/model_config.py dialog/ved/model_config.py dialog/wed-stochastic/pathsetup.py dialog/wed-det/predict.py dialog/wed-det/train.py dialog/wed-det/det_wed.py dialog/data/DailyDial/original/parser.py dialog/w2v_generator.py dialog/wed-stochastic/train.py snli/vae/vae.py dialog/wed-stochastic/gl.py dialog/decoder/basic_decoder.py snli/wae-det/pathsetup.py dialog/wed-det/gl.py snli/wae-det/gl.py snli/vae/train.py snli/vae/model_config.py snli/wae-stochastic/train.py dialog/wed-stochastic/stochastic_wed.py snli/vae/gl.py dialog/ved/predict.py dialog/ved/pathsetup.py get_avg_sent_lengths get_unigram_dist calc_discrete_kl calc_discrete_entropy get_batches calculate_bleu_scores calculate_entropy tokenize_sequence clean_sentence get_batches_xy plot_2d create_data_split get_sentences create_embedding_matrix calculate_ngram_diversity run_path_setup main create_w2v load_data main parse_data BasicDecoder BasicDecoderOutput model_argparse run_path_setup VEDModel DetWEDModel model_argparse run_path_setup model_argparse run_path_setup StochasticWEDModel run_path_setup main BasicDecoder BasicDecoderOutput model_argparse run_path_setup VAEModel DetWAEModel model_argparse run_path_setup model_argparse run_path_setup StochasticWAEModel FreqDist join word_tokenize list dict keys len list keys list keys round corpus_bleu FreqDist ngram_fd from_words N len FreqDist items list len fit_on_texts list 
pad_sequences texts_to_sequences dict any append keys Tokenizer enumerate load items uniform list lower strip sub range len range len list range show set_size_inches subplots TSNE grid PCA scatter array legend fit_transform append getcwd str list format reset_index print concat create_w2v load_data mkdir listdir append read_csv shuffle Word2Vec save join zip endswith print len exit write close split encode enumerate open getopt exit parse_data str config add_argument config_fingerprint write realpath dirname ArgumentParser mkdir open vars flush parse_args shuffle get_sentences Word2Vec save len
# Stochastic Wasserstein Autoencoder for Probabilistic Sentence Generation ![](https://img.shields.io/badge/python-3.6-brightgreen.svg) ![](https://img.shields.io/badge/tensorflow-1.3.0-orange.svg) This is the official codebase for the following paper, implemented in tensorflow: Hareesh Bahuleyan, Lili Mou, Hao Zhou, Olga Vechtomova. **Stochastic Wasserstein Autoencoder for Probabilistic Sentence Generation.** NAACL 2019. https://arxiv.org/pdf/1806.08462.pdf ## Overview This package contains the code for two tasks - SNLI Generation (`snli` : autoencoder models) - Dialog Generation (`dialog` : encoder-decoder models) For the above tasks, the code for the following models have been made available: 1. Variational autoencoder (`vae`) / Variational encoder-decoder (`ved`)
420
HareeshBahuleyan/tf-var-attention
['text generation']
['Natural Language Generation with Neural Variational Models']
ved_varAttn/train.py ved_detAttn/train.py ved_varAttn/ved_varAttn.py ded_detAttn/train.py ved_detAttn/model_config.py ved_varAttn/model_config.py ved_varAttn/varAttention_decoder/attention_wrapper.py utils/eval_utils.py ved_varAttn/varAttention_decoder/decoder.py ved_detAttn/detAttention_decoder/basic_decoder.py ded_detAttn/ded_detAttn.py ded_detAttn/model_config.py w2v_generator.py utils/data_utils.py ved_varAttn/varAttention_decoder/basic_decoder.py ved_detAttn/ved_detAttn.py main create_w2v load_data DetSeq2SeqDetAttnModel train_model get_batches create_data_split tokenize_sequence create_embedding_matrix calculate_bleu_scores calculate_entropy calculate_ngram_diversity train_model VarSeq2SeqDetAttnModel BasicDecoder BasicDecoderOutput train_model VarSeq2SeqVarAttnModel _bahdanau_score _maybe_mask_score _BaseMonotonicAttentionMechanism safe_cumprod _compute_attention _monotonic_probability_fn _prepare_memory _BaseAttentionMechanism BahdanauAttention AttentionWrapperState hardmax _luong_score AttentionWrapper LuongMonotonicAttention monotonic_attention LuongAttention AttentionMechanism BahdanauMonotonicAttention BasicDecoder BasicDecoderOutput Decoder dynamic_decode _create_zero_outputs list format reset_index print concat create_w2v load_data mkdir listdir append read_csv shuffle Word2Vec save format tokenize_sequence print DetSeq2SeqDetAttnModel concat create_data_split create_embedding_matrix train read_csv len fit_on_texts list pad_sequences texts_to_sequences dict any append keys Tokenizer enumerate load items uniform list print list range range len round corpus_bleu FreqDist ngram_fd from_words N len FreqDist items list len VarSeq2SeqDetAttnModel VarSeq2SeqVarAttnModel convert_to_tensor sequence_mask map_structure dtype squeeze matmul expand_dims get_variable dtype rsqrt square reduce_sum expand_dims get_variable convert_to_tensor safe_cumprod cumsum concat transpose scan clip_by_value cumprod zeros dtype sigmoid shape cast random_normal count_nonzero exp 
squeeze concat attention_mechanism identity matmul reduce_sum shape scalar_mul cast attention_layer expand_dims random_normal values
# Variational Attention for Sequence to Sequence Models ![](https://img.shields.io/badge/python-3.6-brightgreen.svg) ![](https://img.shields.io/badge/tensorflow-1.3.0-orange.svg) This is the official codebase for the following paper, implemented in tensorflow: Hareesh Bahuleyan*, Lili Mou*, Olga Vechtomova, and Pascal Poupart. **Variational Attention for Sequence-to-Sequence Models.** COLING 2018. https://arxiv.org/pdf/1712.08207.pdf ## Overview This package consists of 3 models, each of which have been organized into separate folders: 1. Deterministic encoder-decoder with deterministic attention (`ded_detAttn`) 2. Variational encoder-decoder with deterministic attention (`ved_detAttn`) 3. Variational encoder-decoder with variational attention (`ved_varAttn`) ## Datasets
421
HarrieO/2020ictir-evaluation
['selection bias']
['Taking the Counterfactual Online: Efficient and Unbiased Online Evaluation for Ranking']
rankergeneration.py utils/EMPBM.py OI.py utils/dataset.py PI.py utils/variance.py utils/clicks.py policies/optimizedinterleaving.py LogOpt.py models/neural.py utils/logopt.py utils/ranking.py ABtest.py CTRdistribution.py policies/plackettluce.py utils/pretrained_models.py models/linear.py TDI.py ABcounterfactual.py oraclepolicy.py calc_sub_loss optimize train_ranker calc_true_loss LinearModel NeuralModel update_index OptimizedInterleaving optimize_for_query optimize_policy gradient_based_on_samples sample_rankings sample_rankings_query compute_weights weights_from_clicks sample_clicks add_clicks generate_clicks sample_from_click_probs get_relevance_click_model inverse_rank_prob bernoilli_sample_from_probs _add_zero_to_vector DataFoldSplit DataSet get_dataset_from_json_info DataFold train_click_model optimize_logging_policy read_many_models read_model model_rank_and_invert data_split_rank_and_invert data_split_rank_and_invert_tiebreak_model data_split_model_rank_and_invert rank_and_invert model_score many_models_data_split_rank_and_invert many_models_rank_and_invert oracle_data_split_list_variance oracle_doc_variance oracle_list_variance data_split_rank_and_invert rel_prob_f feature_matrix dot label_vector mean float64 query_labels query_range astype dot shape log2 query_feat zeros sum enumerate permutation logical_not log2 abs log exp rel_prob_f less_equal calc_sub_loss calc_true_loss less sum normal fill_diagonal num_features inf label_vector validation minimum time dot logical_or query_feat zeros train diag permutation num_features optimize num_queries zeros num_queries concatenate astype query_range flatten int32 append zeros sum optimize_for_query range minimum items list concatenate ones update_index linprog stack zeros keys range x sum exp arange greater_equal cumsum min range mean uniform tile empty log NINF query_range exp arange min range tile zeros sum log NINF uniform float replace split arange num_docs num_queries relevance_click_prob cumsum reshape 
query_labels choice sample_from_click_probs get_relevance_click_model inverse_rank_prob zeros sum arange num_queries cumsum reshape size float64 astype maximum zeros float sum max amax minimum int arange num_queries size query_range choice zeros float sum arange num_queries float64 size astype query_range copy zeros sum max amax int num_features feature_matrix greater choice log10 log_likelihood_update full sum NeuralModel range predict oracle_list_variance all num_queries gradient_update score_query query_range range choice mean query_feat gradient_based_on_samples sample_rankings sum equal query_size zeros num_features len zeros float enumerate split uniform empty arange arange num_docs num_queries rank_and_invert zeros feature_matrix model_score feature_matrix model_score model_score empty rank_and_invert range model_score data_split_model_rank_and_invert empty num_docs range mean exp arange min log range mean at tile zeros sum bernoilli_sample_from_probs NINF num_queries sample_ranking_f query_range sum range
# Taking the Counterfactual Online: Efficient and Unbiased Online Evaluation for Ranking This repository contains the code used for the experiments in "Taking the Counterfactual Online: Efficient and Unbiased Online Evaluation for Ranking" published at ICTIR 2020 ([preprint available](https://arxiv.org/abs/2007.12719)). Citation -------- If you use this code to produce results for your scientific publication, or if you share a copy or fork, please refer to our ICTIR 2020 paper: ``` @inproceedings{oosterhuis2020taking, title={Taking the Counterfactual Online: Efficient and Unbiased Online Evaluation for Ranking}, author={Harrie Oosterhuis and Maarten de Rijke}, year={2020},
422
HassamChundrigar/Urdu-Ocr
['optical character recognition', 'scene text recognition']
['An End-to-End Trainable Neural Network for Image-based Sequence Recognition and Its Application to Scene Text Recognition']
utils.py dense_to_text
# Urdu Textline OCR It is a minimal version of my research project on Urdu OCR. This code can convert your Urdu "textline" (check test.jpg) into Urdu text. ## Requirements: opencv-python==4.2.0.34 tensorflow==1.15.2 install it via pip or just use $pip install -r requirements.txt also install jupyter notebook, $pip install jupyter notebook ## How to Run The Code: This is the minimal or deployment code with the abstraction of the Neural Network Graph. Simply run the cells of the notebook, or change the path of the image
423
Hatmm/PED-DETR-for-Pedestrian-Detection
['pedestrian detection']
['DETR for Crowd Pedestrian Detection']
dqrf/positional_encoding.py convert_ch.py dqrf/dqrf_detr.py dqrf/utils/uni_evaluator.py dqrf/utils/dataset_mapper.py dqrf/ops/functions/ms_deform_attn.py dqrf/config.py dqrf/__init__.py dqrf/utils/ch_transform.py dqrf/ops/setup.py dqrf/backbone.py dqrf/criterion.py dqrf/utils/validation_set.py dqrf/utils/ch_evalutor.py dqrf/transformer.py dqrf/ops/src/cpu/__init__.py dqrf/utils/metric_writer.py dqrf/ops/functions/local_attn.py dqrf/matcher.py dqrf/ch_criterion.py dqrf/utils/get_crowdhuman_dicts.py dqrf/utils/box_ops.py test_local_product_cpu.py dqrf/utils/utils.py train_net.py parse_args Convert2POD TestLocalProductCPU main setup Trainer Neck build_deformable_detr_backbone Joiner Conv2d_GN MaskedBackbone SetCriterion sigmoid_focal_loss add_dataset_path add_dqrf_config SetCriterion sigmoid_focal_loss ch_detector_postprocess DQRF_DETR build_matcher IgnoreMatcher_vbox build_vanilla_matcher HungarianMatcher IgnoreMatcher build_position_encoding PositionEmbeddingSine PositionEmbeddingSine_highfreq PositionEmbeddingLearned Transformer TransformerDecoderLayer TransformerDecoder build_transformer TransformerEncoder TransformerEncoderLayer get_extensions MultiHeadAttention DenseQueryAttention SamplingEncAttention SamplingAttention_RA clones LocalWeightedAverage LocalDotProduct generalized_box_iou calIoU generalized_box_iou_ xywh2xyxy regression box_xyxy_to_cxcywh masks_to_boxes box_iof calIof xyxy2xywh box_iou box_cxcywh_to_xyxy CrowdHumanEvaluator instances_to_crowdhuman_json RandomErasing RandomSelect CenterCrop ToTensor hflip Compose RandomCrop RandomResize RandomSizeCrop Normalize RandomHorizontalFlip resize crop RandomCropCH DqrfDatasetMapper CH_DqrfDatasetMapper build_transform_gen make_transforms get_crowdhuman_dicts TrainingMetricPrinter PeriodicWriter_withInitLoss InitMetricPrinter get_scale_factor mMR_FPPI_evaluator is_dist_avail_and_initialized ImageReader MLP ImageMeta _get_clones accuracy get_cur_image_dir FileSystemPILReader NestedTensor _get_activation_fn 
ValidationLoss_2 TestDistributedSampler ValidationLoss build_detection_val_loader add_argument ArgumentParser get join format print strip tqdm image_root loads write_json_file output_file append imread input_file open merge_from_file add_dataset_path get_crowdhuman_dicts IMG_PATH_TRAIN ANNOT_PATH_VAL config_file add_dqrf_config get_cfg set merge_from_list default_setup IMG_PATH_VAL opts freeze register ANNOT_PATH_TRAIN setup resume_or_load build_model test WEIGHTS Trainer eval_only build_position_encoding Joiner MaskedBackbone Neck sigmoid ones_like binary_cross_entropy_with_logits CN CN pred_boxes isinstance has Instances stack scale Tensor float proposal_boxes PositionEmbeddingSine HIDDEN_DIM PositionEmbeddingSine_highfreq PositionEmbeddingLearned chunk chunk unbind unbind clamp min max box_area zeros exp shape clamp min box_area expand_as max minimum reshape maximum minimum reshape maximum max min box_iou clamp max min box_iou clamp meshgrid unsqueeze arange int pred_boxes scores zip pred_classes item append round len all reshape clamp min copy prod any append tensor as_tensor size copy as_tensor flip all tuple size get_size reshape copy any append tensor as_tensor cat str RandomFlip MIN_SIZE_TRAIN_SAMPLING info getLogger MIN_SIZE_TEST MIN_SIZE_TRAIN ResizeShortestEdge MAX_SIZE_TRAIN MAX_SIZE_TEST append MIN_SIZE_TEST Compose MIN_SIZE_TRAIN MAX_SIZE_TRAIN MAX_SIZE_TEST SIZE getLogger min max topk size t eq mul_ expand_as append sum max DistributedSampler get_world_size DataLoader MapDataset BatchSampler get_detection_dataset_dicts DatasetMapper DatasetFromList
## PED: DETR for Crowd Pedestrian Detection [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) ![](readme/fig.jpeg) Code for PED: DETR For (Crowd) Pedestrian Detection ## Paper [PED: DETR for Crowd Pedestrian Detection](https://arxiv.org/abs/2012.06785) ## Installation The codebases are built on top of [Detectron2](https://github.com/facebookresearch/detectron2), [DETR](https://github.com/facebookresearch/detr), [Deformable DETR](https://github.com/fundamentalvision/Deformable-DETR) and [Fast-Transformer](https://github.com/idiap/fast-transformers) ## License PED is released under MIT License.
424
HazyResearch/anchor-stability
['word embeddings']
['Understanding the Downstream Instability of Word Embeddings']
scripts/analysis/utils.py scripts/embedding_generation/compression_experiment_wiki.py anchor/embedding.py anchor/utils.py notebooks/plot_utils.py scripts/analysis/selection_criterion.py scripts/model_training/train_downstream.py scripts/analysis/dim_stability_analysis.py scripts/model_training/gen_model_cmds.py setup.py tests/utils.py tests/test_embedding.py scripts/analysis/fit_trend.py scripts/analysis/get_correlation.py scripts/analysis/create_anchors.py scripts/embedding_generation/gen_compressed.py scripts/analysis/gather_results.py scripts/analysis/diff_to_oracle.py Embedding create_logger load_vocab load_vocab_list LogFormatter plt_correlations plt_single plt_csv main compute_diff_to_oracle parse_args main ner_stability run_seed get_final_dist_tag get_dist_tag main parse_args solve_lstsq_combine_prec solve_lstsq_combine main parse_args solve_lstsq_combine_dim main parse_args read gather_all_tasks main parse_args get_corr compute_sel_results parse_args main get_selection_error check_sent_complete run_task check_ner_complete main parse_args main parse_args main parse_args evaluate_sentiment predict_ner main parse_args evaluate_ner EmbeddingTest clean_files append split open int split open setFormatter getLogger addHandler LogFormatter StreamHandler DEBUG setLevel INFO FileHandler yscale minorticks_off errorbar ScalarFormatter xlabel xscale ylabel title set_major_formatter legend xticks enumerate yscale subplot minorticks_off errorbar xlabel xscale ylabel subplots_adjust title figure legend enumerate minorticks_off errorbar ScalarFormatter xlabel xscale ylabel set_major_formatter xticks read_csv print Embedding add_argument get_subembeds_same_vocab ArgumentParser parse_args add_argument ArgumentParser append sorted set compute_diff_to_oracle csv_file read_csv float readlines check_ner_complete zip validation exp no_norm random dist seed_test symmetric compressed wiki no_align lr dim compressed pip_loss sem_disp ner_stability same_norm open eis fro_norm 
Embedding sum seed_test knn float load validation print eigen_overlap array len compressed reset_index compress_type bitrate DataFrame get_final_dist_tag get_dist_tag out append Pool dim to_pickle enumerate makedirs ones log2 zeros enumerate values len ones log2 zeros enumerate values len ones log2 zeros enumerate values len prec solve_lstsq_combine_prec solve_lstsq_combine csv_files solve_lstsq_combine_dim read_pickle join read concat merge append split gather_all_tasks to_csv ds_metrics emb_metrics get_corr reset_index abs print argmin index argmax range len compute_sel_results float open check_output algo range align base_emb_path emb_path shape dirname save_embeddings compress_uniform resultdir finetune dataset seed_test split homedir predict train_ner eval_ner train_sentiment seed evaluate_sentiment predict_ner glob evaluate_ner manual_seed is_available check_output
# Understanding the Downstream Instability of Word Embeddings Code for "Understanding the Downstream Instability of Word Embeddings" in MLSys 2020. ## Install We recommend using a virtualenv or a conda environment. virtualenv: ``` virtualenv -p python3.6 anchor_venv source anchor_venv/bin/activate ``` conda:
425
HazyResearch/model-patching
['skin cancer classification', 'data augmentation']
['Model Patching: Closing the Subgroup Performance Gap with Data Augmentation']
augmentation/dataflows/utils.py augmentation/datasets/custom/celeba_128.py augmentation/datasets/utils.py augmentation/utilities/losses.py augmentation/models/resnet.py augmentation/methods/robust/train.py augmentation/autoaugment/policies.py augmentation/utilities/wandb.py augmentation/methods/cyclegan/models.py augmentation/utilities/config.py augmentation/utilities/metrics.py augmentation/utilities/labelers.py augmentation/datasets/custom/waterbirds.py augmentation/utilities/optim.py augmentation/datasets/custom/mnist_correlation.py augmentation/utilities/checkpoint.py augmentation/methods/cyclegan/train.py augmentation/utilities/utils.py augmentation/autoaugment/augmentation_transforms.py augmentation/augment/static.py augmentation/datasets/custom/tfrecords.py augmentation/datasets/custom/mnist.py augmentation/utilities/visualize.py augmentation/methods/robust/utils.py augmentation/methods/cyclegan/utils.py augmentation/models/models.py augmentation/utilities/eval.py augmentation/augment/utils.py create_multiple_train_eval_static_augmentation_pipelines PretrainedMNISTCycleGANStaticAugmentationPipeline compose_static_augmentations split_batch_size PretrainedCycleGANStaticAugmentationTFRecordPipeline StaticAugmentation ConcatenateStaticAugmentation PretrainedCycleGANStaticAugmentationPipeline PretrainedExternalGANStaticAugmentationTFRecordPipeline create_static_augmentation_pipeline create_static_augmentation_pipelines PretrainedDefaultCycleGANStaticAugmentationPipeline BinaryMNISTWandbModelPseudolabelPartition create_multiple_static_augmentation_pipelines PretrainedMNISTCycleGANAugmentationPipeline create_augmentation_pipelines OnlyImageNetPreprocessingPipeline ResizeImage ImageNetPreprocessingPipeline PretrainedGenerativeModelAugmentationPipeline AutoAugmentPipeline unshuffle_data GenerativeAugmentationPipeline CIFAR10PreprocessingPipeline create_augmentation_pipeline WandbModelPseudoLabelingPipeline RandomPolicyImageAugmentationPipeline 
ImgAugAugmentationPipeline BasicImagePreprocessingPipeline TandaPipeline NoAugmentationPipeline compose_augmentations create_multiple_train_eval_augmentation_pipelines AutoAugmentCIFAR10Pipeline AugmentationPipeline create_multiple_augmentation_pipelines BinaryMNISTWandbModelPseudoLabelingPipeline PretrainedCycleGANBatchBalancingAugmentationPipeline HeuristicImageAugmentationPipeline shuffle_and_split_data PretrainedCycleGANAugmentationPipeline cutout_numpy _translate_y_impl random_flip create_cutout_mask pil_batch_wrap _cutout_pil_impl TransformFunction _rotate_impl _shear_x_impl float_parameter _posterize_impl zero_pad_and_crop apply_policy _enhancer_impl _translate_x_impl _solarize_impl pil_wrap pil_batch_unwrap _crop_impl TransformT _shear_y_impl int_parameter pil_unwrap good_policies create_paired_direct_dataflow benchmark create_parallel_dataflow_via_numpy build_basic_data_pipeline dataflow_len create_paired_parallel_dataflow_via_numpy create_direct_dataflow fetch_list_of_datasets fetch_list_of_data_generators_for_trainer dataset_len create_data_generator load_custom_dataset generate_dataset_split decode_raw_image get_processed_dataset_info create_multiple_data_generators load_dataset fetch_list_of_train_datasets fetch_list_of_eval_datasets show_dataset_info get_dataset_aliases apply_modifier_to_dataset_payload load_dataset_using_tfds apply_modifier_command_to_dataset apply_modifier_to_dataset fetch_datasets_for_trainer get_dataset_from_list_files_dataset get_celeba_dataset_len read_celeba_tfrecord get_label_selection_function load_celeba_128 load_mnist_spurious_variants load_mnist_combined load_mnist_spurious _bytestring_feature _int_feature _float_feature image_label_to_tfrecord read_image_label_tfrecord load_base_variant get_label_selection_function get_waterbirds_dataset_len load_waterbirds read_waterbirds_tfrecord unet_generator mnist_discriminator mnist_unet_generator train_step_generator train_step_discriminator _train_cyclegan train_cyclegan wgan_loss 
discriminator_loss generator_loss build_mnist_cyclegan_models build_optimizers ReplayBuffer get_models_from_input_shape generate_and_log_one_image_batch build_cyclegan_models cycle_loss create_cyclegan_data_generator identity_loss build_gan_loss_fn build_models gradient_penalty get_loss_info _train_robust_model train_step_robust setup_and_train_robust_model train_robust_model GDROLoss irm_loss_rescale reload_run irm_penalty_gradient irm_penalty_scheduler consistency_penalty irm_penalty_explicit rewrite_config_for_resumption consistency_penalty_scheduler log_robust_train_step_to_wandb simple_cnn_model simple_model freeze_all_layers_except_last_linear_layer reinitialize_last_linear_layer create_keras_classification_model resnet_v2 get_resnet_model resnet_v1 resnet_layer compile_keras_models load_tf_optimizer_state save_tf_optimizer_state recursively_create_config_simple_namespace create_config_simple_namespace subtract_simple_namespaces load_yaml_config preprocess_yaml_config pretty_print_simple_namespace update_simple_namespace evaluate_model configure_pseudolabeler apply_pseudolabeler decay_weights create_loss_fn test_irm test_auc ConfusionMatrix test_mauc SparseCategoricalCrossentropy MultiLabelAUC test_recall BinaryCrossentropy MultiLabelRecall AUC create_metrics reset_metrics IRMPenalty log_metric_to_wandb Recall test_bce update_metrics MultiLabelBinaryAccuracy log_metrics_to_wandb Accuracy test_mlba LinearDecay build_optimizer build_lr_scheduler set_global_seeds set_gpu_growth create_logical_gpus basic_setup checkpoint gallery load_pretrained_keras_model_from_wandb fetch_all_wandb_run_ids load_pretrained_keras_classification_model get_most_recent_model_file load_most_recent_keras_model_weights particular_checkpoint_step_extractor load_wandb_run augmentation list print zip range len create_static_augmentation_pipeline len len f create_augmentation_pipeline len len permutation len zeros randint ones randint zeros create_cutout_mask append pil_wrap reshape size 
where append pil_unwrap int_parameter int_parameter float_parameter float_parameter int_parameter int_parameter crop resize int_parameter load create_cutout_mask range int_parameter time print perf_counter sleep range print DataFromGenerator reset_state MapDataComponent MapData start BatchData RepeatedData DataFromGenerator RepeatedData print reset_state shuffle SelectComponent MapData start JoinData AUTOTUNE prefetch unbatch BatchData batch list MultiProcessMapData DataFromList SelectComponent MapDataComponent start BatchData zip append numpy array RepeatedData list MultiProcessMapData DataFromList SelectComponent start JoinData BatchData zip append numpy array RepeatedData append map AUTOTUNE prefetch print batch TFRecordDataset cpu_count map batch int test_dataset concatenate shuffle val_dataset filter repeat take apply_modifier_to_dataset split train_dataset test_dataset apply_modifier_command_to_dataset val_dataset int num_classes float num_examples num_domains shape classes ceil domains download_and_prepare builder show as_dataset show_examples info int as_dataset show_dataset_info any load_dataset_using_tfds generate_dataset_split dataset_info get_processed_dataset_info load_dataset print fetch_datasets_for_trainer append zip fetch_list_of_datasets sum zip fetch_list_of_datasets zip print compose_static_augmentations min fetch_list_of_eval_datasets create_multiple_data_generators fetch_list_of_train_datasets len dataset_len cache shuffle strftime repeat create_parallel_dataflow_via_numpy AUTOTUNE prefetch unbatch create_direct_dataflow print create_data_generator zip append enumerate startswith update map_fn parse_example parse_single_example decode_raw_image int SimpleNamespace concatenate list_files get_label_selection_function map filter take unbatch get_celeba_dataset_len test_dataset concatenate train_dataset filter load_dataset int num_classes concatenate num_examples shape filter load_dataset SimpleNamespace test_dataset num_classes concatenate 
num_examples train_dataset val_dataset shape load_dataset SimpleNamespace parse_single_example decode_raw_image map_fn parse_example parse_single_example decode_raw_image map_fn parse_example cache list_files get_label_selection_function filter unbatch load_base_variant SimpleNamespace get_waterbirds_dataset_len int Conv2DTranspose random_normal_initializer up concat last reversed down log2 zip ceil Input append Concatenate concatenate Input random_normal_initializer Conv2DTranspose random_normal_initializer up concat last reversed down zip append Input Concatenate test_dataset test_daug_pipeline batch_size output_init source_dataset_version prev_wandb_run_id validation_frac create_augmentation_pipelines val_daug_pipeline_args load_wandb_run _train_cyclegan gan_loss dataset_info norm_type name get_processed_dataset_info load_dataset source_dataset_modifier build_gan_loss_fn load_most_recent_keras_model_weights load_tf_optimizer_state prev_wandb_project target_dataset_version val_daug_pipeline cache_dir create_cyclegan_data_generator apply_modifier_to_dataset_payload resume datadir init compile_keras_models target_dataset_modifier build_models input_shape build_optimizers test_daug_pipeline_args target_dataset train_dataset prev_wandb_entity get_most_recent_model_file history source_dataset train_daug_pipeline summary dataflow residual_outputs basic_setup train_daug_pipeline_args makedirs train_step_generator generate_and_log_one_image_batch save_tf_optimizer_state range save_weights save train_step_discriminator log constant _gradient_penalty loss_fn ones_like zeros_like unet_generator discriminator mnist_discriminator mnist_unet_generator get_models_from_input_shape build_lr_scheduler Adam strftime generator_g generator_f log fetch_list_of_data_generators_for_trainer reload_run id dtype build_lr_scheduler build_optimizer gdro_adj_coef augmentation_training create_metrics range loss_name metric_names cache_dir glob baseline_batch_size gdro_lr momentum resume init 
compile_keras_models create_loss_fn set_floatx optimizer join remove get_loss_info gdro_mixed print basic_setup makedirs GDROLoss list print zip values len convert_to_tensor list partial save_tf_optimizer_state irm_penalty_scheduler tuple consistency_penalty_scheduler map log_metrics_to_wandb log numpy save zip log_robust_train_step_to_wandb update_metrics range reset_metrics enumerate template_config config recursively_create_config_simple_namespace train_robust_model dump __dict__ _config_path id run wandb_entity wandb_project open convert_to_tensor load name print get_most_recent_model_file history load_tf_optimizer_state load_most_recent_keras_model_weights particular_checkpoint_step_extractor load_wandb_run reshape _aliases log zip sparse_categorical_crossentropy reduce_logsumexp reduce_sum softmax convert_to_tensor watch gradient Model Input Sequential add Dense MaxPooling2D Conv2D Activation Flatten Dropout get print Architecture simple_cnn_model output sigmoid Model softmax startswith summary Dense layers reversed isinstance layers isinstance reversed Dense set_weights Conv2D conv int add Model Input range resnet_layer int add Model Input range resnet_layer resnet_v2 resnet_v1 _make_train_function compile zip open open preprocess_yaml_config SimpleNamespace prefix items keys update SimpleNamespace __dict__ parent_template load_yaml_config _recurse load_yaml_config __dict__ update_simple_namespace print items update_metrics reset_metrics model print list configure_pseudolabeler split_datasets_by_pseudolabels SparseCategoricalCrossentropy MultiLabelAUC IRMPenalty ConfusionMatrix Recall Accuracy BinaryCrossentropy MultiLabelRecall append AUC MultiLabelBinaryAccuracy log log_wandb argmax update_state convert_to_tensor update_state print result reset_states argmax AUC convert_to_tensor update_state print Recall result reset_states argmax convert_to_tensor update_state print result reset_states MultiLabelBinaryAccuracy range count convert_to_tensor update_state 
print result BinaryCrossentropy convert_to_tensor IRMPenalty update_state print result convert_to_tensor MultiLabelAUC update_state print result reset_states range LinearDecay PiecewiseConstantDecay PolynomialDecay CosineDecay seed set_seed set_global_seeds name set_printoptions device create_logical_gpus list_logical_devices print set_virtual_device_configuration list_physical_devices len list_logical_devices print set_memory_growth list_physical_devices len save int reshape shape sqrt floor runs Api Api run print restore name name print get_most_recent_model_file load_weights sleep load_most_recent_keras_model_weights load_wandb_run load_most_recent_keras_model_weights create_keras_classification_model load_wandb_run split
# Model Patching: Closing the Subgroup Performance Gap with Data Augmentation ![Model patching pipeline](assets/model_patching.jpg "Model patching pipeline") > **Model Patching: Closing the Subgroup Performance Gap with Data Augmentation**\ > Karan Goel*, Albert Gu*, Yixuan Li, Christopher Ré\ > Stanford University\ > Paper: https://arxiv.org/pdf/2008.06775.pdf \ > Blog: http://hazyresearch.stanford.edu/data-aug-part-4 > **Abstract.** Classifiers in machine learning are often brittle when deployed. Particularly concerning are models with inconsistent performance on specific _subgroups_ of a class, e.g., exhibiting disparities in skin cancer classification in the presence or absence of a spurious bandage.
426
HazyResearch/tanda
['text augmentation', 'relation extraction', 'data augmentation', 'image augmentation']
['Learning to Compose Domain-Specific Transformations for Data Augmentation']
tanda/discriminator/discriminator.py experiments/mnist/train.py experiments/synthetic/train.py keras/utils.py tanda/discriminator/dcnn.py experiments/print_tan_stats.py tanda/discriminator/resnet_cifar.py experiments/train_scripts.py experiments/tfs/image/image_tfs.py experiments/launch_run.py experiments/cifar10/train.py experiments/synthetic/utils.py experiments/tfs/image/__init__.py keras/keras_cifar10_example.py experiments/utils.py tanda/tan.py keras/tanda_keras.py tanda/transformer.py tanda/discriminator/__init__.py tanda/generator/__init__.py tanda/generator/rnn_cell_util.py experiments/mnist/dataset.py tanda/discriminator/simple.py experiments/launch_end_models.py tanda/generator/generator.py experiments/cifar10/dataset.py assemble_tan select_fold train_end_model train_tan train transform_batch save_images parse_config_str balanced_subsample get_ngrams parse_str line_writer get_class_idxs create_subdirs create_run_log get_log_dir_path ngram_ratio average_all_pairs_jaccard_distance save_run_log num_procs_open jaccard_multiset_distance get_git_revision_short_hash ImagePlotter create_config_str ConstantPlotter load_cifar10_batch load_cifar10_data to_one_hot DataSet load_mnist dense_to_one_hot extract_images extract_labels _read32 read_data_sets TF_displace TF_displace_stuck TF_displace_decay generate_data plot_synthetic OracleDiscriminator save_data_plot TF_enhance_contrast TF_flip TF_horizontal_flip TF_shear TF_crop_pad_flip TF_jitter TF_enhance_brightness TF_erosion TF_blur TF_shift_hue TF_translate TF_zoom TF_crop_pad pil_to_np np_to_pil TF_elastic_deform TF_adjust_gamma TF_power TF_rotate TF_noise TF_enhance_color TF_swirl TF_enhance_sharpness TF_dilation TANDAImageDataGenerator load_pretrained_tan PretrainedTAN per_image_std_map TAN TFQ get_mse_loss Transformer PadCropTransformer ImageTransformer lrelu DCNN batch_norm_op linear conv2d Discriminator ResNet SimpleDiscriminator nnet GRUGenerator LSTMGenerator RNNCellGenerator MeanFieldGenerator 
GeneratorCellBuilder OutputRangeWrapper mean_field_cell GeneratorRNNCellBuilder assemble_tan tan_checkpoint_path train_end_model print train_tan is_test seed join format values print subsample_seed n_per_class shuffle extend map ravel run_fold n_folds range get_class_idxs enumerate len gen_class seq_len t_class n_actions d_class trainable_variables permutation batch_size is_test seed subsample_seed prod create_subdirs create_run_log format assemble_tan print reshape float32 save_action_seqs ImagePlotter run_index ConstantPlotter analyze_vars random_transform end_batch_size min shuffle copy seq_len transform_basic range get_transformed_data trainable_variables balanced_subsample end_batch_size is_test d_class seed end_lr subsample_seed prod create_subdirs create_run_log format build_supervised n_per_class print reshape analyze_vars float32 ImagePlotter ConstantPlotter run_index join makedirs str update defaultdict argv get_git_revision_short_hash __flags flags int join format concatenate reshape squeeze sqrt floor split array imsave len join strftime append argmax defaultdict enumerate shuffle iteritems get_class_idxs iteritems join format write append flush update zip set get_ngrams min len zeros range join partial concatenate vstack append range load_batch newbyteorder print name zeros arange print name DataSet fake maybe_download append vstack norm random show arange plot cos close pi ylim scatter title savefig sin xlim get_transformed_data_and_predictions plot_synthetic vstack append range squeeze img_as_ubyte uint8 asarray TF_horizontal_flip shape pad shape rgb2hsv shape np_to_pil Contrast shape Brightness np_to_pil shape np_to_pil Color shape np_to_pil Sharpness shape shape rescale shape floor int AffineTransform shape warp shape arange random shape meshgrid gaussian_filter sign absolute randn n_actions g_class PadCropTransformer per_image_standardization reshape map_fn TAN restore DCNN dense
# Learning to Compose Domain-Specific Transformations for Data Augmentation ### *Or: Transformation Adversarial Networks for Data Augmentations (TANDA)* Paper (NeurIPS 2017): [Learning to Compose Domain-Specific Transformations for Data Augmentation](https://arxiv.org/abs/1709.01643) Corresponding authors: [Alex Ratner](https://ajratner.github.io) ([email protected]), [Henry Ehrenberg](https://github.com/henryre) ([email protected]) TANDA [blog post](https://hazyresearch.github.io/snorkel/blog/tanda.html) *For more on using Transformation Functions (TFs) for data augmentation, see the [Snorkel project](http://snorkel.org) ## NEW: an easy-to-use Keras interface Just in time for NeurIPS 2017, we're releasing an **easy-to-use substitute for Keras' [`ImageDataGenerator`](https://keras.io/preprocessing/image/) data augmentation class. Just swap in [`TANDAImageDataGenerator`](keras/tanda_keras.py) and you'll
427
He-jerry/Residual-SE-Network
['rain removal']
['Residual Squeeze-and-Excitation Network for Fast Image Deraining']
pytorch_ssim/__init__.py train.py network.py test.py model/BASNet.py dataset.py BASNet.py model/resnet_model.py resnet_model.py pytorch_iou/__init__.py model/__init__.py loss.py BASNet RefUnet ImageDataset _logssim create_window gaussian _ssim MultipleLoss Vgg19 SSIM MeanShift VGGLoss compute_gradient GradientLoss ssim LOGSSIM totalnet seblock residualse ressenet conv3x3 BasicBlock Bottleneck BasicBlockDe normPRED save_img tensor2im save_output bce_ssim_loss muti_bce_loss_fusion BASNet RefUnet conv3x3 BasicBlock Bottleneck BasicBlockDe _iou IOU _logssim create_window gaussian _ssim SSIM ssim LOGSSIM Tensor Variable contiguous unsqueeze pow conv2d pow conv2d min max create_window size type_as get_device cuda is_cuda data isinstance transpose tile Tensor numpy range len fromarray tensor2im save resize min max squeeze convert len save resize imread numpy array range split bce_loss ssim_loss iou_loss print bce_ssim_loss sum range
# Residual-SE-Network Residual Squeeze-and-Excitation Network for Fast Image Deraining Jun Fu, Jianfeng Xu, Kazuyuki Tasaka, Zhibo Chen https://arxiv.org/abs/2006.00757 Modified and add BASNet to suit reflection removal. Requirements:(All network reimplements are same of similar) * 1.Pytorch 1.3.0 * 2.Torchvision 0.2.0 * 3.Python 3.6.10 * 4.glob
428
HeapHop30/transfer-learning-image-art
['style transfer']
['A Neural Algorithm of Artistic Style']
data_processing/transformations.py main.py Project.py logger.py Project
# Transfer Learning Image Art **IN PROGRESS** Personal project inspired by: https://arxiv.org/abs/1508.06576
429
HeejongBong/ldfa
['time series']
['Latent Dynamic Factor Analysis of High-Dimensional Neural Recordings']
setup.py ldfa/inference.py ldfa/estimate.py ldfa/__init__.py get_numpy_status setup_ldfa _generate_lambda_glasso AIC BIC _switch_back _temporal_est _make_PD imshow log_like _dof cross_validate fit g sdot h score_fn STAR_seq_step fdp_hat sinv s __version__ get_info format setup Extension get_numpy_status full transpose eig block_diag min eigvals eigh min max T arange pinv eye zeros diag arange _switch_back _make_PD where sqrtm abs max tensordot svd all transpose append sum inf concatenate block copy sqrt stack diagonal zip enumerate time _generate_lambda_glasso print reshape inv _temporal_est zeros array glasso len reshape transpose len matmul stack zip zeros tensordot enumerate int print fit choice log_like zeros range len plot astype maximum abs max g where mean shape sinv full range gaussian_filter max inf isinstance argmin score_fn copy where shape any array fdp_hat unravel_index zeros sum full
# LDFA-H: Latent Dynamic Factor Analysis of High-dimensional time series This repo consists of 1. Python package `ldfa` which implements LDFA-H [[1](#BLRSVK20)] 2. Experimental data analyzed in [[1](#BLRSVK20)] 3. Reproducible IPython notebooks for simulation and experimental data analysis in [[1](#BLRSVK20)] ## Install ### Prerequisite Package `ldfa` requires: 1. `Python` >= 3.5 2. `numpy` >= 1.8
430
Heidelberg-NLP/MFscore
['text generation']
['Towards a Decomposable Metric for Explainable Evaluation of Text Generation from AMR']
src/parse.py src/sent_parsers.py src/log_helper.py src/data_helpers.py src/mfscore.py src/score_form.py src/sent_scorers.py src/clean.py src/compute_acceptable_ratio.py src/sent_cleaners.py build_arg_parser build_arg_parser readf writef safe_get get_logger get_score mf_beta_score build_arg_parser build_arg_parser build_arg_parser CleanerFactory BasicCleaner AMRLibGSII ParserFactory AMRLibT5 BERTScorer ScorerFactory GPT2Scorer add_argument ArgumentParser basicConfig setLevel getLogger float filterf
# MF score for explainable evaluation of text generation For some, the **Form** of a generated text may be very important, for others, the **Meaning** may be most important. Most, however, take a balanced approach and rate a text with regard to both **Form** and **Meaning**. This repo **aims at better assessing what an NLG system excels in**: Form or Meaning? ## Preparation We recommend setting up a virtual environment to install the requirements 1. run `pip install -r requirements.txt`. 2. run `pip install -r requirements_no_deps.txt --no-deps` 3. download spacy model `en_core_web_sm` (it's used for true-casing): `python -m spacy download en_core_web_sm` 4. install [amrlib](https://github.com/bjascob/amrlib), and install a parser model. Simply follow their instructions (I tested with version `0.5.0`).
431
HeliosX7/voice-filter
['speech recognition', 'speaker recognition', 'speaker separation', 'speech enhancement']
['VoiceFilter: Targeted Voice Separation by Speaker-Conditioned Spectrogram Masking']
train_test/train.py dataset/dataset_creation.py dataset/speech_collection.py model/model.py dataset/preload_training_dataset.py configuration/Audio.py model/sequence_generator.py train_test/testing.py dataset/directory_structure.py configuration/HyperParams.py Audio HyperParams get_dvector create_example SpeechEmbedder LinearNorm create_dataset save_batch create_folders load_all_data preload_training_data load_col_data get_model data_generator compute_loss_sdr numpy float embedder get_mel_spec load int wave2spec join get_dvector sample_rate trim save abs max data_audio_len print min create_example append abs max range len join to_csv append DataFrame read_csv join makedirs append load range join list str load_col_data save range join list load_col_data save range Model Input concatenate square tqdm mean load_weights spec2wave append median array range predict
# Voice Filter This is a Tensorflow/Keras implementation of Google AI VoiceFilter. Our work is inspired from the the academic paper : https://arxiv.org/abs/1810.04826 The implementation is based on the work : https://github.com/mindslab-ai/voicefilter --- ### Team Members 1. [Angshuman Saikia](https://github.com/HeliosX7) 1. [Abhinav Jain](https://github.com/jain-abhinav02) 1. [Yashwardhan Gautam](https://github.com/yashwardhan-gautam) ---
432
HendrikStrobelt/detecting-fake-text
['text generation', 'human detection']
['GLTR: Statistical Detection and Visualization of Generated Text']
server.py backend/api.py backend/__init__.py backend/class_register.py analyze send_static Project get_all_projects redir send_data top_k_logits LM AbstractLanguageChecker BERTLM main register_api config list keys get check_probabilities print topk time format print sample_unconditional check_probabilities LM BERTLM
# GLTR: Giant Language Model Test Room Detecting text that was generated from large language models (e.g. GPT-2). <a href='http://gltr.io'> <img src='figs/overview.png' > </a> webpage: [http://gltr.io](http://gltr.io)<br> online-demo: [http://gltr.io/dist/index.html](http://gltr.io/dist/index.html)<br> paper: [https://arxiv.org/abs/1906.04043](https://arxiv.org/abs/1906.04043) A project by Hendrik Strobelt, Sebastian Gehrmann, Alexander M. Rush. collaboration of MIT-IBM Watson AI Lab and HarvardNLP
433
Henreich/ML-Breakout-Pong
['unity']
['Unity: A General Platform for Intelligent Agents']
ml-agents/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/tests/trainers/test_trainer_controller.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/tests/envs/test_envs.py ml-agents/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/ppo/__init__.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/learn.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/policy.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents/tests/trainers/test_curriculum.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/curriculum.py ml-agents/mlagents/trainers/ppo/models.py ml-agents/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_input_pb2.py gym-unity/gym_unity/__init__.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_type_proto_pb2.py ml-agents/mlagents/envs/socket_communicator.py gym-unity/setup.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents/tests/trainers/test_ppo.py ml-agents/mlagents/envs/brain.py ml-agents/mlagents/trainers/bc/policy.py ml-agents/tests/trainers/test_bc.py ml-agents/tests/mock_communicator.py ml-agents/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents/mlagents/trainers/models.py 
ml-agents/mlagents/trainers/__init__.py ml-agents/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/tests/trainers/test_buffer.py ml-agents/mlagents/trainers/trainer.py ml-agents/mlagents/envs/communicator.py ml-agents/setup.py ml-agents/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents/mlagents/envs/__init__.py ml-agents/mlagents/trainers/bc/__init__.py gym-unity/tests/test_gym.py ml-agents/mlagents/envs/exception.py ml-agents/mlagents/envs/environment.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/exception.py ml-agents/tests/trainers/test_meta_curriculum.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents/mlagents/envs/communicator_objects/header_pb2.py UnityGymException UnityEnv test_gym_wrapper test_multi_agent BrainInfo BrainParameters Communicator UnityEnvironment UnityException UnityTimeOutException UnityEnvironmentException UnityActionException RpcCommunicator UnityToExternalServicerImplementation SocketCommunicator UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server BufferException Buffer Curriculum CurriculumError MetaCurriculumError TrainerError main run_training MetaCurriculum LearningModel Policy UnityPolicyException UnityTrainerException Trainer TrainerController BehavioralCloningModel BCPolicy BehavioralCloningTrainer PPOModel PPOPolicy PPOTrainer get_gae discount_rewards MockCommunicator test_initialization test_reset test_close test_step test_handles_bad_filename test_dc_bc_model test_cc_bc_model test_visual_cc_bc_model test_bc_policy_evaluate dummy_config test_visual_dc_bc_model assert_array test_buffer location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config 
test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_rl_functions test_ppo_model_dc_vector_curio test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_model_cc_visual_curio test_ppo_model_dc_visual_curio test_ppo_model_cc_vector_curio test_ppo_model_cc_vector test_initialization test_initialize_trainers dummy_bc_config dummy_bad_config dummy_config dummy_start test_load_config sample step MockCommunicator UnityEnv step MockCommunicator UnityEnv method_handlers_generic_handler add_generic_rpc_handlers start_learning int str TrainerController int Process getLogger print start info append randint docopt range size range reversed zeros_like asarray tolist discount_rewards UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator reset_default_graph close reset_default_graph reset_default_graph reset_default_graph reset_default_graph flatten list range len get_batch Buffer assert_array append_update_buffer make_mini_batch append reset_agent array range Curriculum Curriculum Curriculum MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph 
reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards TrainerController
<img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) of state-of-the-art algorithms to enable game developers and hobbyists to easily train intelligent agents for 2D, 3D and VR/AR games. These trained agents can be
434
Henreich/ML-Pong
['unity']
['Unity: A General Platform for Intelligent Agents']
ml-agents/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/tests/trainers/test_trainer_controller.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/tests/envs/test_envs.py ml-agents/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/ppo/__init__.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/learn.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/policy.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents/tests/trainers/test_curriculum.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/curriculum.py ml-agents/mlagents/trainers/ppo/models.py ml-agents/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_input_pb2.py gym-unity/gym_unity/__init__.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_type_proto_pb2.py ml-agents/mlagents/envs/socket_communicator.py gym-unity/setup.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents/tests/trainers/test_ppo.py ml-agents/mlagents/envs/brain.py ml-agents/mlagents/trainers/bc/policy.py ml-agents/tests/trainers/test_bc.py ml-agents/tests/mock_communicator.py ml-agents/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents/mlagents/trainers/models.py 
ml-agents/mlagents/trainers/__init__.py ml-agents/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/tests/trainers/test_buffer.py ml-agents/mlagents/trainers/trainer.py ml-agents/mlagents/envs/communicator.py ml-agents/setup.py ml-agents/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents/mlagents/envs/__init__.py ml-agents/mlagents/trainers/bc/__init__.py gym-unity/tests/test_gym.py ml-agents/mlagents/envs/exception.py ml-agents/mlagents/envs/environment.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/exception.py ml-agents/tests/trainers/test_meta_curriculum.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents/mlagents/envs/communicator_objects/header_pb2.py UnityGymException UnityEnv test_gym_wrapper test_multi_agent BrainInfo BrainParameters Communicator UnityEnvironment UnityException UnityTimeOutException UnityEnvironmentException UnityActionException RpcCommunicator UnityToExternalServicerImplementation SocketCommunicator UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server BufferException Buffer Curriculum CurriculumError MetaCurriculumError TrainerError main run_training MetaCurriculum LearningModel Policy UnityPolicyException UnityTrainerException Trainer TrainerController BehavioralCloningModel BCPolicy BehavioralCloningTrainer PPOModel PPOPolicy PPOTrainer get_gae discount_rewards MockCommunicator test_initialization test_reset test_close test_step test_handles_bad_filename test_dc_bc_model test_cc_bc_model test_visual_cc_bc_model test_bc_policy_evaluate dummy_config test_visual_dc_bc_model assert_array test_buffer location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config 
test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_rl_functions test_ppo_model_dc_vector_curio test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_model_cc_visual_curio test_ppo_model_dc_visual_curio test_ppo_model_cc_vector_curio test_ppo_model_cc_vector test_initialization test_initialize_trainers dummy_bc_config dummy_bad_config dummy_config dummy_start test_load_config sample step MockCommunicator UnityEnv step MockCommunicator UnityEnv method_handlers_generic_handler add_generic_rpc_handlers start_learning int str TrainerController int Process getLogger print start info append randint docopt range size range reversed zeros_like asarray tolist discount_rewards UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator reset_default_graph close reset_default_graph reset_default_graph reset_default_graph reset_default_graph flatten list range len get_batch Buffer assert_array append_update_buffer make_mini_batch append reset_agent array range Curriculum Curriculum Curriculum MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph 
reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards TrainerController
<img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) of state-of-the-art algorithms to enable game developers and hobbyists to easily train intelligent agents for 2D, 3D and VR/AR games. These trained agents can be
435
Henreich/ML-testing
['unity']
['Unity: A General Platform for Intelligent Agents']
ml-agents/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py ml-agents/tests/trainers/test_trainer_controller.py ml-agents/mlagents/trainers/buffer.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py ml-agents/tests/envs/test_envs.py ml-agents/mlagents/envs/communicator_objects/__init__.py ml-agents/mlagents/envs/rpc_communicator.py ml-agents/mlagents/trainers/ppo/__init__.py gym-unity/gym_unity/envs/__init__.py ml-agents/mlagents/envs/communicator_objects/agent_action_proto_pb2.py ml-agents/mlagents/trainers/learn.py gym-unity/gym_unity/envs/unity_env.py ml-agents/mlagents/trainers/bc/trainer.py ml-agents/mlagents/trainers/policy.py ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py ml-agents/tests/trainers/test_curriculum.py ml-agents/mlagents/trainers/meta_curriculum.py ml-agents/mlagents/trainers/curriculum.py ml-agents/mlagents/trainers/ppo/models.py ml-agents/mlagents/envs/communicator_objects/space_type_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_output_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_input_pb2.py gym-unity/gym_unity/__init__.py ml-agents/mlagents/trainers/ppo/policy.py ml-agents/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/brain_type_proto_pb2.py ml-agents/mlagents/envs/socket_communicator.py gym-unity/setup.py ml-agents/mlagents/trainers/trainer_controller.py ml-agents/mlagents/envs/communicator_objects/agent_info_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py ml-agents/tests/trainers/test_ppo.py ml-agents/mlagents/envs/brain.py ml-agents/mlagents/trainers/bc/policy.py ml-agents/tests/trainers/test_bc.py ml-agents/tests/mock_communicator.py ml-agents/mlagents/envs/communicator_objects/unity_message_pb2.py ml-agents/mlagents/trainers/models.py 
ml-agents/mlagents/trainers/__init__.py ml-agents/mlagents/envs/communicator_objects/resolution_proto_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_to_external_pb2.py ml-agents/mlagents/envs/communicator_objects/unity_rl_input_pb2.py ml-agents/tests/trainers/test_buffer.py ml-agents/mlagents/trainers/trainer.py ml-agents/mlagents/envs/communicator.py ml-agents/setup.py ml-agents/mlagents/envs/communicator_objects/unity_rl_output_pb2.py ml-agents/mlagents/envs/__init__.py ml-agents/mlagents/trainers/bc/__init__.py gym-unity/tests/test_gym.py ml-agents/mlagents/envs/exception.py ml-agents/mlagents/envs/environment.py ml-agents/mlagents/trainers/bc/models.py ml-agents/mlagents/envs/communicator_objects/command_proto_pb2.py ml-agents/mlagents/trainers/exception.py ml-agents/tests/trainers/test_meta_curriculum.py ml-agents/mlagents/trainers/ppo/trainer.py ml-agents/mlagents/envs/communicator_objects/header_pb2.py UnityGymException UnityEnv test_gym_wrapper test_multi_agent BrainInfo BrainParameters Communicator UnityEnvironment UnityException UnityTimeOutException UnityEnvironmentException UnityActionException RpcCommunicator UnityToExternalServicerImplementation SocketCommunicator UnityToExternalServicer UnityToExternalStub add_UnityToExternalServicer_to_server BufferException Buffer Curriculum CurriculumError MetaCurriculumError TrainerError main run_training MetaCurriculum LearningModel Policy UnityPolicyException UnityTrainerException Trainer TrainerController BehavioralCloningModel BCPolicy BehavioralCloningTrainer PPOModel PPOPolicy PPOTrainer get_gae discount_rewards MockCommunicator test_initialization test_reset test_close test_step test_handles_bad_filename test_dc_bc_model test_cc_bc_model test_visual_cc_bc_model test_bc_policy_evaluate dummy_config test_visual_dc_bc_model assert_array test_buffer location default_reset_parameters test_init_curriculum_bad_curriculum_raises_error test_init_curriculum_happy_path test_increment_lesson test_get_config 
test_init_meta_curriculum_happy_path test_increment_lessons_with_reward_buff_sizes default_reset_parameters MetaCurriculumTest test_increment_lessons measure_vals reward_buff_sizes test_set_all_curriculums_to_lesson_num test_get_config test_set_lesson_nums test_init_meta_curriculum_bad_curriculum_folder_raises_error more_reset_parameters test_rl_functions test_ppo_model_dc_vector_curio test_ppo_model_dc_vector_rnn test_ppo_model_cc_vector_rnn test_ppo_policy_evaluate test_ppo_model_cc_visual dummy_config test_ppo_model_dc_vector test_ppo_model_dc_visual test_ppo_model_cc_visual_curio test_ppo_model_dc_visual_curio test_ppo_model_cc_vector_curio test_ppo_model_cc_vector test_initialization test_initialize_trainers dummy_bc_config dummy_bad_config dummy_config dummy_start test_load_config sample step MockCommunicator UnityEnv step MockCommunicator UnityEnv method_handlers_generic_handler add_generic_rpc_handlers start_learning int str TrainerController int Process getLogger print start info append randint docopt range size range reversed zeros_like asarray tolist discount_rewards UnityEnvironment close MockCommunicator UnityEnvironment close MockCommunicator reset str local_done print agents step close reset MockCommunicator UnityEnvironment len UnityEnvironment close MockCommunicator reset_default_graph close reset_default_graph reset_default_graph reset_default_graph reset_default_graph flatten list range len get_batch Buffer assert_array append_update_buffer make_mini_batch append reset_agent array range Curriculum Curriculum Curriculum MetaCurriculum assert_has_calls MetaCurriculumTest increment_lessons assert_called_with MetaCurriculumTest increment_lessons assert_called_with assert_not_called MetaCurriculumTest set_all_curriculums_to_lesson_num MetaCurriculumTest dict update MetaCurriculumTest reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph reset_default_graph 
reset_default_graph reset_default_graph reset_default_graph assert_array_almost_equal array discount_rewards TrainerController
<img src="docs/images/unity-wide.png" align="middle" width="3000"/> <img src="docs/images/image-banner.png" align="middle" width="3000"/> # Unity ML-Agents Toolkit (Beta) **The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source Unity plugin that enables games and simulations to serve as environments for training intelligent agents. Agents can be trained using reinforcement learning, imitation learning, neuroevolution, or other machine learning methods through a simple-to-use Python API. We also provide implementations (based on TensorFlow) of state-of-the-art algorithms to enable game developers and hobbyists to easily train intelligent agents for 2D, 3D and VR/AR games. These trained agents can be
436
Henry-E/surface-realization-shallow-task
['text generation']
['Shape of synth to come: Why we should use synthetic data for English surface realization']
modules/format.py modules/parse_config.py modules/dependency_relation_analysis.py modules/make_vocab.py modules/error_analysis.py modules/create_source_and_target.py modules/majority_rules.py modules/shuffle_src_and_tgt.py modules/get_form_suggestions.py modules/eval_Py3.py modules/template_command_line_module.py modules/get_tokenized_dev_sents.py modules/get_top_hyp.py get_mapping get_tokens_with_feats linearize_tree create_source_and_target main get_tokenized_sent main main remove_inflection_errors compare_and_label remove_punctuation main read_corpus main main main main main main datetime_stamp get_run_name save_updated_config save_eval_to_csv mk_missing_dirs main initialize_experiment main str shuffle id form extend append enumerate str pop join get_mapping to_tree linearize_tree replace insert extend form xpos lower sub getattr append lemma len append lower any sub join form_suggestions_file_name get_tokens_with_feats output_dir_name close group source_conllu_dir_name tqdm iter_from_file match zip append get_tokenized_sent omit_scopes target_conllu_dir_name input_file_names items eval_set_repeats add_argument extend shuffle create_source_and_target match synthetic_data_repeats ArgumentParser train_set_repeats parse_args join format input_file_name output_dir_name Counter set add iter_from_file tqdm append deprel max zip enumerate append get deepcopy join len dev_conll_file_name iter_from_file lower zip enumerate remove_inflection_errors print compare_and_label remove_punctuation len print str listdir SmoothingFunction corpus_bleu corpus_nist exit read_corpus zip round detokenize TreebankWordDetokenizer MosesDetokenizer upper leave_tokenized split conll_sents_file_name update list form strip dirname defaultdict range min_freq now run_names_file_name join root_dir_name update join update datetime_stamp format get_run_name fromkeys opennmt_dir_name root_dir_name dirname mkdir datetime_stamp format print mkdir listdir update decode format print run 
save_updated_config initialize_experiment save_eval_to_csv mk_missing_dirs run
# Surface realization shallow task Experiment repo for the ACL paper - [Shape of Synth to Come: Why We Should Use Synthetic Data for English Surface Realization](https://www.aclweb.org/anthology/2020.acl-main.665/)
437
HeoTaksung/MIMIC-III_CNN_Attention
['document classification', 'medical code prediction']
['An Explainable CNN Approach for Medical Codes Prediction from Clinical Text', 'Explainable Prediction of Medical Codes from Clinical Text']
evaluate.py.py cnn_attention.py util.py text_cnn_att.py Single_CAML CNN_Attention Multi_CAML Label_Classification preprocess_text embedding_load load_data exist_emb squeeze Adam stack class_num Model summary append Input range compile exist_emb squeeze Adam stack class_num Model summary append Input range compile sub append preprocess_text enumerate get items print dict zeros len
# MIMIC-III_CNN_Attention * Multi-Label Classification * Mullenbach, J., Wiegreffe, S., Duke, J., Sun, J., & Eisenstein, J. (2018, June). Explainable Prediction of Medical Codes from Clinical Text. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers) (pp. 1101-1111). [[Paper]](https://www.aclweb.org/anthology/N18-1100.pdf) * Convolutional Attention for Multi-Label classification (CAML) * Hu, S. Y., & Teng, F. (2021). An Explainable CNN Approach for Medical Codes Prediction from Clinical Text. arXiv preprint arXiv:2101.11430. [[Paper]](https://arxiv.org/pdf/2101.11430.pdf) * Shallow and Wide Attention convolutional Mechanism Convolutional Attention for Multi-Label classification (SWAM_CAML)
438
HiLab-git/ACELoss
['medical image segmentation', 'semantic segmentation']
["Learning Euler's Elastica Model for Medical Image Segmentation"]
aceloss.py ACLoss ACLoss3D ACELoss3D ACLossV2 ACLoss3DV2 FastACELoss3DV2 ACELoss FastACELoss3D elastica region
# Active Contour Euler Elastica Loss Functions Official implementations of paper: [Learning Euler's Elastica Model for Medical Image Segmentation](https://arxiv.org/pdf/2011.00526.pdf), and a short version was accepted by ISBI 2021. * Implemented a novel active contour-based loss function, a combination of region term, length term, and elastica term (mean curvature). * Reimplemented some popular active contour-based loss functions in different ways, such as 3D Active-Contour-Loss based on Sobel filter and max- and min-pool. ## Introduction and Some Results * ### **Pipeline of ACE loss**. ![](https://github.com/Luoxd1996/Active_Contour_Euler_Elastica_Loss/blob/main/ACELoss_pipeline.png) * ### **2D results and visualization**. ![](https://github.com/Luoxd1996/Active_Contour_Euler_Elastica_Loss/blob/main/table1.png) ![](https://github.com/Luoxd1996/Active_Contour_Euler_Elastica_Loss/blob/main/figure1.png)
439
HiLab-git/UGIR
['brain segmentation']
['Uncertainty-Guided Efficient Interactive Refinement of Fetal Brain Segmentation from Stacks of MRI Slices']
uncertainty_demo/show_uncertanty.py util/network/unet2dres.py util/level_set/ls_util/drlse_reion.py util/level_set/ls_util/get_gradient.py util/custom_net_run.py util/network/MGNet.py util/level_set/ls_util/interactive_ls.py util/level_set/demo/demo_idrlse.py show_seg_uncertainty add_countor map_scalar_to_color get_attention_map gray_to_rgb CustomSegAgent main get_result_for_one_case refine_dlls interactive_level_set get_distance_based_likelihood show_leve_set show_image_and_segmentation interleaved_concate MGNet UNetBlock_DW_CF ResBlock ResBlock_DWGC_CF_BE ResBlock_DW UNetBlock_DW PEBlock UNetBlock UNet2DRes UNetBlock_DW_CF_Res ResBlock_DWGC_BE_CPF get_deconv_layer channel_shuffle VanillaBlock ResBlock_DWGC_CF ResBlock_DWGC_CF_PE get_unet_block size range putpixel copy transpose asarray range len getpixel tuple size new map_scalar_to_color putpixel range fromarray subplot uint8 asarray show min axis add_countor get_attention_map imshow ReadImage GetArrayFromImage title figure savefig max gray_to_rgb str print CustomSegAgent set_network exit parse_config run convert interactive_level_set float32 asarray format refine_dlls arange add_subplot shape meshgrid plot_surface contour plot add_subplot axis where imshow enumerate geodesic2d_raster_scan ones_like exp clf ion drlse_region_interaction show exp show_leve_set ones shape get_distance_based_likelihood show_image_and_segmentation find_contours sum range asarray zoom copy mean float time suptitle print pause float32 figure std shape list reshape cat size view contiguous
### UGIR: Uncertainty-Guided Interactive Refinement for Segmentation This repository provides the code for the following MICCAI 2020 paper ([Arxiv link][arxiv_link], [Demo][demo_link]). If you use some modules of our repository, please cite this paper. * Guotai Wang, Michael Aertsen, Jan Deprest, Sébastien Ourselin, Tom Vercauteren, Shaoting Zhang: Uncertainty-Guided Efficient Interactive Refinement of Fetal Brain Segmentation from Stacks of MRI Slices. MICCAI (4) 2020: 279-288. The code contains two modules: 1), a novel CNN based on convolution in Multiple Groups (MG-Net) that simultaneously obtains an initial segmentation and its uncertainty estimation. 2), Interaction-based level set for fast refinement, which is an extension of the DRLSE algorithm and named as I-DRLSE. ![mg_net](./pictures/mgnet.png) Fig. 1. Structure of MG-Net. ![uncertainty](./pictures/uncertainty.png) Fig. 2. Segmentation with uncertainty estimation. ![refinement](./pictures/refinement.png)
440
HikkaV/StylizerML
['style transfer']
['Exploring the structure of a real-time, arbitrary neural artistic stylization network']
app/utlis.py app/serving.py app/predictor.py Predictor stylize Body crop_center process to_bytes BytesIO format b64decode print len to_bytes shape info array open shape min max crop_to_bounding_box resize stack crop_center
# Stylizer Python server on FastAPI which serves as "stylizer" of the images. Under the hood uses model from tensorflow-hub. Related to: https://arxiv.org/pdf/1705.06830.pdf. For examples refer to test_image.ipynb. # To deploy on gcloud 1) Create a project on gcloud. 2) Install gcloud and authorise with your account. 3) Set project_id. 4) In the folder of the project execute the following: <pre>gcloud app deploy app.yaml -v v1</pre>
441
HilalAsi/APROX-Robust-Stochastic-Optimization-Algorithms
['stochastic optimization']
['The importance of better models in stochastic optimization']
paper-code/losses/topk_accuracy.py example_pytorch.py example_tf.py optimizers_tf/truncated_tf.py optimizers_tf/__init__.py optimizers_pytorch/truncated_pytorch.py paper-code/models/__init__.py paper-code/datasets/stanford_dogs.py optimizers_pytorch/__init__.py paper-code/configs/settings.py paper-code/stability_check.py paper-code/losses/__init__.py paper-code/datasets/__init__.py paper-code/models/vgg.py paper-code/datasets/mnist.py paper-code/datasets/cifar.py paper-code/train.py paper-code/models/resnet.py paper-code/configs/__init__.py paper-code/configs/configs.py paper-code/models/preTrained_models.py paper-code/models/resnet_ELU.py closure LeNet MLPNet run_model TruncatedAdagrad Truncated calc_grad_norm TruncatedAdagrad Truncated PlotTimesToAccuracy TimesToEpsilonAccuracy plot_figures PlotBestAccuracy train_model generate_data create_net get_label NN_optimize_fast create_config TrainConfig load_config save_config seed averaging dropout batch_size ResNet truncate_dataset LeNet VGG sgd sgdTruncation anneal_lr wd adam epochs apply_setting CIFAR100 _randomize_labels Big_CIFAR10 CIFAR10 MNIST _randomize_labels dogs list_dir Stanford_dogs TopKAccuracy pretrained_vgg16_ELU pretrained_ResNet18_ELU FineTuneModel pretrained_AlexNet_ELU ResNet ResNet18 ResNet34 Bottleneck ResNet101 test ResNet50 BasicBlock ResNet152 ResNet34_ELU ResNet152_ELU Bottleneck ResNet101_ELU test ResNet18_ELU ResNet_ELU BasicBlock ResNet50_ELU VGG criterion backward TruncatedAdagrad Truncated mnist evaluate Sequential SGD load_data compile fit load str print get_label min NN_optimize_fast save zeros max range len subplot set_xscale plot xlabel set_yscale ylabel title savefig figure legend range len median plot_figures zeros range len median plot_figures zeros range len DataLoader use_cuda parameters filter startswith cuda int startswith max format criterion model print Variable zero_grad eval zeros train step cuda range train_model str print generate_data create_net DataParallel create_config 
is_available cuda values device_count list range results_dir join name save load join setattr items items join startswith setattr split dict int int float float int float batch_size list get_rng_state set_rng_state manual_seed Compose _randomize_labels train_labels Compose _randomize_labels train_labels CIFAR10 Compose _randomize_labels train_labels Compose _randomize_labels train_labels dogs print Compose DataLoader stats expanduser list listdir filter resnet18 alexnet vgg16 randn Variable ResNet18 print size net ResNet18_ELU
# APROX: Robust Stochastic Optimization Algorithms TensorFlow and Pytorch open source implementation for the aProx optimization methods from the paper: [*The importance of better models in stochastic optimization*](https://www.pnas.org/content/early/2019/10/29/1908018116) by [Hilal Asi](http://web.stanford.edu/~asi/) and [John Duchi](http://web.stanford.edu/~jduchi/). --- This repository provides implementation for the aProx optimization algorithms (Truncated and Truncated-Adagrad), which improve the robustness of classical optimization algorithms (e.g. SGD and Adagrad) to the stepsize value. The folders Optimizers_tf and Optimizers_pytorch include the implementation for TensorFlow and Pytorch, respectively. Examples of using these optimizers can be found in the files example_tf.py and example_pytorch.py. The following plots (from the paper) show the time-to-convergence as a function of the stepsize for various methods for CIFAR10 and Stanford-dogs datasets. You can reproduce these plots by running the file paper-code/stability_check.py with the desired dataset. ![CIFAR10 plot](https://github.com/HilalAsi/APOX-Robust-Stochastic-Optimization-Algorithms/blob/master/paper-plots/CIFAR10-plot.PNG "CIFAR10") ![Stanford dogs plot](https://github.com/HilalAsi/APOX-Robust-Stochastic-Optimization-Algorithms/blob/master/paper-plots/Stanford-dogs-plot.PNG "Stanford dogs") ## Contact
442
HilmiK/PS-Gan-modified
['pedestrian detection', 'scene text recognition']
['Pedestrian-Synthesis-GAN: Generating Pedestrian Data in Real Scene and Beyond']
options/train_options.py data/image_folder.py data/aligned_dataset.py data/custom_dataset_data_loader.py data/data_loader.py train.py util/image_pool.py util/png.py util/get_data.py models/base_model.py models/models.py util/html.py data/base_data_loader.py options/base_options.py test.py data/base_dataset.py util/util.py models/networks.py models/test_model.py data/unaligned_dataset.py data/single_dataset.py options/test_options.py util/visualizer.py models/pix2pix_model.py AlignedDataset BaseDataset get_transform __scale_width BaseDataLoader CustomDatasetDataLoader CreateDataset CreateDataLoader is_image_file ImageFolder default_loader make_dataset UnalignedDataset BaseModel create_model define_person_D SPP_NET get_norm_layer GANLoss ResnetGenerator ResnetBlock UnetGenerator UnetSkipConnectionBlock PersonDiscriminator define_image_D weights_init print_network NLayerDiscriminator define_G Pix2PixModel BaseOptions TestOptions TrainOptions GetData HTML ImagePool encode print_numpy tensor2pil varname diagnose_network mkdirs mkdir info save_image tensor2im Visualizer Lambda Scale RandomCrop BICUBIC RandomHorizontalFlip append int size initialize name print AlignedDataset SingleDataset CustomDatasetDataLoader name print initialize is_image_file join sorted append walk initialize model print name TestModel Pix2PixModel normal_ __name__ fill_ BatchNorm2d partial InstanceNorm2d get_norm_layer ResnetGenerator UnetGenerator apply cuda NLayerDiscriminator cuda get_norm_layer apply PersonDiscriminator SPP_NET cuda apply parameters transpose numpy fromarray transpose numpy astype print parameters fromarray save print join search print float64 astype flatten shape mkdir makedirs
# Pedestrian-Synthesis-GAN See arxiv: https://arxiv.org/abs/1804.02047 </br> Pedestrian-Synthesis-GAN: Generating Pedestrian Data in Real Scene and Beyond <img src="imgs/D.png"></img> </br></br></br> ## Preparing Prepare your data before training. The format of your data should follow the file in `datasets`. ## Training stage ```bash
443
Hitsaa/Practice
['data augmentation']
['DENSER: Deep Evolutionary Network Structured Representation']
utils/helper.py configs.py benchmark/convnet.py app.py benchmark/runner.py utils/argparser.py utils/mnist_reader.py visualization/project_zalando.py start_s3_sync get_json_logger touch touch_dir _get_logger main cnn_model_fn PredictJob JobWorker JobManager get_args_request parse_arg get_args_cli now_int upload_result_s3 get_sprite_image invert_grayscale create_sprite_image vector_to_matrix_mnist UploadS3Thread load_mnist UploadS3Thread start Event dirname makedirs makedirs setFormatter touch_dir DEBUG getLogger addHandler StreamHandler Formatter touch setLevel INFO FileHandler setFormatter getLogger addHandler Formatter touch setLevel INFO FileHandler dense max_pooling2d dropout one_hot minimize reshape GradientDescentOptimizer conv2d softmax_cross_entropy asarray evaluate print Estimator shuffle labels images numpy_input_fn train range read_data_sets int append items list defaultdict utcfromtimestamp info int isinstance ones sqrt ceil array range vector_to_matrix_mnist invert_grayscale join
# Fashion-MNIST [![GitHub stars](https://img.shields.io/github/stars/zalandoresearch/fashion-mnist.svg?style=flat&label=Star)](https://github.com/zalandoresearch/fashion-mnist/) [![Gitter](https://badges.gitter.im/zalandoresearch/fashion-mnist.svg)](https://gitter.im/fashion-mnist/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link) [![Readme-CN](https://img.shields.io/badge/README-中文-green.svg)](README.zh-CN.md) [![Readme-JA](https://img.shields.io/badge/README-日本語-green.svg)](README.ja.md) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Year-In-Review](https://img.shields.io/badge/%F0%9F%8E%82-Year%20in%20Review-orange.svg)](https://hanxiao.github.io/2018/09/28/Fashion-MNIST-Year-In-Review/) <details><summary>Table of Contents</summary><p> * [Why we made Fashion-MNIST](#why-we-made-fashion-mnist) * [Get the Data](#get-the-data)
444
HoganZhang/Person_reID_baseline_pytorch
['person retrieval', 'person re identification', 'data augmentation']
['Camera Style Adaptation for Person Re-identification', 'Beyond Part Models: Person Retrieval with Refined Part Pooling (and a Strong Convolutional Baseline)']
model/PCB/model.py model/ft_net_dense/model.py model/ft_ResNet50/train.py model/PCB/train.py model/ft_ResNet50/model.py random_erasing.py prepare.py train.py evaluate.py prepare_static.py test.py evaluate_rerank.py re_ranking.py model/fp16/train.py model.py demo.py model/ft_net_dense/train.py evaluate_gpu.py model/fp16/model.py imshow sort_img compute_mAP evaluate compute_mAP evaluate compute_mAP evaluate ft_net_dense ClassBlock ft_net PCB_test weights_init_classifier ft_net_middle PCB weights_init_kaiming prepare_model RandomErasing k_reciprocal_neigh re_ranking load_network get_id extract_feature fliplr train_model save_network draw_curve ft_net_dense ClassBlock ft_net PCB_test weights_init_classifier ft_net_middle PCB weights_init_kaiming train_model save_network draw_curve ft_net_dense ClassBlock ft_net PCB_test weights_init_classifier ft_net_middle PCB weights_init_kaiming train_model save_network draw_curve ft_net_dense ClassBlock ft_net PCB_test weights_init_classifier ft_net_middle PCB weights_init_kaiming train_model save_network draw_curve ft_net_dense ClassBlock ft_net PCB_test weights_init_classifier ft_net_middle PCB weights_init_kaiming train_model save_network draw_curve title imread pause view argsort intersect1d numpy argwhere in1d cpu mm append setdiff1d compute_mAP dot argsort intersect1d argwhere append flatten argwhere in1d zero_ range len view numpy cpu mm data normal_ kaiming_normal_ __name__ constant_ data normal_ __name__ constant_ time format print shape zeros range zeros_like around max exp transpose append sum range concatenate astype mean unique minimum int float32 argpartition k_reciprocal_neigh zeros len load join which_epoch load_state_dict index_select long norm view FloatTensor print Variable size model div sqrt cuda zero_ expand_as float PCB fliplr range cat append int basename data draw_curve Softmax model zero_grad max sm shape load_state_dict append range state_dict detach format save_network time criterion backward print 
Variable train step join plot savefig legend append join save is_available cuda state_dict half
<h1 align="center"> Person_reID_baseline_pytorch </h1> [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/layumi/Person_reID_baseline_pytorch.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/layumi/Person_reID_baseline_pytorch/context:python) [![Build Status](https://travis-ci.org/layumi/Person_reID_baseline_pytorch.svg?branch=master)](https://travis-ci.org/layumi/Person_reID_baseline_pytorch) [![Total alerts](https://img.shields.io/lgtm/alerts/g/layumi/Person_reID_baseline_pytorch.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/layumi/Person_reID_baseline_pytorch/alerts/) [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT) A tiny, friendly, strong baseline code for Person-reID (based on [pytorch](https://pytorch.org)). - **Strong.** It is consistent with the new baseline result in several top-conference works, e.g., [Beyond Part Models: Person Retrieval with Refined Part Pooling(ECCV18)](https://arxiv.org/abs/1711.09349) and [Camera Style Adaptation for Person Re-identification(CVPR18)](https://arxiv.org/abs/1711.10295). We arrived Rank@1=88.24%, mAP=70.68% only with softmax loss. - **Small.** With fp16, our baseline could be trained with only 2GB GPU memory. - **Friendly.** You may use the off-the-shelf options to apply many state-of-the-art tricks in one line. Besides, if you are new to person re-ID, you may check out our **[Tutorial](https://github.com/layumi/Person_reID_baseline_pytorch/tree/master/tutorial)** first (8 min read) :+1: .
445
HoganZhang/few-shot-gnn
['few shot learning', 'active learning']
['Few-Shot Learning with Graph Neural Networks']
data/__init__.py test.py data/omniglot.py data/parser.py utils/io_utils.py data/generator.py models/__init__.py models/gnn_iclr.py main.py data/mini_imagenet.py models/models.py train adjust_learning_rate _init_ train_batch test_one_shot Generator MiniImagenet Omniglot get_image_paths Wcompute gmul GNN_nl_omniglot Gconv GNN_nl GNN_active EmbeddingImagenet create_models load_model EmbeddingOmniglot MetricNN SoftmaxModule IOStream system exp_name makedirs LongTensor backward Variable forward nll_loss metric_nn numpy argmax cuda get_task_batch batch_size train_batch zero_grad SoftmaxModule test_one_shot iterations adjust_learning_rate dataset_root save cuda exp_name str create_models load_model Generator Adam range format print cprint parameters step param_groups int dec_lr argmax int get_task_batch format Variable Generator cprint IOStream metric_nn batch_size_test eval numpy dataset_root train forward cuda range exp_name join filter append walk split bmm size squeeze cat split load cprint print EmbeddingImagenet dataset EmbeddingOmniglot
# Few-Shot Learning with Graph Neural Networks Implementation of [Few-Shot Learning with Graph Neural Networks](https://arxiv.org/pdf/1711.04043.pdf) on Python3, Pytorch 0.3.1 ## Mini-Imagenet ### Download the dataset Create **images.zip** file and copy it inside ```mini_imagenet``` directory: . ├── ... └── datasets └── compressed
446
HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation
['semantic segmentation']
['Location-aware Upsampling for Semantic Segmentation']
LaU-reg/detail-api/PythonAPI/detail/__init__.py LaU-reg/encoding/utils/metrics.py LaU-reg/scripts/prepare_ade20k.py LaU-reg/scripts/prepare_pcontext.py LaU-reg/encoding/models/encnet.py LaU-reg/encoding/models/util.py LaU-reg/detail-api/PythonAPI/detail/param.py LaU-reg/detail-api/PythonAPI/detail/instsegEval.py LaU-reg/encoding/nn/__init__.py LaU-reg/experiments/segmentation/option.py LaU-reg/encoding/models/base.py LaU-reg/encoding/datasets/coco.py LaU-reg/encoding/dilated/__init__.py LaU-reg/encoding/dilated/resnet.py LaU-reg/encoding/utils/__init__.py LaU-reg/scripts/prepare_pascal.py LaU-reg/setup.py LaU-reg/encoding/parallel.py LaU-reg/encoding/datasets/pascal_aug.py LaU-reg/encoding/models/model_store.py LaU-reg/encoding/lib/cpu/setup.py LaU-reg/encoding/models/model_zoo.py LaU-reg/encoding/utils/lr_scheduler.py LaU-reg/encoding/nn/encoding.py LaU-reg/detail-api/download.py LaU-reg/encoding/models/deeplabv3.py LaU/modules/__init__.py LaU-reg/encoding/__init__.py LaU-reg/encoding/functions/encoding.py LaU-reg/experiments/segmentation/test.py LaU-reg/experiments/segmentation/test_fps_params.py LaU/modules/location_aware_upsampling.py LaU-reg/detail-api/PythonAPI/detail/bboxEval.py LaU-reg/encoding/nn/syncbn.py LaU/__init__.py LaU-reg/detail-api/PythonAPI/detail/detaileval_kpt.py LaU-reg/encoding/datasets/pascal_voc.py LaU-reg/encoding/datasets/cityscapes.py LaU-reg/detail-api/PythonAPI/setup.py LaU-reg/encoding/functions/__init__.py LaU-reg/encoding/lib/__init__.py LaU-reg/encoding/utils/pallete.py LaU-reg/encoding/functions/syncbn.py LaU/functions/location_aware_upsampling_func.py LaU-reg/scripts/prepare_cityscapes.py LaU-reg/encoding/nn/customize.py LaU-reg/encoding/lib/gpu/setup.py LaU-reg/encoding/utils/files.py LaU-reg/detail-api/PythonAPI/detail/detaileval_cls.py LaU-reg/encoding/models/__init__.py LaU-reg/experiments/segmentation/train.py LaU-reg/encoding/datasets/ade20k.py LaU-reg/encoding/datasets/pcontext.py LaU-reg/encoding/models/lau.py 
LaU-reg/detail-api/PythonAPI/detail/mask.py LaU/functions/__init__.py LaU-reg/scripts/prepare_coco.py LaU/setup.py LaU-reg/encoding/datasets/base.py LaU-reg/encoding/models/psp.py LaU-reg/encoding/nn/comm.py LaU-reg/encoding/models/fcn.py LaU-reg/encoding/version.py LaU/test.py LaU-reg/encoding/datasets/__init__.py get_extensions check_gradient_lau example_ldu_multi_output example_ldu example_lau LDUFunction LDUMultiOutputFunction LAUFunction LdU_MultiOutput LaU LdU create_version_file develop install input23 printProgress bboxEval DetailEvalCls Params DetailEvalKpt instsegEval encode decode area toBbox Params Detail CallbackContext allreduce AllReduce DataParallelModel _criterion_parallel_apply execute_replication_callbacks Reduce DataParallelCriterion ADE20KSegmentation _get_ade20k_pairs BaseDataset test_batchify_fn get_city_pairs CitySegmentation COCOSegmentation VOCAugSegmentation VOCSegmentation ContextSegmentation get_segmentation_dataset ResNet resnet50 Bottleneck resnet152 conv3x3 resnet34 resnet18 BasicBlock resnet101 scaled_l2 _scaled_l2 aggregate _aggregate _batchnormtrain batchnormtrain _sum_square sum_square module_inference MultiEvalModule BaseNet flip_image resize_image pad_image crop_image ASPP_Module DeepLabV3 get_deeplab AsppPooling ASPPConv DeepLabV3Head EncHead get_encnet_resnet101_ade EncNet get_encnet_resnet50_ade EncModule get_encnet_resnet101_pcontext get_encnet_resnet152_ade get_encnet get_encnet_resnet50_pcontext get_fcn_resnet50_pcontext FCNHead get_fcn_resnet50_ade FCN get_fcn np_repeat_3d th_flatten th_generate_idx th_batch_map_coordinates th_det_batch_map_offsets th_map_coordinates np_repeat_2d th_det_batch_map_coordinates sp_batch_map_offsets th_generate_grid_for_loss th_batch_map_offsets sp_batch_map_coordinates th_generate_grid th_repeat th_gather_2d get_model_file short_hash purge pretrained_model_list get_model PSPHead PSP get_psp_resnet50_ade get_psp DeterminedBiUpsampling DiffBiUpsampling get_segmentation_model SyncMaster 
FutureResult SlavePipe SeparableConv2d OffsetLosses Normalize SegmentationLosses PyramidPooling Mean JPU Encoding BatchNorm3d SharedTensor _SyncBatchNorm BatchNorm1d BatchNorm2d download save_checkpoint mkdir check_sha1 LR_Scheduler SegmentationMetric batch_pix_accuracy batch_intersection_union get_mask_pallete _get_voc_pallete Options test Trainer parse_args download_ade download_city parse_args parse_args install_coco_api download_coco parse_args download_voc download_aug glob join dirname abspath print gradcheck repeat cuda print lau repeat cuda print cuda ldu print ldu_multioutput cuda print join print int float shape join isinstance _worker len start is_grad_enabled append range Lock list hasattr __data_parallel_replicate__ modules enumerate len print join get_path_pairs len isinstance zip print join get_path_pairs load_url ResNet load_state_dict load_url ResNet load_state_dict load ResNet load_state_dict get_model_file load ResNet load_state_dict get_model_file load ResNet load_state_dict get_model_file flip_image evaluate size resize_ pad array range norm_layer Sequential ReLU Conv2d NUM_CLASS DeepLabV3 load EncNet get_model_file load_state_dict NUM_CLASS load get_model_file load_state_dict NUM_CLASS FCN expand_dims tile expand_dims tile th_flatten index_select size detach clamp size stack type long th_gather_2d array clip concatenate view _get_vals_by_coords size copy_ stack float abs long cat detach view _get_vals_by_coords size copy_ stack float abs long cat detach reshape repeat sp_batch_map_coordinates reshape np_repeat_2d stack meshgrid type cuda range np_repeat_3d stack meshgrid type cuda range long cuda div size view th_batch_map_coordinates div size th_det_batch_map_coordinates get join remove format print check_sha1 expanduser download exists makedirs join remove endswith expanduser listdir lower load PSP get_model_file load_state_dict NUM_CLASS copyfile save makedirs get join isdir print dirname abspath expanduser makedirs sha1 makedirs sum max 
astype dtype max histogram astype fromarray lower astype putpalette range num_class model save_folder get_mask_pallete DataLoader get_segmentation_model save dataset cuda get_segmentation_dataset load_state_dict format model_zoo Compose eval resume zip enumerate load join SegmentationMetric print tqdm get_model makedirs add_argument ArgumentParser join download mkdir print join mkdir download mkdir rmtree system join download mkdir join download mkdir
# Location-aware-Upsampling [[arXiv]](https://arxiv.org/abs/1911.05250) Pytorch implementation of "Location-aware Upsampling for Semantic Segmentation" (LaU). If you are only interested in the upsampling part, please refer to [LaU.md](./LaU/README.md). ### 1. Dependencies : * **Python 3.5.6** * **PyTorch 1.0.0** * **GCC 7.3.0** ### 2. Usage : ##### 2.1 install requirements ```sh conda create -n LaU python=3.5
447
HolyBayes/VarDropPytorch
['sparse learning']
['Variational Dropout Sparsifies Deep Neural Networks']
examples/boston/boston_ard.py examples/boston/boston_baseline.py torch_ard/torch_ard.py examples/cifar/cifar_baseline.py examples/mnist/mnist_baseline.py examples/mnist/mnist_ard.py setup.py examples/cifar/cifar_ard.py torch_ard/__init__.py examples/models.py LeNet_MNIST LeNet DenseModelARD LeNetARD LeNetARD_MNIST DenseModel get_kl_weight train test get_kl_weight train test train test get_kl_weight train test ELBOLoss get_ard_reg _get_params_cnt LinearARD _get_dropped_params_cnt get_dropped_params_ratio Conv2dARD criterion model print get_kl_weight backward zero_grad mean item append step max enumerate print mean eval mkdir save get_dropped_params_ratio hasattr hasattr any hasattr
# Variational Dropout Sparsifies NN (Pytorch) [![license](https://img.shields.io/github/license/mashape/apistatus.svg?maxAge=2592000)](LICENSE) [![PyPI version](https://badge.fury.io/py/pytorch-ard.svg)](https://badge.fury.io/py/pytorch-ard) Make your neural network 300 times faster! Pytorch implementation of Variational Dropout Sparsifies Deep Neural Networks ([arxiv:1701.05369](https://arxiv.org/abs/1701.05369)). ## Description The discovered approach helps to train both convolutional and dense deep sparsified models without significant loss of quality. Additive Noise Reparameterization and the Local Reparameterization Trick discovered in the paper helps to eliminate weights prior's restrictions (<a href="https://www.codecogs.com/eqnedit.php?latex=\alpha\leq&space;1" target="_blank"><img src="https://latex.codecogs.com/gif.latex?\alpha\leq&space;1" title="\alpha\leq 1" /></a>) and achieve Automatic Relevance Determination (ARD) effect on (typically most) network's parameters. According to the original paper, authors reduced the number of parameters up to 280 times on LeNet architectures and up to 68 times on VGG-like networks with a negligible decrease of accuracy. Experiments with Boston dataset in this repository proves that: 99% of simple dense model were dropped using paper's ARD-prior without any significant loss of MSE. Moreover, this technique helps to significantly reduce overfitting and helps to not worry about model's complexity - all redundant parameters will be dropped automatically. Moreover, you can achieve any degree of regularization variating regularization factor tradeoff (see ***reg_factor*** variable in [boston_ard.py](examples/boston/boston_ard.py) and [cifar_ard.py](examples/cifar/cifar_ard.py) scripts) ## Usage ```python
448
HolyBayes/pytorch_ard
['sparse learning']
['Variational Dropout Sparsifies Deep Neural Networks']
examples/boston/boston_ard.py examples/boston/boston_baseline.py torch_ard/torch_ard.py examples/cifar/cifar_baseline.py examples/mnist/mnist_baseline.py examples/mnist/mnist_ard.py setup.py examples/cifar/cifar_ard.py torch_ard/__init__.py examples/models.py LeNet_MNIST LeNet DenseModelARD LeNetARD LeNetARD_MNIST DenseModel get_kl_weight train test get_kl_weight train test train test get_kl_weight train test ELBOLoss get_ard_reg _get_params_cnt LinearARD _get_dropped_params_cnt get_dropped_params_ratio Conv2dARD criterion model print get_kl_weight backward zero_grad mean item append step max enumerate print mean eval mkdir save get_dropped_params_ratio hasattr hasattr any hasattr
# Variational Dropout Sparsifies NN (Pytorch) [![license](https://img.shields.io/github/license/mashape/apistatus.svg?maxAge=2592000)](LICENSE) [![PyPI version](https://badge.fury.io/py/pytorch-ard.svg)](https://badge.fury.io/py/pytorch-ard) Make your neural network 300 times faster! Pytorch implementation of Variational Dropout Sparsifies Deep Neural Networks ([arxiv:1701.05369](https://arxiv.org/abs/1701.05369)). ## Description The discovered approach helps to train both convolutional and dense deep sparsified models without significant loss of quality. Additive Noise Reparameterization and the Local Reparameterization Trick discovered in the paper helps to eliminate weights prior's restrictions (<a href="https://www.codecogs.com/eqnedit.php?latex=\alpha\leq&space;1" target="_blank"><img src="https://latex.codecogs.com/gif.latex?\alpha\leq&space;1" title="\alpha\leq 1" /></a>) and achieve Automatic Relevance Determination (ARD) effect on (typically most) network's parameters. According to the original paper, authors reduced the number of parameters up to 280 times on LeNet architectures and up to 68 times on VGG-like networks with a negligible decrease of accuracy. Experiments with Boston dataset in this repository proves that: 99% of simple dense model were dropped using paper's ARD-prior without any significant loss of MSE. Moreover, this technique helps to significantly reduce overfitting and helps to not worry about model's complexity - all redundant parameters will be dropped automatically. Moreover, you can achieve any degree of regularization variating regularization factor tradeoff (see ***reg_factor*** variable in [boston_ard.py](examples/boston/boston_ard.py) and [cifar_ard.py](examples/cifar/cifar_ard.py) scripts) ## Usage ```python
449
Homagn/Few_shot_clustering
['few shot learning']
['Few shot clustering for indoor occupancy detection with extremely low-quality images from battery free cameras']
main.py infer.py create_video.py models.py main convert_frames_to_video classify_localize image_names get_names test_image classify_single_image E_step_infer tensorize inference_pair read_image E_step validate get_loss input_pairs M_step train_targets image_names get_names test_image input_pairs_labeled tensorize train read_image input_pairs_validation Siamese Flatten print sort write shape VideoWriter append imread VideoWriter_fourcc range release len convert_frames_to_video imread resize print imshow waitKey get_names repr stack array sum print reshape append to numpy list get_names repr append keys read_image print inference_pair E_step_infer str imwrite print waitKey shape rectangle imshow E_step_infer range inference_pair read_image BCELoss list image_names sample keys read_image append list image_names append keys read_image list image_names append keys read_image print reshape tolist where to numpy range len int imwrite repr append zeros argmax keys range int format get_loss model backward print zero_grad append to step range len E_step validate input_pairs M_step print train_targets input_pairs_labeled save input range state_dict E_step sum M_step print keys abs array input_pairs_validation len
Whats this about ? Detailed code on applying a novel few shot clustering technique (EM style) to cluster images using very few actual labels (few shot clustering) State of the art accuracy acheieved in ImageNet 5-way 5-shot Demonstrated application in few shot building occupancy detection Published paper here -> https://arxiv.org/abs/2008.05654 *Complete Dataset* See Additional_datasets.txt *To run* ================================ python main.py
450
HongtengXu/Relational-Factorization-Model
['graph clustering']
['Gromov-Wasserstein Factorization Models for Graph Clustering']
methods/DataIO.py cmp_algorithm_gwl2.py test_fgwf_noemb.py methods/AlgOT.py methods/FusedGromovWassersteinFactorization.py test_graph_preprocess.py test_fgwf.py test_pointset_preprocess.py dev/util.py cmp_algorithm_gwl.py methods/GromovWassersteinFramework.py find_repo_root find_data_dir find_data_root navigate_parent_dirs navigate_child_dirs makedirs optimal_transport ot_badmm ot_fgw cost_mat ot_ppa structural_data_split csv2tab_edge_files load_txt_community_file load_layer_edge_file add_noisy_edges sparse_mx_to_torch_sparse_tensor extract_graph_info extract_graph_info_dense add_noisy_nodes structure_data_split StructuralDataSampler load_multilayer_edge_file graph_data_split pointset_data_split GraphSampler structural_data_list load_txt_edge_file clustering FGWF visualize_atoms tsne_weights save_model load_model train_ssl train_usl fgwd sinkhorn_knopp_iteration bregman_admm_iteration2 orthogonal_basis node_cost node_cost_st gromov_wasserstein_discrepancy bregman_admm_iteration softmax_grad ras_algorithm update_distribution node_distribution_similarity gromov_wasserstein_barycenter gromov_wasserstein_average join abspath isfile isfile join makedirs find_data_root ones size t sqrt repeat exp ones size t range t exp range exp ones size t zeros sum range exp ones size t cost_mat zeros sum range int lil_matrix csr_matrix close find append zeros keys len iterrows lil_matrix print csr_matrix append zeros range keys read_csv len iterrows lil_matrix csr_matrix append zeros keys read_csv len lil_matrix csr_matrix close find append zeros keys len range close read_csv len lil_matrix nodes edges zeros range len nodes edges zeros range len int deepcopy list add_edge nodes choice edges range add_node len int list deepcopy add_edge nodes choice edges len data Size astype float32 from_numpy shape int64 append train_test_split range len append train_test_split range len append train_test_split range len append range len append train_test_split range len cost_mat data model 
num_atoms zero_grad max __getitem__ list tsne_weights Adam scatter ot_fgw savefig legend append ot_layers output_atoms StructuralDataSampler range format asarray ot_method close shuffle float gamma deepcopy time int backward print __len__ fgwd parameters figure train step diag data model num_atoms zero_grad unsqueeze max __getitem__ structural_data_split list predictor tsne_weights Adam title scatter ot_fgw savefig legend ot_layers append output_atoms StructuralDataSampler range CrossEntropyLoss asarray format ot_method close shuffle eval gamma Linear int time deepcopy criterion backward print fgwd parameters figure train step diag len eval numpy fit_transform T eval numpy save state_dict load load_state_dict T asarray format plot scatter savefig figure numpy fit_transform range sum format inf print abs diag ones zeros sqrt range T repeat reshape softmax_grad log matmul softmax T exp inf node_cost ones abs shape append zeros sum diag T exp inf ones abs shape zeros sum diag T exp inf ones abs matmul sum T repeat log matmul T matmul exp keys sinkhorn_knopp_iteration T inf node_cost ones node_cost_st abs extend matmul bregman_admm_iteration softmax append zeros sum update_distribution T inf csr_matrix gromov_wasserstein_average abs matmul gromov_wasserstein_discrepancy append sum keys diag len
# Relational-Factorization-Model This package includes the implementation of my AAAI 2020 work **"Gromov-Wasserstein Factorization Models for Graph Clustering"** [https://arxiv.org/abs/1911.08530] The examples include clustering two graph datasets: * AIDS (with node attributes) * IMDB-BINARY (without node attributes) More data can be found at [https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets] # Main Dependencies * matplotlib * networkx * numpy
451
HongyangGao/hConv-gPool-Net
['text categorization']
['Learning Graph Pooling and Hybrid Convolutional Operations for Text Representations']
utils/voc_util.py utils/graph_util.py network.py trainer.py utils/ops.py utils/data_util.py main.py model.py main configure Model DenseNet Trainer get_data GraphDataFlow get_node_info get_X get_edges get_Amatrix gather_idx dense dropout batch_norm graph_pool conv1d simple_conv flat load_vocab get_vocab tokenizer build_vocab flags DEFINE_float DEFINE_string DEFINE_integer train GraphDataFlow BatchData append append range product len add_edges_from Graph nodes add_nodes_from to_numpy_matrix append items sorted reshape value gather concat conv1d matmul dropout fully_connected dropout Dropout BatchNorm expand_dims squeeze items list print set dict pos_tag keys len
# hConv-gPool-Net TensorFlow implementation of Learning Graph Pooling and Hybrid Convolutional Operations for Text Representations (WWW19) Created by [Hongyang Gao](http://people.tamu.edu/~hongyang.gao/) at Texas A&M University, [Yongjun Chen](https://www.eecs.wsu.edu/~ychen3/) at Washington State University, and [Shuiwang Ji](http://people.tamu.edu/~sji/) at Texas A&M University. ## Introduction We propose novel graph pooling layer and hybrid convolutional layer for text representation learning. It has been accepted in WWW19. Detailed information about hConv-gPool-Net is provided in https://arxiv.org/abs/1901.06965. ## Citation ```
452
HongyuGong/EnrichedWordRepresentation
['word embeddings']
['Enriching Word Embeddings with Temporal and Spatial Information']
src/eval/eval_metrics.py src/eval/eval_emb.py src/preprocess/nyt_data_util.py src/preprocess/cooccur_util.py src/preprocess/vocab_util.py src/preprocess/eu_data_util.py src/train/enriched_word_emb.py src/preprocess/ice_data_util.py src/train/train.py src/config/params.py src/eval/test_ice_emb.py src/eval/test_eu_emb.py src/eval/eval_data_util.py src/eval/test_nyt_emb.py readQueryWords evalEmbAlignment evalMP evalMRR findChangingWords saveEuEmb findEuNeighbors findSpatialNeighbors saveSpatialEmb findTemporalNeighbors saveTemporalEmb countCooccur calcScale shufCooccur tokenizeText readSingaporeCorpus readCanadaCorpus readEastAfricaCorpus readUSACorpus readIndiaCorpus readIrelandCorpus readPhilippinesCorpus readJamaicaCorpus cleanText preprocessRegionTexts readHKCorpus duplicateText readText cleanText duplicateText readUrlYear genConditionalVocab genCondVocab genWordVocab EnrichedWordEmb cosSim train_embedding dict deepcopy format evalMRR print readQueryWords dict evalMP len print float format index print float format print genEnrichedEmbed format writer join str format findIntraNeighbors writerow close findInterNeighbors open sortWordChanges genEnrichedEmbed join sorted format defaultdict print dict isfile findInterNeighbors split print genEnrichedEmbed format join int format defaultdict sorted print dict isfile findInterNeighbors split join str getsize append median join str readlines calcScale close system enumerate open join system sub join tokenizeText close write open append split print join listdir cleanText print join listdir cleanText print join listdir cleanText print join listdir cleanText print join walk cleanText print join listdir cleanText print join listdir cleanText print join listdir cleanText print join listdir cleanText readSingaporeCorpus readCanadaCorpus print readEastAfricaCorpus readUSACorpus readIndiaCorpus readIrelandCorpus readPhilippinesCorpus readJamaicaCorpus readHKCorpus int str format join print system dict append getsize float round 
max range values sub print format dict int join format str print write extend close dict open readUrlYear join str write close Counter most_common open join str readlines close system open norm str format print exit system
README # 1. Download data ## Download corpora - NYT: Download articles-search-1990-2016.json to data/nyt/raw_data/ - ICE: Obtain the written corpora from nine locations (Canada, East Africa, Hong Kong, India, Ireland, Jamaica, Philippines, Singapore, USA) provided by [International Corpus of English](http://ice-corpora.net/ice/index.html). Save these corpora to data/ice/raw_data/ - eu: Put wikicorpus and reddit coprus to data/eu/cond_data ## Download Testset - NYT: Download two testsets to data/nyt/eval/ - ICE: The testset is available in data/ice/eval # 2. Preprocess
453
HongyuGong/Geometry-of-Compositionality
['word embeddings']
['Geometry of Compositionality']
pca.py compositionality_detection.py RandomizedPCA PCA _infer_dimension_ _assess_dimension_ copy log pi sum range len empty range _assess_dimension_ len
# Geometry-of-Compositionality This is Bi-Context datasets used in AAAI 2017 paper, Geometry of Compositionality. Files: (1) bicontext_English.txt: contains 104 English phrases and 208 sentences. (2) bicontext_Chinese.txt: contains 64 Chinese phrases and 128 sentences. Data Format: In each line, the target phrase and its sentence are separated by tab, i.e., "phrase sentence". The phrases listed in the datasets are polysemous, and we provide two sentences containing it. The target phrase has literal meaning (compositional meaning) in one sentence, and has idiomatic meaning (non-compositional meaning) in the other sentence. If you use our data or code, kindly cite our work:
454
HosseinHosseini/Semantic-Adversarial-Examples
['denoising']
['Semantic Adversarial Examples']
utils_cifar10.py color_shift_attack.py color_shift_attack data_normalize load_model_VGG16 cifar_data data_normalize sum str print hsv_to_rgb copy range uniform append zeros float argmax clip predict rgb_to_hsv Sequential add Dense load_weights MaxPooling2D Conv2D Activation BatchNormalization Flatten Dropout astype load_data
# Semantic Adversarial Examples Deep neural networks are known to be vulnerable to adversarial examples, i.e., images that are maliciously perturbed to fool the model. Generating adversarial examples has been mostly limited to finding small perturbations that maximize the model prediction error. Such images, however, contain artificial perturbations that make them somewhat distinguishable from natural images. This property is used by several defense methods to counter adversarial examples by applying denoising filters or training the model to be robust to small perturbations. In our paper, we introduced a new class of adversarial examples, namely *Semantic Adversarial Examples*, as images that are arbitrarily perturbed to fool the model, but in such a way that the modified image semantically represents the same object as the original image. We developed a method for generating such images, by first converting the RGB image into HSV (Hue, Saturation and Value) color space and then randomly shifting the Hue and Saturation components, while keeping the Value component the same. This approach generates mostly smooth and natural-looking images that represent the same object, but with different colors. The code implements the attack on VGG16 network and CIFAR10 dataset. The pretrained weights of VGG16 network can be downloaded from [here](https://github.com/geifmany/cifar-vgg). The maximum number of trials is set to 1000. The results show that, for about 94.5% of CIFAR10 test images, it is possible to change the colors such that the modified image is misclassified by the model. Paper can be found here: https://arxiv.org/abs/1804.00499
455
Hrezaei/SummBot
['text summarization', 'document summarization']
['Features in Extractive Supervised Single-document Summarization: Case of Persian News']
competition.py Learn.py main_doc_eng.py Features.py DummyRegressor.py Evaluate.py utilities.py IdealRegressor.py doc_per.py GenerateDataset.py doc_eng_competition/doc_eng.py Summ.py main.py DUCGenerateFeatures.py doc_eng_competition/Summ.py DUCParser.py doc_eng_competition/learn.py document_feature_set run_one_to_one_compare learn used_features add_features remove_stop_words_and_puncs build_feature_set extract_features pos_ratio_based add_features remove_stop_words build_feature_set document_feature_set RndRegressor paper_evaluate k_fold_evaluate title_similarity_score cue_words linear_poition_score inverse_sentence_freq pos_ratio_based_en inverse_sentence_freq_old tf_isf_score cosine_similarity frequency_score cosine_position_score generate_dataset IdealRegressor run_experiments_without_cross_validation evaluate_summarizer run_experiments best_rouge_f evaluate_model visualize generate_features are_similar build_feature_set are_similar_rouge remove_stop_words load_cache run_one_to_one_compare learn used_features store_cache summarize_cnn_folder extract_features parse_dataset sentences_features summ export_model encode_complex cnn_category_mapping json_read dynamic_mapping avg_bleu_score draw_bar_chart cnn_html_escape split_dataset select_features english_stemmer are_similar print_cm load_dataset write_file print_rouges stop_words cue_words are_similar_rouge json_write load_features write_dataset_csv normalize_dataset average_similarity balance_dataset read_file pasokh_category_mapping remove_stop_words_and_punc add_features build_feature_set full_preprocess read_cnn_directory write_xml document_feature_set evaluate_summarizer learn_models summ join word_tokenize isinstance stop_words hexdigest sum tag len time write_dataset_csv print build_feature_set json_write document_feature_set loads read_file FreqDist str word_tokenize remove join add_features average_similarity tag sent_tokenize copy remove_stop_words_and_puncs id avg_bleu_score append normalize hexdigest 
split cue_words len tf_isf_score frequency_score sum pos_ratio_based cosine_position_score setrecursionlimit run_experiments_without_cross_validation used_features run_experiments_without_cross_validation list format setrecursionlimit print keys print pos_tag remove_stop_words words set pos_ratio_based_en print evaluate_summarizer mean ShuffleSplit cross_validate append print_rouges print normalize_dataset k_fold_evaluate select_features SVR loads read_file load_dataset array print_rouges pos_tag sum len inverse_sentence_freq sqrt intersection len sqrt sum keys set dump write_dataset_csv print close build_feature_set open show xlabel ylabel title scatter figure legend range len join sent_tokenize remove_stop_words_and_puncs Rouge best_rouge_f summ len print print_cm confusion_matrix predict RndRegressor loads DecisionTreeRegressor IdealRegressor coef_ split_dataset len ComplementNB evaluate_summarizer SVR sum evaluate_model print_rouges format load_features get_params LinearRegression print normalize_dataset read_file array fit load_dataset_splitted RndRegressor loads DecisionTreeRegressor IdealRegressor coef_ select_features ComplementNB len SVR evaluate_summarizer sum evaluate_model print_rouges format get_params LinearRegression print normalize_dataset read_file array fit word_tokenize cue_words len tf_isf_score remove_stop_words frequency_score pos_ratio_based cosine_position_score get_scores float union intersection len FreqDist join word_tokenize normalize popitem sent_tokenize generate_features are_similar append remove_stop_words read_cnn_directory json_write learn_models load sorted used_features print read_cnn_directory write_xml summ open cache dump open load open learn_models FreqDist word_tokenize add_features copy sent_tokenize remove_stop_words_and_puncs append normalize document_feature_set range len items sorted normalize_dataset predict round append document_feature_set array pasokh_category_mapping len maps len read open close write open append 
loads read_file append cnn_category_mapping random pasokh_category_mapping append index int format print sample append range len reshape scalers index fit are_similar modified_precision SmoothingFunction len isinstance format print sum max range enumerate len print format str sorted join print close writelines append keys open split set words split dump close open load close open dump scalers open cache SnowballStemmer load cache open show arange ylabel bar title rcdefaults xticks keys values len parse replace unescape print set getroot findall listdir append len sorted json_read english_stemmer full_preprocess words wordpunct_tokenize set words wordpunct_tokenize isinstance set str SubElement Element write close append Comment ElementTree open RndRegressor export_model loads DecisionTreeRegressor IdealRegressor coef_ split_dataset len ComplementNB evaluate_summarizer SVR sum evaluate_model print_rouges format load_features get_params LinearRegression join print normalize_dataset balance_dataset read_file array fit sorted full_preprocess pop cnn_category_mapping sum max range
Summarizer Bot
456
HuQyang/learning-direction
['video prediction']
['Learning to Take Directions One Step at a Time']
utils_robot.py ops_sn.py model_robot.py custom_vgg16.py main_robot.py ops.py videomaker.py loadWeightsData custom_Vgg16 main make_project_dir DCGAN lrelu conv_mask upconv2d batch_norm flatten_fully_connected linear fc max_pool conv2d deconv2d normalize_batch_of_images conv instance_norm binary_cross_entropy_with_logits leak_relu conv_cond_concat Batch_Normalization _l2normalize sn_conv2d spectral_normed_weight sn_linear sn_batch_norm sn_lrelu sn_deconv2d scope_has_variables natural_keys get_img get_ms_img_robot save_images atoi save_img center_crop save_img_video get_local_mask_robot centeroidnp inverse_image_new transform inverse_transform imread imsave make_video_with_stroke_robot video_from_batch save_video draw_stroke rgb2bgr bgr2rgb unnormalize_video_data video_batch_side_by_side draw_stroke_robot join print abspath getfile pardir join summary_dir sorted checkpoint_dir map zfill pprint __flags makedirs join makedirs get_shape ones conv2d as_list sqrt moments moments int convolve relu reshape concat shape bias_add split relu sqrt sqrt as_list sqrt as_list while_loop reshape warn add_to_collection assign get_variable inverse_transform zeros int reshape enumerate minimum inverse_transform zeros xrange zeros xrange inverse_transform int round center_crop zeros_like imresize concatenate strip expand_dims imread sum ones int ones_like zeros randint save_video draw_stroke_robot unnormalize_video_data video_batch_side_by_side polylines int32 zip array enumerate polylines int32 zip array enumerate video_from_batch concatenate uint8 rgb2bgr astype uint8 write VideoWriter float VideoWriter_fourcc shape reshape
# learning-direction This repository contains the demo code for the paper ["Learning to Take Directions One Step at a Time"](https://arxiv.org/pdf/1812.01874.pdf) in ICPR2020. - [Conference Paper](https://arxiv.org/pdf/1812.01874.pdf) - [Supplementary Material](https://www.cvg.unibe.ch/media/publications/pdf/hu-icpr2020-supplement.pdf) # Requirements python 2.7 and tensorflow 1.8 # Usage 1. Download PUSH robot dataset https://sites.google.com/site/brainrobotdata/home/push-dataset and replace the data in the folder train_examples 2. run main.py to try the training precedure # Credits
457
HuXiaoling/TopoLoss
['semantic segmentation']
['Topology-Preserving Deep Image Segmentation']
Code/cPers/cPers/pybind11-stable/example/run_test.py Code/__init__.py Code/cPers/cPers/pybind11-stable/example/example13.py Code/cPers/cPers/pybind11-stable/example/example6.py Code/cPers/cPers/pybind11-stable/example/example8.py topoloss.py Code/cPers/cPers/pybind11-stable/example/example9.py Code/cPers/cPers/pybind11-stable/pybind11/_version.py Code/cPers/cPers/pybind11-stable/example/example12.py Code/cPers/cPers/pybind11-stable/setup.py Code/cPers/cPers/pybind11-stable/pybind11/__init__.py Code/cPers/cPers/pybind11-stable/example/example3.py Code/TDFPython/TDFMain.py Code/cPers/cPers/pybind11-stable/example/example2.py Code/cPers/cPers/pybind11-stable/example/example16.py Code/cPers/cPers/pybind11-stable/example/example11.py Code/cPers/cPers/pybind11-stable/example/example4.py topoloss_pytorch.py Code/cPers/cPers/pybind11-stable/example/example15.py Code/cPers/cPers/pybind11-stable/example/issues.py Code/cPers/cPers/pybind11-stable/example/example1.py Code/cPers/cPers/pybind11-stable/example/example5.py Code/cPers/cPers/pybind11-stable/example/example10.py Code/cPers/cPers/pybind11-stable/tools/mkdoc.py Code/cPers/cPers/pybind11-stable/example/example14.py Code/cPers/cPers/pybind11-stable/example/example7.py Code/cPers/cPers/pybind11-stable/docs/conf.py __init__.py Code/cPers/cPers/pybind11-stable/docs/benchmark.py compute_dgm_force getTopoLoss getCriticalPoints compute_dgm_force getTopoLoss getCriticalPoints generate_dummy_code_pybind11 generate_dummy_code_boost ExtendedExample12 func2 func1 func3 PyClass1 PyClass2 sanitize get_include extract ExtractionThread d sanitize_name process_comment compute_topological_loss compute_persistence_2DImg_1DHom compute_dgm_force compute_topological_grad draw_critical_pts save_pers_dgms list abs size set shape sqrt intersection zeros sum range CubicalComplex flatten persistence array cofaces_of_persistence_pairs compute_dgm_force shape getCriticalPoints zeros sum range clone numpy cuda randint range randint range print print 
str print str join sorted replace print strip len lower sub startswith range split items replace items rstrip replace endswith strip lstrip splitlines sub startswith TextWrapper fill split d sanitize_name process_comment append get_children spelling list min tolist shape pad filter cubePers array compute_dgm_force zeros compute_dgm_force plot arrow random_sample xlabel compute_dgm_force axis ylabel shape title scatter clf figure legend savefig compute_dgm_force axis tight_layout imshow title scatter clf figure legend savefig
# Topology-Preserving Deep Image Segmentation Theoretically speaking, the loss function can be incorporated into any suitable framework. The function is used in PyTorch. And there are two ways to incorporate this loss function into your framework: 1) Update the total gradient (e.g. cross entropy gradient + lambda * topo gradient) when backpropagation; 2) Our loss function is actually defined on critical pixels, and you can conduct your total loss (e.g. cross entropy loss + lambda * topo loss) based on the repository. And do the else as usual. ## Content directory Data/: data folder Results/: storing results Code/TDFPython/TDFMain.py: main python script (run it to start), results are all written in Results folder Code/TDFPython/PersistencePython.so: persistence code, wrapped as a static library (most likely you would have to recompile this library by yourself)
458
Hua-YS/Semantic-Segmentation-with-Sparse-Labels
['semantic segmentation']
['Semantic Segmentation of Remote Sensing Images with Sparse Annotations']
fcn.py train.py test.py utils.py backbone.py loss.py conv2d VGG16 fcn_festa L_festa pred_image TestModel bgr2index eval_image index2bgr dataloader img2patch conv2d Model Input VGG16 output concat reduce_max flatten int_shape gather repeat_elements multiply transpose matmul cast expand_dims sparse_to_dense square stack tile int norm constant reshape reduce_mean reduce_min str uint8 print float32 shape img2patch range len int uint8 float32 bgr2index shape floor append imread range shape shape zeros reshape delete float32 confusion_matrix shape sum range len int shape floor zeros imread range predict img2patch str imwrite print pred_image mean bgr2index mkdir index2bgr zeros imread argmax eval_image range len
# Semantic-Segmentation-with-Sparse-Labels The labels and codes for [Semantic Segmentation of Remote Sensing Images with Sparse Annotations](https://arxiv.org/pdf/2101.03492.pdf). ## Data We provid three types of sparse annotations: polygon, scribble, and point. <img src="./data_example.png" width = "640" height = "380" alt="example" align=center /> ## Usage 1) install dependencies in ```requirements.txt``` 2) download and unzip [data](https://drive.google.com/file/d/1E4bhx3H6P8jTdOQG6hS14G_gBBhvwzWU/view?usp=sharing) in the folder ```data```. The directory structure should be as follows: ``` path/to/data/
459
HuangLongji/GCN-TCN-long-term-receptive-field-
['traffic prediction']
['Incrementally Improving Graph WaveNet Performance on Traffic Prediction']
train.py generate_training_data.py test.py util.py engine.py model.py trainer generate_train_val_test generate_graph_seq2seq_io_data linear gcn nconv gwnet main main load_pickle calculate_scaled_laplacian calculate_normalized_laplacian load_adj masked_mae metric masked_rmse asym_adj DataLoader masked_mape sym_adj load_dataset StandardScaler masked_mse concatenate abs transpose min astype range shape stack append expand_dims dayofweek max timedelta64 values join arange concatenate print sort generate_graph_seq2seq_io_data read_hdf traffic_df_filename output_dir y_start round savez_compressed data gwnet batch_size nodevec2 adjdata numpy device inverse_transform DataFrame max heatmap load_adj transpose squeeze metric savefig load_state_dict load_dataset append to adjtype range cat format randomadj dropout relu get_iterator mean eval softmax num_nodes checkpoint enumerate load print to_csv nodevec1 aptonly mm in_dim gcn_bool weight_decay save round nhid seq_length str addaptadj argmin state_dict trainer shuffle expid train time learning_rate epochs diags flatten coo_matrix sum array flatten coo_matrix diags diags tocoo flatten coo_matrix eye sum array calculate_normalized_laplacian csr_matrix reduce identity shape eigsh load_pickle load join DataLoader transform StandardScaler isnan float zeros_like where zeros_like where isnan float abs zeros_like where isnan float abs item
HuangLongji/GCN-TCN-long-term-receptive-field-
460
HuangLongji/graph-WaveNet
['traffic prediction']
['Graph WaveNet for Deep Spatial-Temporal Graph Modeling', 'Incrementally Improving Graph WaveNet Performance on Traffic Prediction']
train.py generate_training_data.py test.py util.py engine.py model.py trainer generate_train_val_test generate_graph_seq2seq_io_data linear gcn nconv gwnet main main load_pickle calculate_scaled_laplacian calculate_normalized_laplacian load_adj masked_mae metric masked_rmse asym_adj DataLoader masked_mape sym_adj load_dataset StandardScaler masked_mse concatenate abs transpose min astype range shape stack append expand_dims dayofweek max timedelta64 values join arange concatenate print sort generate_graph_seq2seq_io_data read_hdf traffic_df_filename output_dir y_start round savez_compressed data gwnet batch_size nodevec2 adjdata numpy device inverse_transform DataFrame max heatmap load_adj transpose squeeze metric savefig load_state_dict load_dataset append to adjtype range cat format randomadj dropout relu get_iterator mean eval softmax num_nodes checkpoint enumerate load print to_csv nodevec1 aptonly mm in_dim gcn_bool weight_decay save round nhid seq_length str addaptadj argmin state_dict trainer shuffle expid train time learning_rate epochs diags flatten coo_matrix sum array flatten coo_matrix diags diags tocoo flatten coo_matrix eye sum array calculate_normalized_laplacian csr_matrix reduce identity shape eigsh load_pickle load join DataLoader transform StandardScaler isnan float zeros_like where zeros_like where isnan float abs zeros_like where isnan float abs item
HuangLongji/graph-WaveNet
461
HuangLongji/metapath-based-GAT
['network embedding']
['HAHE: Hierarchical Attentive Heterogeneous Information Network Embedding']
utils/process.py models/gat.py utils/process_ppi.py data/DBLP_four_area/hin2kg.py utils/layers.py data/DBLP_four_area/modify_term.py data/exp.py models/__init__.py jhyexp.py preprocess_dblp.py models/base_gattn.py data/DBLP_four_area/untitled0.py ex_acm3025.py sample_mask load_data_dblp my_Kmeans my_KNN split_idx my_Kmeans BaseGAttN HeteGAT_multi HeteGAT HeteGAT_no_coef GAT SimpleAttLayer attn_head attn_head_const_1 sp_attn_head preprocess_features normalize_adj sparse_to_tuple adj_to_bias sample_mask parse_index_file load_data preprocess_adj standardize_data find_split dfs_split test run_dfs process_p2p zeros format print sample_mask shape zeros loadmat int sum permutation format print squeeze len KNeighborsClassifier append f1_score argmax array range predict fit sum format normalized_mutual_info_score print KMeans squeeze adjusted_rand_score len append argmax array range predict fit int format print append argmax range len labels_ silhouette_score value isinstance Variable concat transpose reduce_sum softmax expand_dims random_normal tensordot matmul shape eye empty range append int strip open from_dict_of_lists tuple parse_index_file vstack max list tolist shape range format lil_matrix adjacency_matrix tolil sort print min sample_mask zeros len to_tuple range isinstance len mean todense std diags flatten dot sum array diags flatten coo_matrix sum array normalize_adj eye full range run_dfs range print range max count list todense find_split tolist set_trace len identity shape append range node_link_graph test adjacency_matrix empty enumerate load items int dfs_split print tolil argwhere transform zeros StandardScaler array fit
HuangLongji/metapath-based-GAT
462
HuangXiaoquan127/Google-Landmarks-Retrieval-and-Recognition-2019-19h-Place-Solution
['image retrieval']
['Fine-tuning CNN Image Retrieval with No Human Annotation']
cirtorch/examples/train_cleaned.py cirtorch/for_experiment/view_train_landmarks.py cirtorch/for_experiment/test/DISPLAY_test.py cirtorch/for_experiment/extract_src_mismatched_img.py cirtorch/for_experiment/missing_img_reload.py cirtorch/networks/imageretrievalnet.py cirtorch/for_experiment/test/faiss_on_GPU_test.py cirtorch/for_experiment/train_cleaned_plot.py cirtorch/examples/recognition.py cirtorch/datasets/landmarks_downloader.py cirtorch/for_experiment/clear_no_exist.py cirtorch/for_experiment/log_simplify.py cirtorch/for_experiment/test/DELF/delf_extract_features.py cirtorch/for_experiment/test/DELF/fea_match_time_test.py cirtorch/for_experiment/old/train_val_set_pkl.py cirtorch/datasets/genericdataset.py cirtorch/for_experiment/test/QE_test.py cirtorch/for_experiment/test/val_pkl_2_csv.py cirtorch/for_experiment/gen_query_bbx_img.py cirtorch/datasets/testdataset.py cirtorch/examples/test.py cirtorch/for_experiment/test/pandas_sort_test.py cirtorch/datasets/generate_train_val_test_set.py cirtorch/for_experiment/extract_key_train_info.py cirtorch/for_experiment/list_folder_file.py cirtorch/datasets/datahelpers.py cirtorch/for_experiment/GLD1_pairs_statistics.py cirtorch/for_experiment/train_set_info.py cirtorch/for_experiment/test/spatial_verification.py cirtorch/layers/loss.py cirtorch/for_experiment/test/DELF/extractor.py cirtorch/examples/train.py cirtorch/examples/predict.py cirtorch/for_experiment/GL2_test_pkl.py cirtorch/for_experiment/test/extract_pre_compu_pacw.py cirtorch/for_experiment/smlyaka_GLD2_EDA.py cirtorch/for_experiment/test/faiss_test.py cirtorch/for_experiment/test/multi_GPU_test.py cirtorch/utils/whiten.py cirtorch/for_experiment/old/GL2_test_pkl.py cirtorch/for_experiment/test/extract_file.py cirtorch/for_experiment/test/save_submission_test.py cirtorch/utils/download.py cirtorch/for_experiment/old/clear_no_exist.py cirtorch/layers/functional.py cirtorch/layers/pooling.py cirtorch/for_experiment/train_m2_pkl.py 
cirtorch/for_experiment/test/plt_show.py cirtorch/utils/general.py cirtorch/for_experiment/test/load_mat_test.py cirtorch/examples/predict_old.py cirtorch/datasets/trainset_clear.py cirtorch/datasets/traindataset.py cirtorch/utils/diffussion.py cirtorch/__init__.py cirtorch/for_experiment/train_log_cat.py cirtorch/for_experiment/test/DELF/match_images.py cirtorch/for_experiment/merge_train_feats.py cirtorch/for_experiment/test/csv_test.py cirtorch/examples/test_old.py cirtorch/for_experiment/test/diffussion_test.py cirtorch/for_experiment/test/knn_1000.py cirtorch/layers/normalization.py cirtorch/for_experiment/test/resave_state.py cirtorch/utils/evaluate.py cirtorch/for_experiment/copy_val_set.py imresize ParseData collate_tuples clear_no_exist default_loader cid2filename accimage_loader flip pil_loader gen_train_val_test_pkl ImagesFromList ImagesFromDataList Run ParseData DownloadImage config_qimname configdataset config_imname TuplesDataset main gen_query_bbx_img main mismatched_img_show_save validate AverageMeter test save_checkpoint set_batchnorm_eval main train validate AverageMeter test save_checkpoint set_batchnorm_eval main train gen_query_bbx_img clear_no_exist DataParallelModel main _ReadImageList ResizeImage MakeExtractor main l2n mac rmac spoc boostgem multipool contrastive_loss roipool gem powerlaw learnpool ContrastiveLoss PowerLaw L2N RMAC MAC SPoC LearnPool MultiPool Rpool BoostGeM GeM ConvPool extract_ss ImageRetrievalNet extract_vectors init_network extract_ssl extract_regional_vectors extract_ms extract_local_vectors extract_ssr sim_kernel cg_diffusion dfs_trunk topK_W find_trunc_graph normalize_connection_graph fsr_rankR download_train download_test compute_ap_at_k compute_ap compute_map compute_map_and_print get_data_root get_root htime sha256_hash pcawhitenlearn cholesky whitenlearn whitenapply thumbnail ANTIALIAS size view reader open writer pop format reader print writerow __contains__ close listdir enumerate open permutation DataFrame max 
round open writer seed list __contains__ copyfile append sort_values range get_data_root dump format close astype listdir pop join reader print writerow to_numpy len join read BytesIO print convert urlopen save exists open print ParseData exit map mkdir Pool join lower format len htime init_network search gpu_id clear_no_exist whitening save whitenapply cuda open writer list len add load_state_dict append parse_args sum range get get_data_root dump format T Compose close hstack astype choice load_url network_path eval pcawhitenlearn network_offtheshelf Normalize item multiscale enumerate load join time int norm reader collect IndexFlatL2 print meta_repr writerow reshape isfile split join format Draw print len rectangle save split range open axis whitening open str list subplot imshow title savefig imread range gen_query_bbx_img format close eval mkdir multiscale enumerate join print write subplots_adjust array output_path len compute_map_and_print extract_vectors whitenlearn mismatched_img_show_save image_size dot argsort configdataset numpy pool TuplesDataset SGD pretrained DataLoader save_checkpoint arch seed exp Adam epochs regional manual_seed_all val multi_layer_cat directory lr resume manual_seed local_whitening ExponentialLR training_dataset min train step makedirs update time format criterion backward print squeeze AverageMeter zero_grad apply create_epoch_tuples item step cuda range enumerate len update time format criterion print squeeze AverageMeter eval create_epoch_tuples item cuda range enumerate len htime compute_map_and_print extract_vectors whitenapply cuda whitenlearn get_data_root format Compose eval Normalize join time T print dot argsort configdataset test_whiten numpy split copyfile join save eval __name__ DataFrame to_csv update get_data_root configdataset ParseData len isfile range tolist array read_csv DelfConfig list_images_path set_verbosity MakeDirs info output_dir _ReadImageList INFO min_image_size fromarray shape array resize 
max_image_size max load reshape DelfFeaturePostProcessing model_path get_tensor_by_name subplots image_1_path plot_matches features_2_path axis query AffineTransform column_stack set_title savefig imread ReadFromFile ransac features_1_path image_2_path output_image cKDTree array tuple permute append cuda cat l2n mac spoc append gem list max_pool2d size min tolist expand_as floor item Tensor abs max range int size min tolist unsqueeze floor item append Tensor abs max range eps clamp size pow sqrt permute sum children ReLU constant_ list basename ImageRetrievalNet load_state_dict xavier_normal_ append get get_data_root format load_url startswith keys Linear pop join print bias Rpool weight eval cuda ImagesFromList DataLoader pow zeros clone interpolate eval cuda ImagesFromList DataLoader eval cuda ImagesFromList DataLoader csr_matrix reshape sqrt array diagonal sum diags argsort T range minimum list range extend set T time sim_kernel setdiff1d arange concatenate csr_matrix print minres topK_W argsort find_trunc_graph eye append normalize_connection_graph range len concatenate reshape minres argsort eye append range T concatenate csr_matrix reshape dot argsort append sum diags range eigsh join format print len system mkdir range makedirs join format print len system mkdir range makedirs float arange len arange zeros float range len compute_ap max arange compute_ap_at_k min append zeros float sum array len format compute_map concatenate print around startswith append range len round sha256 dot norm T eig inv mean dot sqrt diag T inv eig mean dot cholesky eye
![](https://storage.googleapis.com/kaggle-competitions/kaggle/11838/logos/header.png?t=2019-03-29-00-01-45) # Kaggle - Google Landmark Retrieval 2019 19th Place Solution 本项目包含了[谷歌地标检索](https://www.kaggle.com/c/landmark-retrieval-2019)和[识别](https://www.kaggle.com/c/landmark-recognition-2019)竞赛中使用的源码: 1. 任务,给定10万张的query set,从包含10万个类70万张的index set中查找同一地标类型的图像; 2. 数据,对包含1.5万个类120万张图像的[Google Landmarks dataset v1](https://www.kaggle.com/c/landmark-recognition-challenge)(GLDv1)进行qurey和positive匹配对提取,最后包含31万个匹配对,119万张图; 3. 模型,以ResNet101为backbone,GeM通用平均池化做全局聚合,接FC进行PAC学习,最后以Siamese架构结合contrastive loss进行微调训练; 4. 提升,对提取的特征再进行PCA whitening,带权重的query expansion,预计算PCA作为FC参数初始化,多层特征图池化串联; 5. 成果,在地标检索及地标识别比赛中各获得银牌,地标检索排名19/144,Top14%,地标识别排名47/281,Top17%。 使用的框架如下: <img src="http://cmp.felk.cvut.cz/cnnimageretrieval/img/cnnimageretrieval_network_medium.png" width=\textwidth/>
463
HugoTessier-lab/SWD
['network pruning']
['Rethinking Weight Decay For Efficient Neural Network Pruning']
utils/mobilenet.py utils/regularization_and_pruning.py experiment.py utils/lenet5.py utils/datasets.py utils/parse_arguments.py utils/resnet.py utils/checkpoint.py train_model compute_migration accuracy_top5 test_model l2_norm display_progress apply_mask get_a Checkpoint load_mnist load_imagenet get_dataset load_cifar100 load_cifar10 F5 C1 C3 F4 LeNet5 C2 InvertedResidual _make_divisible MobileNetV2Model ConvBNReLU mobilenet_v2 MobileNetV2 parse_arguments get_mask_function find_ths_by_dichotomy get_structuredF_mask Regularization get_structured_mask get_unstructured_mask resnet20 Bottleneck get_block_params_count resnet34 resnet152 BasicBlock resnet32 resnet110 Conv resnet_model resnet101 resnet18 ResNet resnet50 resnet44 resnet56 ResNetModel get_block_flops_count Linear write eval zip parameters a_min a_max log zip save_results model batch_size zero_grad save get_a argmax compute_migration test_model apply_mask set_a cross_entropy epoch display_progress enumerate time backward print get_mask regularization write train step len MNIST DataLoader Compose dataset_path DataLoader Compose dataset_path CIFAR10 DataLoader Compose CIFAR100 dataset_path join ImageNet dataset_path DataLoader Normalize int max MobileNetV2 add_argument ArgumentParser compute_params_count cat len list replace find_ths_by_dichotomy named_parameters append float named_modules list replace view find_ths_by_dichotomy named_parameters append float len print shortcut_conv sum module isinstance isinstance sum module images_dims shortcut_conv
# SWD ## How to use : Run (for example): python experiment.py --a 10 --target 950 --epochs 30 --ft_epochs 15 ... (list of parameters) to launch the training. The process can be stopped and resumed later. ## Results : Results are stored into two forms : 1) *.chk, which store the trained model after the training and between each pruning step, as well as the results 2) *.txt which store the results for each epoch.
464
HuiChen24/IMRAM
['cross modal retrieval']
['IMRAM: Iterative Matching with Recurrent Attention Memory for Cross-Modal Image-Text Retrieval']
train_gpus.py data.py evaluation.py opts.py test_gpus.py vocab.py model.py get_loaders get_precomp_loader get_test_loader collate_fn PrecompDataset i2t encode_data shard_xattn_Text_IMRAM shard_xattn_Full_IMRAM shard_xattn_Image_IMRAM shard_xattn_t2i shard_xattn t2i logging_func evalrank shard_xattn_i2t EncoderImageWeightNormPrecomp EncoderImagePrecomp ContrastiveLoss EncoderImage SCAN func_attention l2norm cosine_similarity_a2a EncoderText cosine_similarity l1norm parse_opt main main save_checkpoint adjust_learning_rate logging_func Vocabulary from_txt serialize_vocab deserialize_vocab main build_vocab list LongTensor sort stack zip long enumerate DataLoader PrecompDataset data_path join get_precomp_loader data_path join get_precomp_loader close print eval copy forward_emb zeros dataset max enumerate len shard_xattn_Text_IMRAM shard_xattn_Full_IMRAM shard_xattn_Image_IMRAM int iteration_step xattn_score_Text_IMRAM print numpy cuda range xattn_score_Image_IMRAM len int iteration_step xattn_score_Text_IMRAM print numpy cuda range len int iteration_step print numpy cuda range xattn_score_Image_IMRAM len int print xattn_score_t2i zeros numpy cuda range len int print xattn_score_i2t zeros numpy cuda range len median mean floor zeros range len median T mean floor zeros range len div sum sqrt div EncoderImageWeightNormPrecomp EncoderImagePrecomp l1norm_d bmm view transpose contiguous l2norm clone softmax bmm transpose unsqueeze clamp norm sum parse_args add_argument ArgumentParser workers vocab_path batch_size DataParallel get_test_loader ArgumentParser data_name cuda str SCAN set_device load_state_dict parse_args module gpuid deserialize_vocab evalrank model_path load join fold5 print add_argument data_path split len model clip_grad_norm_ zero_grad adjust_learning_rate save_checkpoint ContrastiveLoss max Adam MSELoss logger_name grad_clip range get_loaders format parse_opt resume logging_func isfile num_epochs enumerate time criterion backward makedirs parameters repeat 
model_name train step join format print copyfile logging_func save lr_update param_groups learning_rate print word2idx idx idx2word Vocabulary update join word_tokenize decode Vocabulary print add_word from_txt Counter enumerate serialize_vocab build_vocab
## Requirements and Installation We recommended the following dependencies. * Python 3.6 * [PyTorch](http://pytorch.org/) 1.0 * [NumPy](http://www.numpy.org/) * Punkt Sentence Tokenizer: ```python import nltk nltk.download() > d punkt
465
HuiZeng/Grid-Anchor-based-Image-Cropping-Pytorch
['image cropping']
['Grid Anchor based Image Cropping: A New Benchmark and An Efficient Model']
roi_align/_ext/roi_align/__init__.py thop/utils.py croppingDataset.py demo_eval.py thop/__init__.py TrainModel.py ShuffleNetV2.py roi_align/modules/roi_align.py rod_align/_ext/rod_align/__init__.py mobilenetv2.py rod_align/functions/rod_align.py augmentations.py thop/count_hooks.py TestAccuracy.py roi_align/build.py rod_align/modules/rod_align.py rod_align/build.py thop/profile.py roi_align/functions/roi_align.py croppingModel.py SwapChannels ToTensor ToAbsoluteCoords RandomBrightness PhotometricDistort RandomSaturation Resize RandomSampleCrop ToPercentCoords intersect Lambda Compose ConvertColor CropAugmentation Expand SubtractMeans jaccard_numpy RandomHue ConvertFromInts RandomMirror RandomContrast ToCV2Image RandomLightingNoise GAICD TransformFunctionTest generate_bboxes_1_1 generate_bboxes_16_9 generate_bboxes_4_3 setup_test_dataset generate_bboxes TransformFunction crop_model_multi_scale_shared shufflenetv2_base fc_layers build_crop_model vgg_base crop_model_single_scale weights_init resnet50_base xavier mobilenetv2_base crop_model_multi_scale_individual str2bool test conv_1x1_bn conv_3x3_bn InvertedResidual mobilenetv2 _make_divisible MobileNetV2 test train test RoDAlignFunction RoDAlign RoDAlignMax RoDAlignAvg _import_symbols RoIAlignFunction RoIAlign RoIAlignAvg RoIAlignMax _import_symbols count_adap_avgpool count_maxpool count_convtranspose2d count_softmax count_avgpool count_relu count_convNd count_conv2d count_linear count_bn count_adap_maxpool profile clever_format minimum clip maximum intersect append list range append list range append list range append list range Sequential ReLU Conv2d xavier_uniform_ data isinstance Conv2d zero_ xavier imwrite batch_size build_crop_model DataParallel DataLoader output_dir cuda str sorted squeeze load_state_dict append range eval net enumerate load time net_path Variable print Tensor len int max sum sort write index numpy save_folder zero_grad save cuda squeeze repr append range state_dict shuffle test net enumerate 
backward Variable write Tensor step len dir _wrap_function getattr append callable groups Tensor nelement in_channels kernel_size size out_channels groups in_channels Tensor kernel_size size out_channels groups in_channels nelement Tensor Tensor numel Tensor numel Tensor size Tensor prod numel Tensor squeeze prod numel Tensor prod numel Tensor squeeze prod numel Tensor in_features numel remove training apply modules item device to
# Grid-Anchor-based-Image-Cropping-Pytorch The extension of this work has been accepted by TPAMI. Please read the [paper](https://www4.comp.polyu.edu.hk/~cslzhang/paper/GAIC-PAMI.pdf) for details. ### Requirements python 2.7, pytorch 0.4.1, numpy, cv2, scipy. ### Usage 1. Download the source code, the datasets [[conference version](https://drive.google.com/open?id=1X9xK5O9cx4_MvDkWAs5wVuM-mPWINaqa)], [[journal version](https://drive.google.com/file/d/1tDdQqDe8dMoMIVi9Z0WWI5vtRViy01nR/view?usp=sharing)] and the pretrained models [[conference version](https://drive.google.com/open?id=1kaNWvfIdtbh2GIPNSWXdxqyS-d2DR1F3)] [[journal version](https://drive.google.com/file/d/1KWYQdL6R5hmOC9toTymbMORZDThpiEW4/view?usp=sharing)] 2. Run ``TrainModel.py`` to train a new model on our dataset or Run ``demo_eval.py`` to test the pretrained model on any images. 3. To change the aspect ratio of generated crops, please change the ``generate_bboxes`` function in ``croppingDataset.py`` (line 115). ### Annotation software The executable annotation software can be found [here](https://github.com/lld533/Grid-Anchor-based-Image-Cropping-Pytorch).
466
HumanCompatibleAI/derail
['imitation learning']
['DERAIL: Diagnostic Environments for Reward And Imitation Learning']
src/derail/__init__.py src/derail/algorithms/preferences.py src/derail/plot.py src/derail/algorithms/fu.py src/derail/run.py src/derail/algorithms/imitation_adversarial.py setup.py src/derail/algorithms/__init__.py src/derail/utils.py src/derail/algorithms/tabular_irl.py src/derail/algorithms/stable_baselines.py src/derail/experts.py get_parabola_expert get_early_term_pos_expert get_noisyobs_expert get_largest_sum_expert get_proc_goal_expert get_insertionsort_expert hard_value_iteration hard_mdp_expert soft_mdp_expert get_selectionsort_expert get_early_term_neg_expert soft_value_iteration annotate_heatmap make_boxplot process_results get_full_kwargs get_type_kwargs get_drlhp_kwargs bootstrap_ci new_draw_boxplot setup_styles new_bxp main normalize get_noise_kwargs heatmap empirical_ci bootstrap eval_algorithms run_experiment is_compatible expert_algo random_algo rl_algo get_timestamp get_full_env_name SimpleTask get_horizon get_transition_matrix sample_trajectories fixpoint get_random_policy get_reward_matrix get_initial_state_dist get_raw_env LightweightRLModel tabular_eval_policy get_internal_env monte_carlo_eval_policy make_egreedy get_ppo train_rl force_shape sample_distribution get_raw_policy render_trajectories to_rllab_trajectories fu_irl adversarial_learning build_mlp get_eval_trajectories_fn get_obs_acts_next_obs eval_fn_from_reward get_reward_fn_from_model preferences get_segments RunningMeanVar sequential reward_eval_path_fn get_expert_dataset behavioral_cloning stable_gail maximum_entropy_irl occupancy_match_irl LinearRewardModel max_ent_q_update_fn compute_occupancy_measure mce_q_update_fn get_raw_env get_raw_env LightweightRLModel get_horizon get_reward_matrix get_transition_matrix force_shape reversed zeros empty max range n exp get_horizon get_reward_matrix get_transition_matrix force_shape logsumexp reversed zeros empty range n pop restyle_boxplot asarray add_legend_data hue_offsets boxplot remove_na hue_names enumerate plot_data len items 
new_vertical arange make_axes_locatable set_xticklabels set_yticklabels get_yticklabels set_yticks get_xticklabels grid colorbar add_axes imshow set_visible set_xticks setp gca tick_params update norm isinstance valfmt text get_array dict StrMethodFormatter masked_invalid append max range bootstrap_ci clf values show set_xlabel get_label ylim savefig append get swarmplot unique get_color boxplot get_cmap enumerate join text breakpoint set_ylabel to_dict makedirs join partial map copy apply array zip keys read_csv parse_args process_results add_argument ArgumentParser train_rl print run run_experiment print sort copy get_timestamp log_result get_experiments makedirs hasattr f hasattr get_random_policy sample_trajectories sum len reward_fn hasattr product empty range n list hasattr action_probability env empty range n evaluate_policy get_horizon force_shape get_reward_matrix get_transition_matrix get_initial_state_dist get_raw_policy shape sum range generate_trajectories print render reset sleep step range predict full get_raw_env n policy_matrix hasattr ones_like get_random_policy learn policy_fn empty stdout to_rllab_trajectories GaussianMLPPolicy int isinstance AIRLStateAction LightweightRLModel action_space IRLTRPO sample_trajectories devnull CategoricalMLPPolicy GAIL GymEnv train get_raw_env TfEnv open RewardVecEnvWrapper DiscrimNetAIRL sample_trajectories run ReplayBuffer pop_transitions DiscrimNetGAIL set_env flatten_trajectories ceil BasicShapedRewardNet range partial learn concatenate action_probability from_data sample int policy_fn disc_loss minimize get_default_session reshape action_space observation_space reduce_mean BufferingWrapper reward_train global_variables_initializer store RewardVecEnvWrapper evaluate_trajectories_fn get_segments concat get_horizon PPO2 sample_trajectories reward_output_train stop_gradient reward_eval_path_fn run placeholder reduce_sum RunningMeanVar ceil append BasicShapedRewardNet range learn concatenate 
get_reward_fn_from_model log_softmax sequential stack build_mlp get_eval_trajectories_fn int minimize get_default_session reshape action_space observation_space AdamOptimizer reduce_mean global_variables_initializer OrderedDict Dense enumerate layer values concatenate reshape reduce shape gcd sum isinstance state_from_ob hasattr get_raw_env generate_expert_traj get_horizon ExpertDataset get_expert_dataset get_ppo pretrain get_expert_dataset learn GAIL occupancy_match_irl obs from_matrix identity LinearRewardModel get_horizon get_initial_state_dist sample_trajectories zeros n zeros range einsum norm inf get_rewards_and_grads abs shape compute_policy _w zeros compute_occupancy_measure max len
Supporting code for the paper [DERAIL: Diagnostic Environments for Reward and Imitation Learning](https://arxiv.org/abs/2012.01365). The environments are available at the [HumanCompatibleAI/seals](https://github.com/HumanCompatibleAI/seals/) repo. This repo contains the rest of the code for running the experiments in the paper. To reproduce the results: ```bash git clone https://github.com/HumanCompatibleAI/derail cd derail pip install . python -m derail.run -t 500000 -n 15 -p python -m derail.plot -f results/last.csv ```
467
Humboldt-WI/response-dependent-costs
['causal inference']
['Targeting customers under response-dependent costs']
train_treatment_models.py log_result predict_treatment_models get_train_validation_test_split myXBCF deque list rotate concatenate StratifiedKFold best_params_ SingleModel GradientBoostingRegressor list predict_value grid_search_cv ones tolist LogisticRegression GradientBoostingClassifier sum fit_transform grid_search_cv_hurdle predict ColumnTransformer predict_hurdle mean TwoModelRegressor HurdleModel myXBCF int Ridge set_params GridSearchCV print DoubleRobustTransformer split transform fit append
# Customer Targeting under Response-Depedent Costs Code accompanying the paper "Customer targeting under response-dependent costs" on [arXiv](https://arxiv.org/abs/2003.06271). Unfortunately, we are not able to share the proprietary data due to concerns for customer privacy and restriction by the industry partner The code makes use of our more general helper library for uplift modeling: https://github.com/johaupt/treatment-learn
468
Hyeongmin-Cho/Efficient-Data-Quality-Measures-for-High-Dimensional-Classification-Data
['speech recognition']
['Data Quality Measures and Efficient Evaluation Algorithms for Large-Scale High-Dimensional Data']
measure.py utils.py run.py dataset.py dataloader get_lda_object StratifiedSampler DatasetQualityEval load_data save_list load_list to_arff unpickle join arange print reshape load_imagenet32 load_mnist shuffle load_linnaeus shape load_cifar10 load_caltech256 dataloader load_list load_svhn DatasetQualityEval DataLoader StratifiedSampler round max seed list TensorDataset append set mean enumerate int items print min load_data Tensor std len print reshape tolist write close tqdm unique range enumerate open
# Efficient-Data-Quality-Measures-for-High-Dimensional-Classification-Data Implementation of ["Data Quality Measures and Efficient Evaluation Algorithms for Large-Scale High-Dimensional Data"](https://www.mdpi.com/2076-3417/11/2/472/html). ## Directory tree - dataset : Dataset folder ``` Project |--- dataset | |--- mnist | |--- cifar10 | |--- STL10
469
HyeonwooNoh/DeconvNet
['semantic segmentation']
['Learning Deconvolution Network for Semantic Segmentation']
training/003_make_bn_layer_testable/BN_make_INFERENCE_script.py
## DeconvNet: Learning Deconvolution Network for Semantic Segmentation Created by [Hyeonwoo Noh](http://cvlab.postech.ac.kr/~hyeonwoonoh/), [Seunghoon Hong](http://cvlab.postech.ac.kr/~maga33/) and [Bohyung Han](http://cvlab.postech.ac.kr/~bhhan/) at POSTECH Acknowledgements: Thanks to Yangqing Jia and the BVLC team for creating Caffe. ### Introduction DeconvNet is state-of-the-art semantic segmentation system that combines bottom-up region proposals with multi-layer decovolution network. Detailed description of the system will be provided by our technical report [arXiv tech report] http://arxiv.org/abs/1505.04366 ### Citation If you're using this code in a publication, please cite our papers. @article{noh2015learning, title={Learning Deconvolution Network for Semantic Segmentation},
470
HzDmS/gaze_redirection
['gaze estimation']
['Photo-Realistic Monocular Gaze Redirection Using Generative Adversarial Networks']
src/data_loader.py src/archs.py utils/ops.py src/model.py main.py generator vgg_16 discriminator ImageData Model lrelu tanh gram relu l1_loss conv2d deconv2d angular2cart instance_norm content_loss angular_error l2_loss style_loss image_size reshape concat tile constant_initializer random_normal_initializer conv2d_transpose constant_initializer random_normal_initializer reduce_mean abs reduce_mean square reduce_sum size split size split to_float reshape matmul shape stack cos pi sin multiply divide square sqrt angular2cart sum clip
# Photo-Realistic Monocular Gaze Redirection Using Generative Adversarial Networks [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [[Paper](https://arxiv.org/abs/1903.12530)] [[Video](https://www.youtube.com/watch?v=oHx88bHaM18)] Authors: **Zhe He**, Adrian Spurr, Xucong Zhang, Otmar Hilliges Contact: [email protected] <p align="center"> <img src="/imgs/framework.jpg"> </p> The following gifs are made of images generated by our method. For each GIF, the input is a still image. <p align="center">
471
HzFu/AGNet
['semantic segmentation']
['Attention Guided Network for Retinal Image Segmentation']
code/main.py code/test.py code/guided_filter_pytorch/box_filter.py code/core/models.py code/guided_filter_pytorch/guided_filter_attention.py code/core/utils.py code/core/blocks.py ConvBnRelu2d M_Decoder_my_10 StackDecoder M_Conv M_Decoder M_Encoder StackEncoder GridAttentionBlock AG_Net get_data get_model get_img_list calculate_Accuracy diff_y BoxFilter diff_x FastGuidedFilter_attention join float transpose get_label shape stack resize append imread cuda range len asarray astype float32 mean sum cat cat
# Attention Guided Network for Retinal Image Segmentation (AG-Net) The code of "Attention Guided Network for Retinal Image Segmentation" in MICCAI 2019. - The code is based on: Python 2.7 + pytorch 0.4.0. - You can run <AG_Net\_path>/code/test.py for testing any new image directly. - You can run <AG_Net\_path>/code/main.py for training a new model. ## Quick usage on your data: - Put your desired file in "\<AG\_Net\_path\>/data/\<your\_file\_name\>". - Put the images in "\<AG\_Net\_path\>/data/\<your\_file\_name\>/images".
472
HzFu/MNet_DeepCDR
['medical image segmentation', 'cell segmentation', 'semantic segmentation']
['Attention Guided Network for Retinal Image Segmentation', 'CE-Net: Context Encoder Network for 2D Medical Image Segmentation']
mnet_deep_cdr/Model_MNet.py mnet_deep_cdr/Step_3_MNet_test.py setup.py mnet_deep_cdr/Model_DiscSeg.py mnet_deep_cdr/__init__.py mnet_deep_cdr/Step_2_MNet_train.py mnet_deep_cdr/mnet_utils.py mnet_deep_cdr/Step_1_Disc_Crop.py files_with_ext dice_coef2 pro_process dice_coef_loss BW_img disc_crop dice_coef mk_dir train_loader DeepModel DeepModel get_logger resize astype array join asarray load_img reshape astype shape enumerate label argmax regionprops flatten sum dice_coef int abs array zeros makedirs print len average Input concatenate _dictConfig
mnet_deep_cdr ============= ![Python version range](https://img.shields.io/badge/python-2.7%E2%80%933.6+-blue.svg) **Code for TMI 2018 "Joint Optic Disc and Cup Segmentation Based on Multi-label Deep Network and Polar Transformation"** Project homepage:http://hzfu.github.io/proj_glaucoma_fundus.html ## Install dependencies pip install -r requirements.txt ## Install package pip install . OpenCV will need to be installed separately.
473
I-am-Bot/DeepRobust
['adversarial attack']
['Adversarial Attacks and Defenses in Images, Graphs and Text: A Review', 'Adversarial Attacks and Defenses on Graphs: A Review, A Tool and Empirical Studies']
examples/graph/test_ig.py examples/graph/test_fga.py deeprobust/graph/rl/nipa_env.py deeprobust/graph/defense/simpgcn.py deeprobust/graph/rl/nstep_replay_mem.py deeprobust/image/defense/AWP.py deeprobust/graph/global_attack/dice.py examples/graph/test_mettack.py examples/image/test_onepixel.py examples/graph/test_adv_train_evasion.py deeprobust/graph/defense/pgd.py deeprobust/graph/global_attack/topology_attack.py deeprobust/image/attack/BPDA.py examples/graph/test_gcn_jaccard.py deeprobust/image/defense/YOPO.py deeprobust/image/defense/__init__.py get-pip.py examples/image/test_cw.py examples/image/test_ImageNet.py deeprobust/image/defense/LIDclassifier.py examples/graph/test_sgc.py deeprobust/graph/targeted_attack/rnd.py conf.py deeprobust/image/__init__.py deeprobust/image/utils.py examples/graph/test_median_gcn.py deeprobust/graph/global_attack/base_attack.py setup.py examples/graph/test_dice.py deeprobust/image/attack/Universal.py examples/image/test_train.py deeprobust/image/netmodels/CNN.py examples/graph/test_all.py deeprobust/image/defense/trades.py deeprobust/image/defense/fgsmtraining.py deeprobust/graph/targeted_attack/sga.py deeprobust/graph/defense/gat.py examples/graph/test_sga.py deeprobust/image/defense/base_defense.py deeprobust/image/attack/base_attack.py deeprobust/image/attack/fgsm.py examples/graph/test_pgd.py examples/graph/test_simpgcn.py deeprobust/graph/data/attacked_data.py deeprobust/image/attack/onepixel.py examples/image/test_PGD.py docs/conf.py deeprobust/graph/defense/r_gcn.py deeprobust/image/defense/TherEncoding.py deeprobust/graph/defense/sgc.py examples/graph/test_prognn.py deeprobust/image/netmodels/resnet.py deeprobust/graph/targeted_attack/base_attack.py examples/graph/test_gat.py deeprobust/image/attack/lbfgs.py deeprobust/graph/defense/chebnet.py deeprobust/graph/rl/nipa_q_net_node.py deeprobust/graph/rl/q_net_node.py deeprobust/graph/rl/env.py deeprobust/graph/rl/rl_s2v_env.py deeprobust/image/netmodels/train_resnet.py 
deeprobust/graph/rl/nipa_nstep_replay_mem.py examples/graph/test_nettack.py deeprobust/image/netmodels/train_model.py deeprobust/image/netmodels/preact_resnet.py examples/graph/test_chebnet.py deeprobust/graph/global_attack/__init__.py deeprobust/image/attack/Nattack.py deeprobust/graph/targeted_attack/fga.py deeprobust/graph/global_attack/nipa.py deeprobust/graph/targeted_attack/ig_attack.py deeprobust/graph/global_attack/random_attack.py deeprobust/graph/utils.py deeprobust/graph/targeted_attack/nettack.py deeprobust/graph/defense/node_embedding.py examples/image/test_nattack.py deeprobust/image/netmodels/YOPOCNN.py deeprobust/image/attack/YOPOpgd.py examples/graph/test_min_max.py deeprobust/image/attack/__init__.py examples/graph/test_random.py setup_empty.py deeprobust/image/attack/cw.py examples/image/testprint_mnist.py deeprobust/image/optimizer.py deeprobust/image/netmodels/__init__.py deeprobust/graph/defense/gcn_preprocess.py examples/graph/test_deepwalk.py deeprobust/graph/data/__init__.py deeprobust/graph/defense/adv_training.py deeprobust/image/preprocessing/APE-GAN.py examples/graph/test_node_embedding_attack.py deeprobust/graph/data/dataset.py examples/graph/test_gcn_svd.py deeprobust/graph/defense/prognn.py examples/graph/test_nipa.py deeprobust/image/evaluation_attack.py examples/graph/test_rl_s2v.py deeprobust/image/attack/deepfool.py deeprobust/graph/data/utils.py deeprobust/graph/targeted_attack/__init__.py deeprobust/image/netmodels/CNN_multilayer.py deeprobust/image/netmodels/vgg.py deeprobust/graph/global_attack/node_embedding_attack.py examples/image/test_pgdtraining.py deeprobust/graph/defense/__init__.py examples/image/test_lbfgs.py deeprobust/image/preprocessing/prepare_advdata.py deeprobust/graph/defense/gcn.py deeprobust/graph/black_box.py deeprobust/graph/rl/nipa_config.py examples/image/test_fgsm.py deeprobust/image/defense/pgdtraining.py deeprobust/graph/data/pyg_dataset.py deeprobust/graph/defense/median_gcn.py 
deeprobust/graph/global_attack/mettack.py deeprobust/image/attack/pgd.py examples/graph/test_gcn.py examples/image/test1.py deeprobust/graph/targeted_attack/rl_s2v.py deeprobust/image/defense/fast.py examples/graph/test_rnd.py deeprobust/image/netmodels/densenet.py examples/graph/test_visualization.py deeprobust/image/config.py examples/image/test_trade.py examples/graph/test_adv_train_poisoning.py examples/graph/test_rgcn.py deeprobust/graph/rl/rl_s2v_config.py deeprobust/__init__.py deeprobust/graph/visualization.py deeprobust/image/attack/l2_attack.py examples/image/test_deepfool.py main bootstrap load_victim_model train_victim_model add_self_loops get_train_val_test_gcn degree_normalize_adj_tensor encode_onehot visualize get_train_val_test normalize_adj normalize_sparse_tensor compute_log_likelihood get_splits_each_class likelihood_ratio_filter reshape_mx get_train_test_labelrate normalize_feature to_tensor update_sum_log_degrees get_degree_squence to_scipy tensor2onehot is_sparse_tensor classification_margin preprocess loss_acc unravel_index get_train_test normalize_adj_tensor degree_normalize_adj sparse_mx_to_torch_sparse_tensor accuracy compute_alpha ravel_multiple_indices degree_sequence_log_likelihood degree_normalize_sparse_tensor updated_log_likelihood_for_edge_changes feature_diff _get_diff degree_dist RandomAttack PtbDataset PrePtbDataset parse_index_file Dataset Pyg2Dpr random_coauthor_amazon_splits Dpr2Pyg mask_to_index index_to_mask AmazonPyg CoauthorPyg AdvTraining ChebNet GAT GraphConvolution GCN dropedge_cosine GCNSVD dropedge_dis dropedge_both GCNJaccard dropedge_jaccard __dropedge_jaccard MedianConv MedianGCN random_choice sample_n2v_random_walks BaseEmbedding Node2Vec sum_of_powers_of_transition_matrix _n2v_random_walk sample_random_walks _random_walk DeepWalk PGD SGD ProxOperators ProGNN EstimateAdj GaussianConvolution RGCN GGCL_D GGCL_F SGC noaug_normalized_adjacency SimPGCN preprocess_adj_noloop AttrSim BaseAttack DICE BaseMeta Metattack 
MetaApprox NIPA construct_line_graph OtherNodeEmbeddingAttack estimate_delta_eigvals edges_to_sparse sum_of_powers NodeEmbeddingAttack estimate_delta_eigenvecs estimate_loss_with_delta_eigenvals Random MinMax PGDAttack NodeAttackEnv ModifiedGraph StaticGraph GraphNormTool save_args build_kwargs NodeInjectionEnv nipa_hash_state_action NstepReplayMem node_greedy_actions _param_init glorot_uniform NStepQNetNode weights_init QNetNode nipa_hash_state_action hash_state_action NstepReplayMemCell NstepReplaySubMemCell NstepReplayMem node_greedy_actions _param_init glorot_uniform NStepQNetNode weights_init QNetNode save_args build_kwargs NodeAttackEnv ModifiedGraph StaticGraph GraphNormTool BaseAttack FGA IGAttack filter_chisquare filter_singletons compute_alpha compute_log_likelihood Nettack connected_after compute_new_a_hat_uv update_Sx RLS2V RND SGAttack load_net parameter_parser generate_dataloader run_attack AdamOptimizer DifferentialEvolutionSolver differential_evolution download_model load_checkpoint arctanh make_symlink l2_dist reduce_sum l2r_dist tab_printer l1_dist l2_norm save_checkpoint adjust_learning_rate progress_bar l1_norm create_test_dataset onehot_like create_train_dataset BaseAttack clip_bound BPDA_attack image2tensor get_cw_grad label2tensor l2_norm preprocess identity_transform normalize get_img_grad_given_label CarliniWagner DeepFool deepfool zero_gradients FGSM fgm CarliniL2 LBFGS optimize attack NATTACK predict_classes attack_success perturb_image Onepixel pgd_attack PGD proj_lp universal_adversarial_perturbation get_fooling_rate get_model data_input_init FASTPGD diff_in_weights add_into_weights AWP_AT pgd_AWP BaseDefense Fast FGSMtraining get_lid train PGDtraining one_hot one_hot_to_thermometer Thermometer test train TRADES AvgMeter IPGDAttackMethodMaker torch_accuracy load_checkpoint Hamiltonian make_symlink eval_one_epoch SGDOptimizerMaker add_path main PieceWiseConstantLrSchedulerMaker train_one_epoch CrossEntropyWithWeightPenlty 
FastGradientLayerOneTrainer cal_l2_norm Net train test Net train test DenseNet201 DenseNet161 DenseNet121 Transition DenseNet Bottleneck densenet_cifar test DenseNet169 train PreActBlock PreActResNet18 PreActResNet PreActBottleneck ResNet18 ResNet34 Bottleneck ResNet101 test Net ResNet50 train BasicBlock ResNet152 train feed_dataset train VGG test Net main Generator get_args Discriminator main main test select_nodes multi_test_evasion test single_test multi_test_poison main select_nodes multi_test_evasion test single_test multi_test_poison main select_nodes multi_test_evasion test single_test main main test main test select_nodes multi_test_evasion test single_test multi_test_poison main init_setup add_nodes injecting_nodes generate_injected_features main test main test init_setup main test select_nodes multi_test_evasion test single_test multi_test_poison main Net train test parameter_parser parameter_parser join mkdtemp exit pip_entry_point parse_args install_req_from_line insert join mkdtemp bootstrap load join format name train_victim_model eval load_state_dict to exists GCN join state_dict name system preprocess eval save to GCN fit max eye max eye issparse normalize_adj todense LongTensor FloatTensor sparse_mx_to_torch_sparse_tensor normalize_feature array issparse LongTensor FloatTensor sparse_mx_to_torch_sparse_tensor array diags tolil flatten dot sum array diags tolil flatten dot eye sum array add_self_loops _values size _indices pow shape scatter_add new_full repeat arange cat to_scipy normalize_adj flatten device to sum diag diags tolil flatten dot eye sum array add_self_loops _values size _indices pow shape scatter_add to_scipy degree_normalize_adj flatten device to sum diag sum type_as double LongTensor nll_loss type_as LongTensor exp clone data FloatTensor astype float32 unsqueeze cat t _values _indices is_sparse_tensor seed train_test_split arange seed train_test_split arange seed int permutation arange astype max range len int print 
get_splits_each_class round max len int permutation arange astype max range len int compute_alpha compute_log_likelihood degree_sequence_log_likelihood shape zeros sum is_cuda cat updated_log_likelihood_for_edge_changes compute_log_likelihood compute_alpha sum len compute_log_likelihood compute_alpha sum update_sum_log_degrees len float sum log log view nonzero show subplots xlabel yticks grid ylabel savefig mkdir legend xticks sum distplot show subplots xlabel yticks grid ylabel savefig mkdir legend distplot xticks _get_diff issparse T tqdm append power sum append int strip open data view index_to_mask append range cat arange zeros count_nonzero range len count_nonzero range len sqrt sum range len norm range len norm sqrt sum range len csr_matrix randint reshape seed choice append range len csr_matrix _n2v_random_walk indptr indices randint seed sum size random_choice any append empty array range enumerate len dot A1 range to float adj_normalizer diags flatten coo_matrix sum array sum_of_powers sqrt zeros sum range len dot shape zeros range len A1 zeros power range enumerate ones T tocsr dot nonzero column_stack directed_edges uniform_ sqrt size prod data glorot_uniform isinstance zero_ Linear _param_init isinstance named_parameters modules ParameterList LongTensor append max range len directed_edges concatenate copy set sqrt connected_after append union range len squeeze sum array multiply sum list format model print choice generate argmax range enumerate load ResNet18 Net eval load_state_dict print DataLoader MNIST CIFAR10 add_argument ArgumentParser DifferentialEvolutionSolver MNIST DataLoader Compose MNIST DataLoader Compose print format urlretrieve print format exists save load format print load_state_dict isfile remove format print symlink exists sorted print draw add_rows vars Texttable keys zeros_like sum dim range reversed reduce_sum abs reduce_sum param_groups int time join format_time write append range flush len reshape transpose normalize 
requires_grad_ Tensor unsqueeze long array backward model clone zero_grad ce zero_ CrossEntropyLoss model backward l2 clone zero_grad MSELoss ce zero_ CrossEntropyLoss sum numpy square reshape clip argmax sum backward print l2 get_cw_grad label2tensor model l2_norm transform_func from_numpy requires_grad_ MSELoss sign clip_bound numpy range detach_ isinstance zero_ Iterable Tensor norm inf backward abs float32 copy flatten shape requires_grad_ numpy zeros to forward range zero_gradients data norm inf model backward clamp zero_grad grad SGD sign shape zeros to numpy range dtype norm loss print float64 reshape astype lbfgs_b shape tensor to numpy range len forward max clip str from_numpy shape array append sum range debug size mean float enumerate tanh print reshape arctanh __len__ repeat zeros std len astype repeat array split to print to argmax data zeros_like model sign device max clip shape uniform sum range detach format requires_grad_ zero_ float backward print clamp min numpy retain_grad resnet18 eval vgg16 to Compose clamp norm min model print len tqdm parameters unsqueeze to max tf open max time str model print proj_lp getpurb requires_grad_ get_fooling_rate generate to numpy data_input_init enumerate deepfool len items norm OrderedDict zip state_dict keys argmax format batch_size backward print dataset zero_grad calculate_loss item adv_data step enumerate len int asarray concatenate print extend tqdm merge_and_generate_labels estimate ceil float range len model Thermometer flatten permute input nll_loss info format print eval dataset len one_hot_to_thermometer one_hot unsqueeze_ size long scatter_ cumsum topk size t eq mul_ expand_as append sum max print format append norm named_parameters format AvgMeter tqdm OrderedDict mean eval set_description attack set_postfix to batch_size SGDOptimizerMaker layer_one SGD MultiStepLR model_dir save_checkpoint ArgumentParser device Hamiltonian other_layers train_one_epoch parse_args to format create_optimizer 
IPGDAttackMethodMaker create_evaluation_attack_method Net auto_continue resume PieceWiseConstantLrSchedulerMaker create_test_dataset create_lr_scheduler time print load_checkpoint add_argument parameters CrossEntropyWithWeightPenlty FastGradientLayerOneTrainer step create_train_dataset zero_grad set_description device OrderedDict set_postfix to range detach format grad requires_grad_ uniform_ net enumerate criterion backward tqdm train step cross_entropy str StepLR isdir SGD test parameters mkdir save manual_seed to feed_dataset range state_dict MNIST DataLoader Compose CIFAR10 data zero_grad DataLoader save loss_bce cuda loss_mse squeeze Adam MSELoss epochs TensorDataset range size eval alpha BCELoss checkpoint load G backward beta train parse_args add_argument ArgumentParser MNIST model float Compose PGD load_state_dict CIFAR10 cat nll_loss Adam output accuracy parameters item to GCN fit test A1 int attack modified_adj predict numpy cuda items sorted tolist GCN classification_margin eval to predict fit A1 int select_nodes print tqdm single_test FGA attack to modified_adj len exp predict eval to GCN fit A1 int select_nodes print tqdm single_test FGA attack to modified_adj len modified_features modified_features IGAttack modified_features IGAttack structure_perturbations MedianGCN update_edge_index MedianGCN update_edge_index MedianGCN update_edge_index SGAttack MedianGCN update_edge_index fit cpu Nettack Nettack GCN print permutation reshape_mx tolil normal mean tile int print hstack choice reshape_mx ratio features normalize_feature max generate_injected_features format injecting_nodes graph print nll_loss from_scipy_sparse_matrix predict accuracy GraphNormTool preprocess to_dict_of_lists item to setattr Dataset GCN fit list arange PGDAttack concatenate len predict set cpu GCN fit load_victim_model features normalize_feature feature_perturbations SGAttack
[contributing-image]: https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat [contributing-url]: https://github.com/rusty1s/pytorch_geometric/blob/master/CONTRIBUTING.md <p align="center"> <img center src="https://github.com/DSE-MSU/DeepRobust/blob/master/adversary_examples/Deeprobust.png" width = "450" alt="logo"> </p> --------------------- <!-- <a href="https://github.com/DSE-MSU/DeepRobust/stargazers"><img alt="GitHub stars" src="https://img.shields.io/github/stars/DSE-MSU/DeepRobust"></a> <a href="https://github.com/DSE-MSU/DeepRobust/network/members" ><img alt="GitHub forks" src="https://img.shields.io/github/forks/DSE-MSU/DeepRobust"> </a> -->
474
I-am-Bot/RobustTorch
['adversarial attack']
['Adversarial Attacks and Defenses in Images, Graphs and Text: A Review', 'Adversarial Attacks and Defenses on Graphs: A Review, A Tool and Empirical Studies']
examples/graph/test_ig.py examples/graph/test_fga.py deeprobust/graph/rl/nipa_env.py deeprobust/graph/defense/simpgcn.py deeprobust/graph/rl/nstep_replay_mem.py deeprobust/image/defense/AWP.py deeprobust/graph/global_attack/dice.py examples/graph/test_mettack.py examples/image/test_onepixel.py examples/graph/test_adv_train_evasion.py deeprobust/graph/defense/pgd.py deeprobust/graph/global_attack/topology_attack.py deeprobust/image/attack/BPDA.py examples/graph/test_gcn_jaccard.py deeprobust/image/defense/YOPO.py deeprobust/image/defense/__init__.py get-pip.py examples/image/test_cw.py examples/image/test_ImageNet.py deeprobust/image/defense/LIDclassifier.py examples/graph/test_sgc.py deeprobust/graph/targeted_attack/rnd.py conf.py deeprobust/image/__init__.py deeprobust/image/utils.py examples/graph/test_median_gcn.py deeprobust/graph/global_attack/base_attack.py setup.py examples/graph/test_dice.py deeprobust/image/attack/Universal.py examples/image/test_train.py deeprobust/image/netmodels/CNN.py examples/graph/test_all.py deeprobust/image/defense/trades.py deeprobust/image/defense/fgsmtraining.py deeprobust/graph/targeted_attack/sga.py deeprobust/graph/defense/gat.py examples/graph/test_sga.py deeprobust/image/defense/base_defense.py deeprobust/image/attack/base_attack.py deeprobust/image/attack/fgsm.py examples/graph/test_pgd.py examples/graph/test_simpgcn.py deeprobust/graph/data/attacked_data.py deeprobust/image/attack/onepixel.py examples/image/test_PGD.py docs/conf.py deeprobust/graph/defense/r_gcn.py deeprobust/image/defense/TherEncoding.py deeprobust/graph/defense/sgc.py examples/graph/test_prognn.py deeprobust/image/netmodels/resnet.py deeprobust/graph/targeted_attack/base_attack.py examples/graph/test_gat.py deeprobust/image/attack/lbfgs.py deeprobust/graph/defense/chebnet.py deeprobust/graph/rl/nipa_q_net_node.py deeprobust/graph/rl/q_net_node.py deeprobust/graph/rl/env.py deeprobust/graph/rl/rl_s2v_env.py deeprobust/image/netmodels/train_resnet.py 
deeprobust/graph/rl/nipa_nstep_replay_mem.py examples/graph/test_nettack.py deeprobust/image/netmodels/train_model.py deeprobust/image/netmodels/preact_resnet.py examples/graph/test_chebnet.py deeprobust/graph/global_attack/__init__.py deeprobust/image/attack/Nattack.py deeprobust/graph/targeted_attack/fga.py deeprobust/graph/global_attack/nipa.py deeprobust/graph/targeted_attack/ig_attack.py deeprobust/graph/global_attack/random_attack.py deeprobust/graph/utils.py deeprobust/graph/targeted_attack/nettack.py deeprobust/graph/defense/node_embedding.py examples/image/test_nattack.py deeprobust/image/netmodels/YOPOCNN.py deeprobust/image/attack/YOPOpgd.py examples/graph/test_min_max.py deeprobust/image/attack/__init__.py examples/graph/test_random.py setup_empty.py deeprobust/image/attack/cw.py examples/image/testprint_mnist.py deeprobust/image/optimizer.py deeprobust/image/netmodels/__init__.py deeprobust/graph/defense/gcn_preprocess.py examples/graph/test_deepwalk.py deeprobust/graph/data/__init__.py deeprobust/graph/defense/adv_training.py deeprobust/image/preprocessing/APE-GAN.py examples/graph/test_node_embedding_attack.py deeprobust/graph/data/dataset.py examples/graph/test_gcn_svd.py deeprobust/graph/defense/prognn.py examples/graph/test_nipa.py deeprobust/image/evaluation_attack.py examples/graph/test_rl_s2v.py deeprobust/image/attack/deepfool.py deeprobust/graph/data/utils.py deeprobust/graph/targeted_attack/__init__.py deeprobust/image/netmodels/CNN_multilayer.py deeprobust/image/netmodels/vgg.py deeprobust/graph/global_attack/node_embedding_attack.py examples/image/test_pgdtraining.py deeprobust/graph/defense/__init__.py examples/image/test_lbfgs.py deeprobust/image/preprocessing/prepare_advdata.py deeprobust/graph/defense/gcn.py deeprobust/graph/black_box.py deeprobust/graph/rl/nipa_config.py examples/image/test_fgsm.py deeprobust/image/defense/pgdtraining.py deeprobust/graph/data/pyg_dataset.py deeprobust/graph/defense/median_gcn.py 
deeprobust/graph/global_attack/mettack.py deeprobust/image/attack/pgd.py examples/graph/test_gcn.py examples/image/test1.py deeprobust/graph/targeted_attack/rl_s2v.py deeprobust/image/defense/fast.py examples/graph/test_rnd.py deeprobust/image/netmodels/densenet.py examples/graph/test_visualization.py deeprobust/image/config.py examples/image/test_trade.py examples/graph/test_adv_train_poisoning.py examples/graph/test_rgcn.py deeprobust/graph/rl/rl_s2v_config.py deeprobust/__init__.py deeprobust/graph/visualization.py deeprobust/image/attack/l2_attack.py examples/image/test_deepfool.py main bootstrap load_victim_model train_victim_model add_self_loops get_train_val_test_gcn degree_normalize_adj_tensor encode_onehot visualize get_train_val_test normalize_adj normalize_sparse_tensor compute_log_likelihood get_splits_each_class likelihood_ratio_filter reshape_mx get_train_test_labelrate normalize_feature to_tensor update_sum_log_degrees get_degree_squence to_scipy tensor2onehot is_sparse_tensor classification_margin preprocess loss_acc unravel_index get_train_test normalize_adj_tensor degree_normalize_adj sparse_mx_to_torch_sparse_tensor accuracy compute_alpha ravel_multiple_indices degree_sequence_log_likelihood degree_normalize_sparse_tensor updated_log_likelihood_for_edge_changes feature_diff _get_diff degree_dist RandomAttack PtbDataset PrePtbDataset parse_index_file Dataset Pyg2Dpr random_coauthor_amazon_splits Dpr2Pyg mask_to_index index_to_mask AmazonPyg CoauthorPyg AdvTraining ChebNet GAT GraphConvolution GCN dropedge_cosine GCNSVD dropedge_dis dropedge_both GCNJaccard dropedge_jaccard __dropedge_jaccard MedianConv MedianGCN random_choice sample_n2v_random_walks BaseEmbedding Node2Vec sum_of_powers_of_transition_matrix _n2v_random_walk sample_random_walks _random_walk DeepWalk PGD SGD ProxOperators ProGNN EstimateAdj GaussianConvolution RGCN GGCL_D GGCL_F SGC noaug_normalized_adjacency SimPGCN preprocess_adj_noloop AttrSim BaseAttack DICE BaseMeta Metattack 
MetaApprox NIPA construct_line_graph OtherNodeEmbeddingAttack estimate_delta_eigvals edges_to_sparse sum_of_powers NodeEmbeddingAttack estimate_delta_eigenvecs estimate_loss_with_delta_eigenvals Random MinMax PGDAttack NodeAttackEnv ModifiedGraph StaticGraph GraphNormTool save_args build_kwargs NodeInjectionEnv nipa_hash_state_action NstepReplayMem node_greedy_actions _param_init glorot_uniform NStepQNetNode weights_init QNetNode nipa_hash_state_action hash_state_action NstepReplayMemCell NstepReplaySubMemCell NstepReplayMem node_greedy_actions _param_init glorot_uniform NStepQNetNode weights_init QNetNode save_args build_kwargs NodeAttackEnv ModifiedGraph StaticGraph GraphNormTool BaseAttack FGA IGAttack filter_chisquare filter_singletons compute_alpha compute_log_likelihood Nettack connected_after compute_new_a_hat_uv update_Sx RLS2V RND SGAttack load_net parameter_parser generate_dataloader run_attack AdamOptimizer DifferentialEvolutionSolver differential_evolution download_model load_checkpoint arctanh make_symlink l2_dist reduce_sum l2r_dist tab_printer l1_dist l2_norm save_checkpoint adjust_learning_rate progress_bar l1_norm create_test_dataset onehot_like create_train_dataset BaseAttack clip_bound BPDA_attack image2tensor get_cw_grad label2tensor l2_norm preprocess identity_transform normalize get_img_grad_given_label CarliniWagner DeepFool deepfool zero_gradients FGSM fgm CarliniL2 LBFGS optimize attack NATTACK predict_classes attack_success perturb_image Onepixel pgd_attack PGD proj_lp universal_adversarial_perturbation get_fooling_rate get_model data_input_init FASTPGD diff_in_weights add_into_weights AWP_AT pgd_AWP BaseDefense Fast FGSMtraining get_lid train PGDtraining one_hot one_hot_to_thermometer Thermometer test train TRADES AvgMeter IPGDAttackMethodMaker torch_accuracy load_checkpoint Hamiltonian make_symlink eval_one_epoch SGDOptimizerMaker add_path main PieceWiseConstantLrSchedulerMaker train_one_epoch CrossEntropyWithWeightPenlty 
FastGradientLayerOneTrainer cal_l2_norm Net train test Net train test DenseNet201 DenseNet161 DenseNet121 Transition DenseNet Bottleneck densenet_cifar test DenseNet169 train PreActBlock PreActResNet18 PreActResNet PreActBottleneck ResNet18 ResNet34 Bottleneck ResNet101 test Net ResNet50 train BasicBlock ResNet152 train feed_dataset train VGG test Net main Generator get_args Discriminator main main test select_nodes multi_test_evasion test single_test multi_test_poison main select_nodes multi_test_evasion test single_test multi_test_poison main select_nodes multi_test_evasion test single_test main main test main test select_nodes multi_test_evasion test single_test multi_test_poison main init_setup add_nodes injecting_nodes generate_injected_features main test main test init_setup main test select_nodes multi_test_evasion test single_test multi_test_poison main Net train test parameter_parser parameter_parser join mkdtemp exit pip_entry_point parse_args install_req_from_line insert join mkdtemp bootstrap load join format name train_victim_model eval load_state_dict to exists GCN join state_dict name system preprocess eval save to GCN fit max eye max eye issparse normalize_adj todense LongTensor FloatTensor sparse_mx_to_torch_sparse_tensor normalize_feature array issparse LongTensor FloatTensor sparse_mx_to_torch_sparse_tensor array diags tolil flatten dot sum array diags tolil flatten dot eye sum array add_self_loops _values size _indices pow shape scatter_add new_full repeat arange cat to_scipy normalize_adj flatten device to sum diag diags tolil flatten dot eye sum array add_self_loops _values size _indices pow shape scatter_add to_scipy degree_normalize_adj flatten device to sum diag sum type_as double LongTensor nll_loss type_as LongTensor exp clone data FloatTensor astype float32 unsqueeze cat t _values _indices is_sparse_tensor seed train_test_split arange seed train_test_split arange seed int permutation arange astype max range len int print 
get_splits_each_class round max len int permutation arange astype max range len div int compute_alpha compute_log_likelihood degree_sequence_log_likelihood shape zeros sum is_cuda cat updated_log_likelihood_for_edge_changes compute_log_likelihood compute_alpha sum len compute_log_likelihood compute_alpha sum update_sum_log_degrees len float sum log log view nonzero show subplots xlabel yticks grid ylabel savefig mkdir legend xticks sum distplot show subplots xlabel yticks grid ylabel savefig mkdir legend distplot xticks _get_diff issparse T tqdm append power sum append int strip open data view index_to_mask append range cat arange zeros count_nonzero range len count_nonzero range len sqrt sum range len norm range len norm sqrt sum range len csr_matrix randint reshape seed choice append range len csr_matrix _n2v_random_walk indptr indices randint seed sum size random_choice any append empty array range enumerate len dot A1 range to float adj_normalizer diags flatten coo_matrix sum array sum_of_powers sqrt zeros sum range len dot shape zeros range len A1 zeros power range enumerate ones T tocsr dot nonzero column_stack directed_edges uniform_ sqrt size prod data glorot_uniform isinstance zero_ Linear _param_init isinstance named_parameters modules ParameterList LongTensor append max range len directed_edges concatenate copy set sqrt connected_after append union range len squeeze sum array multiply sum list format model print choice generate argmax range enumerate load ResNet18 Net eval load_state_dict print DataLoader MNIST CIFAR10 add_argument ArgumentParser DifferentialEvolutionSolver MNIST DataLoader Compose MNIST DataLoader Compose print format urlretrieve print format exists save load format print load_state_dict isfile remove format print symlink exists sorted print draw add_rows vars Texttable keys zeros_like sum dim range reversed reduce_sum abs reduce_sum param_groups int time join format_time write append range flush len reshape transpose normalize 
requires_grad_ Tensor unsqueeze long array backward model clone zero_grad ce zero_ CrossEntropyLoss model backward l2 clone zero_grad MSELoss ce zero_ CrossEntropyLoss sum numpy square reshape clip argmax sum backward print l2 get_cw_grad label2tensor model l2_norm transform_func from_numpy requires_grad_ MSELoss sign clip_bound numpy range detach_ isinstance zero_ Iterable Tensor norm inf backward abs float32 copy flatten shape requires_grad_ numpy zeros to forward range zero_gradients data norm inf model backward clamp zero_grad grad SGD sign shape zeros to numpy range dtype norm loss print float64 reshape astype lbfgs_b shape tensor to numpy range len forward max clip str from_numpy shape array append sum range debug size mean float enumerate tanh print reshape arctanh __len__ repeat zeros std len astype repeat array split to print to argmax data zeros_like model sign device max clip shape uniform sum range detach format requires_grad_ zero_ float backward print clamp min numpy retain_grad resnet18 eval vgg16 to Compose clamp norm min model print len tqdm parameters unsqueeze to max tf open max time str model print proj_lp getpurb requires_grad_ get_fooling_rate generate to numpy data_input_init enumerate deepfool len items norm OrderedDict zip state_dict keys argmax format batch_size backward print dataset zero_grad calculate_loss item adv_data step enumerate len int asarray concatenate print extend tqdm merge_and_generate_labels estimate ceil float range len model Thermometer flatten permute input nll_loss info format print eval dataset len one_hot_to_thermometer one_hot unsqueeze_ size long scatter_ cumsum topk size t eq mul_ expand_as append sum max print format append norm named_parameters format AvgMeter tqdm OrderedDict mean eval set_description attack set_postfix to batch_size SGDOptimizerMaker layer_one SGD MultiStepLR model_dir save_checkpoint ArgumentParser device Hamiltonian other_layers train_one_epoch parse_args to format create_optimizer 
IPGDAttackMethodMaker create_evaluation_attack_method Net auto_continue resume PieceWiseConstantLrSchedulerMaker create_test_dataset create_lr_scheduler time print load_checkpoint add_argument parameters CrossEntropyWithWeightPenlty FastGradientLayerOneTrainer step create_train_dataset zero_grad set_description device OrderedDict set_postfix to range detach format grad requires_grad_ uniform_ net enumerate criterion backward tqdm train step cross_entropy str StepLR isdir SGD test parameters mkdir save manual_seed to feed_dataset range state_dict MNIST DataLoader Compose CIFAR10 data zero_grad DataLoader save loss_bce cuda loss_mse squeeze Adam MSELoss epochs TensorDataset range size eval alpha BCELoss checkpoint load G backward beta train parse_args add_argument ArgumentParser MNIST model float Compose PGD load_state_dict CIFAR10 cat nll_loss Adam output accuracy parameters item to GCN fit test A1 int attack modified_adj predict numpy cuda items sorted tolist GCN classification_margin eval to predict fit A1 int select_nodes print tqdm single_test FGA attack to modified_adj len exp predict eval to GCN fit A1 int select_nodes print tqdm single_test FGA attack to modified_adj len modified_features modified_features IGAttack modified_features IGAttack structure_perturbations MedianGCN update_edge_index MedianGCN update_edge_index MedianGCN update_edge_index SGAttack MedianGCN update_edge_index fit cpu Nettack Nettack GCN print permutation reshape_mx tolil normal mean tile int print hstack choice reshape_mx ratio features normalize_feature max generate_injected_features format injecting_nodes graph print nll_loss from_scipy_sparse_matrix predict accuracy GraphNormTool preprocess to_dict_of_lists item to setattr Dataset GCN fit list arange PGDAttack concatenate len predict set cpu GCN fit load_victim_model features normalize_feature feature_perturbations SGAttack
[contributing-image]: https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat [contributing-url]: https://github.com/rusty1s/pytorch_geometric/blob/master/CONTRIBUTING.md <p align="center"> <img center src="https://github.com/DSE-MSU/DeepRobust/blob/master/adversary_examples/Deeprobust.png" width = "450" alt="logo"> </p> --------------------- <!-- <a href="https://github.com/DSE-MSU/DeepRobust/stargazers"><img alt="GitHub stars" src="https://img.shields.io/github/stars/DSE-MSU/DeepRobust"></a> <a href="https://github.com/DSE-MSU/DeepRobust/network/members" ><img alt="GitHub forks" src="https://img.shields.io/github/forks/DSE-MSU/DeepRobust"> </a> -->
475
IAmS4n/DGSAN
['text generation']
['DGSAN: Discrete Generative Self-Adversarial Network']
trainer_dgsan.py metrics/parallel_metrics.py main_mle.py metrics/bert_distances.py trainer.py metrics/ngram_metrics.py models.py data_manager.py main_dgsan.py utils.py loss.py load load_oracle_dataset test_real_dataset load_real_dataset LanguageModelingDataset test_oracle_dataset DGSANLoss get_loss_func LSTM Trainer DGSAN DGSANStep BigLRDetector NamedStreamAverage metric_names EMBD FBD FBD_EMBD BertFeature calculate_frechet_distance NgramProp SelfBleu get_ngrams Metric Bleu Jaccard _bert_eval _ngram_eval MetricsEval seed load vocab format print mean LanguageModelingDataset max_length len seed load vocab format print LanguageModelingDataset max_length len format numericalize print text mean reverse iter load_real_dataset append next len load_oracle_dataset format print text mean iter append next len Softplus atleast_2d print iscomplexobj atleast_1d dot sqrtm trace eye real abs max imag FBD_EMBD get print put get put eval Bleu Jaccard
IAmS4n/DGSAN
476
IAmS4n/TextGenerationEvaluationMetrics
['text generation']
['Jointly Measuring Diversity and Quality in Text Generation Models']
multiset_distances.py bert_distances.py metric_names EMBD FBD BertFeature calculate_frechet_distance get_ngrams MultisetDistances metric_names atleast_2d print iscomplexobj atleast_1d dot sqrtm trace eye real abs max imag
# Jointly Measuring Diversity and Quality in Text Generation Models This is the implementation of metrics for measuring Diversity and Quality, which are introduced in [this paper](https://arxiv.org/abs/1904.03971). Besides, some other metrics exist. For BLEU and Self-BLEU, [this hyperformance implementation](https://github.com/Danial-Alh/FastBLEU) is used. ## Sample Usage ### Multiset distances Here is an example to compute MS-Jaccard distance. The input of these metrics is a list of tokenized sentences. ```python from multiset_distances import MultisetDistances ref1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] ref2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 'guarantees', 'the', 'military', 'forces', 'always', 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
477
IBM-HRL-MLHLS/IBM-Causal-Inference-Benchmarking-Framework
['causal inference']
['Benchmarking Framework for Performance-Evaluation of Causal Inference Analysis']
causalbenchmark/evaluate.py setup.py tests/test_evaluate.py causalbenchmark/__init__.py causalbenchmark/utils.py __get_parser evaluate __get_weights _score_individual __main _score_population combine_covariates_with_observed TestEvaluate raise_with_traceback join weighted_sum add_prefix abs Series size __get_weights astype index set mean pow sqrt append listdir AssertionError read_csv join sum add_prefix Series size __get_weights astype mean pow sqrt append listdir read_csv Series unique RuntimeError raise_with_traceback _score_individual _score_population add_argument ArgumentParser evaluate print predictions_location to_csv is_individual_prediction cf_dir_location output_path endswith join listdir from_csv
# Causal Inference Benchmarking Framework Framework for evaluating causal inference methods. - [Overview](#overview) - [Data](#data) - [Getting Started](#getting-started) - [Prerequisites](#prerequisites) - [Installation](#installation) - [Usage](#usage) - [Citing](#citing) - [License](#license)
478
IBM-HRL-MLHLS/IBM-Causality-Benchmarking-Framework
['causal inference']
['Benchmarking Framework for Performance-Evaluation of Causal Inference Analysis']
causalbenchmark/evaluate.py setup.py tests/test_evaluate.py causalbenchmark/__init__.py causalbenchmark/utils.py __get_parser evaluate __get_weights _score_individual __main _score_population combine_covariates_with_observed TestEvaluate raise_with_traceback join weighted_sum add_prefix abs Series size __get_weights astype index set mean pow sqrt append listdir AssertionError read_csv join sum add_prefix Series size __get_weights astype mean pow sqrt append listdir read_csv Series unique RuntimeError raise_with_traceback _score_individual _score_population add_argument ArgumentParser evaluate print predictions_location to_csv is_individual_prediction cf_dir_location output_path endswith join listdir from_csv
# Causal Inference Benchmarking Framework Framework for evaluating causal inference methods. - [Overview](#overview) - [Data](#data) - [Getting Started](#getting-started) - [Prerequisites](#prerequisites) - [Installation](#installation) - [Usage](#usage) - [Citing](#citing) - [License](#license)
479
IBM/IRM-games
['domain generalization']
['Treatment Effect Estimation using Invariant Risk Minimization']
LRG_games/IRMv1_regression/sem.py ERM-IRM/sem_Sep8.py ERM-IRM/IRM_methods.py LRG_games/Colored_MNIST_comparison/data_construct.py IRM_ITE/vary_dim_size.py ERM-IRM/IRM_exhaustive_argparse_hper_wo_disc_Sep8.py IRM_ITE/irm_block.py IRM_games/IRM_methods.py LRG_games/irm_games_regression.py ERM-IRM/data_construct.py IRM_ITE/metrics.py LRG_games/IRMv1_regression/main_v1.py IRM_ITE/models.py LRG_games/ERM_per_env/IRM_exhaustive_argparse_hper_wo_disc_Sep8.py IRM_games/data_construct.py ERM-IRM/models_crossval_Sep8.py LRG_games/IRMv1_regression/models_v1.py IRM_ITE/generate_synthetic_data.py LRG_games/ERM_per_env/models_crossval_Sep8.py LRG_games/ERM_per_env/irm_games_regression.py LRG_games/ERM_per_env/sem_Sep8.py IRM_ITE/plotting_dim.py LRG_games/Colored_MNIST_comparison/IRM_methods.py assemble_data_mnist_confounded_child assemble_data_mnist_sb assemble_data_mnist_confounded assemble_data_mnist_child run_experiment_IRM pretty run_irm_exhaustive run_experiment_ERM errors irm_model standard_erm_model InvariantRiskMinimization InvariantCausalPrediction EmpiricalRiskMinimizer pretty ChainEquationModel assemble_data_mnist assemble_data_mnist_fashion irm_model standard_erm_model variable_irm_game_model fixed_irm_game_model generate_E_random generate_outcomes generate_mixture_indicator generate_synthetic_data generate_T generate_x IRM_Tblock IRM_Sblock InvariantRiskMinimization envs_irm_T envs_irm_S ate_error PEHE ERM_Tblock ERM_Sblock main main convert_regn_np_format fixed_irm_game_model_regression assemble_data_mnist assemble_data_mnist_fashion assemble_data_mnist_sb irm_model fixed_irm_game_model standard_erm_model variable_irm_game_model fixed_irm_game_model_cons fixed_irm_game_model_cons_sign variable_irm_game_model_cons run_experiment_IRM run_experiment_ERM errors pretty convert_regn_np_format fixed_irm_game_model_regression InvariantRiskMinimization InvariantCausalPrediction EmpiricalRiskMinimizer pretty ChainEquationModel run_experiment_IRM run_experiment errors pretty 
InvariantRiskMinimization InvariantCausalPrediction EmpiricalRiskMinimizer pretty ChainEquationModel tolist mean item view len seed items format method_constructor zip set_num_threads solution pretty ChainEquationModel manual_seed append range errors seed int items format method_constructor zip set_num_threads solution pretty ChainEquationModel manual_seed append range errors run_experiment_IRM DataFrame str ylabel savefig legend append range plot mean run_experiment_ERM join norm print xlabel to_csv figure zeros len ne outcome_model sigma_outcome generate_x reshape ntr multiply generate_outcomes feature_model generate_T nd nte mu randint randint randint normal T fill_diagonal multivariate_normal ones sort identity generate_mixture_indicator mean uniform qr zeros sum std range normal T concatenate ones dot uniform zeros range ones multiply astype float32 from_numpy append zeros range cat ones astype float32 from_numpy append range cat mean solution concatenate InvariantRiskMinimization mean solution concatenate InvariantRiskMinimization mean square abs coef_ concatenate ones mean RidgeCV zeros LinearRegression predict fit coef_ concatenate mean RidgeCV LinearRegression predict fit nr Line2D xticks yticks str ylabel savefig legend mu outcome_model errorbar tight_layout mean sqrt feature_model load xlabel rc figure std ERM_Tblock IRM_Tblock generate_E_random model_type envs_irm_T range cat ERM_Sblock ne ntr PEHE generate_synthetic_data time savez print IRM_Sblock zeros envs_irm_S len ones astype append range len sem len seed items format method_constructor zip set_num_threads solution pretty ChainEquationModel manual_seed append range errors
# Out-of-Distribution generalization (OoD) This repository contains four folders: 1. IRM_games: Source code for the [paper](https://arxiv.org/abs/2002.04692) 2. LRG_games: Source code for the [paper](https://arxiv.org/pdf/2010.15234v1.pdf) 3. ERM-IRM: Source code for the [paper](https://arxiv.org/abs/2010.16412) 4. IRM_ITE: Source code for the [paper](https://arxiv.org/pdf/2103.07788.pdf) ### Installation Clone the latest version of this repository: ```bash $ git clone https://github.com/IBM/OoD.git
480
IBM/ZOSVRG-BlackBox-Adv
['stochastic optimization']
['Zeroth-Order Stochastic Variance Reduction for Nonconvex Optimization']
optimization_methods/ZO_SGD.py optimization_methods/ObjectiveFunc.py Universal_Attack.py Utils.py optimization_methods/ZO_SVRG.py SysManager.py setup_mnist.py MNIST extract_data MNISTModel extract_labels SYS_MANAGER main generate_attack_data_set save_img OBJFUNC ZOSGD ZOSVRG argmax OBJFUNC shape expand_dims range predict format Log_MetaData size close ZOSGD flush ZOSVRG tanh print arctanh save_img generate_attack_data_set zeros Add_Parameter fromarray squeeze around save where test_data test_labels append argmax range predict str arange evaluate Loss_Overall print Loss_Attack gradient_estimation write Loss_L2 choice print_current_loss query_count array range str arange evaluate Loss_Overall print Loss_Attack gradient_estimation write Loss_L2 choice print_current_loss query_count array range
# ZOSVRG for Generating Universal Attacks on Black-box Neural Networks ZOSVRG is the proposed new zeroth-order nonconvex optimization method. This repo presents ZOSVRG's application for generating adversarial attacks on black-box neural networks. It contains a pretrained network model for the MNIST classification task, and a Python implementation for attack generation that can directly be applied to the network model. For the ZOSVRG algorithm, see our NIPS 2018 paper “[Zeroth-Order Stochastic Variance Reduction for Nonconvex Optimization](https://arxiv.org/abs/1805.10367)” (Hereinafter referred to as Paper.) ## Description This Python code generates universal adversarial attacks on neural networks for the MNIST classification task under the black-box setting. For an image **x**, the universal attack **d** is first applied to **x** in the *arctanh* space. The final adversarial image is then obtained by applying the *tanh* transform. Summarizing, **x**<sub>adv</sub> = *tanh*(*arctanh*(2**x**) + **d**)/2 Below is a list of parameters that the present code takes: 1. **optimizer**: This parameter specifies the optimizer to use during attack generation. Currently the code supports ZOSGD and ZOSVRG. 2. **q**: The number of random vector to average over when estimating the gradient. 3. **alpha**: The optimizer's step size for updating solutions is alpha/(dimension of **x**) 4. **M**: (For ZOSVRG) The number of batches to apply during each stage.
481
IBM/audioset-classification
['classification', 'audio classification']
['Multi-level Attention Model for Weakly Supervised Audio Classification']
audioset_classify/keras/core.py audioset_classify/utils/data_generator.py audioset_classify/utils/utilities.py audioset_classify/pytorch/main.py audioset_classify/keras/main.py audioset_classify/pytorch/core.py train evaluate max_pooling pooling_shape attention_pooling train average_pooling forward_in_batch train move_data_to_gpu evaluate FeatureLevelSingleAttention EmbeddingLayers DecisionLevelAveragePooling DecisionLevelMultiAttention init_layer init_bn DecisionLevelMaxPooling train Attention DecisionLevelSingleAttention BalancedDataGenerator VanillaDataGenerator create_logging uint8_to_float32 create_folder get_filename get_avg_stats calculate_stats transform_data d_prime load_data bool_to_float32 join time format create_folder dump calculate_stats astype transform_data float32 mean shape info predict open VanillaDataGenerator model batch_size save_weights mini_data save train_on_batch data_dir Adam transform_data shape model_type filename generate format balance_type concatenate workspace DataGenerator BalancedDataGenerator info compile join time learning_rate create_folder evaluate load_data clip isinstance Model summary Input Variable Tensor cuda int move_data_to_gpu model eval cat ceil float range append len forward_in_batch numpy is_cuda binary_cross_entropy zero_grad cuda move_data_to_gpu backward parameters step uniform_ sqrt size fill_ fill_ FeatureLevelSingleAttention DecisionLevelMultiAttention DecisionLevelAveragePooling DecisionLevelMaxPooling DecisionLevelSingleAttention makedirs realpath join basicConfig format create_folder setFormatter addHandler StreamHandler Formatter isfile setLevel INFO ppf norm sqrt uint8_to_float32 bool_to_float32 roc_curve average_precision_score precision_recall_curve append range roc_auc_score open data_dir model_type d_prime bgn_iteration filename append range format interval_iteration balance_type workspace fin_iteration mean info listdir load join time calculate_stats load_data array
# Train and evaluate an audio embedding classifier This developer code pattern will guide you through training a Deep Learning model to classify audio embeddings on IBM's Deep Learning as a Service (DLaaS) platform - Watson Machine Learning - and performing inference/evaluation on IBM Watson Studio. The model will use audio [_embeddings_](https://www.tensorflow.org/programmers_guide/embedding) as an input and generate output probabilities/scores for 527 classes. The classes cover a broad range of sounds like speech, genres of music, natural sounds like rain/lightning, automobiles etc. The full list of sound classes can be found at [Audioset Ontology](https://research.google.com/audioset/ontology/index.html). The model is based on the paper ["Multi-level Attention Model for Weakly Supervised Audio Classification"](https://arxiv.org/abs/1803.02353). As outlined in the paper, the model accepts [_embeddings_](https://www.tensorflow.org/programmers_guide/embedding) of 10-second audio clips as opposed to the raw audio itself. The embedding vectors for raw audio can be generated using VGG-ish [model](https://github.com/tensorflow/models/tree/master/research/audioset). The VGG-ish model converts each second of raw audio into an embedding(vector) of length 128 thus resulting in a tensor of shape 10x128 as the input for the classifier. For the purposes of illustrating the concept and exposing a developer to the features of the IBM Cloud platform, Google's Audioset data is used, where the embeddings have been pre-processed and available readily. Though Audioset data is used here, a developer can leverage this model to create their own custom audio classifier trained on their own audio data. They would however have to first generate the audio embeddings as mentioned above. When the reader has completed this Code Pattern, they will understand how to: * Setup an IBM Cloud Object Storage bucket and upload the training data to the cloud. 
* Upload a Deep Learning model to Watson ML for training. * Integrate the object storage buckets into IBM Watson Studio. * Perform inference on an evaluation dataset using Jupyter Notebooks over IBM Watson Studio. ![](doc/source/images/flow.png)
482
IBM/lale
['time series']
['Type-Driven Automated Learning with Lale']
lale/lib/autogen/normalizer.py lale/lib/autogen/multi_task_lasso.py lale/lib/sklearn/tfidf_vectorizer.py lale/lib/lale/scan.py lale/search/lale_hyperopt.py lale/lib/lale/grid_search_cv.py lale/lib/sklearn/extra_trees_classifier.py lale/lib/sklearn/k_neighbors_classifier.py lale/lib/sklearn/gaussian_nb.py lale/lib/autogen/gaussian_nb.py lale/lib/autogen/sgd_regressor.py lale/lib/lale/sample_based_voting.py lale/lib/sklearn/ada_boost_classifier.py lale/lib/sklearn/simple_imputer.py lale/lib/imblearn/repeated_edited_nearest_neighbours.py lale/lib/sklearn/decision_tree_classifier.py lale/lib/autogen/lars_cv.py lale/lib/snapml/__init__.py lale/lib/lale/alias.py lale/lib/imblearn/smote.py lale/lib/autogen/logistic_regression_cv.py lale/lib/autogen/pls_canonical.py lale/lib/autoai_ts_libs/localized_flatten_auto_ensembler.py lale/operators.py lale/lib/autogen/elastic_net.py lale/lib/autoai_ts_libs/next.py lale/lib/lale/observing.py lale/lib/autogen/multi_task_lasso_cv.py lale/lib/sklearn/__init__.py lale/lib/autoai_ts_libs/previous.py lale/lib/xgboost/__init__.py lale/lib/sklearn/polynomial_features.py lale/lib/lale/identity_wrapper.py lale/lib/autogen/complement_nb.py lale/lib/autogen/decision_tree_classifier.py lale/util/VisitorPathError.py lale/lib/sklearn/dummy_classifier.py lale/lib/aif360/lfr.py lale/lib/aif360/adversarial_debiasing.py test/test_interoperability.py lale/lib/sklearn/ada_boost_regressor.py lale/lib/sklearn/decision_tree_regressor.py lale/lib/autoai_libs/cat_imputer.py lale/lib/autoai_libs/float_str2_float.py lale/lib/autogen/lasso_lars_cv.py test/test_custom_schemas.py lale/lib/autogen/standard_scaler.py lale/lib/autogen/radius_neighbors_classifier.py lale/lib/autoai_libs/boolean2float.py lale/lib/lightgbm/__init__.py lale/lib/autoai_libs/word2vec_transformer.py lale/lib/autogen/mini_batch_sparse_pca.py lale/lib/autogen/transformed_target_regressor.py lale/lib/autogen/robust_scaler.py lale/lib/sklearn/logistic_regression.py 
lale/lib/autogen/ransac_regressor.py lale/lib/sklearn/multi_output_regressor.py lale/lib/autogen/plssvd.py lale/lib/snapml/snap_linear_regression.py lale/lib/lale/topk_voting_classifier.py lale/search/search_space_grid.py lale/lib/imblearn/__init__.py test/test_grammar.py lale/lib/autogen/additive_chi2_sampler.py lale/lib/sklearn/isolation_forest.py lale/lib/lale/both.py test/test_autoai_libs.py lale/lib/autoai_ts_libs/mt2r_forecaster.py lale/lib/autoai_libs/tb1.py lale/datasets/multitable/fetch_datasets.py lale/lib/autoai_libs/tb2.py lale/lib/autogen/bernoulli_nb.py lale/lib/lale/batching.py lale/search/lale_grid_search_cv.py lale/lib/autoai_ts_libs/t2r_forecaster.py lale/lib/sklearn/extra_trees_regressor.py lale/lib/autogen/birch.py lale/lib/sklearn/nystroem.py lale/lib/autogen/pls_regression.py lale/lib/snapml/snap_decision_tree_classifier.py lale/lib/autogen/sparse_pca.py lale/pretty_print.py lale/lib/autogen/random_forest_classifier.py lale/lib/lale/relational.py lale/search/PGO.py lale/lib/snapml/snap_random_forest_regressor.py lale/util/hdf5_to_torch_dataset.py lale/lib/autogen/linear_svr.py lale/lib/sklearn/stacking_utils.py lale/lib/autogen/quadratic_discriminant_analysis.py lale/lib/autogen/gradient_boosting_classifier.py lale/lib/sklearn/pca.py lale/lib/aif360/reweighing.py test/test_autogen_lib.py lale/lib/imblearn/base_resampler.py lale/lib/imblearn/adasyn.py lale/lib/lale/spark_explainer.py lale/lib/autogen/ada_boost_regressor.py test/mock_custom_operators.py lale/lib/lale/_common_schemas.py lale/lib/autoai_libs/cat_encoder.py lale/lib/sklearn/sgd_regressor.py lale/util/__init__.py lale/datasets/multitable/util.py lale/lib/sklearn/quantile_transformer.py lale/search/schema2search_space.py lale/lib/autogen/max_abs_scaler.py test/test_type_checking.py lale/lib/autogen/gaussian_process_classifier.py lale/lib/sklearn/random_forest_classifier.py test/test_json_pretty_viz.py lale/lib/autogen/kernel_ridge.py lale/lib/autogen/truncated_svd.py 
lale/lib/lale/tee.py lale/schema_utils.py lale/lib/sklearn/bagging_classifier.py lale/lib/autogen/mini_batch_k_means.py lale/docstrings.py lale/lib/autogen/logistic_regression.py lale/visualize.py lale/lib/autogen/label_spreading.py lale/lib/autogen/ridge.py lale/lib/sklearn/svc.py lale/lib/aif360/meta_fair_classifier.py test/test_core_regressors.py lale/grammar.py lale/lib/autogen/label_binarizer.py lale/lib/autogen/lasso.py lale/lib/autoai_ts_libs/__init__.py docs/conf.py lale/lib/lale/auto_pipeline.py lale/lib/autogen/function_transformer.py lale/lib/autoai_ts_libs/small_data_window_transformer.py lale/lib/autogen/bayesian_ridge.py lale/lib/autogen/k_bins_discretizer.py lale/lib/autoai_libs/__init__.py lale/lib/snapml/snap_boosting_machine_regressor.py lale/lib/sklearn/min_max_scaler.py lale/lib/rasl/_eval_pandas_df.py lale/lib/lale/time_series_transformer.py lale/lib/rasl/map.py lale/lib/sklearn/random_forest_regressor.py lale/lib/sklearn/robust_scaler.py lale/lib/aif360/__init__.py lale/lib/autogen/lasso_lars.py lale/lib/imblearn/instance_hardness_threshold.py lale/lib/lale/no_op.py lale/lib/lale/optimize_suffix.py lale/lib/sklearn/bagging_regressor.py lale/lib/autogen/bernoulli_rbm.py lale/lib/autogen/linear_svc.py lale/search/__init__.py lale/lib/autogen/passive_aggressive_classifier.py lale/lib/xgboost/xgb_regressor.py lale/lib/autogen/ard_regression.py lale/lib/sklearn/ordinal_encoder.py lale/lib/autoai_ts_libs/flatten_iterative.py lale/lib/lale/filter.py lale/lib/sklearn/fit_spec_proxy.py lale/lib/lale/halving_grid_search_cv.py lale/lib/sklearn/dummy_regressor.py lale/lib/autogen/label_encoder.py lale/lib/autogen/multi_task_elastic_net_cv.py lale/lib/lightgbm/lgbm_regressor.py lale/lib/lale/group_by.py lale/lib/autogen/ridge_classifier.py lale/lib/autogen/__init__.py lale/lib/snapml/snap_decision_tree_regressor.py lale/lib/snapml/snap_logistic_regression.py lale/lib/autoai_libs/numpy_replace_missing_values.py lale/lib/autoai_libs/ta1.py 
lale/lib/sklearn/stacking_classifier.py lale/lib/autogen/kernel_pca.py lale/lib/aif360/redacting.py test/test_core_misc.py lale/lib/autoai_libs/util.py lale/lib/sklearn/linear_svc.py lale/datasets/uci/__init__.py lale/lib/lale/hyperopt.py lale/lib/autogen/nystroem.py lale/lib/autoai_libs/text_transformer.py lale/lib/sklearn/gradient_boosting_classifier.py lale/lib/aif360/datasets.py lale/util/batch_data_dictionary_dataset.py lale/lib/autogen/decision_tree_regressor.py lale/lib/imblearn/smoteenn.py lale/lib/sklearn/gradient_boosting_regressor.py lale/lib/autogen/locally_linear_embedding.py lale/lib/autoai_ts_libs/small_data_window_target_transformer.py lale/operator_wrapper.py lale/lib/sklearn/passive_aggressive_classifier.py lale/lib/autogen/k_means.py lale/lib/autoai_ts_libs/autoai_windowed_wrapped_regressor.py lale/lib/autogen/quantile_transformer.py lale/lib/sklearn/k_means.py test/test_core_pipeline.py lale/lib/autoai_ts_libs/watfore_forecaster.py lale/lib/autogen/ridge_classifier_cv.py lale/lib/autogen/min_max_scaler.py lale/lib/autogen/skewed_chi2_sampler.py lale/json_operator.py lale/lib/autoai_ts_libs/window_transformer_mts.py lale/lib/autoai_libs/numpy_column_selector.py lale/lib/autogen/binarizer.py lale/lib/autoai_ts_libs/ts_pipeline.py lale/lib/rasl/one_hot_encoder.py lale/lib/autogen/multi_label_binarizer.py lale/lib/sklearn/missing_indicator.py lale/lib/rasl/__init__.py lale/lib/autogen/passive_aggressive_regressor.py lale/lib/aif360/protected_attributes_encoder.py lale/__init__.py lale/lib/rasl/aggregate.py test/test_relational_sklearn.py lale/lib/autogen/isomap.py lale/search/search_space.py lale/lib/sklearn/pipeline.py lale/lib/sklearn/voting_classifier.py lale/lib/autoai_libs/opt_standard_scaler.py lale/lib/autogen/huber_regressor.py lale/settings.py lale/lib/lale/project.py lale/lib/sklearn/multinomial_nb.py lale/schema2enums.py lale/lib/autoai_ts_libs/window_standard_row_mean_center_uts.py lale/datasets/data_schemas.py 
test/test_sklearn_compat.py setup.py test/test_core_classifiers.py lale/util/numpy_to_torch_dataset.py lale/lib/autogen/theil_sen_regressor.py test/test_snapml.py lale/lib/autogen/multinomial_nb.py lale/search/lale_smac.py lale/lib/sklearn/quadratic_discriminant_analysis.py lale/schema_ranges.py lale/lib/aif360/calibrated_eq_odds_postprocessing.py lale/lib/autogen/orthogonal_matching_pursuit_cv.py lale/lib/autogen/latent_dirichlet_allocation.py lale/lib/autoai_libs/num_imputer.py lale/lib/autogen/linear_regression.py lale/datasets/uci/uci_datasets.py lale/lib/autogen/perceptron.py lale/lib/autogen/sparse_random_projection.py lale/lib/sklearn/standard_scaler.py lale/datasets/openml/__init__.py lale/lib/autogen/cca.py lale/lib/autogen/ada_boost_classifier.py lale/lib/xgboost/_common_schemas.py lale/lib/rasl/_eval_spark_df.py test/mock_module.py lale/lib/autoai_libs/compress_strings.py lale/lib/lale/functions.py test/test_optimizers.py test/test_pgo.py lale/lib/autogen/power_transformer.py lale/lib/rasl/min_max_scaler.py lale/datasets/util.py lale/lib/autogen/mlp_regressor.py lale/lib/autogen/mlp_classifier.py lale/lib/autogen/multi_task_elastic_net.py lale/lib/autoai_ts_libs/flatten_auto_ensembler.py lale/lib/autogen/linear_discriminant_analysis.py lale/lib/autogen/incremental_pca.py lale/datasets/multitable/__init__.py lale/lib/autogen/label_propagation.py lale/lib/sklearn/feature_agglomeration.py lale/lib/sklearn/svr.py lale/lib/autoai_ts_libs/cubic.py lale/lib/sklearn/function_transformer.py lale/lib/imblearn/svm_smote.py lale/lib/autogen/k_neighbors_regressor.py lale/lib/autogen/gradient_boosting_regressor.py lale/sklearn_compat.py lale/lib/autoai_libs/ta2.py test/test_notebooks.py lale/lib/lale/optimize_last.py lale/lib/lale/orderby.py lale/lib/autogen/lasso_lars_ic.py lale/lib/snapml/snap_boosting_machine_classifier.py lale/lib/aif360/gerry_fair_classifier.py lale/lib/autogen/extra_trees_classifier.py lale/lib/aif360/disparate_impact_remover.py 
lale/lib/autogen/svr.py lale/lib/sklearn/isomap.py lale/lib/sklearn/linear_svr.py lale/lib/autogen/factor_analysis.py lale/lib/sklearn/_common_schemas.py lale/lib/sklearn/column_transformer.py lale/lib/autogen/missing_indicator.py lale/lib/autogen/calibrated_classifier_cv.py lale/lib/snapml/snap_random_forest_classifier.py lale/lib/autogen/lasso_cv.py lale/lib/autoai_libs/fs2.py lale/lib/autogen/pca.py lale/lib/autogen/rbf_sampler.py test/test_replace.py lale/lib/autogen/random_trees_embedding.py lale/lib/aif360/eq_odds_postprocessing.py lale/lib/autoai_libs/numpy_permute_array.py lale/lib/autoai_ts_libs/standard_row_mean_center.py lale/lib/rasl/ordinal_encoder.py lale/lib/xgboost/xgb_classifier.py lale/lib/autogen/elastic_net_cv.py lale/helpers.py lale/lib/autoai_libs/float32_transform.py lale/lib/autoai_libs/t_no_op.py lale/lib/autogen/k_neighbors_classifier.py lale/lib/autogen/radius_neighbors_regressor.py lale/lib/aif360/util.py test/test_core_transformers.py lale/lib/snapml/snap_svm_classifier.py test/test_autoai_output_consumption.py lale/lib/sklearn/mlp_classifier.py lale/lib/autoai_ts_libs/linear.py lale/lib/autogen/gaussian_random_projection.py lale/lib/autoai_ts_libs/fill.py lale/lib/autoai_ts_libs/standard_row_mean_center_mts.py lale/lib/autogen/fast_ica.py lale/datasets/openml/openml_datasets.py lale/lib/autoai_ts_libs/difference_flatten_auto_ensembler.py lale/lib/lale/concat_features.py lale/lib/autoai_libs/numpy_replace_unknown_values.py lale/lib/lale/join.py lale/type_checking.py test/test_relational.py lale/lib/lale/smac.py test/test_pipeline.py test/test_lale_lib_versions.py lale/lib/autogen/nmf.py lale/lib/sklearn/linear_regression.py lale/lib/imblearn/borderline_smote.py lale/datasets/__init__.py lale/lib/sklearn/select_k_best.py lale/lib/autoai_ts_libs/autoai_window_transformed_target_regressor.py lale/lib/sklearn/k_neighbors_regressor.py lale/lib/imblearn/edited_nearest_neighbours.py lale/lib/autogen/lars.py 
lale/lib/sklearn/variance_threshold.py lale/lib/sklearn/nmf.py lale/lib/autoai_ts_libs/autoai_ts_pipeline.py lale/lib/autoai_libs/fs1.py test/test_halving_gridsearchcv.py lale/lib/autoai_libs/tgen.py lale/lib/aif360/optim_preproc.py lale/schema_simplifier.py lale/lib/autoai_ts_libs/ensemble_regressor.py lale/lib/autogen/sgd_classifier.py lale/lib/autogen/ridge_cv.py lale/datasets/movie_review.py lale/lib/autogen/extra_trees_regressor.py lale/lib/autogen/polynomial_features.py lale/lib/sklearn/normalizer.py lale/lib/aif360/prejudice_remover.py lale/lib/sklearn/stacking_regressor.py lale/lib/autogen/nearest_centroid.py lale/lib/autogen/one_hot_encoder.py test/test_nlp_operators.py lale/search/op2hp.py lale/lib/autogen/random_forest_regressor.py test/test_aif360.py lale/lib/sklearn/ridge_classifier.py lale/lib/lightgbm/lgbm_classifier.py lale/lib/sklearn/sgd_classifier.py lale/lib/imblearn/all_knn.py test/__init__.py lale/lib/autoai_libs/tam.py lale/lib/autogen/svc.py lale/lib/aif360/reject_option_classification.py lale/lib/autogen/gaussian_process_regressor.py lale/lib/sklearn/rfe.py lale/util/VisitorMeta.py lale/lib/autogen/nu_svr.py lale/lib/autoai_ts_libs/auto_regression.py lale/lib/lale/__init__.py lale/util/Visitor.py lale/schemas.py lale/lib/sklearn/one_hot_encoder.py lale/lib/autogen/orthogonal_matching_pursuit.py test/test_aif360_ensembles.py lale/lib/autogen/nu_svc.py lale/lib/sklearn/ridge.py lale/lib/autogen/mini_batch_dictionary_learning.py lale/lib/autogen/simple_imputer.py lale/expressions.py test/test_autoai_ts_libs.py lale/lib/sklearn/voting_regressor.py lale/lib/imblearn/random_over_sampler.py lale/lib/__init__.py lale/lib/autogen/ordinal_encoder.py lale/lib/lale/split_xy.py lale/lib/autogen/dictionary_learning.py lale/datasets/sklearn_to_pandas.py lale/lib/autoai_ts_libs/window_standard_row_mean_center_mts.py lale/lib/imblearn/condensed_nearest_neighbour.py _arg_docstring _indent _kind_tag _cls_docstring _params_docstring _method_docstring 
_value_docstring _hyperparams_docstring _schema_docstring _paramlist_docstring _set_docstrings_helper set_docstrings _get_hp2constraints hour _make_ast_expr minute recent max count FixUnparser window_variance_trend day_of_year day_of_week window_variance identity window_max_trend _make_call_expr _it_column collect_set Expr sum desc window_mean_trend isnull replace asc distinct_count month window_max max_gap_to_cutoff mean recent_gap_to_cutoff _is_ast_name_it normalized_sum item _make_binop first window_min normalized_count window_min_trend min variance isnotnan isnan fixedUnparse day_of_month trend window_mean isnotnull Grammar NonTerminal make_array_index_name _is_pandas_df _is_df _is_ast_subs_or_attr nest_choice_all_HPparams nest_choice_HPparam is_empty_dict json_lookup println_pos _is_ast_constant partition_sklearn_choice_params get_name_and_index _is_ast_call _is_ast_subscript append_batch data_to_json partition_sklearn_params are_hyperparameters_equal ndarray_to_json make_indexed_name val_wrapper _is_ast_name create_data_loader cross_val_score arg_name write_batch_output_to_file unnest_HPparams cross_val_score_track_trials nest_HPparams assignee_name nest_all_HPparams dict_without instantiate_from_hyperopt_search_space is_numeric_structure nest_choice_HPparams nest_HPparam make_degen_indexed_name add_missing_values import_from_sklearn_pipeline to_graphviz unnest_choice _ast_func_id _is_spark_df make_nested_hyperopt_space split_with_schemas create_individual_op_using_reflection _is_ast_attribute create_instance_from_hyperopt_search_space fold_schema _GenSym from_json _get_state _hps_from_json_rec _op_from_json_rec _camelCase_to_snake _hps_to_json_rec json_op_kind _op_to_json_rec to_json _get_cls2label _get_customize_schema TrainableOperator _pipeline_graph_class TrainedOperator get_available_estimators _WithoutGetParams get_op_from_lale_lib make_choice get_available_transformers PlannedPipeline make_pipeline_graph _PipelineFactory Operator TrainedIndividualOp 
make_pipeline PlannedIndividualOp OperatorChoice make_union_no_concat PlannedOperator clone_op make_union BasePipeline _mutation_warning get_lib_schemas make_pretrained_operator TrainedPipeline make_operator _DictionaryObjectForEnum IndividualOp with_structured_params _fixup_hyperparams_dict TrainableIndividualOp TrainablePipeline wrap_operator customize_schema get_available_operators wrap_imported_operators _wrap_operators_in_symtab to_string ipython_display _operator_jsn_to_string_rec hyperparams_to_string _CodeGenState _collect_names _introduce_structure _op_kind _combine_lonely_literals _operator_jsn_to_string _format_code _get_module_name json_to_string addDictAsFields schemaToDiscoveredEnums DiscoveredEnums schemaToPythonEnums accumulateDiscoveredEnumsToPythonEnums addSchemaEnumsAsFields discoveredEnumsToPythonEnums JSON AllOf Array Int Null Object Schema AnyOf Float String Undefined Bool Enum Not SchemaRange toAllOfList simplifyNot_ toAnyOfList simplify narrowSimplifyAndFilter enumValues findRelevantFields narrowToRelevantConstraints narrowToRelevantFields liftAllOf set_with_str_for_keys narrowToGivenRelevantFields filterForOptimizer hasAnyOperatorSchemas impossible liftAnyOf simplifyAll simplifyAny hasAllOperatorSchemas simplifyNot atomize_schema_enumerations check_operators_schema getMinimum getExclusiveMaximum getExclusiveMinimum isForOptimizer makeAllOf forOptimizer makeAnyOf makeSingleton_ is_lale_any_schema makeOneOf is_true_schema is_false_schema has_operator getForOptimizer getMaximum set_disable_hyperparams_schema_validation set_disable_data_schema_validation sklearn_compat_clone make_sklearn_compat is_subschema get_hyperparam_names always_validate_schema _json_replace validate_is_schema get_default_schema validate_schema_directly _validate_lale_type validate_method SubschemaError get_hyperparam_defaults _validate_subschema join_schemas validate_schema _json_meta_schema _get_args_schema replace_data_constraints is_schema has_data_constraints 
_indiv_op_tooltip _json_to_graphviz_rec json_to_graphviz _url_new_tab _get_cluster2reps dataframe_to_schema is_list_tensor get_table_name DataFrameWithSchema strip_schema list_tensor_to_shape_and_dtype add_table_name SeriesWithSchema add_schema add_schema_adjusting_n_rows liac_arff_to_schema ndarray_to_schema list_tensor_to_schema shape_and_dtype_to_schema is_liac_arff NDArrayWithSchema series_to_schema torch_tensor_to_schema csr_matrix_to_schema to_schema dtype_to_schema load_movie_review covtype_df digits_df load_iris_df _bunch_to_df boston_housing_df california_housing_df pandas2spark get_data_from_csv fetch_go_sales_dataset fetch_imdb_dataset multitable_train_test_split add_schemas fetch download tsv_to_Xy fetch_drugscom fetch_household_power_consumption _AdversarialDebiasingImpl _CalibratedEqOddsPostprocessingImpl _get_compas_filepath fetch_ricci_df _fetch_meps_raw_df fetch_tae_df _get_pandas_and_fairness_info_from_meps_dataset FiscalYear fetch_adult_df fetch_compas_violent_df _get_dataframe_from_compas_csv fetch_creditg_df _perform_default_preprocessing _get_utilization_columns _should_drop_column _race _perform_custom_preprocessing _try_download_compas _get_pandas_and_fairness_info_from_compas_dataset Panel fetch_meps_panel20_fy2015_df _fetch_boston_housing_df fetch_meps_panel21_fy2016_df fetch_titanic_df _get_pandas_and_fairness_info_from_compas_csv fetch_bank_df _get_compas_filename fetch_speeddating_df fetch_compas_df fetch_nursery_df _get_total_utilization fetch_meps_panel19_fy2015_df _DisparateImpactRemoverImpl _EqOddsPostprocessingImpl _GerryFairClassifierImpl _LFRImpl _MetaFairClassifierImpl _OptimPreprocImpl _PrejudiceRemoverImpl _group_flag _ProtectedAttributesEncoderImpl _dataframe_replace _redaction_value _RedactingImpl _RejectOptionClassificationImpl _ReweighingImpl _ScorerFactory _ndarray_to_dataframe FairStratifiedKFold theil_index fair_stratified_train_test_split average_odds_difference symmetric_disparate_impact _BasePostEstimatorImpl 
_AccuracyAndDisparateImpact dataset_to_pandas equal_opportunity_difference disparate_impact _PandasToDatasetConverter _SymmetricDisparateImpact _R2AndDisparateImpact accuracy_and_disparate_impact r2_and_disparate_impact _column_for_stratification _ndarray_to_series _validate_fairness_info _BaseInEstimatorImpl statistical_parity_difference _ensure_str _boolean2floatImpl _CatEncoderImpl _CatImputerImpl _CompressStringsImpl _float32_transformImpl _FloatStr2FloatImpl _FS1Impl _FS2Impl _NumpyColumnSelectorImpl _NumpyPermuteArrayImpl _NumpyReplaceMissingValuesImpl _NumpyReplaceUnknownValuesImpl _NumImputerImpl _OptStandardScalerImpl _TA1Impl _TA2Impl _TAMImpl _TB1Impl _TB2Impl _TextTransformerImpl _TGenImpl _TNoOpImpl wrap_pipeline_segments _Word2VecTransformerImpl _AutoaiTSPipelineImpl _AutoaiWindowedWrappedRegressorImpl _AutoaiWindowTransformedTargetRegressorImpl _DifferenceFlattenAutoEnsemblerImpl _FlattenAutoEnsemblerImpl _LocalizedFlattenAutoEnsemblerImpl _MT2RForecasterImpl _SmallDataWindowTargetTransformerImpl _SmallDataWindowTransformerImpl _StandardRowMeanCenterImpl _StandardRowMeanCenterMTSImpl _T2RForecasterImpl _TSPipelineImpl _WatForeForecasterImpl _WindowStandardRowMeanCenterMTSImpl _WindowStandardRowMeanCenterUTSImpl _WindowTransformerMTSImpl _AdaBoostClassifierImpl _AdaBoostRegressorImpl _AdditiveChi2SamplerImpl _ARDRegressionImpl _BayesianRidgeImpl _BernoulliNBImpl _BernoulliRBMImpl _BinarizerImpl _BirchImpl _CalibratedClassifierCVImpl _CCAImpl _ComplementNBImpl _DecisionTreeClassifierImpl _DecisionTreeRegressorImpl _DictionaryLearningImpl _ElasticNetCVImpl _ExtraTreesClassifierImpl _ExtraTreesRegressorImpl _FactorAnalysisImpl _FastICAImpl _FunctionTransformerImpl _GaussianNBImpl _GaussianProcessClassifierImpl _GaussianProcessRegressorImpl _GaussianRandomProjectionImpl _GradientBoostingClassifierImpl _GradientBoostingRegressorImpl _HuberRegressorImpl _IncrementalPCAImpl _IsomapImpl _KernelPCAImpl _KBinsDiscretizerImpl _KMeansImpl 
_KNeighborsClassifierImpl _KNeighborsRegressorImpl _LabelBinarizerImpl _LabelEncoderImpl _LabelPropagationImpl _LabelSpreadingImpl _LarsImpl _LarsCVImpl _LassoImpl _LassoCVImpl _LassoLarsImpl _LassoLarsCVImpl _LassoLarsICImpl _LatentDirichletAllocationImpl _LinearRegressionImpl _LinearSVCImpl _LinearSVRImpl _LocallyLinearEmbeddingImpl _LogisticRegressionImpl _LogisticRegressionCVImpl _MaxAbsScalerImpl _MiniBatchDictionaryLearningImpl _MiniBatchKMeansImpl _MiniBatchSparsePCAImpl _MinMaxScalerImpl _MissingIndicatorImpl _MLPClassifierImpl _MLPRegressorImpl _MultinomialNBImpl _MultiLabelBinarizerImpl _ElasticNetImpl _KernelRidgeImpl _LinearDiscriminantAnalysisImpl _MultiTaskElasticNetImpl _PerceptronImpl _RidgeClassifierImpl _MultiTaskElasticNetCVImpl _MultiTaskLassoImpl _MultiTaskLassoCVImpl _NearestCentroidImpl _NMFImpl _NormalizerImpl _NuSVCImpl _NuSVRImpl _NystroemImpl _OneHotEncoderImpl _OrdinalEncoderImpl _OrthogonalMatchingPursuitImpl _OrthogonalMatchingPursuitCVImpl _PassiveAggressiveClassifierImpl _PassiveAggressiveRegressorImpl _PCAImpl _PLSSVDImpl _PLSCanonicalImpl _PLSRegressionImpl _PolynomialFeaturesImpl _PowerTransformerImpl _QuadraticDiscriminantAnalysisImpl _QuantileTransformerImpl _RadiusNeighborsClassifierImpl _RadiusNeighborsRegressorImpl _RandomForestClassifierImpl _RandomForestRegressorImpl _RandomTreesEmbeddingImpl _RANSACRegressorImpl _RBFSamplerImpl _RidgeImpl _RidgeClassifierCVImpl _RidgeCVImpl _RobustScalerImpl _SGDClassifierImpl _SGDRegressorImpl _SimpleImputerImpl _SkewedChi2SamplerImpl _SparsePCAImpl _SparseRandomProjectionImpl _StandardScalerImpl _SVCImpl _SVRImpl _TheilSenRegressorImpl _TransformedTargetRegressorImpl _TruncatedSVDImpl _ADASYNImpl _AllKNNImpl _BaseResamplerImpl _BorderlineSMOTEImpl _CondensedNearestNeighbourImpl _EditedNearestNeighboursImpl _InstanceHardnessThresholdImpl _RandomOverSamplerImpl _RepeatedEditedNearestNeighboursImpl _SMOTEImpl _SMOTEENNImpl _SVMSMOTEImpl _AliasImpl auto_prep _AutoPipelineImpl auto_gbt 
_BatchingImpl _BothImpl _is_pandas _ConcatFeaturesImpl _FilterImpl filter_isnotnan categorical filter_isnull filter_isnotnull filter_isnan date_time _GridSearchCVImpl _GroupByImpl _HalvingGridSearchCVImpl _HyperoptImpl _IdentityWrapperImpl _JoinImpl _NoOpImpl LoggingObserver _ObservingImpl observe _OptimizeLast _OptimizeSuffix _OrderByImpl _ProjectImpl _columns_schema_to_list _columns_to_list _RelationalImpl _SampleBasedVotingImpl _ScanImpl _SMACImpl SparkExplainer _SplitXyImpl _TeeImpl Log10 Magnitude Eigenvalues TimeCorrelation StandardizeFirst Resample Pipeline StandardizeLast upper_right_triangle CorrelationMatrix FFTWithTimeFreqCorrelation FFT FreqCorrelation _TimeFreqEigenVectorsImpl Slice _TopKVotingClassifierImpl check_scoring_best_score_constraint _LGBMClassifierImpl _LGBMRegressorImpl _AggregateImpl _validate _MapImpl _AccessedColumns _Validate _new_column_name _accessed_columns _MinMaxScalerImpl _df_count _OneHotEncoderImpl _OrdinalEncoderImpl hour replace day_of_year month time_functions _PandasEvaluator day_of_week identity minute day_of_month _eval_ast_expr_pandas_df eval_expr_pandas_df hour eval_expr_spark_df _SparkEvaluator replace day_of_year month time_functions day_of_week identity minute day_of_month _eval_ast_expr_spark_df _AdaBoostClassifierImpl _AdaBoostRegressorImpl _BaggingClassifierImpl _BaggingRegressorImpl _FitSpecProxy _OneHotEncoderImpl _OrdinalEncoderImpl _PipelineImpl _SelectKBestImpl _SimpleImputerImpl _StackingClassifierImpl _StackingRegressorImpl _concatenate_predictions_pandas _TfidfVectorizerImpl _SnapBoostingMachineClassifierImpl _SnapBoostingMachineRegressorImpl _SnapDecisionTreeClassifierImpl _SnapDecisionTreeRegressorImpl _SnapLinearRegressionImpl _SnapLogisticRegressionImpl _SnapRandomForestClassifierImpl _SnapRandomForestRegressorImpl _SnapSVMClassifierImpl _rename_all_features _XGBClassifierImpl _rename_one_feature _rename_all_features _XGBRegressorImpl _rename_one_feature SearchSpaceGridstoGSGrids SearchSpaceGridtoGSGrid 
get_defaults_as_param_grid HPValuetoGSValue gridsearchcv_grid_to_string get_grid_search_parameter_grids gridsearchcv_grids_to_string SearchSpaceNumberToGSValues get_parameter_grids get_lale_gridsearchcv_op SearchSpaceHPStrVisitor _mk_label SearchSpaceHPExprVisitor make_nested_hyperopt search_space_to_hp_str search_space_to_hp_expr search_space_to_str_for_comparison pgo_sample get_smac_space HPValuetoSMAC SearchSpaceGridtoSMAC addSearchSpaceGrid SearchSpaceNumberToSMAC hp_grids_to_smac_cs addSearchSpaceGrids FakeNone smac_fixup_params lale_trainable_op_from_config lale_op_smac_tae hyperopt_search_space DefaultValue freqsAsFloatValues freqsAsIntegerValues remove_defaults_dict load_pgo_file FrequencyDistribution freqsAsEnumValues normalize_pgo_type load_pgo_data add_sub_space asFreqs SearchSpaceOperatorVisitor freqs_wrapper_lookup get_default op_to_search_space pgo_lookup OperatorSchemaError FreqsWrapper SearchSpaceEnum SearchSpaceNumber SearchSpaceProduct SearchSpaceConstant SearchSpaceArray SearchSpaceSum SearchSpacePrimitive SearchSpaceOperator SearchSpaceEmpty should_print_search_space SearchSpaceDict SearchSpaceObject SearchSpaceBool SearchSpace SearchSpaceError _get_print_search_space_options get_search_space_grids SearchSpaceToGridVisitor op_to_search_space_grids search_space_grids_to_string search_space_to_grids search_space_grid_to_string BatchDataDict HDF5TorchDataset NumpyTorchDataset accept Visitor VisitorMeta AbstractVisitorMeta VisitorPathError TestGrammar _MyLRImpl _IncreaseRowsImpl _CustomParamsCheckerOpImpl UnknownOp BadClassifier TestAIF360Cat TestAIF360Num TestAIF360Datasets TestEnsemblesWithAIF360 TestAutoaiLibs TestAutoaiLibsText TestAutoAIOutputConsumption TestSchemas TestWatForeForecasters TestMT2RForecaster TestPrettyPrint StandardRowMeanCenterTest create_function_test_schemas TestFlattenImputers TestSROMEnsemblers TestAutoaiTSLibs train_test_split TestImportExport get_srom_time_series_estimators TestInterpolatorImputers 
TimeseriesWindowTransformerTest test_2_steps_classifier test_transformer load_iris test_ordinal_encoder test_failed_transformer load_regression test_2_steps_regressor test_classifier test_missing_indicator test_failed_regressor test_regressors test_failed_classifier test_multi base_test TestSpuriousSideConstraintsClassification TestLogisticRegression TestKNeighborsClassifier create_function_test_classifier TestBaggingClassifier TestClassification TestStackingClassifier TestMLPClassifier TestIsolationForest TestKMeans TestVotingClassifier TestClone TestEmptyY TestTags TestFitPlannedOp TestOperatorFowarding TestHyperparamRanges _OperatorForwardingTestImpl TestTee TestWrappedImpl TestUnparseExpr UserValidatorImpl TestUserValidator _OperatorForwardingTestWrappedImpl TestOperatorErrors TestMethodParameters TestGetParams TestBoth TestOperatorLogging _TestLazyImpl TestOperatorWithoutSchema TestLaleVersion TestWithParams TestScoreIndividualOp TestScore TestOperatorChoice TestAutoPipeline TestPredictLogProba TestCreation TestComposition TestPartialFit TestScoreSamples TestImportExport create_function_test_regressor TestFriedmanMSE TestRegression TestSpuriousSideConstraintsRegression TestRidge TestMissingIndicator TestOrdinalEncoder create_function_test_feature_preprocessor TestConcatFeatures TestTfidfVectorizer TestFeaturePreprocessing TestRFE TestNMF TestFunctionTransformer TestConstraintDropping TestWrapUnknownOps TestConstraintMerging TestFreeze TestCustomSchema TestKNeighborsRegressor TestStandardScaler TestGridSearchCV TestAutoConfigureClassification create_function_test_resampler TestResamplers TestImblearn TestDiff TestPrettyPrint TestToAndFromJSON TestToGraphviz TestGradientBoostingClassifier TestPolynomialFeatures TestExtraTreesClassifier TestLogisticRegression TestRidge TestDecisionTreeRegressor TestRandomForestRegressor TestRandomForestClassifier TestGradientBoostingRegressor TestDecisionTreeClassifier TestXGBRegressor TestFeatureAgglomeration 
TestLinearRegression TestExtraTreesRegressor TestSVC TestXGBClassifier TestMLPClassifier TestFunctionTransformer TestVotingClassifier create_function_test_encoder TestTextEncoders should_test test_notebook iris_f_min TestSMAC TestCrossValidation TestStandardScaler TestVisitorErrors iris_fmin_tae TestKNeighborsRegressor TestHigherOrderOperators TestAutoConfigureRegression run_hyperopt_on_planned_pipeline TestHyperoptOperatorDuplication TestGridSearchCV iris_f_min_for_folds TestKNeighborsClassifier TestHyperopt TestOptimizeLast TestTopKVotingClassifier f_min TestSelectKBestTransformer TestAutoConfigureClassification TestPGOGridSearchCV TestPGOLoad TestPGOHyperopt TestBatching2 TestPipeline TestExportToSklearnForEstimator TestImportFromSklearnWithCognito TestBatching TestJoinSpark TestMapSpark TestTrainTestSplit TestFilter TestFilterSpark TestExpressions TestGroupBy TestAlias TestMapOnBothPandasAndSpark TestOrderBy TestOrderBySpark TestSplitXy TestMap TestScan TestSplitXySpark TestJoin TestAggregate TestRelationalOperator TestMinMaxScalerSpark TestOrdinalEncoder TestMinMaxScaler TestPipeline TestOneHotEncoder TestReplace TestClone _MutatingOpImpl fit_clone_fit TestSnapMLRegressors TestSnapMLClassifiers TestErrorMessages TestWithScorer TestHyperparamConstraints TestSchemaValidation TestDisablingSchemaValidation TestDatasetSchemas EnableSchemaValidation join rstrip splitlines join _value_docstring isinstance get join _kind_tag isinstance _value_docstring item_docstring startswith append compile get _schema_docstring items isinstance get _arg_docstring items isinstance get keys range len _get_hp2constraints _params_docstring _schema_docstring strip splitlines get make_fun _cls_docstring exec _hyperparams_docstring _paramlist_docstring impl_class arg_name getmodule _schemas _set_docstrings_helper type __name__ FixUnparser StringIO BinOp isinstance isinstance Name Call Expr parse cast pformat value Str isinstance Subscript _is_ast_name_it Constant Attribute parse 
isinstance Name targets Assign extract_stack value parse isinstance Name Call Expr extract_stack args numpy ndarray isinstance values split hasattr add_schema _safe_split set max fold_schema_aux len check_scoring time scorer isinstance StratifiedKFold split_with_schemas predict_proba split log_loss append fit isinstance scoring StratifiedKFold split_with_schemas split append predict fit join import_module getattr class_ split to_json json_to_graphviz format print search system strftime items list isinstance range len int items isinstance hyperparams instantiate_from_hyperopt_search_space dict edges append steps enumerate _impl wrapper_class transformer_list hasattr Operator FeatureUnion TrainedIndividualOp make_pipeline named_steps make_union find_lale_wrapper lale_op __class__ get_params _schemas Pipeline items deepcopy _name isinstance ndarray isinstance File Tensor get ndarray toarray isinstance view csr_matrix NDArrayWithSchema HDF5TorchDataset Series from_numpy TensorDataset getattr NumpyTorchDataset Tensor DataFrame to_numpy int File create_dataset len int RandomState ndarray isinstance shuffle randint copy shape nan zeros range items split int unnest_choice items split ndarray isinstance Name isinstance TrainedOperator Operator TrainableOperator isinstance items class_name isinstance _get_state stack IndividualOp sub Operator _op_to_json_rec isinstance _impl _camelCase_to_snake class_name hasattr name TrainedIndividualOp OperatorChoice BasePipeline _get_customize_schema get is_frozen_trainable get_lib_schemas _get_state reduced_hyperparams documentation_url _hps_to_json_rec impl_class _schemas IndividualOp enumerate TrainableIndividualOp isinstance is_frozen_trained viz_label gensym steps validate _GenSym _op_to_json_rec _get_cls2label Draft4Validator isinstance get freeze_trainable get_lib_schemas TrainableIndividualOp rfind _hps_from_json_rec make_operator result import_module getattr json_op_kind customize_schema cast warning validate Draft4Validator 
_op_from_json_rec str isinstance make_operator __module__ import_module getattr startswith _check_schemas __name__ get_op_from_lale_lib list get_lib_schemas hasattr TrainableIndividualOp isinstance endswith TrainedIndividualOp assignee_name PlannedIndividualOp __class__ startswith _check_schemas append isclass get_params _schemas keys set _pipeline_graph_class isinstance make_operator extend edges append steps isinstance make_operator extend edges append _steps isinstance name make_operator extend append steps remove_defaults_dict set_disable_hyperparams_schema_validation deepcopy isinstance _invalidate_enum_attributes schema index validate_is_schema append validate_method hyperparams clone _set_name get int list partition_sklearn_choice_params items isinstance tuple map partition_sklearn_params keys _with_params max items info startswith clone_op get_op_from_lale_lib f_globals f_locals _wrap_operators_in_symtab IndividualOp find_op isinstance find_and_replace make_graph join items format hyperparams_to_string endswith _introduce_structure fullmatch _get_module_name warning startswith append combinators values items join len group fullmatch range compile split rstrip _combine_lonely_literals join _operator_jsn_to_string_rec _collect_names _CodeGenState assigns imports _format_code append dumps _format_code to_json Operator is_schema isinstance to_string Markdown display items join children withEnumValue Enum accumulateDiscoveredEnumsToPythonEnums discoveredEnumsToPythonEnums schemaToDiscoveredEnums error setattr warning items schemaToPythonEnums addDictAsFields toAllOfList toAnyOfList append list always_validate_schema get get hasAllOperatorSchemas SchemaRange fromSchema warning to_schema_with_optimizer simplify is_true_schema max enumValues str list all add iter intersection append chain union range get product debug makeAllOf set_with_str_for_keys info is_false_schema diff enumerate items isinstance is_empty2 error min simplifyAny extend difference 
is_lale_any_schema remove_point fromSchemaForOptimizer len intersection extend set_with_str_for_keys difference info simplify is_true_schema is_false_schema union append enumValues simplify simplifyAll simplifyAny items simplifyAll simplifyAny copy info is_true_schema is_false_schema append list intersection copy set items copy simplify narrowToRelevantFields narrowToRelevantConstraints filterForOptimizer get isinstance get getForOptimizer get getForOptimizer get getForOptimizer get getForOptimizer isinstance endswith items copy get isinstance any values get items list update isinstance append values len get list all isinstance append values warn clone warn validate data_to_json validate _json_meta_schema validate isinstance _json_meta_schema items isinstance tuple range len _json_replace _validate_subschema to_schema validate_schema_directly reduce get hyperparam_schema startswith startswith append items signature items hasattr __init__ signature default _get_args_schema hasattr __init__ getattr fit recursive_check recursive_replace populate hyperparams_to_string get join list _indiv_op_tooltip items subgraph node _url_new_tab edge Digraph json_op_kind sub attr keys _json_to_graphviz_rec display _get_cluster2reps _internal_names set _internal_names set setattr ndarray SeriesWithSchema isinstance view Series to_schema is_list_tensor validate_is_schema DataFrameWithSchema DataFrame array add_schema setattr ndarray SeriesWithSchema isinstance view Series is_list_tensor DataFrameWithSchema DataFrame array str analyzed identifier Series DataFrame array isinstance dtype number subdtype unsignedinteger isinstance shape shape_and_dtype_to_schema issubdtype validate_is_schema object_ integer fields dtype_to_schema validate_is_schema reversed isinstance isinstance list_tensor_to_shape_and_dtype shape_and_dtype_to_schema list_tensor_to_shape_and_dtype shape dtype shape_and_dtype_to_schema shape validate_is_schema shape validate_is_schema shape is_floating_point reversed 
items validate_is_schema ndarray isinstance is_liac_arff csr_matrix Series _is_spark_df torch_tensor_to_schema is_schema dataframe_to_schema series_to_schema csr_matrix_to_schema liac_arff_to_schema toPandas ndarray_to_schema validate_is_schema list_tensor_to_schema DataFrame join format urlretrieve asarray print shuffle dirname makedirs data isinstance Series add_schema target train_test_split DataFrame data Series load_iris shuffle target train_test_split DataFrame _bunch_to_df load_digits fetch_covtype _bunch_to_df fetch_california_housing _bunch_to_df _bunch_to_df load_boston SQLContext getOrCreate createDataFrame set getOrCreate join urlretrieve format add_table_name get_data_from_csv dirname info append makedirs join get_data_from_csv dirname splitext info append walk add_table_name int list count _is_pandas_df _is_spark_df add_table_name tolist createDataFrame select choice set getOrCreate range enumerate len list isinstance Series add_schema unique append len LabelEncoder add_schemas pandas2spark DataFrame list number squeeze shape make_pipeline append train_test_split fit_transform format urlretrieve ColumnTransformer astype add_schema liac_arff_to_schema enumerate join get_feature_names print Series makedirs transform drop join makedirs add_schema shape read_csv download tsv_to_Xy download read_csv fetch sort_index Series assign drop fetch sort_index Series float64 astype assign fetch float64 Series astype assign drop __file__ join dirname abspath _get_compas_filepath urlretrieve _get_compas_filename exists dataset_to_pandas _get_compas_filepath _get_compas_filename read_csv sort_index replace copy apply _perform_default_preprocessing _get_dataframe_from_compas_csv sort_index load_preproc_data_compas _try_download_compas StandardDataset _try_download_compas _get_dataframe_from_compas_csv fetch sort_index Series assign drop fetch sort_index Series assign drop list columns fetch sort_index Series extend filter assign append drop median sort_index Series 
assign boston_housing_df fetch sort_index Series assign drop list columns fetch sort_index Series extend filter assign drop _get_utilization_columns _get_utilization_columns set join sort_index __file__ error tolist apply set filter rename dirname abspath read_csv dataset_to_pandas MEPSDataset19 MEPSDataset20 MEPSDataset21 concat argmax unique Series add_schema ravel features DataFrame validate_schema_directly _check_ranges get _check_overlaps Series getattr add_schema isinstance get isinstance add_schema getattr DataFrame transform apply ProtectedAttributesEncoder concat _validate_fairness_info hasattr add_schema_adjusting_n_rows json_schema _column_for_stratification train_test_split class_name remove_last Pipeline get_last SimpleImputer Project OneHotEncoder len _is_pandas_df _is_pandas_df _is_pandas_df _is_pandas_df to_schema is_subschema isinstance columns isinstance is_schema _columns_schema_to_list callable append range isinstance visit _expr _AccessedColumns visit _expr _Validate _is_pandas_df _is_spark_df visit _PandasEvaluator defaultdict value literal_eval map _eval_ast_expr_pandas_df _eval_ast_expr_pandas_df to_datetime literal_eval visit _SparkEvaluator items otherwise _eval_ast_expr_spark_df when lit _eval_ast_expr_spark_df set_index isinstance Series reshape index passthrough append DataFrame enumerate items replace get_defaults GridSearchCV get_search_space_grids SearchSpaceGridstoGSGrids name print should_print_search_space dtype list distribution tolist getInclusiveMax discrete samples append getInclusiveMin default isinstance smac_fixup_params clone with_params get_search_space_grids name print hp_grids_to_smac_cs should_print_search_space distribution getInclusiveMax discrete getInclusiveMin isinstance SearchSpaceGridtoSMAC add_hyperparameter add_condition EqualsCondition CategoricalHyperparameter addSearchSpaceGrid add_hyperparameter range enumerate len addSearchSpaceGrids name op_to_search_space print should_print_search_space items validate 
normalize_pgo_type Draft4Validator int float name print should_print_search_space run get validate Draft4Validator forOptimizer get isinstance base isinstance append isinstance get set split name print op_to_search_space_grids should_print_search_space warn ceil round len op_to_search_space search_space_to_grids join dirname deepcopy MT2RForecaster LocalizedFlattenAutoEnsembler EnsembleRegressor append DifferenceFlattenAutoEnsembler FlattenAutoEnsembler format test base_test xfail xfail base_test base_test xfail base_test base_test base_test xfail make_choice base_test make_choice base_test format format format format format startswith warn join cross_val_score load_iris load_iris fit Hyperopt print clone fit
# Lale [![Tests](https://github.com/IBM/lale/workflows/Tests/badge.svg?branch=master)](https://github.com/IBM/lale/actions?query=workflow%3ATests+branch%3Amaster) [![Documentation Status](https://readthedocs.org/projects/lale/badge/?version=latest)](https://lale.readthedocs.io/en/latest/?badge=latest) [![PyPI version shields.io](https://img.shields.io/pypi/v/lale?color=success)](https://pypi.python.org/pypi/lale/) [![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![linting: pylint](https://img.shields.io/badge/linting-pylint-yellowgreen)](https://github.com/PyCQA/pylint) [![security: bandit](https://img.shields.io/badge/security-bandit-yellow.svg)](https://github.com/PyCQA/bandit) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/5863/badge)](https://bestpractices.coreinfrastructure.org/projects/5863)
483
IBM/lale-gpl
['time series']
['Type-Driven Automated Learning with Lale']
lalegpl/datasets/__init__.py lalegpl/lib/__init__.py lalegpl/lib/weka/j48.py test/test_relational.py lalegpl/lib/lale/nsga2.py lalegpl/lib/r/arules_cba_classifier.py test/test_datasets.py docs/conf.py lalegpl/datasets/multitable/__init__.py lalegpl/__init__.py lalegpl/lib/r/util.py lalegpl/lib/lale/__init__.py test/test_interoperability.py test/test_nsga2.py lalegpl/lib/weka/util.py lalegpl/lib/r/__init__.py test/test_optimizers.py lalegpl/lib/weka/__init__.py lalegpl/datasets/auto_weka.py test/test_notebooks.py setup.py lalegpl/datasets/multitable/fetch_datasets.py test/__init__.py fetch_car fetch get_data_from_csv fetch_imdb_dataset _NSGA2Impl MaxBudgetExceededException _ModelHelper ArulesCBAClassifier_Impl install_r_package create_r_dataframe J48_Impl options sklearn_input_to_weka hyperparam_ranges weka_output_to_sklearn TestJoinSpark TestJoin tearDownModule TestJ48 test_in_subprocess TestArulesCBAClassifier TestJ48test create_test TestNotebooks TestNSGA2 compute_fpr test_iris_fmin_tae test_f_min DontTestCar test_iris_f_min test_iris_f_min_for_folds TestBenchmarkJoinAndFilterPandas TestBenchmarkJoinAndFilterSpark join format print expanduser makedirs getOrCreate fetchall join writer cursor format add_table_name writerow get_data_from_csv close connect open dirname info execute append makedirs join println_pos importr libPaths_fun chooseCRANmirror install_packages expanduser makedirs Series DataFrame format create_instance isinstance Series create_instances create_numeric add_instance unique create_nominal append class_is_last range len call jobject get_enumeration_wrapper options int replace hasMoreElements strip search Option nextElement print started stop format __name__ __module__ ravel round cross_val_score load_iris
# Lalegpl [![Build Status](https://travis-ci.com/IBM/lale-gpl.svg?branch=master)](https://travis-ci.com/IBM/lale-gpl) <br /> <img src="docs/img/lale_logo.jpg" alt="logo" width="55px"/> Lale (https://github.com/IBM/lale) is a Python library for data science with an emphasis on automation, usability, and interoperability. Lalegpl is an extension of Lale with operators from libraries such as Weka and R which have gpl dependencies. * Slack channel: `#lale-users` in the IBM Research org * [Installation instructions](docs/getting_started.md) * Python [API documentation](https://pages.github.ibm.com/Lale/lale/) * arXiv [paper](https://arxiv.org/pdf/1906.03957.pdf)
484
IBM/science-result-extractor
['data augmentation']
['TDMSci: A Specialized Corpus for Scientific Literature Entity Tagging of Tasks Datasets and Metrics', 'Identification of Tasks, Datasets, Evaluation Metrics, and Numeric Scores for Scientific Leaderboards Construction']
data/TDMSci/modelTrainingScript/training_tdm.py bert_tdms/run_classifier_sci.py data/NLP-TDMS/downloader/download_pdfs.py SciProcessor create_model InputFeatures MrpcProcessor ColaProcessor input_fn_builder file_based_input_fn_builder MnliProcessor InputExample _truncate_seq_pair model_fn_builder file_based_convert_examples_to_features convert_examples_to_features DataProcessor main PaddingInputExample XnliProcessor convert_single_example md5 main filesizeKB SciProcessor create_model InputFeatures MrpcProcessor ColaProcessor input_fn_builder file_based_input_fn_builder MnliProcessor InputExample _truncate_seq_pair model_fn_builder file_based_convert_examples_to_features convert_examples_to_features DataProcessor main PaddingInputExample XnliProcessor convert_single_example md5 filesizeKB join text_b isinstance InputFeatures convert_tokens_to_ids len _truncate_seq_pair tokenize guid info append text_a enumerate segment_ids create_int_feature TFRecordWriter write SerializeToString close OrderedDict Example input_mask info input_ids enumerate convert_single_example pop len get_variable get_pooled_output value BertModel segment_ids label_id input_mask append input_ids append enumerate convert_single_example info do_eval get_train_examples TPUClusterResolver init_checkpoint TPUEstimator set_verbosity output_dir do_train do_predict from_json_file model_fn_builder file_based_convert_examples_to_features validate_case_matches_checkpoint eval_batch_size tpu_name data_dir max_seq_length len get_labels do_lower_case bert_config_file append PaddingInputExample use_tpu predict predict_batch_size lower MakeDirs num_train_epochs info INFO FullTokenizer int warmup_proportion join evaluate file_based_input_fn_builder get_dev_examples PER_HOST_V2 get_test_examples train train_batch_size RunConfig get print md5 filesizeKB strip close Request urlopen add_header open split
# Science-result-extractor ## Introduction This repository contains code and a few datasets to extract TDMS (Task, Dataset, Metric, Score) tuples from scientific papers in the NLP domain. We envision three primary uses for this repository: (1) to [extract table content from PDF files](#extract-table-content-from-pdf-files), (2) to [replicate the paper's results or run experiments based on a textual entailment system](#run-experiments-based-on-textual-entailment-system), and (3) to train a model to extract TDM mentions. Please refer to the following paper for the full details: Yufang Hou, Charles Jochim, Martin Gleize, Francesca Bonin, Debasis Ganguly. Identification of Tasks, Datasets, Evaluation Metrics, and Numeric Scores for Scientific Leaderboards Construction. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (ACL 2019), Florence, Italy, 27 July - 2 August 2019 Yufang Hou, Charles Jochim, Martin Gleize, Francesca Bonin, Debasis Ganguly. TDMSci: A Specialized Corpus for Scientific Literature Entity Tagging of Tasks Datasets and Metrics. In Proceedings of the 16th conference of the European Chapter of the Association for Computational Linguistics (EACL 2021), Online, 19-23 April 2021 ## Extract table content from PDF files We developed a deterministic PDF table parser based on [GROBID](https://github.com/kermitt2/grobid). To use our parser, follow the steps below: 1) Fork and clone this repository, e.g.,
485
ICTRC/Parsivar
['morphological analysis']
['Parsivar: A Language Processing Toolkit for Persian']
parsivar/chunker.py parsivar/normalizer.py parsivar/__init__.py setup.py parsivar/postagger.py parsivar/spell_checker.py parsivar/dependency.py parsivar/token_merger.py parsivar/data_helper.py parsivar/tokenizer.py parsivar/stemmer.py FindChunks DataHelper MyMaltParser DependencyParser POSTagger SpellCheck Tokenizer ClassifierChunkParser
parsivar ------------ Python library for Persian text preprocessing. + Text Normalizing + Half space correction in Persian text + Word and sentence tokenizer (splitting words and sentences) + Word stemming + POS tagger
486
IDEALLab/Active-Expansion-Sampling
['active learning']
['Active Expansion Sampling for Learning Feasible Domains in an Unbounded Input Space']
test_highdim_nv.py functions.py test_2d_straddle.py test_2d_aes.py query_strategies.py test_highdim_straddle.py test_highdim_aes.py gpc.py test_2d_nv.py two_spheres branin one_circle two_circles beam hosaki GPClassifier _BGPCL array array logical_and cos pi exp logical_and reduce zeros
# Active Expansion Sampling (AES) Experiment code associated with our paper: Chen W, Fuge M. [Active Expansion Sampling for Learning Feasible Domains in an Unbounded Input Space](http://ideal.umd.edu/papers/paper/samo-aes). Structural and Multidisciplinary Optimization, 57(3), 925-945. Conventional adaptive sampling/active learning | AES :-----------------------------------------------:|:-----------------------------------------------: ![Alt text](/straddle.gif) | ![Alt text](/aes.gif) ## Required packages - numpy - scipy - matplotlib
487
IDSIA-papers/2020-PGM-structural
['causal inference']
['Structural Causal Models Are (Solvable by) Credal Networks']
experiments/results/prec_experiments.py experiments/config.py experiments/experiments.py experiments/utilplots.py run_multiplyconnected strdate run_tree get_args run_polytree run_experiments run_java print_project run_chain plot_rmse plot plot_time compute_mean_size plot_size get_rmse_bounds strdate run_chain_markovian get_args run_chain_nonmarkovian run_chain_terbin_nonmarkovian run_chain_terbin_markovian run_hmm_nonmarkovian run_experiments run_squares_nonmarkovian run_rhmm_markovian run_hmm_markovian run_java run_squares_markovian run_chain run_rhmm_nonmarkovian print prj_path print stdout run disable_java print strdate jarfile run_java items list strdate product print apply dict append DataFrame to_dict StringIO replace set_title print transpose set_xlabel t query set_ylabel vstack unique legend append to_numpy dropna DataFrame copy apply copy apply query filter merge
# Structural Causal Models Are (Solvable by) Credal Networks Here we provide the relevant code for the manuscript entitled "Structural Causal Models Are (Solvable by) Credal Networks" and accepted in the PGM 2020. The code is organised as follows. - [./examples](examples) contains java files with the examples shown in the paper. - [./experiments](experiments) contains the jupyter notebooks for reproducing the numerical tests given. ## Run code examples First, run the following command in a terminal:
488
IITD-DataScience/Sandhi_Prakarana
['morphological analysis']
['Neural Compound-Word (Sandhi) Generation and Splitting in Sanskrit Language']
Sandhi_Vicceda/predict_sandhi_window_bilstm.py Sandhi/devnagri_reader.py Sandhi_Vicceda/sandhi_vicceda.py Sandhi/sandhi_data_prepare.py Sandhi_Vicceda/train_test_data_prepare.py Sandhi_Vicceda/split_sandhi_window_seq2seq_bilstm.py Sandhi_Vicceda/devnagri_reader.py Sandhi/single_dict_seq2seq_bilstm_sandhi.py read_devnagri_text get_xy_data remove_nonslp1_chars get_sandhi_dataset decode_sequence read_devnagri_text train_predict_sandhi_window train_sandhi_split get_xy_data remove_nonslp1_chars get_sandhi_dataset strip sub replace transliterate strip SLP1 len DEVANAGARI remove_nonslp1_chars append read_devnagri_text split get_sandhi_dataset zeros argmax predict Bidirectional Input max pad_sequences len add set_printoptions Model append range predict bilstm set copy zeros compile print reshape dict summary LSTM fit Bidirectional strip save Input max str sorted list add decoder_lstm Model append encoder range set Dense zip zeros compile enumerate decode_sequence print fit decoder_dense dict LSTM len print
# Sandhi_Prakarana Sandhi_Prakarana is a project in Python3 that implements the task of doing Sandhi(word joining) and Sandhi-Vicceda(word split) for Sanskrit ## Sandhi Usage ```python cd Sandhi python3 single_dict_seq2seq_bilstm_sandhi.py ``` ## Sandhi_Vicceda Usage ```python cd Sandhi_Vicceda
489
IMLHF/SE_DCUNet
['speech enhancement']
['Phase-aware Speech Enhancement with Deep Complex U-Net']
train.py enhance_testsets.py models/layers/istft.py models/layers/complexnn.py utils.py se_dataset.py models/unet.py stft main repeat_to_len_2 AudioDataset load_data load_data_list stft wSDRLoss main load_checkpoint set_logger RunningAverage save_checkpoint Params save_dict_to_json pad2d_as padded_cat Decoder Encoder Unet complex_rayleigh_init ComplexBatchNorm RealConvWrapper ComplexConvWrapper CLeakyReLU ISTFT load join ckpt set_printoptions tqdm model_dir DataLoader load_state_dict Params cuda AudioDataset randint tile len print sort tqdm append listdir repeat_to_len_2 read float32 tqdm range len bsum mSDRLoss zero_grad unsqueeze save conf squeeze Adam map range state_dict size mkdir num_epochs net enumerate ExponentialLR backward print wSDRLoss parameters step istft setFormatter getLogger addHandler StreamHandler Formatter setLevel INFO FileHandler join format print copyfile mkdir save load load_state_dict cat pad2d_as rayleigh to tuple cos copy_ shape sin uniform_ float
IMLHF/SE_DCUNet
490
INGEOTEC/EvoMSA
['sentiment analysis']
['EvoMSA: A Multilingual Evolutionary Approach for Sentiment Analysis']
EvoMSA/align.py EvoMSA/tests/test_model.py continuous-integration/move-conda-package.py continuous-integration/binstar-push.py docs/source/conf.py EvoMSA/command_line.py EvoMSA/model.py EvoMSA/tests/test_base.py setup.py EvoMSA/tests/test_command_line.py EvoMSA/tests/test_utils.py EvoMSA/base.py EvoMSA/__init__.py continuous-integration/appveyor/rm_rf.py EvoMSA/utils.py EvoMSA/tests/test_align.py get_token main remove_readonly projection transform kfold_decision_function vector_space EvoMSA CommandLinePerformance CommandLine CommandLineTrain CommandLinePredict performance train fitness_vs predict EvoMSAWrapper Bernoulli BaseTextModel BaseClassifier AggressivenessAr Identity TextModelInv AggressivenessEn ThumbsUpDownAr OutputClassifier ThumbsUpDownEs LabeledDataSet AggressivenessEs Vec SVCWrapper ThumbsUpDownEn Multinomial Corpus linearSVC_array ConfidenceInterval LabelEncoderWrapper bootstrap_confidence_interval compute_p get_model Cache download test_projection _read_words labeledDataSet test_EvoMSA_predict_proba test_EvoMSA_param_TR test_EvoMSA_param_TH test_EvoMSA_regression get_data test_EvoMSA_model test_EvoMSA_evodag_class test_label_encoder test_label_encoder_kwargs test_EvoMSA_identity test_EvoMSA_param_HA test_EvoMSA_fit_svm test_lazy_loading test_EvoMSA_lang_missing test_EvoMSA_param_Emo test_model_instance test_TextModel test_EvoMSA_fit get_dirname test_cache test_EvoMSA_kfold_decision_function StoreDelete test_EvoMSA_cpu_count test_EvoMSA_predict test_evomsa_wrapper test_EvoMSA_empty_string test_sklearn_kfold test_vector_space test_binary_labels_json test_tm_njobs test_EvoMSA_multinomial test_evo_test_set_shuffle test_performance_validation_set test_train test_list_of_text test_evo_kwargs test_performance_public_set test_predict_NearestCentroid test_predict test_max_lines test_decision_function test_raw_outputs test_performance_validation_set2 test_predict_numbers test_evo_test_set test_ThumbsUpDownAr test_ThumbsUpDownEs test_LabeledDataSet 
test_TextModelInv test_AggressivenessEn test_corpus test_Vec test_bernoulli test_OutputClassifier test_AggressivenessEs test_ThumbsUpDownEn test_multinomial test_AggressivenessAr test_cache test_confidence_interval test_download test_cache_cl chmod func S_IWRITE print rmtree executable load_model KDTree set flatten add tqdm stack transform append enumerate cl isinstance decision_function array fit decision_function transform save_model load_model parse_args CommandLineTrain parse_args CommandLinePredict CommandLinePerformance parse_args mkdir join urlretrieve dirname download flatten vstack array inf mean argsort append argmax range enumerate append randint metric range __file__ join tweet_iterator dirname get_data model EvoMSA model EvoMSA get_data vector_space len kfold_decision_function model EvoMSA get_data transform vector_space fit EvoMSA save_model load_model print get_data unlink fit print get_data mean predict_proba predict fit get_data predict_proba fit str print dumps get_data dict type predict fit model print EvoMSA get_data _textModel model EvoMSA fit_svm print get_data _svc_models zip transform vector_space fit print get_data predict_proba inverse_transform argmax predict fit get_data fit get_data append fit print transform _m fit print _m transform inverse_transform fit print ndim get_data shape decision_function transform predict fit print get_data predict_proba inverse_transform argmax predict fit get_data print EvoMSA get_data EvoMSA get_data zip EvoMSA get_data EvoMSA n_jobs EvoMSA print cpu_count get_data get_data unlink save_model fit get_data predict fit print array sklearn_kfold EvoMSA get_data fit unlink train load_model unlink train load_model print mean unlink train array predict unlink train load_model unlink train predict unlink train predict unlink train predict unlink train load_model train tolist predict fit train tolist predict fit print performance train fitness_vs range print isfile performance train range isfile performance train 
range predict unlink train Corpus Bernoulli get_data decision_function transform fit num_terms print decision_function transform Multinomial Corpus fit ThumbsUpDownEs print ThumbsUpDownEn ThumbsUpDownAr OutputClassifier unlink decision_function transform Corpus fit AggressivenessEs print AggressivenessEn AggressivenessAr dict Vec unlink create_space dict tokenize TextModelInv append Cache print zip ml_train ml_kfold print zip append Cache get_data estimate dict ConfidenceInterval download
INGEOTEC/EvoMSA
491
INK-USC/NERO
['relation extraction']
['NERO: A Neural Rule Grounding Framework for Label-Efficient Relation Extraction']
tacred_constant.py tacred_loader.py semeval_loader.py semeval_constant.py models/pat_match.py models/string_sim.py tacred.py util.py models/soft_match.py main.py func.py semeval.py dense dropout softmax_mask log mean cosine Cudnn_RNN attention read train log evaluate main read_data get_counter read_glove token2id entity_masks main read_data get_counter read_glove token2id entity_masks merge_batch get_batch sample_data get_mask get_patterns get_id get_feeddict get_word get_pos Pat_Match Soft_Match lstm_match mean_match att_match shape cond as_list expand_dims len expand_dims cast float32 reduce_sum read_glove read_data glove_word_file token2id glove_dim _read glove_word_size get_counter train_file test_file dataset dev_file format print get_patterns shuffle match append ConfigProto sum len asarray get_batch evaluate tolist astype int32 run Counter float sum range values len read train gpu FLAGS list keys list len array UNK_ID keys PAD_ID entity_masks Counter append min max deepcopy join list range append asarray ones patterns get_word split append len list asarray length map shuffle range len keys concatenate shuffle len mean cosine dropout dropout reduce_sum cosine expand_dims attention dropout reduce_sum cosine Cudnn_RNN expand_dims attention rnn
# NERO Code for WWW 2020 paper [NERO: A Neural Rule Grounding Framework for Label-Efficient Relation Extraction](https://arxiv.org/abs/1909.02177). Our slide for WWW presentation can be found at [here](figs/NERO.pptx). In this paper, we present a neural approach to ground rules for RE, named NERO, which jointly learns a relation extraction module and a soft matching module. One can employ any neural relation extraction models as the instantiation for the RE module. The soft matching module learns to match rules with semantically similar sentences such that raw corpora can be automatically labeled and leveraged by the RE module (in a much better coverage) as augmented supervision, in addition to the exactly matched sentences. Extensive experiments and analysis on two public and widely-used datasets demonstrate the effectiveness of the proposed NERO framework, comparing with both rule-based and semi-supervised methods. Through user studies, we find that the time efficiency for a human to annotate rules and sentences are similar (0.30 vs. 0.35 min per label). In particular, NERO’s performance using 270 rules is comparable to the models trained using 3,000 labeled sentences, yielding a 9.5x speedup. <p align="center"><img src="figs/REGD.jpg" width="800"/></p> If you make use of this code or the rules in your work, please kindly cite the following paper: ```bibtex @article{zhou2019nero, title={NERO: A Neural Rule Grounding Framework for Label-Efficient Relation Extraction}, author={Zhou, Wenxuan and Lin, Hongtao and Lin, Bill Yuchen and Wang, Ziqi and Du, Junyi and Neves, Leonardo and Ren, Xiang}, journal={The Web Conference},
492
INK-USC/REGD
['relation extraction']
['NERO: A Neural Rule Grounding Framework for Label-Efficient Relation Extraction']
tacred_constant.py tacred_loader.py semeval_loader.py semeval_constant.py models/pat_match.py models/string_sim.py tacred.py util.py models/soft_match.py main.py func.py semeval.py dense dropout softmax_mask log mean cosine Cudnn_RNN attention read train log evaluate main read_data get_counter read_glove token2id entity_masks main read_data get_counter read_glove token2id entity_masks merge_batch get_batch sample_data get_mask get_patterns get_id get_feeddict get_word get_pos Pat_Match Soft_Match lstm_match mean_match att_match shape cond as_list expand_dims len expand_dims cast float32 reduce_sum read_glove read_data glove_word_file token2id glove_dim _read glove_word_size get_counter train_file test_file dataset dev_file format print get_patterns shuffle match append ConfigProto sum len asarray get_batch evaluate tolist astype int32 run Counter float sum range values len read train gpu FLAGS list keys list len array UNK_ID keys PAD_ID entity_masks Counter append min max deepcopy join list range append asarray ones patterns get_word split append len list asarray length map shuffle range len keys concatenate shuffle len mean cosine dropout dropout reduce_sum cosine expand_dims attention dropout reduce_sum cosine Cudnn_RNN expand_dims attention rnn
# NERO Code for WWW 2020 paper [NERO: A Neural Rule Grounding Framework for Label-Efficient Relation Extraction](https://arxiv.org/abs/1909.02177). Our slide for WWW presentation can be found at [here](figs/NERO.pptx). In this paper, we present a neural approach to ground rules for RE, named NERO, which jointly learns a relation extraction module and a soft matching module. One can employ any neural relation extraction models as the instantiation for the RE module. The soft matching module learns to match rules with semantically similar sentences such that raw corpora can be automatically labeled and leveraged by the RE module (in a much better coverage) as augmented supervision, in addition to the exactly matched sentences. Extensive experiments and analysis on two public and widely-used datasets demonstrate the effectiveness of the proposed NERO framework, comparing with both rule-based and semi-supervised methods. Through user studies, we find that the time efficiency for a human to annotate rules and sentences are similar (0.30 vs. 0.35 min per label). In particular, NERO’s performance using 270 rules is comparable to the models trained using 3,000 labeled sentences, yielding a 9.5x speedup. <p align="center"><img src="figs/REGD.jpg" width="800"/></p> If you make use of this code or the rules in your work, please kindly cite the following paper: ```bibtex @article{zhou2019nero, title={NERO: A Neural Rule Grounding Framework for Label-Efficient Relation Extraction}, author={Zhou, Wenxuan and Lin, Hongtao and Lin, Bill Yuchen and Wang, Ziqi and Du, Junyi and Neves, Leonardo and Ren, Xiang}, journal={The Web Conference},
493
INK-USC/procedural-extraction
['relation extraction']
['Eliciting Knowledge from Experts:Automatic Transcript Parsing for Cognitive Task Analysis']
fuzzy_matching/__init__.py pipeline/target_processor.py utils/spacytokenizer.py action_phrase_extraction.py fuzzy_matching/dist.py models/bert_modeling_posattention.py pattern_extraction/corenlp.py script/reduce.py paper/figure_sample.py fuzzy_matching/dist_exact.py paper/curve_context_acc.py pipeline/source_processor.py pipeline/target_matching.py fuzzy_matching/dist_exbert.py fuzzy_matching/manual_rules.py models/__init__.py extract_samples.py paper/figure_pipeline.py pipeline/__init__.py utils/input_context_sample.py utils/__init__.py pipeline/dsbuilder_relation.py models/bert_modeling_mask.py paper/tablebuilder_sampling.py paper/figure_predict.py pipeline/dsbuilder.py utils/path.py paper/curve_context.py fuzzy_matching/dist_embavg.py fuzzy_matching/measurer_glove.py paper/figure_context.py models/bert_extractor.py paper/figure_mask2.py train_bert_context_classifier.py fuzzy_matching/measurer_embed.py fuzzy_matching/dist_manual.py paper/tablebuilder_context.py pattern_extraction/__init__.py pipeline/dsbuilder_seqlabel.py pipeline/relation_preprocessor.py utils/utilities.py create_dataset.py models/bert_modeling_inputoffsetemb.py main main InputFeatures _truncate_seq_pair warmup_linear convert_examples_to_features main get_nearest_method get_method_names register_dist_adaptor embavg_adaptor exact_adaptor extracted_bert_adaptor manual_adaptor manual_rules L2norm EmbeddingMeasurer mle dot GloveMeasurer read_examples InputFeatures BertExtractor InputExample _truncate_seq_pair convert_examples_to_features BertOffsetembModel BertOffsetEmbeddings BertOffsetForSequenceClassification BertMaskForSequenceClassification BertPosattnForSequenceClassification ln_func ln_func ln_func2 ln_func ln_func ln_func retrieve_head retrieve_result retrieve_head retrieve_result contain_ban_word get_token get_name get_dep filter_verb split_s filter_sen save_aps filter_if get_token_str get_reg main load_aps get_next_punct get_ann register_dsbuilder get_builder_names get_builder 
builder_relation_dataset builder_seqlabel_dataset RelationProcessor inflate_examples DataProcessor SourceProcessor test match retrieve split TargetProcessor InputContextSampleSentence InputContextSample Tokenizer convert_int posstr test_prune prune basicConfig list dir_extracted get_builder add_argument extracted map parse_known_args builder ArgumentParser info type split datasetid src_ref str tgt retrieve src_retok eval enumerate dir_data SourceProcessor src print output match len inflate_examples join InputFeatures convert_tokens_to_ids _truncate_seq_pair info append enumerate len pop len gradient_accumulation_steps from_pretrained do_eval get_train_examples BertAdam DataParallel device output_dir do_train open seed data_dir get_labels comment device_count dir_check parse_args to manual_seed_all get_dataloader SummaryWriter format dump close do_eval_on_train lower num_train_epochs manual_seed trange do_manual load int join log_dir bert_model train_epoch named_parameters train_batch_size GloveMeasurer add_argument_group add_argument Tokenizer parse_known_args dir_glove Tokenizer parse_known_args BertExtractor add_argument add_argument_group add_argument parse_known_args text_b tokenize text_a append strip InputExample text text append enumerate load join zip append open lower range enumerate len enumerate range range len contain_ban_word range append get_token format get_dep get_token_str append get_next_punct get_token annotate append filter_sen split_s replace strip get_reg get_ann print format isfile seed parse_args add_argument parse_args add_argument list right left convert_block append enumerate print SourceProcessor split_ifthen_cite prune update add_labels list print retrive_positions copy TargetProcessor set shrink_next update_next enumerate split get_toked_ngrams_line open list parse_known_args append range get_toked_ngrams nearest set eval zip info enumerate load addbin print src_sens extend dict no_ref get_nearest_method split method len int range len 
len strip split print prune
INK-USC/procedural-extraction
494
INK-USC/shifted-label-distribution
['relation extraction']
['Looking Beyond Label Noise: Shifted Label Distribution Matters in Distantly Supervised Relation Extraction']
DataProcessor/mention.py ReHession/model/noCluster.py NeuralATT/model.py DataProcessor/mention_reader.py DataProcessor/__init__.py CoType/eigen-3.2.5/debug/gdb/__init__.py data/source/KBP/generateBClusterInput.py DataProcessor/Feature/em_brown_feature.py DataProcessor/gen_tacred.py ReHession/model/utils.py DataProcessor/feature_generation.py LogisticRegression/liblinear.py CoType/Evaluation/evaluation.py DataProcessor/liblinear_processor.py DataProcessor/pruning_heuristics.py DataProcessor/gen_data_neural.py DataProcessor/Feature/token_feature.py Neural/models/bgru.py CoType/eigen-3.2.5/debug/gdb/printers.py Neural/models/position_aware_lstm.py CoType/eigen-3.2.5/scripts/relicense.py ReHession/model/nce.py DataProcessor/nlp_parse.py DataProcessor/dev_set_partition.py Neural/utils.py data/source/NYT/generateBClusterInput.py CoType/Evaluation/emb_prediction.py DataProcessor/Feature/em_other_feature.py DataProcessor/Feature/other_feature.py Neural/models/pcnn.py DataProcessor/statistic.py LogisticRegression/utils.py DataProcessor/Feature/abstract_feature.py LogisticRegression/test.py CoType/Evaluation/tune_threshold_w_validation.py Neural/models/object.py NeuralATT/network/embedding.py Neural/train.py CoType/Evaluation/tune_threshold.py ReHession/run.py DataProcessor/ner_feature.py CoType/Evaluation/emb_test.py CoType/Evaluation/emb_dev_n_test.py DataProcessor/brown-cluster/cluster-viewer/code/final.py DataProcessor/brown-cluster/cluster-viewer/code/make_html.py DataProcessor/Feature/dependency_feature.py DataProcessor/Feature/em_dependency_feature.py LogisticRegression/Logistic.py Neural/models/palstm.py data/source/TACRED/generateBClusterInput.py NeuralATT/network/selector.py CoType/Evaluation/tune_threshold_w_sampled_dev.py ReHession/model/object.py DataProcessor/cotype_data_transform.py Neural/model.py NeuralATT/train.py CoType/Evaluation/convertPredictionToJson.py LogisticRegression/train.py DataProcessor/Feature/__init__.py Neural/eva.py NeuralATT/utils.py 
Neural/models/cnn.py Neural/models/lstm.py NeuralATT/network/encoder.py ReHession/model/pack.py NeuralATT/eva.py LogisticRegression/liblinearutil.py ReHession/eva.py DataProcessor/Feature/brown_feature.py DataProcessor/gen_bag_level_data.py DataProcessor/Feature/em_token_feature.py EigenQuaternionPrinter lookup_function register_eigen_printers build_eigen_dictionary EigenMatrixPrinter load_rel2id no_bag reorg_data split find_index transform_data convert_data Sentence EntityMention RelationMention MentionReader distribution supertype AbstractFeature BrownFeature DependencyFeature EMBrownFeature EMDependencyFeature EMContextFeature EMContextGramFeature EMTokenFeature get_lemma EMHeadFeature EMTypeFeature PosFeature NumOfEMBetweenFeature EntityMentionOrderFeature SpecialPatternFeature DistanceFeature ContextFeature EntityMentionTokenFeature HeadFeature BetweenEntityMentionTokenFeature ContextGramFeature get_lemma fillprototype parameter model print_null problem gen_feature_nodearray genFields toPyModel feature_node save_model load_model svm_read_problem train evaluations predict main main load_data get_none_id load_info Model train load_rel2id calcInd get_padded_tensor log2prob get_positions keep_partial_grad calcEntropy get_cv_dataset map_to_ids eval ensure_dir sort_all Dataset log2posprob recover_idx BGRU CNN LSTM softCE_S partCE softCE softKL soft_max PositionAwareRNN PositionAwareAttention PCNN PositionAwareLSTM Model Wrapper train load_rel2id calcInd get_padded_tensor get_positions eval map_to_ids calcEntropy sort_all Dataset recover_idx Embedding PCNN BGRU SelectorBase AttentionSelector NCE_loss noCluster softKL softCE softCE_S partCE repack resample load_qa_corpus initialize_embedding TuneThres load_question_info get_distribution clip_grad load_embedding SampleBias get_distribution_from_list calcInd dropout load_corpus calcEntropy to_scalar eval_score log_sum_exp calcMaxProb shuffle_data soft_max CrossValidation eval_score_with_thres get_none_id append 
strip_typedefs search tag target type join list items str append int list print shuffle range len range len join append join len lemmatize startswith genFields sorted isinstance filter keys range enumerate len genFields genFields genFields contents open float split encode toPyModel print encode len zip c_double toPyModel print_func C flag_cross_validation set_print_string_function check_parameter set_bias parameter nr_fold isinstance flag_find_C print bias problem flag_C_specified cross_validation evaluations find_parameter_C evaluations int is_regression_model solver_type len get_nr_class bias predict_values is_probability_model gen_feature_nodearray predict_probability info get_nr_feature feature_node split load join print open enumerate dump map load_info load_data Logistic fit join readlines map split open range append len join readlines len open update join format warn tqdm Model fix_bias eval batched_data save info rel2id save_dir range update_lr enumerate exp exp print format makedirs fill_ min tensor max enumerate len list shuffle ceil Dataset range len list enumerate len float range len softmax max zero_ exp view size expand_as gather max str Wrapper max enumerate gather max int list readline isspace asarray FloatTensor rand map sqrt filter zero_ open range append split rand readlines sqrt open len int list readline isspace LongTensor view map zero_ split append max open list isspace LongTensor map open append split shuffle list range len max softmax size ge rand long gt list filter lt int shuffle floor float range len int shuffle floor float range len print clone range len data int get_distribution_from_list calcInd eval_score len eval shuffle_data append test_with_bias array range repack_eva
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) <h3 align="center">Looking Beyond Label Noise:</h3> <h3 align="center">Shifted Label Distribution Matters in Distantly Supervised Relation Extraction</h3> &nbsp; __TL;DR__: We identify __shifted label distribution__, an important yet long-overlooked issue in DSRE; introduce a simple yet effective __bias adjustment__ to adapt a trained model along such shift explicitly; release a RE codebase. ## Table of Contents - [Introduction](#introduction) - [Example](#example) - [Training Recipes](#training-recipes) - [Environment Setup](#environment-setup)
495
INK-USC/temporal-gcn-lstm
['time series']
['Characterizing and Forecasting User Engagement with In-app Action Graph: A Case Study of Snapchat']
multi_channel.py build_temporal.py gcn_model.py utils.py activity_seq_model.py build_graphs.py Predictor LSTMs Data create_data compute_graph compute_graph GCNLayer GCN nx_to_dgl T astype append array values str join items write sum values add_edge list number_of_nodes add_nodes DGLGraph edges
# temporal-gcn-lstm Code for Characterizing and Forecasting User Engagement with In-App Action Graphs: A Case Study of Snapchat **Temporal-gcn-lstm** model encodes temporal evolving action graphs to predict future user engagement. The *end-to-end, multi-channel neural model* also encodes acitivity sequences and other macroscopic features to reach best performance. ### Requirements DGL, NetworkX, PyTorch, Pandas, Numpy, SciKit-Learn, tqdm Deep Graph Library (DGL) https://www.dgl.ai/ Pytorch https://pytorch.org/ ### Building action graphs build_graphs.py: build static graphs for time period
496
IPL-UV/jaxkern
['density estimation', 'gaussian processes']
['Kernel Methods and their derivatives: Concept and perspectives for the Earth system sciences']
jaxkern/gp/mean.py jaxkern/gp/utils.py jaxkern/similarity.py jaxkern/gp/loss.py jaxkern/gp/__init__.py docs/conf.py scripts/demo_dhsic.py jaxkern/data.py tests/test_kernels.py jaxkern/utils.py scripts/demo_rv.py scripts/demo_hsic.py jaxkern/density.py tests/test_dists.py notebooks/uncategorized/derivative/derivatives_numba.py scripts/demo_sigma.py jaxkern/dist.py jaxkern/gp/basic.py jaxkern/sigma.py jaxkern/gp/exact.py jaxkern/kernels.py jaxkern/__init__.py jaxkern/dependence.py setup.py notebooks/uncategorized/derivative/nopython_failure.py UploadCommand get_data near_square_wave kde_pdf kde_cdf_gaussian kde_pdf_gaussian gaussian_kernel nhsic_ka hsic mmd mmd_mi nhsic_cka nhsic_cca nhsic_nbs _hsic_uncentered manhattan_distance pdist_squareform sqeuclidean_distance euclidean_distance distmat gram ard_kernel linear_kernel rq_kernel covariance_matrix rbf_kernel _estimate_sigma_kth estimate_sigma_median_kth gamma_to_sigma sigma_to_gamma scotts_factor estimate_sigma_median kth_percent_distance silvermans_factor estimate_sigma_mean_kth energy_distance rv_coeff_features rv_coeff distance_corr centering ensure_min_eps main posterior predictive_variance gp_prior predictive_mean marginal_likelihood zero_mean saturate cholesky_factorization get_factorizations rbf_full_derivative_loops ard_derivative rbf_derivative_numba ard_derivative_full_numba rbf_full_derivative rbf_derivative_full rbf_derivative rbf_derivative_slow ard_derivative_numba rbf_full_derivative_memory rbf_derivative_full_numba ard_derivative_full main main main test_distmat test_pdist_squareform test_rbf_kernel_gram_2d test_centering test_rbf_kernel_gram_1d test_rbf_kernel_cov_2d test_rbf_kernel_cov_1d seed power linspace sin check_random_state randn sort f linspace kernel gaussian_kernel ndtr sum covariance_matrix centering sum covariance_matrix centering norm sum covariance_matrix dot sqrt trace real eigvals centering sum clip covariance_matrix norm sum gram inv dot eye centering sum dot gram T mean mean gram 
centering mean gram vmap median pdist_squareform mean _estimate_sigma_kth median _estimate_sigma_kth kth_percent_distance pdist_squareform shape shape int distmat exp sqeuclidean_distance ones eye einsum subplots opt_init get_data jit show list squeeze rmsprop scatter savefig legend posterior partial plot grad tight_layout sqrt get_params saturate fill_between T cov_func dot cho_solve diag get_factorizations dot cov_func get_factorizations T cov_func dot cho_solve diag get_factorizations mu_f cov_f eye logpdf cho_factor cho_solve reshape cov_func mu_func cholesky_factorization eye T reshape dot shape zeros range diag T reshape dot shape zeros range shape range zeros shape range zeros T ard_kernel ones squeeze assert_equal dot shape _check_length_scale zeros range array diag T dot shape tile zeros range diag T shape tile zeros range pairwise_kernels T shape tile zeros range shape range zeros pairwise_kernels T assert_equal dot shape zeros range pairwise_kernels zeros pairwise_kernels arange shape tile pairwise_kernels seed randn print nhsic_ka hsic nhsic_cka estimate_sigma_median nhsic_cca rv_coeff_features rv_coeff estimate_sigma_median_kth scotts_factor silvermans_factor euclidean_distances rand assert_array_almost_equal distmat array randn pdist_squareform pdist assert_array_almost_equal array squareform gram RandomState randn rand assert_array_almost_equal rbf_sklearn array gram RandomState randn covariance_matrix rand assert_array_almost_equal rbf_sklearn array covariance_matrix rand assert_array_almost_equal rbf_sklearn array gram rand assert_array_almost_equal rbf_sklearn array gram rand array assert_array_almost_equal centering rbf_sklearn fit_transform
# Kernel Methods with Jax * Authors: J. Emmanuel Johnson, ISP-Lab * Repo: [github.com/IPL-UV/jaxkern](https://github.com/IPL-UV/jaxkern) * Website: [jaxkern.readthedocs.io](https://jaxkern.readthedocs.io/en/latest/) ## Description This repo contains some code that the ISP labe use quite frequently. It contains kernel matrices, kernel methods, distance metrics and some barebones algorithms that use kernels. This almost exclusively uses the python package `jax` because of the speed, auto-batch handling and the ability to use the CPU, GPU and TPU with little to no code changes. --- ## Installation 1. Make sure [miniconda] is installed. 2. Clone the git repository.
497
IPL-UV/sakame
['density estimation', 'gaussian processes']
['Kernel Methods and their derivatives: Concept and perspectives for the Earth system sciences']
src/features/classification.py src/experiments/sampling.py test_environment.py docs/conf.py src/data/classification.py src/models/classification.py src/data/regression.py src/features/stats.py src/visualization/classification.py src/data/make_dataset.py src/models/hsic.py notebooks/classification/toy_2d/1d_example.py src/models/regression.py src/features/dependence.py src/models/esdc_sampling.py src/features/regression.py src/experiments/esdc/regression.py src/models/dependence.py setup.py src/visualization/dependence.py src/models/gp_demo.py src/visualization/utils.py main DemoParams load_esdc DefaultParams get_xy_indices ToyData get_class_data make_circle_ellipse window_xy get_3dgrid ESDCData ToyData2D load_esdc main SamplingExp main Parameters subset_time get_common_elements DefaultParams extract_region add_drought_mask extract_df standardize_temporal xarray2df GetXYData get_corr_temporal standardize_spatial DependenceExplore get_corr_spatial get_density_cubes calculate_regression_stats calculate_classification_stats predict_batches svm_naive HSICDependence main SamplingModel main DemoGP1D DemoGP DemoKRR main HSIC predict_batches gpr_naive plot_predictions plot_sensitivity plot_toy_data plot_sens_angle plot_sens_mod plot_raw_variables plot_sens_scatters create_grid print major open_zarr make_circle_ellipse make_moons make_circles dict train_test_split fit_transform meshgrid T unique griddata ones int floor shape get_xy_indices range values T check_random_state print hstack cos shape sin append load sel list run_experiment insert add_argument ArgumentParser parse_args SamplingExp seed dump get_density_cubes spatial variable train_size index GPRDerivative load_esdc predict_batches to_netcdf train_test_split calculate_regression_stats values gpr_naive sel load GeoDataFrame LST reset_index rasterize dropna reset_index index intersection mean std data list mean sel append std drop_duplicates to_dataframe DensityCubes get_minicubes DataFrame concat check_array 
recall_score mean precision_score f1_score to_xarray abs accuracy_score DataFrame concat check_array mean_absolute_error mean sqrt r2_score mean_squared_error to_xarray abs SVC GridSearchCV fit kernel_derivative der_model concatenate predict mask_derivative objective_derivative append gen_batches decision_derivative RBF GaussianProcessRegressor WhiteKernel C fit set_aspect subplots arange ListedColormap values set_yticks set_xlim min set_visible scatter set_xticks meshgrid tick_params max set_ylim set_aspect subplots arange ListedColormap from_list reshape set_yticks shape decision_function scatter contourf set_xticks set_visible meshgrid tick_params values set_aspect subplots ListedColormap set_yticks set_visible scatter set_xticks from_list tick_params values show set_title coastlines text add_subplot pcolormesh gridlines mean savefig figure show subplots set_xlabel colorbar scatter set_ylabel savefig show set_title coastlines text add_subplot pcolormesh gridlines mean savefig figure show set_title coastlines text add_subplot pcolormesh sign mean sqrt gridlines savefig figure abs max reshape squeeze min linspace meshgrid zeros max
2019_sakame ============================== Kernel Derivatives for Kernel Methods. <details> <summary>Abstract</summary> Kernel methods are powerful machine learning techniques which implement generic non-linear functions to solve complex tasks in a simple way. They Have a solid mathematical background and exhibit excellent performance in practice. However, kernel machines are still considered black-box models as the feature mapping is not directly accessible and difficult to interpret.The aim of this work is to show that it is indeed possible to interpret the functions learned by various kernel methods is intuitive despite their complexity. Specifically, we show that derivatives of these functions have a simple mathematical formulation, are easy to compute, and can be applied to many different problems. We note that model function derivatives in kernel machines is proportional to the kernel function derivative. We provide the explicit analytic form of the first and second derivatives of the most common kernel functions with regard to the inputs as well as generic formulas to compute higher order derivatives. We use them to analyze the most used supervised and unsupervised kernel learning methods: Gaussian Processes for regression, Support Vector Machines for classification, Kernel Entropy Component Analysis for density estimation, and the Hilbert-Schmidt Independence Criterion for estimating the dependency between random variables. For all cases we expressed the derivative of the learned function as a linear combination of the kernel function derivative. Moreover we provide intuitive explanations through illustrative toy examples and show how to improve the interpretation of real applications in the context of spatiotemporal Earth system data cubes. This work reflects on the observation that function derivatives may play a crucial role in kernel methods analysis and understanding. 
</details> -------- <p><small>Project based on the <a target="_blank" href="https://drivendata.github.io/cookiecutter-data-science/">cookiecutter data science project template</a>. #cookiecutterdatascience</small></p>
498
IRVLab/unrolling
['pose prediction']
['IMU-Assisted Learning of Single-View Rolling Shutter Correction']
dataset/pwcnet.py dataset/depthEstimator.py dataset/gsImgRectifier.py network/DataLoader.py network/helpers.py network/train_rsdepthnet.py dataset/tum_process.py network/RsDepthNet.py network/train_rsposenet.py dataset/poseHandler.py dataset/stereoRectifier.py dataset/gtFlowGenerator.py network/RsPoseNet.py network/test.py getRay getFlowBD calculateCurDepth getDepth depthFromTriangulation rectify_gs_imgs getRS2GSFlow getRS2GSFlows projectPoint Imu getPoses Pose cost_volume _interpolate_bilinear ModelPWCNet dense_image_warp stereoRectify stereoRemap TumDataSet DataGenerator flowLossByPose baseNet getFlow iconv_pr RsDepthNet poseConv RsPoseNet rectify_imgs get_flows_pred predict_from_img_pairs range nan full_like ones getRay T fabs hstack matmul getFlowBD depthFromTriangulation nan full range load join str format calculateCurDepth tqdm save imread range makedirs imwrite indices interpolate INTER_LINEAR clip str len array imread range format stack nan listdir load reshape remap tqdm full makedirs ones expand_dims matmul inv matmul empty_like isnan nan projectPoint full range load join str format getRS2GSFlow tqdm save range makedirs Pose save transpose identity matmul as_quat getImuAt savetxt append range as_rotvec splitlines load int join getPoseAt inv tqdm Imu zeros array len leaky_relu slice concat shape pad reduce_mean unstack append range makedirs inv identity indices remap save INTER_NEAREST matrix array CV_32F initUndistortRectifyMap load format imwrite remap tqdm imread range INTER_LINEAR norm T cos identity indices matmul cross reduce_sum stack sin expand_dims split ones_like zeros_like is_nan where getFlow split get shape get_layer min numpy append empty array range predict format imwrite reshape indices remap stack INTER_LINEAR nan interpolate full imread range array clip makedirs
# IMU-Assisted Learning of Single-View Rolling Shutter Correction Follow the README in *dataset/* for dataset information and in *network/* for network training and testing. ## Samples <img src="images/res_img.png" height="400px"/> ## DSO on resulting images <img src="images/dso.jpg" height="1000px"/>
499